Merged revisions 55007-55179 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/branches/p3yk

........
  r55077 | guido.van.rossum | 2007-05-02 11:54:37 -0700 (Wed, 02 May 2007) | 2 lines

  Use the new print syntax, at least.
........
  r55142 | fred.drake | 2007-05-04 21:27:30 -0700 (Fri, 04 May 2007) | 1 line

  remove old cruftiness
........
  r55143 | fred.drake | 2007-05-04 21:52:16 -0700 (Fri, 04 May 2007) | 1 line

  make this work with the new Python
........
  r55162 | neal.norwitz | 2007-05-06 22:29:18 -0700 (Sun, 06 May 2007) | 1 line

  Get asdl code gen working with Python 2.3. Should continue to work with 3.0
........
  r55164 | neal.norwitz | 2007-05-07 00:00:38 -0700 (Mon, 07 May 2007) | 1 line

  Verify checkins to p3yk (sic) branch go to 3000 list.
........
  r55166 | neal.norwitz | 2007-05-07 00:12:35 -0700 (Mon, 07 May 2007) | 1 line

  Fix this test so it runs again by importing warnings_test properly.
........
  r55167 | neal.norwitz | 2007-05-07 01:03:22 -0700 (Mon, 07 May 2007) | 8 lines

  So long xrange. range() now supports values that are outside
  -sys.maxint to sys.maxint. floats raise a TypeError.

  This has been sitting for a long time. It probably has some problems and
  needs cleanup.

  Objects/rangeobject.c now uses 4-space indents since it is almost
  completely new.
........
  r55171 | guido.van.rossum | 2007-05-07 10:21:26 -0700 (Mon, 07 May 2007) | 4 lines

  Fix two tests that were previously depending on significant spaces
  at the end of a line (and before that on Python 2.x print behavior
  that has no exact equivalent in 3.0).
........
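As a side note on r55167 above, here is a minimal sketch (not part of this commit) of the range() behavior it describes, written with the print-function syntax from r55077; sys.maxint itself was dropped in 3.0, so the sketch uses sys.maxsize as the modern stand-in:

```python
import sys

# range() is no longer limited to the machine-word window that
# xrange() enforced; arbitrarily large endpoints are accepted.
big = sys.maxsize + 10
print(list(range(big, big + 3)))   # [big, big+1, big+2]

# Floats now raise TypeError (2.x truncated them with a warning).
try:
    range(1.5)
except TypeError as exc:
    print('TypeError:', exc)
```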
This commit is contained in:
parent 598d98a7e8
commit 805365ee39
150 changed files with 1412 additions and 1320 deletions
@@ -23,7 +23,18 @@ __version__ = 'SPARK-0.7 (pre-alpha-5)'
 
 import re
 import sys
 import string
 
+# Compatability with older pythons.
+def output(string='', end='\n'):
+    sys.stdout.write(string + end)
+
+try:
+    sorted
+except NameError:
+    def sorted(seq):
+        seq2 = seq[:]
+        seq2.sort()
+        return seq2
+
 def _namelist(instance):
     namelist, namedict, classlist = [], {}, [instance.__class__]
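For orientation, a small usage sketch (not from the diff) of the shims added above: output() maps the 2.x print statement onto something that also runs on 3.0, and the sorted() fallback supplies Python 2.3 with the builtin that 2.4 introduced:

```python
import sys

def output(string='', end='\n'):
    sys.stdout.write(string + end)

output('spam')           # 2.x: print 'spam'     3.0: print('spam')
output('spam', end='')   # 2.x: print 'spam',    3.0: print('spam', end='')
output()                 # 2.x: print            3.0: print()

# On Python 2.3, which predates the sorted() builtin, the
# except NameError branch supplies a copy-then-sort equivalent:
print(sorted([3, 1, 2]))  # [1, 2, 3]
```

Note that output() takes a single positional string, not the variadic argument list that print supports.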
@@ -57,10 +68,10 @@ class GenericScanner:
             rv.append(self.makeRE(name))
 
         rv.append(self.makeRE('t_default'))
-        return string.join(rv, '|')
+        return '|'.join(rv)
 
     def error(self, s, pos):
-        print "Lexical error at position %s" % pos
+        output("Lexical error at position %s" % pos)
         raise SystemExit
 
     def tokenize(self, s):
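For context, a hedged sketch (not part of the commit) of the string-module migration this hunk applies, which the addRule hunk below repeats for split(): the join() and split() functions of the string module were removed in 3.0 in favor of str methods:

```python
rv = ['t_a', 't_b', 't_default']
# 2.x module form: string.join(rv, '|')
# Method form, valid on both 2.x and 3.x:
assert '|'.join(rv) == 't_a|t_b|t_default'

doc = 'expr ::= expr + term'
# 2.x module form: string.split(doc)
assert doc.split() == ['expr', '::=', 'expr', '+', 'term']
```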
@@ -79,7 +90,7 @@ class GenericScanner:
 
     def t_default(self, s):
         r'( . | \n )+'
-        print "Specification error: unmatched input"
+        output("Specification error: unmatched input")
         raise SystemExit
 
 #
@@ -172,7 +183,7 @@ class GenericParser:
 
     def addRule(self, doc, func, _preprocess=1):
         fn = func
-        rules = string.split(doc)
+        rules = doc.split()
 
         index = []
         for i in range(len(rules)):
@@ -296,7 +307,7 @@ class GenericParser:
         return None
 
     def error(self, token):
-        print "Syntax error at or near `%s' token" % token
+        output("Syntax error at or near `%s' token" % token)
         raise SystemExit
 
     def parse(self, tokens):
@@ -313,7 +324,7 @@ class GenericParser:
         self.states = { 0: self.makeState0() }
         self.makeState(0, self._BOF)
 
-        for i in xrange(len(tokens)):
+        for i in range(len(tokens)):
            sets.append([])
 
            if sets[i] == []:
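A brief sketch (illustrative only) of why the one-word change above is the whole fix: 3.0 removes xrange() and makes range() itself lazy, so iteration code keeps its memory behavior under the shorter name:

```python
tokens = ['(', 'x', ')']

# 2.x used xrange() to avoid materializing a list; in 3.0 range()
# is already a lazy sequence object, so the spelling just shrinks.
for i in range(len(tokens)):
    pass

r = range(3)
print(type(r))    # <class 'range'>
print(list(r))    # [0, 1, 2]
```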
@@ -419,8 +430,7 @@ class GenericParser:
         # need to know the entire set of predicted nonterminals
         # to do this without accidentally duplicating states.
         #
-        core = predicted.keys()
-        core.sort()
+        core = sorted(predicted.keys())
         tcore = tuple(core)
         if tcore in self.cores:
             self.edges[(k, None)] = self.cores[tcore]
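For illustration (not from the commit): in 3.0 dict.keys() returns a view object with no sort() method, so the old copy-and-sort idiom has to collapse into a single sorted() call, which also reads better:

```python
predicted = {'expr': 1, 'term': 2, 'atom': 3}

# 2.x idiom, broken on 3.0 (views have no .sort()):
#     core = predicted.keys()
#     core.sort()
core = sorted(predicted.keys())   # works on 2.4+ and 3.x alike
tcore = tuple(core)
print(tcore)                      # ('atom', 'expr', 'term')
```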
@@ -605,7 +615,7 @@ class GenericParser:
             rule = self.ambiguity(self.newrules[nt])
         else:
             rule = self.newrules[nt][0]
-        #print rule
+        #output(rule)
 
         rhs = rule[1]
         attr = [None] * len(rhs)
@@ -624,7 +634,7 @@ class GenericParser:
         rule = choices[0]
         if len(choices) > 1:
             rule = self.ambiguity(choices)
-        #print rule
+        #output(rule)
 
         rhs = rule[1]
         attr = [None] * len(rhs)
@@ -826,15 +836,15 @@ class GenericASTMatcher(GenericParser):
 
 def _dump(tokens, sets, states):
     for i in range(len(sets)):
-        print 'set', i
+        output('set %d' % i)
         for item in sets[i]:
-            print '\t', item
+            output('\t', item)
             for (lhs, rhs), pos in states[item[0]].items:
-                print '\t\t', lhs, '::=',
-                print string.join(rhs[:pos]),
-                print '.',
-                print string.join(rhs[pos:])
+                output('\t\t', lhs, '::=', end='')
+                output(' '.join(rhs[:pos]), end='')
+                output('.', end='')
+                output(' '.join(rhs[pos:]))
         if i < len(tokens):
-            print
-            print 'token', str(tokens[i])
-            print
+            output()
+            output('token %s' % str(tokens[i]))
+            output()
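One last sketch (not part of the diff) of the trailing-comma translation in _dump() above: a 2.x print ending in a comma suppresses the newline, which output() reproduces via end=''. Note that output() accepts only a single string plus end, so the output('\t', item) call in the hunk passes item as the end argument rather than as a second printed value, a quirk worth keeping in mind when reading the hunk:

```python
import sys

def output(string='', end='\n'):
    sys.stdout.write(string + end)

lhs, rhs, pos = 'expr', ['expr', '+', 'term'], 1

# 2.x: print '\t\t', lhs, '::=',   (trailing comma = no newline)
output('\t\t%s ::= ' % lhs, end='')
output(' '.join(rhs[:pos]), end='')
output(' . ', end='')
output(' '.join(rhs[pos:]))       # prints: \t\texpr ::= expr . + term
```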