mirror of
https://github.com/python/cpython.git
synced 2025-08-04 00:48:58 +00:00
Merge part of the trunk changes into the p3yk branch. This merges from 43030
(branch-creation time) up to 43067. 43068 and 43069 contain a little swapping action between re.py and sre.py, and this mightily confuses svn merge, so later changes are going in separately. This merge should break no additional tests. The last-merged revision is going in a 'last_merge' property on '.' (the branch directory.) Arbitrarily chosen, really; if there's a BCP for this, I couldn't find it, but we can easily change it afterwards ;)
This commit is contained in:
parent
d858f70617
commit
a977329b6f
116 changed files with 3409 additions and 709 deletions
|
@ -41,6 +41,33 @@ class ReadTest(unittest.TestCase):
|
|||
self.assertEqual(r.bytebuffer, "")
|
||||
self.assertEqual(r.charbuffer, u"")
|
||||
|
||||
# do the check again, this time using an incremental decoder
|
||||
d = codecs.getincrementaldecoder(self.encoding)()
|
||||
result = u""
|
||||
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
|
||||
result += d.decode(c)
|
||||
self.assertEqual(result, partialresult)
|
||||
# check that there's nothing left in the buffers
|
||||
self.assertEqual(d.decode("", True), u"")
|
||||
self.assertEqual(d.buffer, "")
|
||||
|
||||
# Check whether the reset method works properly
|
||||
d.reset()
|
||||
result = u""
|
||||
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
|
||||
result += d.decode(c)
|
||||
self.assertEqual(result, partialresult)
|
||||
# check that there's nothing left in the buffers
|
||||
self.assertEqual(d.decode("", True), u"")
|
||||
self.assertEqual(d.buffer, "")
|
||||
|
||||
# check iterdecode()
|
||||
encoded = input.encode(self.encoding)
|
||||
self.assertEqual(
|
||||
input,
|
||||
u"".join(codecs.iterdecode(encoded, self.encoding))
|
||||
)
|
||||
|
||||
def test_readline(self):
|
||||
def getreader(input):
|
||||
stream = StringIO.StringIO(input.encode(self.encoding))
|
||||
|
@ -977,6 +1004,12 @@ class BasicUnicodeTest(unittest.TestCase):
|
|||
def test_basics(self):
|
||||
s = u"abc123" # all codecs should be able to encode these
|
||||
for encoding in all_unicode_encodings:
|
||||
name = codecs.lookup(encoding).name
|
||||
if encoding.endswith("_codec"):
|
||||
name += "_codec"
|
||||
elif encoding == "latin_1":
|
||||
name = "latin_1"
|
||||
self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
|
||||
(bytes, size) = codecs.getencoder(encoding)(s)
|
||||
if encoding != "unicode_internal":
|
||||
self.assertEqual(size, len(s), "%r != %r (encoding=%r)" % (size, len(s), encoding))
|
||||
|
@ -999,6 +1032,30 @@ class BasicUnicodeTest(unittest.TestCase):
|
|||
decodedresult += reader.read()
|
||||
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
|
||||
|
||||
# check incremental decoder/encoder and iterencode()/iterdecode()
|
||||
try:
|
||||
encoder = codecs.getincrementalencoder(encoding)()
|
||||
except LookupError: # no IncrementalEncoder
|
||||
pass
|
||||
else:
|
||||
# check incremental decoder/encoder
|
||||
encodedresult = ""
|
||||
for c in s:
|
||||
encodedresult += encoder.encode(c)
|
||||
decoder = codecs.getincrementaldecoder(encoding)()
|
||||
decodedresult = u""
|
||||
for c in encodedresult:
|
||||
decodedresult += decoder.decode(c)
|
||||
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
|
||||
|
||||
# check iterencode()/iterdecode()
|
||||
result = u"".join(codecs.iterdecode(codecs.iterencode(s, encoding), encoding))
|
||||
self.assertEqual(result, s, "%r != %r (encoding=%r)" % (result, s, encoding))
|
||||
|
||||
# check iterencode()/iterdecode() with empty string
|
||||
result = u"".join(codecs.iterdecode(codecs.iterencode(u"", encoding), encoding))
|
||||
self.assertEqual(result, u"")
|
||||
|
||||
def test_seek(self):
|
||||
# all codecs should be able to encode these
|
||||
s = u"%s\n%s\n" % (100*u"abc123", 100*u"def456")
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue