bpo-43651: PEP 597: Fix EncodingWarning in some tests (GH-25145)
* test_asyncio
* test_bz2
* test_math
* test_cmath
* test_cmd_line
* test_cmd_line_script
* test_compile
* test_contextlib
* test_profile
* ctypes/test/test_find
* test_multiprocessing
* test_configparser
* test_csv
* test_dbm_dumb
* test_decimal
* test_difflib
* os.fdopen() now calls io.text_encoding() so that the EncodingWarning is reported at the right place.
parent dc6d3e1e4c
commit 35715d1e72
19 changed files with 78 additions and 75 deletions
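Every hunk below follows the same PEP 597 pattern: a text-mode open()/TemporaryFile()/self.open() call that used to rely on the locale default now passes an explicit encoding, so the test no longer emits EncodingWarning when the interpreter runs with warn_default_encoding enabled. The sketch below is illustrative only and is not part of the diff; the read_text_file() helper is hypothetical, while io.text_encoding(), EncodingWarning, the -X warn_default_encoding option and the PYTHONWARNDEFAULTENCODING environment variable are the real interfaces added by PEP 597 in Python 3.10.

    # Run as:  python -X warn_default_encoding sketch.py
    import io
    import os
    import tempfile

    def read_text_file(path, encoding=None):
        # Mirrors what os.fdopen() now does: io.text_encoding() returns the
        # encoding to use ("locale" when none was given) and, under
        # -X warn_default_encoding, emits EncodingWarning that points at the
        # caller of this helper rather than at the open() call inside it.
        encoding = io.text_encoding(encoding)
        with open(path, encoding=encoding) as f:
            return f.read()

    with tempfile.NamedTemporaryFile("w", encoding="utf-8", delete=False) as tmp:
        tmp.write("hello\n")

    # Explicit encoding, as in every hunk below: never warns.
    print(read_text_file(tmp.name, encoding="utf-8"))

    # Omitting encoding= is what these tests used to do; with the -X option
    # above, this call is the line the EncodingWarning points at.
    print(read_text_file(tmp.name))

    os.unlink(tmp.name)
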
@@ -826,7 +826,7 @@ class _TestSubclassingProcess(BaseTestCase):
         proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
         proc.start()
         proc.join()
-        with open(testfn, 'r') as f:
+        with open(testfn, encoding="utf-8") as f:
             err = f.read()
             # The whole traceback was printed
             self.assertIn("ZeroDivisionError", err)
@@ -836,14 +836,14 @@ class _TestSubclassingProcess(BaseTestCase):
     @classmethod
     def _test_stderr_flush(cls, testfn):
         fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
-        sys.stderr = open(fd, 'w', closefd=False)
+        sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
         1/0 # MARKER
 
 
     @classmethod
     def _test_sys_exit(cls, reason, testfn):
         fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
-        sys.stderr = open(fd, 'w', closefd=False)
+        sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
         sys.exit(reason)
 
     def test_sys_exit(self):
@@ -864,7 +864,7 @@ class _TestSubclassingProcess(BaseTestCase):
             join_process(p)
             self.assertEqual(p.exitcode, 1)
 
-            with open(testfn, 'r') as f:
+            with open(testfn, encoding="utf-8") as f:
                 content = f.read()
             self.assertEqual(content.rstrip(), str(reason))
 
@@ -1118,7 +1118,7 @@ class _TestQueue(BaseTestCase):
     def test_no_import_lock_contention(self):
         with os_helper.temp_cwd():
             module_name = 'imported_by_an_imported_module'
-            with open(module_name + '.py', 'w') as f:
+            with open(module_name + '.py', 'w', encoding="utf-8") as f:
                 f.write("""if 1:
                     import multiprocessing
 
@@ -296,7 +296,7 @@ class TestBase_Mapping(unittest.TestCase):
             self.skipTest("Could not retrieve "+self.mapfileurl)
 
     def open_mapping_file(self):
-        return support.open_urlresource(self.mapfileurl)
+        return support.open_urlresource(self.mapfileurl, encoding="utf-8")
 
     def test_mapping_file(self):
         if self.mapfileurl.endswith('.xml'):
@@ -2096,7 +2096,7 @@ class BaseLoopSockSendfileTests(test_utils.TestCase):
 
     def test_nonbinary_file(self):
         sock = self.make_socket()
-        with open(os_helper.TESTFN, 'r') as f:
+        with open(os_helper.TESTFN, encoding="utf-8") as f:
             with self.assertRaisesRegex(ValueError, "binary mode"):
                 self.run_loop(self.loop.sock_sendfile(sock, f))
 
@@ -1349,7 +1349,7 @@ class EventLoopTestsMixin:
 
         rpipe, wpipe = os.pipe()
         rpipeobj = io.open(rpipe, 'rb', 1024)
-        wpipeobj = io.open(wpipe, 'w', 1024)
+        wpipeobj = io.open(wpipe, 'w', 1024, encoding="utf-8")
 
         async def connect():
             read_transport, _ = await loop.connect_read_pipe(
@@ -922,14 +922,14 @@ class OpenTest(BaseTest):
         for mode in ("wt", "xt"):
             if mode == "xt":
                 unlink(self.filename)
-            with self.open(self.filename, mode) as f:
+            with self.open(self.filename, mode, encoding="ascii") as f:
                 f.write(text)
             with open(self.filename, "rb") as f:
                 file_data = ext_decompress(f.read()).decode("ascii")
                 self.assertEqual(file_data, text_native_eol)
-            with self.open(self.filename, "rt") as f:
+            with self.open(self.filename, "rt", encoding="ascii") as f:
                 self.assertEqual(f.read(), text)
-            with self.open(self.filename, "at") as f:
+            with self.open(self.filename, "at", encoding="ascii") as f:
                 f.write(text)
             with open(self.filename, "rb") as f:
                 file_data = ext_decompress(f.read()).decode("ascii")
@@ -938,7 +938,8 @@ class OpenTest(BaseTest):
     def test_x_mode(self):
         for mode in ("x", "xb", "xt"):
             unlink(self.filename)
-            with self.open(self.filename, mode) as f:
+            encoding = "utf-8" if "t" in mode else None
+            with self.open(self.filename, mode, encoding=encoding) as f:
                 pass
             with self.assertRaises(FileExistsError):
                 with self.open(self.filename, mode) as f:
@@ -950,7 +951,7 @@ class OpenTest(BaseTest):
         with self.open(BytesIO(self.DATA), "rb") as f:
             self.assertEqual(f.read(), self.TEXT)
         text = self.TEXT.decode("ascii")
-        with self.open(BytesIO(self.DATA), "rt") as f:
+        with self.open(BytesIO(self.DATA), "rt", encoding="utf-8") as f:
             self.assertEqual(f.read(), text)
 
     def test_bad_params(self):
@@ -989,9 +990,9 @@ class OpenTest(BaseTest):
     def test_newline(self):
         # Test with explicit newline (universal newline mode disabled).
         text = self.TEXT.decode("ascii")
-        with self.open(self.filename, "wt", newline="\n") as f:
+        with self.open(self.filename, "wt", encoding="utf-8", newline="\n") as f:
             f.write(text)
-        with self.open(self.filename, "rt", newline="\r") as f:
+        with self.open(self.filename, "rt", encoding="utf-8", newline="\r") as f:
             self.assertEqual(f.readlines(), [text])
 
 
@@ -60,7 +60,7 @@ class CMathTests(unittest.TestCase):
     test_functions.append(lambda x : cmath.log(14.-27j, x))
 
     def setUp(self):
-        self.test_values = open(test_file)
+        self.test_values = open(test_file, encoding="utf-8")
 
     def tearDown(self):
         self.test_values.close()
@@ -512,7 +512,7 @@ class CmdLineTest(unittest.TestCase):
         # the dict whereas the module was destroyed
         filename = os_helper.TESTFN
         self.addCleanup(os_helper.unlink, filename)
-        with open(filename, "w") as script:
+        with open(filename, "w", encoding="utf-8") as script:
             print("import sys", file=script)
             print("del sys.modules['__main__']", file=script)
         assert_python_ok(filename)
@@ -549,9 +549,9 @@ class CmdLineTest(unittest.TestCase):
         with os_helper.temp_cwd() as tmpdir:
             fake = os.path.join(tmpdir, "uuid.py")
             main = os.path.join(tmpdir, "main.py")
-            with open(fake, "w") as f:
+            with open(fake, "w", encoding="utf-8") as f:
                 f.write("raise RuntimeError('isolated mode test')\n")
-            with open(main, "w") as f:
+            with open(main, "w", encoding="utf-8") as f:
                 f.write("import uuid\n")
                 f.write("print('ok')\n")
             self.assertRaises(subprocess.CalledProcessError,
@@ -400,7 +400,7 @@ class CmdLineTest(unittest.TestCase):
         # does not alter the value of sys.path[0]
         with os_helper.temp_dir() as script_dir:
             with os_helper.change_cwd(path=script_dir):
-                with open("-c", "w") as f:
+                with open("-c", "w", encoding="utf-8") as f:
                     f.write("data")
                 rc, out, err = assert_python_ok('-c',
                     'import sys; print("sys.path[0]==%r" % sys.path[0])',
@@ -416,7 +416,7 @@ class CmdLineTest(unittest.TestCase):
         with os_helper.temp_dir() as script_dir:
             script_name = _make_test_script(script_dir, 'other')
             with os_helper.change_cwd(path=script_dir):
-                with open("-m", "w") as f:
+                with open("-m", "w", encoding="utf-8") as f:
                     f.write("data")
                 rc, out, err = assert_python_ok('-m', 'other', *example_args,
                                                 __isolated=False)
@@ -429,7 +429,7 @@ class CmdLineTest(unittest.TestCase):
         # will be failed.
         with os_helper.temp_dir() as script_dir:
             script_name = os.path.join(script_dir, "issue20884.py")
-            with open(script_name, "w", newline='\n') as f:
+            with open(script_name, "w", encoding="latin1", newline='\n') as f:
                 f.write("#coding: iso-8859-1\n")
                 f.write('"""\n')
                 for _ in range(30):
@@ -427,7 +427,7 @@ if 1:
         fname = __file__
         if fname.lower().endswith('pyc'):
             fname = fname[:-1]
-        with open(fname, 'r') as f:
+        with open(fname, encoding='utf-8') as f:
             fcontents = f.read()
         sample_code = [
             ['<assign>', 'x = 5'],
@@ -714,31 +714,31 @@ boolean {0[0]} NO
         file1 = support.findfile("cfgparser.1")
         # check when we pass a mix of readable and non-readable files:
         cf = self.newconfig()
-        parsed_files = cf.read([file1, "nonexistent-file"])
+        parsed_files = cf.read([file1, "nonexistent-file"], encoding="utf-8")
         self.assertEqual(parsed_files, [file1])
         self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
         # check when we pass only a filename:
         cf = self.newconfig()
-        parsed_files = cf.read(file1)
+        parsed_files = cf.read(file1, encoding="utf-8")
         self.assertEqual(parsed_files, [file1])
         self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
         # check when we pass only a Path object:
         cf = self.newconfig()
-        parsed_files = cf.read(pathlib.Path(file1))
+        parsed_files = cf.read(pathlib.Path(file1), encoding="utf-8")
         self.assertEqual(parsed_files, [file1])
         self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
         # check when we passed both a filename and a Path object:
         cf = self.newconfig()
-        parsed_files = cf.read([pathlib.Path(file1), file1])
+        parsed_files = cf.read([pathlib.Path(file1), file1], encoding="utf-8")
         self.assertEqual(parsed_files, [file1, file1])
         self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
         # check when we pass only missing files:
         cf = self.newconfig()
-        parsed_files = cf.read(["nonexistent-file"])
+        parsed_files = cf.read(["nonexistent-file"], encoding="utf-8")
         self.assertEqual(parsed_files, [])
         # check when we pass no files:
         cf = self.newconfig()
-        parsed_files = cf.read([])
+        parsed_files = cf.read([], encoding="utf-8")
         self.assertEqual(parsed_files, [])
 
     def test_read_returns_file_list_with_bytestring_path(self):
@@ -747,15 +747,15 @@ boolean {0[0]} NO
         file1_bytestring = support.findfile("cfgparser.1").encode()
         # check when passing an existing bytestring path
         cf = self.newconfig()
-        parsed_files = cf.read(file1_bytestring)
+        parsed_files = cf.read(file1_bytestring, encoding="utf-8")
         self.assertEqual(parsed_files, [file1_bytestring])
         # check when passing an non-existing bytestring path
         cf = self.newconfig()
-        parsed_files = cf.read(b'nonexistent-file')
+        parsed_files = cf.read(b'nonexistent-file', encoding="utf-8")
         self.assertEqual(parsed_files, [])
         # check when passing both an existing and non-existing bytestring path
         cf = self.newconfig()
-        parsed_files = cf.read([file1_bytestring, b'nonexistent-file'])
+        parsed_files = cf.read([file1_bytestring, b'nonexistent-file'], encoding="utf-8")
         self.assertEqual(parsed_files, [file1_bytestring])
 
     # shared by subclasses
@@ -1064,7 +1064,7 @@ class MultilineValuesTestCase(BasicTestCase, unittest.TestCase):
             cf.add_section(s)
             for j in range(10):
                 cf.set(s, 'lovely_spam{}'.format(j), self.wonderful_spam)
-        with open(os_helper.TESTFN, 'w') as f:
+        with open(os_helper.TESTFN, 'w', encoding="utf-8") as f:
             cf.write(f)
 
     def tearDown(self):
@@ -1074,7 +1074,7 @@ class MultilineValuesTestCase(BasicTestCase, unittest.TestCase):
         # We're reading from file because this is where the code changed
         # during performance updates in Python 3.2
         cf_from_file = self.newconfig()
-        with open(os_helper.TESTFN) as f:
+        with open(os_helper.TESTFN, encoding="utf-8") as f:
             cf_from_file.read_file(f)
         self.assertEqual(cf_from_file.get('section8', 'lovely_spam4'),
                          self.wonderful_spam.replace('\t\n', '\n'))
@@ -1473,7 +1473,7 @@ class CopyTestCase(BasicTestCase, unittest.TestCase):
 class FakeFile:
     def __init__(self):
         file_path = support.findfile("cfgparser.1")
-        with open(file_path) as f:
+        with open(file_path, encoding="utf-8") as f:
             self.lines = f.readlines()
             self.lines.reverse()
 
@@ -1500,7 +1500,7 @@ class ReadFileTestCase(unittest.TestCase):
             pass # unfortunately we can't test bytes on this path
         for file_path in file_paths:
             parser = configparser.ConfigParser()
-            with open(file_path) as f:
+            with open(file_path, encoding="utf-8") as f:
                 parser.read_file(f)
             self.assertIn("Foo Bar", parser)
             self.assertIn("foo", parser["Foo Bar"])
@@ -316,13 +316,13 @@ class FileContextTestCase(unittest.TestCase):
         tfn = tempfile.mktemp()
         try:
             f = None
-            with open(tfn, "w") as f:
+            with open(tfn, "w", encoding="utf-8") as f:
                 self.assertFalse(f.closed)
                 f.write("Booh\n")
             self.assertTrue(f.closed)
             f = None
             with self.assertRaises(ZeroDivisionError):
-                with open(tfn, "r") as f:
+                with open(tfn, "r", encoding="utf-8") as f:
                     self.assertFalse(f.closed)
                     self.assertEqual(f.read(), "Booh\n")
                     1 / 0
@@ -133,7 +133,7 @@ class Test_Csv(unittest.TestCase):
 
 
     def _write_test(self, fields, expect, **kwargs):
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj, **kwargs)
             writer.writerow(fields)
             fileobj.seek(0)
@@ -141,7 +141,7 @@ class Test_Csv(unittest.TestCase):
                              expect + writer.dialect.lineterminator)
 
     def _write_error_test(self, exc, fields, **kwargs):
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj, **kwargs)
             with self.assertRaises(exc):
                 writer.writerow(fields)
@@ -232,7 +232,7 @@ class Test_Csv(unittest.TestCase):
         writer = csv.writer(BrokenFile())
         self.assertRaises(OSError, writer.writerows, [['a']])
 
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj)
             self.assertRaises(TypeError, writer.writerows, None)
             writer.writerows([['a', 'b'], ['c', 'd']])
@@ -240,26 +240,26 @@ class Test_Csv(unittest.TestCase):
             self.assertEqual(fileobj.read(), "a,b\r\nc,d\r\n")
 
     def test_writerows_with_none(self):
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj)
             writer.writerows([['a', None], [None, 'd']])
             fileobj.seek(0)
             self.assertEqual(fileobj.read(), "a,\r\n,d\r\n")
 
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj)
             writer.writerows([[None], ['a']])
             fileobj.seek(0)
             self.assertEqual(fileobj.read(), '""\r\na\r\n')
 
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj)
             writer.writerows([['a'], [None]])
             fileobj.seek(0)
             self.assertEqual(fileobj.read(), 'a\r\n""\r\n')
 
     def test_writerows_errors(self):
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj)
             self.assertRaises(TypeError, writer.writerows, None)
             self.assertRaises(OSError, writer.writerows, BadIterable())
@@ -270,7 +270,7 @@ class Test_Csv(unittest.TestCase):
     def test_writerows_legacy_strings(self):
         import _testcapi
         c = _testcapi.unicode_legacy_string('a')
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj)
             writer.writerows([[c]])
             fileobj.seek(0)
@@ -367,7 +367,7 @@ class Test_Csv(unittest.TestCase):
             self.assertEqual(r.line_num, 3)
 
     def test_roundtrip_quoteed_newlines(self):
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj)
             rows = [['a\nb','b'],['c','x\r\nd']]
             writer.writerows(rows)
@@ -376,7 +376,7 @@ class Test_Csv(unittest.TestCase):
                 self.assertEqual(row, rows[i])
 
     def test_roundtrip_escaped_unquoted_newlines(self):
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj,quoting=csv.QUOTE_NONE,escapechar="\\")
             rows = [['a\nb','b'],['c','x\r\nd']]
             writer.writerows(rows)
@@ -432,7 +432,7 @@ class TestDialectRegistry(unittest.TestCase):
             quoting = csv.QUOTE_NONE
             escapechar = "\\"
 
-        with TemporaryFile("w+") as fileobj:
+        with TemporaryFile("w+", encoding="utf-8") as fileobj:
             fileobj.write("abc def\nc1ccccc1 benzene\n")
             fileobj.seek(0)
             reader = csv.reader(fileobj, dialect=space())
@@ -493,7 +493,7 @@ class TestDialectRegistry(unittest.TestCase):
 
 class TestCsvBase(unittest.TestCase):
     def readerAssertEqual(self, input, expected_result):
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             fileobj.write(input)
             fileobj.seek(0)
             reader = csv.reader(fileobj, dialect = self.dialect)
@@ -501,7 +501,7 @@ class TestCsvBase(unittest.TestCase):
             self.assertEqual(fields, expected_result)
 
     def writerAssertEqual(self, input, expected_result):
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj, dialect = self.dialect)
             writer.writerows(input)
             fileobj.seek(0)
@@ -643,13 +643,13 @@ class TestDictFields(unittest.TestCase):
     ### "long" means the row is longer than the number of fieldnames
     ### "short" means there are fewer elements in the row than fieldnames
     def test_writeheader_return_value(self):
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.DictWriter(fileobj, fieldnames = ["f1", "f2", "f3"])
             writeheader_return_value = writer.writeheader()
             self.assertEqual(writeheader_return_value, 10)
 
     def test_write_simple_dict(self):
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.DictWriter(fileobj, fieldnames = ["f1", "f2", "f3"])
             writer.writeheader()
             fileobj.seek(0)
@@ -674,7 +674,7 @@ class TestDictFields(unittest.TestCase):
         self.assertRaises(TypeError, csv.DictWriter, fileobj)
 
     def test_write_fields_not_in_fieldnames(self):
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.DictWriter(fileobj, fieldnames = ["f1", "f2", "f3"])
             # Of special note is the non-string key (issue 19449)
             with self.assertRaises(ValueError) as cx:
@@ -704,7 +704,7 @@ class TestDictFields(unittest.TestCase):
         self.assertEqual(fileobj.getvalue(), "1,2\r\n")
 
     def test_read_dict_fields(self):
-        with TemporaryFile("w+") as fileobj:
+        with TemporaryFile("w+", encoding="utf-8") as fileobj:
            fileobj.write("1,2,abc\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj,
@@ -712,7 +712,7 @@ class TestDictFields(unittest.TestCase):
            self.assertEqual(next(reader), {"f1": '1', "f2": '2', "f3": 'abc'})
 
     def test_read_dict_no_fieldnames(self):
-        with TemporaryFile("w+") as fileobj:
+        with TemporaryFile("w+", encoding="utf-8") as fileobj:
            fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj)
@@ -722,7 +722,7 @@ class TestDictFields(unittest.TestCase):
     # Two test cases to make sure existing ways of implicitly setting
     # fieldnames continue to work.  Both arise from discussion in issue3436.
     def test_read_dict_fieldnames_from_file(self):
-        with TemporaryFile("w+") as fileobj:
+        with TemporaryFile("w+", encoding="utf-8") as fileobj:
            fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj,
@@ -732,7 +732,7 @@ class TestDictFields(unittest.TestCase):
 
     def test_read_dict_fieldnames_chain(self):
         import itertools
-        with TemporaryFile("w+") as fileobj:
+        with TemporaryFile("w+", encoding="utf-8") as fileobj:
            fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj)
@@ -742,7 +742,7 @@ class TestDictFields(unittest.TestCase):
                self.assertEqual(row, {"f1": '1', "f2": '2', "f3": 'abc'})
 
     def test_read_long(self):
-        with TemporaryFile("w+") as fileobj:
+        with TemporaryFile("w+", encoding="utf-8") as fileobj:
            fileobj.write("1,2,abc,4,5,6\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj,
@@ -751,7 +751,7 @@ class TestDictFields(unittest.TestCase):
                                            None: ["abc", "4", "5", "6"]})
 
     def test_read_long_with_rest(self):
-        with TemporaryFile("w+") as fileobj:
+        with TemporaryFile("w+", encoding="utf-8") as fileobj:
            fileobj.write("1,2,abc,4,5,6\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj,
@@ -760,7 +760,7 @@ class TestDictFields(unittest.TestCase):
                                            "_rest": ["abc", "4", "5", "6"]})
 
     def test_read_long_with_rest_no_fieldnames(self):
-        with TemporaryFile("w+") as fileobj:
+        with TemporaryFile("w+", encoding="utf-8") as fileobj:
            fileobj.write("f1,f2\r\n1,2,abc,4,5,6\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj, restkey="_rest")
@@ -769,7 +769,7 @@ class TestDictFields(unittest.TestCase):
                                            "_rest": ["abc", "4", "5", "6"]})
 
     def test_read_short(self):
-        with TemporaryFile("w+") as fileobj:
+        with TemporaryFile("w+", encoding="utf-8") as fileobj:
            fileobj.write("1,2,abc,4,5,6\r\n1,2,abc\r\n")
            fileobj.seek(0)
            reader = csv.DictReader(fileobj,
@@ -818,7 +818,7 @@ class TestArrayWrites(unittest.TestCase):
         contents = [(20-i) for i in range(20)]
         a = array.array('i', contents)
 
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj, dialect="excel")
             writer.writerow(a)
             expected = ",".join([str(i) for i in a])+"\r\n"
@@ -829,7 +829,7 @@ class TestArrayWrites(unittest.TestCase):
         import array
         contents = [(20-i)*0.1 for i in range(20)]
         a = array.array('d', contents)
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj, dialect="excel")
             writer.writerow(a)
             expected = ",".join([str(i) for i in a])+"\r\n"
@@ -840,7 +840,7 @@ class TestArrayWrites(unittest.TestCase):
         import array
         contents = [(20-i)*0.1 for i in range(20)]
         a = array.array('f', contents)
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj, dialect="excel")
             writer.writerow(a)
             expected = ",".join([str(i) for i in a])+"\r\n"
@@ -851,7 +851,7 @@ class TestArrayWrites(unittest.TestCase):
         import array, string
         a = array.array('u', string.ascii_letters)
 
-        with TemporaryFile("w+", newline='') as fileobj:
+        with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
             writer = csv.writer(fileobj, dialect="excel")
             writer.writerow(a)
             expected = ",".join(a)+"\r\n"
@@ -232,7 +232,7 @@ class DumbDBMTestCase(unittest.TestCase):
             self.assertEqual(f.keys(), [])
 
     def test_eval(self):
-        with open(_fname + '.dir', 'w') as stream:
+        with open(_fname + '.dir', 'w', encoding="utf-8") as stream:
             stream.write("str(print('Hacked!')), 0\n")
         with support.captured_stdout() as stdout:
             with self.assertRaises(ValueError):
@@ -289,7 +289,7 @@ class IBMTestCases(unittest.TestCase):
         global skip_expected
         if skip_expected:
             raise unittest.SkipTest
-        with open(file) as f:
+        with open(file, encoding="utf-8") as f:
             for line in f:
                 line = line.replace('\r\n', '').replace('\n', '')
                 #print line
@@ -241,7 +241,7 @@ class TestSFpatches(unittest.TestCase):
         #with open('test_difflib_expect.html','w') as fp:
         #    fp.write(actual)
 
-        with open(findfile('test_difflib_expect.html')) as fp:
+        with open(findfile('test_difflib_expect.html'), encoding="utf-8") as fp:
             self.assertEqual(actual, fp.read())
 
     def test_recursion_limit(self):
@@ -130,7 +130,7 @@ def parse_mtestfile(fname):
       id fn arg -> expected [flag]*
 
     """
-    with open(fname) as fp:
+    with open(fname, encoding="utf-8") as fp:
         for line in fp:
             # strip comments, and skip blank lines
             if '--' in line:
@@ -153,7 +153,7 @@ def parse_testfile(fname):
     Empty lines or lines starting with -- are ignored
     yields id, fn, arg_real, arg_imag, exp_real, exp_imag
     """
-    with open(fname) as fp:
+    with open(fname, encoding="utf-8") as fp:
         for line in fp:
             # skip comment lines and blank lines
             if line.startswith('--') or not line.strip():
@@ -115,7 +115,7 @@ class ProfileTest(unittest.TestCase):
     def test_output_file_when_changing_directory(self):
         with temp_dir() as tmpdir, change_cwd(tmpdir):
             os.mkdir('dest')
-            with open('demo.py', 'w') as f:
+            with open('demo.py', 'w', encoding="utf-8") as f:
                 f.write('import os; os.chdir("dest")')
 
             assert_python_ok(