PyTokenizer_FindEncoding() always failed because it set up the tokenizer state
with only a file pointer before calling fp_setreadl(), which expected a file
path. Changed fp_setreadl() to accept either a file path or a file descriptor
(derived from the file pointer) to fix the issue.

Closes issue 3594.
Reviewed by Antoine Pitrou and Benjamin Peterson.
This commit is contained in:
Brett Cannon 2008-09-04 05:04:25 +00:00
parent 451e99b393
commit 8a9583ec5c
3 changed files with 26 additions and 2 deletions

View file

@@ -1,4 +1,5 @@
import imp
import sys
import unittest
from test import support
@@ -59,6 +60,21 @@ class ImportTests(unittest.TestCase):
'"""Tokenization help for Python programs.\n')
fp.close()
def test_issue3594(self):
    """Regression test for issue #3594.

    imp.find_module() must expose the declared source encoding on the
    file object it returns (fp_setreadl() used to fail when the tokenizer
    state held only a file pointer instead of a file path).
    """
    temp_mod_name = 'test_imp_helper'
    # Make sure the helper module written to the current directory is
    # importable by name.
    sys.path.insert(0, '.')
    try:
        with open(temp_mod_name + '.py', 'w') as fp:  # don't shadow the 'file' builtin
            fp.write("# coding: cp1252\nu = 'test.test_imp'\n")
        mod_file, filename, info = imp.find_module(temp_mod_name)
        try:
            # Assert while the file is guaranteed to be closed afterwards,
            # even if the assertion fails; assertEqual, not the deprecated
            # assertEquals alias.
            self.assertEqual(mod_file.encoding, 'cp1252')
        finally:
            mod_file.close()
    finally:
        del sys.path[0]
        # Best-effort cleanup of the source and any compiled artifacts.
        support.unlink(temp_mod_name + '.py')
        support.unlink(temp_mod_name + '.pyc')
        support.unlink(temp_mod_name + '.pyo')
def test_reload(self):
import marshal
imp.reload(marshal)