#3773: Check for errors around the use of PyTokenizer_FindEncoding().

reviewed by Brett Cannon.
This commit is contained in:
Amaury Forgeot d'Arc 2008-09-04 22:34:09 +00:00
parent 1d6a16bf38
commit 1b933ed50a
3 changed files with 9 additions and 1 deletions

View file

@@ -12,6 +12,9 @@ What's New in Python 3.0 release candidate 1
Core and Builtins
-----------------
- Issue #3774: Added a few more checks in PyTokenizer_FindEncoding to handle
error conditions.
- Issue #3594: Fix Parser/tokenizer.c:fp_setreadl() to open the file being
tokenized by either a file path or file pointer for the benefit of
PyTokenizer_FindEncoding().

View file

@@ -1610,7 +1610,10 @@ PyTokenizer_FindEncoding(int fd)
fclose(fp);
if (tok->encoding) {
encoding = (char *)PyMem_MALLOC(strlen(tok->encoding) + 1);
strcpy(encoding, tok->encoding);
if (encoding)
strcpy(encoding, tok->encoding);
else
PyErr_NoMemory();
}
PyTokenizer_Free(tok);
return encoding;

View file

@@ -2830,6 +2830,8 @@ call_find_module(char *name, PyObject *path)
memory. */
found_encoding = PyTokenizer_FindEncoding(fd);
lseek(fd, 0, 0); /* Reset position */
if (found_encoding == NULL && PyErr_Occurred())
return NULL;
encoding = (found_encoding != NULL) ? found_encoding :
(char*)PyUnicode_GetDefaultEncoding();
}