gh-119118: Fix performance regression in tokenize module (#119615)

* gh-119118: Fix performance regression in tokenize module

- Cache the line object to avoid creating a new Unicode object
  for every token on the same line (see the sketch after this list).
- Speed up byte offset to column offset conversion by using the
  smallest buffer possible to measure the difference.
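The line caching itself lives in the tokenize extension and is not part of the hunk shown below. As a minimal sketch of the idea only, assuming a hypothetical CachedLineState struct and get_token_line helper (illustrative names, not the actual internals): tokens that share a physical line reuse one cached Unicode object instead of decoding the line once per token.

#include <Python.h>

/* Illustrative state: one cached Unicode object for the most recent line. */
typedef struct {
    PyObject *last_line;     /* line object reused by tokens on the same line */
    Py_ssize_t last_lineno;  /* line number the cached object belongs to */
} CachedLineState;

/* Return the line object for a token, decoding the raw bytes only when the
   token starts a new line; otherwise hand back the cached object. */
static PyObject *
get_token_line(CachedLineState *state, const char *line_start,
               Py_ssize_t size, Py_ssize_t lineno)
{
    if (state->last_line != NULL && lineno == state->last_lineno) {
        /* Same line as the previous token: no new Unicode object is created. */
        return Py_NewRef(state->last_line);
    }
    PyObject *line = PyUnicode_DecodeUTF8(line_start, size, "replace");
    if (line == NULL) {
        return NULL;
    }
    /* Replace the cached line and remember which line number it covers. */
    Py_XSETREF(state->last_line, Py_NewRef(line));
    state->last_lineno = lineno;
    return line;
}

Since emitting a token is far more common than starting a new line, this turns most per-token decodes into a single incref.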

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
Lysandros Nikolaou authored 2024-05-28 21:17:49 +02:00, committed by GitHub
parent ae9140f32a
commit d87b015106
4 changed files with 68 additions and 4 deletions

@@ -18,6 +18,31 @@ _PyPegen_interactive_exit(Parser *p)
    return NULL;
}

Py_ssize_t
_PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset)
{
    const char *data = PyUnicode_AsUTF8(line);

    Py_ssize_t len = 0;
    while (col_offset < end_col_offset) {
        Py_UCS4 ch = data[col_offset];
        if (ch < 0x80) {
            col_offset += 1;
        } else if ((ch & 0xe0) == 0xc0) {
            col_offset += 2;
        } else if ((ch & 0xf0) == 0xe0) {
            col_offset += 3;
        } else if ((ch & 0xf8) == 0xf0) {
            col_offset += 4;
        } else {
            PyErr_SetString(PyExc_ValueError, "Invalid UTF-8 sequence");
            return -1;
        }
        len++;
    }
    return len;
}
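As an aside (not part of the diff): the helper above recovers a column offset by classifying UTF-8 lead bytes only between the two byte offsets, so only the smallest possible slice of the line is walked. A hedged, standalone illustration of the same walk follows; chars_between and the sample string are illustrative, not CPython code.

#include <stdio.h>
#include <stddef.h>

/* Count how many characters sit between two byte offsets of a UTF-8 string
   by stepping over lead bytes, exactly as the diff above does. */
static ptrdiff_t
chars_between(const char *utf8, size_t start, size_t end)
{
    ptrdiff_t count = 0;
    size_t i = start;
    while (i < end) {
        unsigned char ch = (unsigned char)utf8[i];
        if (ch < 0x80)                i += 1;  /* ASCII */
        else if ((ch & 0xe0) == 0xc0) i += 2;  /* 2-byte sequence */
        else if ((ch & 0xf0) == 0xe0) i += 3;  /* 3-byte sequence */
        else if ((ch & 0xf8) == 0xf0) i += 4;  /* 4-byte sequence */
        else return -1;                        /* invalid lead byte */
        count++;
    }
    return count;
}

int main(void)
{
    /* "héllo" is 6 bytes but 5 characters, so byte offsets 0..6 span 5 columns. */
    const char *line = "h\xc3\xa9llo";
    printf("%td\n", chars_between(line, 0, 6));  /* prints 5 */
    return 0;
}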
Py_ssize_t
_PyPegen_byte_offset_to_character_offset_raw(const char* str, Py_ssize_t col_offset)
{