gh-102856: Python tokenizer implementation for PEP 701 (#104323)
This commit replaces the Python implementation of the tokenize module with an implementation that reuses the real C tokenizer via a private extension module. The tokenize module now implements a compatibility layer that transforms tokens from the C tokenizer into the tokens the Python tokenizer used to produce, for backward compatibility. Because the C tokenizer does not emit some tokens that the Python tokenizer provided (such as comments and non-semantic newlines), a new special mode has been added to the C tokenizer; it is currently used only via the extension module that exposes it to the Python layer. This new mode forces the C tokenizer to emit these extra tokens and attach the metadata needed to match the old Python implementation.

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
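As a rough illustration of the compatibility layer described above, the Python-side wrapper could look like the sketch below. This is a sketch, not the commit's exact code: it assumes the private extension module is importable as _tokenize and exposes a TokenizerIter type (the clinic-generated constructor for that type is what the diff below regenerates), and the helper name _tokens_from_c_tokenizer is hypothetical.

    from tokenize import TokenInfo

    def _tokens_from_c_tokenizer(source, extra_tokens=False):
        # Hypothetical helper; module/type names (_tokenize, TokenizerIter)
        # are assumptions. The private extension module wraps the real C
        # tokenizer; extra_tokens=True asks it to also emit the tokens the
        # pure-Python tokenizer used to produce (comments and non-semantic
        # newlines) plus the metadata needed to match them.
        import _tokenize as c_tokenizer
        for info in c_tokenizer.TokenizerIter(source, extra_tokens=extra_tokens):
            # Each yielded item is assumed to be a tuple matching the
            # TokenInfo fields (type, string, start, end, line).
            yield TokenInfo._make(info)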
This commit is contained in:
parent 3ed57e4995
commit 6715f91edc

22 changed files with 424 additions and 374 deletions

Python/clinic/Python-tokenize.c.h (generated, 22 lines changed)
@@ -9,7 +9,8 @@ preserve
 static PyObject *
-tokenizeriter_new_impl(PyTypeObject *type, const char *source);
+tokenizeriter_new_impl(PyTypeObject *type, const char *source,
+                       int extra_tokens);
 
 static PyObject *
 tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
@@ -17,14 +18,14 @@ tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
     PyObject *return_value = NULL;
 #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
 
-    #define NUM_KEYWORDS 1
+    #define NUM_KEYWORDS 2
     static struct {
         PyGC_Head _this_is_not_used;
         PyObject_VAR_HEAD
         PyObject *ob_item[NUM_KEYWORDS];
     } _kwtuple = {
         .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
-        .ob_item = { &_Py_ID(source), },
+        .ob_item = { &_Py_ID(source), &_Py_ID(extra_tokens), },
     };
     #undef NUM_KEYWORDS
     #define KWTUPLE (&_kwtuple.ob_base.ob_base)
@@ -33,19 +34,20 @@ tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
 #  define KWTUPLE NULL
 #endif  // !Py_BUILD_CORE
 
-    static const char * const _keywords[] = {"source", NULL};
+    static const char * const _keywords[] = {"source", "extra_tokens", NULL};
     static _PyArg_Parser _parser = {
         .keywords = _keywords,
         .fname = "tokenizeriter",
         .kwtuple = KWTUPLE,
     };
     #undef KWTUPLE
-    PyObject *argsbuf[1];
+    PyObject *argsbuf[2];
    PyObject * const *fastargs;
     Py_ssize_t nargs = PyTuple_GET_SIZE(args);
     const char *source;
+    int extra_tokens;
 
-    fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, 1, 1, 0, argsbuf);
+    fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, 1, 1, 1, argsbuf);
     if (!fastargs) {
         goto exit;
     }
@@ -62,9 +64,13 @@ tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
         PyErr_SetString(PyExc_ValueError, "embedded null character");
         goto exit;
     }
-    return_value = tokenizeriter_new_impl(type, source);
+    extra_tokens = PyObject_IsTrue(fastargs[1]);
+    if (extra_tokens < 0) {
+        goto exit;
+    }
+    return_value = tokenizeriter_new_impl(type, source, extra_tokens);
 
 exit:
     return return_value;
 }
-/*[clinic end generated code: output=8c2c09f651961986 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=940b564c67f6e0e2 input=a9049054013a1b77]*/
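Two details of the regenerated argument parsing are worth noting. First, extra_tokens is converted with PyObject_IsTrue, so any truthy object is accepted, not just a bool. Second, the minimum-argument counts passed to _PyArg_UnpackKeywords change from 1, 1, 0 to 1, 1, 1, where the last count covers required keyword-only arguments; extra_tokens is therefore required and keyword-only. A hypothetical interactive check, under the same _tokenize/TokenizerIter naming assumption as the sketch above:

    from _tokenize import TokenizerIter  # private module; name is an assumption

    # extra_tokens is required and keyword-only; omitting it raises TypeError.
    for tok in TokenizerIter("x = 1  # comment\n", extra_tokens=True):
        print(tok)  # the extra-tokens mode includes COMMENT/NL tokens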