Mirror of https://github.com/python/cpython.git (synced 2025-07-19 09:15:34 +00:00)
gh-105069: Add a readline-like callable to the tokenizer to consume input iteratively (#105070)
Parent: 2ea34cfb3a
Commit: 9216e69a87
7 changed files with 276 additions and 98 deletions
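For context, the "readline-like callable" named in the commit title is the same line-at-a-time protocol the public tokenize module has always used: a callable that returns the next line of input on each call and an empty string (or empty bytes) at end of input. Below is a minimal sketch using only the public tokenize API, not the internal iterator changed in this commit:

import io
import tokenize

source = "x = 1\nprint(x)\n"

# A readline-like callable: each call returns the next line of the
# source, and an empty string signals end of input.
readline = io.StringIO(source).readline

# tokenize.generate_tokens() consumes the input iteratively through
# that callable instead of requiring the whole source up front.
for tok in tokenize.generate_tokens(readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))

With this commit, the C tokenizer itself can consume such a callable directly (via _PyTokenizer_FromReadline) instead of requiring the full source as a UTF-8 string, as the diff below shows.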
@@ -37,15 +37,17 @@ typedef struct
 @classmethod
 _tokenizer.tokenizeriter.__new__ as tokenizeriter_new

-    source: str
+    readline: object
+    /
     *
     extra_tokens: bool
+    encoding: str(c_default="NULL") = 'utf-8'
 [clinic start generated code]*/

 static PyObject *
-tokenizeriter_new_impl(PyTypeObject *type, const char *source,
-                       int extra_tokens)
-/*[clinic end generated code: output=f6f9d8b4beec8106 input=90dc5b6a5df180c2]*/
+tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline,
+                       int extra_tokens, const char *encoding)
+/*[clinic end generated code: output=7501a1211683ce16 input=f7dddf8a613ae8bd]*/
 {
     tokenizeriterobject *self = (tokenizeriterobject *)type->tp_alloc(type, 0);
     if (self == NULL) {

@@ -55,7 +57,7 @@ tokenizeriter_new_impl(PyTypeObject *type, const char *source,
     if (filename == NULL) {
         return NULL;
     }
-    self->tok = _PyTokenizer_FromUTF8(source, 1, 1);
+    self->tok = _PyTokenizer_FromReadline(readline, encoding, 1, 1);
     if (self->tok == NULL) {
         Py_DECREF(filename);
         return NULL;
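For readers less familiar with Argument Clinic notation, the input block in the hunk above maps roughly onto the following Python-level signature. This is only an illustrative sketch; the exact module and class names exposed to Python are not shown in this diff:

# Rough Python-level equivalent of the new clinic signature (sketch only):
# 'readline' is positional-only (the '/' marker), while 'extra_tokens' and
# 'encoding' are keyword-only (the '*' marker); 'encoding' defaults to 'utf-8'.
def tokenizeriter_new(readline, /, *, extra_tokens, encoding='utf-8'):
    ...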
Python/clinic/Python-tokenize.c.h (generated, 49 changed lines)
@@ -9,8 +9,8 @@ preserve


 static PyObject *
-tokenizeriter_new_impl(PyTypeObject *type, const char *source,
-                       int extra_tokens);
+tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline,
+                       int extra_tokens, const char *encoding);

 static PyObject *
 tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)

@@ -25,7 +25,7 @@ tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
         PyObject *ob_item[NUM_KEYWORDS];
     } _kwtuple = {
         .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
-        .ob_item = { &_Py_ID(source), &_Py_ID(extra_tokens), },
+        .ob_item = { &_Py_ID(extra_tokens), &_Py_ID(encoding), },
     };
     #undef NUM_KEYWORDS
     #define KWTUPLE (&_kwtuple.ob_base.ob_base)

@@ -34,43 +34,50 @@ tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
 # define KWTUPLE NULL
 #endif // !Py_BUILD_CORE

-    static const char * const _keywords[] = {"source", "extra_tokens", NULL};
+    static const char * const _keywords[] = {"", "extra_tokens", "encoding", NULL};
     static _PyArg_Parser _parser = {
         .keywords = _keywords,
         .fname = "tokenizeriter",
         .kwtuple = KWTUPLE,
     };
     #undef KWTUPLE
-    PyObject *argsbuf[2];
+    PyObject *argsbuf[3];
     PyObject * const *fastargs;
     Py_ssize_t nargs = PyTuple_GET_SIZE(args);
-    const char *source;
+    Py_ssize_t noptargs = nargs + (kwargs ? PyDict_GET_SIZE(kwargs) : 0) - 2;
+    PyObject *readline;
     int extra_tokens;
+    const char *encoding = NULL;

     fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser, 1, 1, 1, argsbuf);
     if (!fastargs) {
         goto exit;
     }
-    if (!PyUnicode_Check(fastargs[0])) {
-        _PyArg_BadArgument("tokenizeriter", "argument 'source'", "str", fastargs[0]);
-        goto exit;
-    }
-    Py_ssize_t source_length;
-    source = PyUnicode_AsUTF8AndSize(fastargs[0], &source_length);
-    if (source == NULL) {
-        goto exit;
-    }
-    if (strlen(source) != (size_t)source_length) {
-        PyErr_SetString(PyExc_ValueError, "embedded null character");
-        goto exit;
-    }
+    readline = fastargs[0];
     extra_tokens = PyObject_IsTrue(fastargs[1]);
     if (extra_tokens < 0) {
         goto exit;
     }
-    return_value = tokenizeriter_new_impl(type, source, extra_tokens);
+    if (!noptargs) {
+        goto skip_optional_kwonly;
+    }
+    if (!PyUnicode_Check(fastargs[2])) {
+        _PyArg_BadArgument("tokenizeriter", "argument 'encoding'", "str", fastargs[2]);
+        goto exit;
+    }
+    Py_ssize_t encoding_length;
+    encoding = PyUnicode_AsUTF8AndSize(fastargs[2], &encoding_length);
+    if (encoding == NULL) {
+        goto exit;
+    }
+    if (strlen(encoding) != (size_t)encoding_length) {
+        PyErr_SetString(PyExc_ValueError, "embedded null character");
+        goto exit;
+    }
+skip_optional_kwonly:
+    return_value = tokenizeriter_new_impl(type, readline, extra_tokens, encoding);

 exit:
     return return_value;
 }
-/*[clinic end generated code: output=940b564c67f6e0e2 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=48be65a2808bdfa6 input=a9049054013a1b77]*/
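The generated code above accepts the new keyword-only encoding argument as a UTF-8 C string and rejects embedded null characters, the same conversion previously applied to source. As a point of comparison for the readline-plus-encoding pairing, the long-standing public tokenize.tokenize() takes a bytes readline callable and determines the encoding itself; a small sketch using only that public API:

import io
import tokenize

data = b"# -*- coding: utf-8 -*-\nname = 'caf\xc3\xa9'\n"

# tokenize.tokenize() takes a *bytes* readline callable and detects the
# encoding from the coding cookie or BOM; the first token it yields is an
# ENCODING token.
readline = io.BytesIO(data).readline
for tok in tokenize.tokenize(readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))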