mirror of https://github.com/python/cpython.git, synced 2025-08-23 02:04:56 +00:00
[3.12] gh-121130: Fix f-string format specifiers with debug expressions (GH-121150) (#122063)
parent ca531e4326
commit a9daa4fd04
7 changed files with 8569 additions and 5795 deletions
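The construct this fix concerns is an f-string that combines a debug expression (a trailing "=" after the expression) with a format specifier. A minimal illustration, not taken from the commit (the actual failing cases are covered by the test files changed below):

    import math

    x = math.pi
    width = 10

    # Debug expression ("x=") followed by a format specifier (".3f").
    print(f"{x=:.3f}")         # x=3.142
    # The format specifier may itself contain a nested expression ({width}).
    print(f"{x=:{width}.3f}")  # x=     3.142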
@@ -302,9 +302,7 @@ Literals
                             Name(id='a', ctx=Load())],
                         keywords=[]),
                     conversion=-1,
-                    format_spec=JoinedStr(
-                        values=[
-                            Constant(value='.3')]))]))
+                    format_spec=Constant(value='.3'))]))
 
 
 .. class:: List(elts, ctx)
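The hunk above is the documentation's rendering of the AST for this f-string example. As an illustrative sketch, not part of the commit, the format_spec field of a FormattedValue node can be inspected with ast.dump; the exact node shape depends on the interpreter version, which is what the documentation change reflects:

    import ast

    # The same example string used in the documentation excerpt above.
    tree = ast.parse('f"sin({a}) is {sin(a):.3}"', mode="eval")

    # The FormattedValue node for sin(a) carries the ".3" format specifier
    # in its format_spec field.
    print(ast.dump(tree, indent=4))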
Lib/test/test_ast.py (4459 lines changed)
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,2 @@
+Fix f-strings with debug expressions in format specifiers. Patch by Pablo
+Galindo
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -14,17 +14,15 @@ extern "C" {
 #define MAXLEVEL 200        /* Max parentheses level */
 #define MAXFSTRINGLEVEL 150 /* Max f-string nesting level */
 
-enum decoding_state {
-    STATE_INIT,
-    STATE_SEEK_CODING,
-    STATE_NORMAL
-};
+enum decoding_state { STATE_INIT, STATE_SEEK_CODING, STATE_NORMAL };
 
 enum interactive_underflow_t {
-    /* Normal mode of operation: return a new token when asked in interactive mode */
+    /* Normal mode of operation: return a new token when asked in interactive mode
+     */
     IUNDERFLOW_NORMAL,
-    /* Forcefully return ENDMARKER when asked for a new token in interactive mode. This
-     * can be used to prevent the tokenizer to prompt the user for new tokens */
+    /* Forcefully return ENDMARKER when asked for a new token in interactive mode.
+     * This can be used to prevent the tokenizer to prompt the user for new tokens
+     */
     IUNDERFLOW_STOP,
 };
 
@@ -51,8 +49,8 @@ typedef struct _tokenizer_mode {
     char f_string_quote;
     int f_string_quote_size;
     int f_string_raw;
-    const char* f_string_start;
-    const char* f_string_multi_line_start;
+    const char *f_string_start;
+    const char *f_string_multi_line_start;
     int f_string_line_start;
 
     Py_ssize_t f_string_start_offset;
@@ -60,20 +58,24 @@ typedef struct _tokenizer_mode {
 
     Py_ssize_t last_expr_size;
     Py_ssize_t last_expr_end;
-    char* last_expr_buffer;
+    char *last_expr_buffer;
     int f_string_debug;
+    int in_format_spec;
 } tokenizer_mode;
 
 /* Tokenizer state */
 struct tok_state {
     /* Input state; buf <= cur <= inp <= end */
     /* NB an entire line is held in the buffer */
-    char *buf; /* Input buffer, or NULL; malloc'ed if fp != NULL or readline != NULL */
+    char *buf; /* Input buffer, or NULL; malloc'ed if fp != NULL or readline !=
+                  NULL */
     char *cur; /* Next character in buffer */
     char *inp; /* End of data in buffer */
     int fp_interactive; /* If the file descriptor is interactive */
-    char *interactive_src_start; /* The start of the source parsed so far in interactive mode */
-    char *interactive_src_end; /* The end of the source parsed so far in interactive mode */
+    char *interactive_src_start; /* The start of the source parsed so far in
+                                    interactive mode */
+    char *interactive_src_end; /* The end of the source parsed so far in
+                                  interactive mode */
     const char *end; /* End of input buffer if buf != NULL */
     const char *start; /* Start of current token if not NULL */
     int done; /* E_OK normally, E_EOF at EOF, otherwise error code */
@@ -103,16 +105,16 @@ struct tok_state {
     int decoding_erred; /* whether erred in decoding */
     char *encoding; /* Source encoding. */
     int cont_line; /* whether we are in a continuation line. */
-    const char* line_start; /* pointer to start of current line */
-    const char* multi_line_start; /* pointer to start of first line of
+    const char *line_start; /* pointer to start of current line */
+    const char *multi_line_start; /* pointer to start of first line of
                                      a single line or multi line string
                                      expression (cf. issue 16806) */
     PyObject *decoding_readline; /* open(...).readline */
     PyObject *decoding_buffer;
     PyObject *readline; /* readline() function */
-    const char* enc; /* Encoding for the current str. */
-    char* str; /* Source string being tokenized (if tokenizing from a string)*/
-    char* input; /* Tokenizer's newline translated copy of the string. */
+    const char *enc; /* Encoding for the current str. */
+    char *str; /* Source string being tokenized (if tokenizing from a string)*/
+    char *input; /* Tokenizer's newline translated copy of the string. */
 
     int type_comments; /* Whether to look for type comments */
 
@@ -138,8 +140,9 @@ struct tok_state {
 
 extern struct tok_state *_PyTokenizer_FromString(const char *, int, int);
 extern struct tok_state *_PyTokenizer_FromUTF8(const char *, int, int);
-extern struct tok_state *_PyTokenizer_FromReadline(PyObject*, const char*, int, int);
-extern struct tok_state *_PyTokenizer_FromFile(FILE *, const char*,
+extern struct tok_state *_PyTokenizer_FromReadline(PyObject *, const char *,
+                                                   int, int);
+extern struct tok_state *_PyTokenizer_FromFile(FILE *, const char *,
                                                const char *, const char *);
 extern void _PyTokenizer_Free(struct tok_state *);
 extern void _PyToken_Free(struct token *);
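The header above declares the C tokenizer state, including the in_format_spec flag added to tokenizer_mode by this change. As a rough Python-level companion, not part of the commit, the tokenize module shows how such an f-string is split into tokens on recent interpreters:

    import io
    import tokenize

    source = 'f"{x=:{width}.3f}"\n'

    # On Python 3.12+ the f-string is emitted as FSTRING_START, FSTRING_MIDDLE
    # and FSTRING_END tokens, with the embedded expressions tokenized in between.
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))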