Basic support for PEP 414 without docs or tests.
commit 6ecf77b3f8 (parent 745ccf8b1a)
3 changed files with 32 additions and 11 deletions
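
For context, PEP 414 reinstates the redundant u/U prefix on text literals so that code being ported from Python 2 does not have to strip it. A minimal illustration of what this patch makes the parser accept again (the prefix has no effect on the resulting object); the snippet is an illustration of mine, not part of the commit:

# The u prefix is accepted and ignored: both names bind the same str value.
s1 = u'spam'
s2 = 'spam'
assert s1 == s2 and type(s1) is str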
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -135,10 +135,10 @@ Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
 Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
 # Tail end of """ string.
 Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""')
+Triple = group("[bBuU]?[rR]?'''", '[bBuU]?[rR]?"""')
 # Single-line ' or " string.
-String = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
-               r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+String = group(r"[bBuU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+               r'[bBuU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

 # Because of leftmost-then-longest match semantics, be sure to put the
 # longest operators first (e.g., if = came before ==, == would get
@@ -156,9 +156,9 @@ PlainToken = group(Number, Funny, String, Name)
 Token = Ignore + PlainToken

 # First (or only) line of ' or " string.
-ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ContStr = group(r"[bBuU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                 group("'", r'\\\r?\n'),
-                r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+                r'[bBuU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                 group('"', r'\\\r?\n'))
 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
@@ -176,21 +176,35 @@ endpats = {"'": Single, '"': Double,
            "bR'''": Single3, 'bR"""': Double3,
            "Br'''": Single3, 'Br"""': Double3,
            "BR'''": Single3, 'BR"""': Double3,
-           'r': None, 'R': None, 'b': None, 'B': None}
+           "u'''": Single3, 'u"""': Double3,
+           "ur'''": Single3, 'ur"""': Double3,
+           "R'''": Single3, 'R"""': Double3,
+           "U'''": Single3, 'U"""': Double3,
+           "uR'''": Single3, 'uR"""': Double3,
+           "Ur'''": Single3, 'Ur"""': Double3,
+           "UR'''": Single3, 'UR"""': Double3,
+           'r': None, 'R': None, 'b': None, 'B': None,
+           'u': None, 'U': None}

 triple_quoted = {}
 for t in ("'''", '"""',
           "r'''", 'r"""', "R'''", 'R"""',
           "b'''", 'b"""', "B'''", 'B"""',
           "br'''", 'br"""', "Br'''", 'Br"""',
-          "bR'''", 'bR"""', "BR'''", 'BR"""'):
+          "bR'''", 'bR"""', "BR'''", 'BR"""',
+          "u'''", 'u"""', "U'''", 'U"""',
+          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
+          "uR'''", 'uR"""', "UR'''", 'UR"""'):
     triple_quoted[t] = t
 single_quoted = {}
 for t in ("'", '"',
           "r'", 'r"', "R'", 'R"',
           "b'", 'b"', "B'", 'B"',
           "br'", 'br"', "Br'", 'Br"',
-          "bR'", 'bR"', "BR'", 'BR"' ):
+          "bR'", 'bR"', "BR'", 'BR"' ,
+          "u'", 'u"', "U'", 'U"',
+          "ur'", 'ur"', "Ur'", 'Ur"',
+          "uR'", 'uR"', "UR'", 'UR"' ):
     single_quoted[t] = t

 tabsize = 8
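
With the u/U prefixes added to the patterns and tables above, the pure-Python tokenizer reports a u-prefixed literal as a single STRING token (prefix included) rather than a NAME followed by a string. A quick spot-check, written as an illustration rather than taken from the patch:

import io
import tokenize

# Tokenize a small buffer; u'text' should come back as one STRING token.
for tok in tokenize.tokenize(io.BytesIO(b"s = u'text'").readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))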
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -1412,11 +1412,15 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
     /* Identifier (most frequent token!) */
     nonascii = 0;
     if (is_potential_identifier_start(c)) {
-        /* Process b"", r"", br"" and rb"" */
-        int saw_b = 0, saw_r = 0;
+        /* Process b"", r"", u"", br"", rb"" and ur"" */
+        int saw_b = 0, saw_r = 0, saw_u = 0;
         while (1) {
-            if (!saw_b && (c == 'b' || c == 'B'))
+            if (!(saw_b || saw_u) && (c == 'b' || c == 'B'))
                 saw_b = 1;
+            /* Since this is a backwards compatibility support literal we don't
+               want to support it in arbitrary order like byte literals. */
+            else if (!(saw_b || saw_u || saw_r) && (c == 'u' || c == 'U'))
+                saw_u = 1;
             else if (!saw_r && (c == 'r' || c == 'R'))
                 saw_r = 1;
             else
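
Read as pseudocode, the prefix scan above only accepts u/U as the first prefix character and never in combination with b/B, while r/R may still follow, so u'', U"", ur'' and uR'' are recognized but ru'' and bu'' are not. A simplified Python sketch of the same flag logic, using a hypothetical name scan_prefix and a boolean result in place of the tokenizer's break-out:

def scan_prefix(prefix):
    # Mirror of the saw_b / saw_r / saw_u flags in tok_get above.
    saw_b = saw_r = saw_u = False
    for ch in prefix:
        if not (saw_b or saw_u) and ch in 'bB':
            saw_b = True
        elif not (saw_b or saw_u or saw_r) and ch in 'uU':
            saw_u = True
        elif not saw_r and ch in 'rR':
            saw_r = True
        else:
            return False
    return True

assert scan_prefix('u') and scan_prefix('ur') and scan_prefix('rb')
assert not scan_prefix('ru') and not scan_prefix('bu')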
--- a/Python/ast.c
+++ b/Python/ast.c
@@ -3796,6 +3796,9 @@ parsestr(struct compiling *c, const node *n, int *bytesmode)
             quote = *++s;
             *bytesmode = 1;
         }
+        else if (quote == 'u' || quote == 'U') {
+            quote = *++s;
+        }
         else if (quote == 'r' || quote == 'R') {
             quote = *++s;
             rawmode = 1;
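
In the compiler, the new branch simply skips a leading u/U before the usual raw-prefix handling, so the prefix changes nothing about the value that gets built. A small end-to-end check, again an illustration of mine rather than part of the commit:

# A u-prefixed literal compiles to an ordinary str, identical to the bare form.
assert eval("u'abc'") == 'abc'
assert type(eval("u'abc'")) is str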