mirror of
https://github.com/python/cpython.git
synced 2025-09-26 18:29:57 +00:00
Fix builtin test and simplify the classified text tuple.
This commit is contained in:
parent
a6473f9cfd
commit
fb20a1a924
1 changed file with 27 additions and 33 deletions
|
@ -4,12 +4,16 @@
|
||||||
__author__ = 'Raymond Hettinger'
|
__author__ = 'Raymond Hettinger'
|
||||||
|
|
||||||
import keyword, tokenize, cgi, re, functools
|
import keyword, tokenize, cgi, re, functools
|
||||||
|
try:
|
||||||
|
import builtins
|
||||||
|
except ImportError:
|
||||||
|
import __builtin__ as builtins
|
||||||
|
|
||||||
#### Analyze Python Source #################################
|
#### Analyze Python Source #################################
|
||||||
|
|
||||||
def is_builtin(s):
    'Return True if s is the name of a builtin'
    # Checking hasattr(__builtins__, s) is wrong: __builtins__ is only the
    # builtins *module* inside __main__; in any imported module it is a
    # plain dict, so hasattr() would report False for every real builtin.
    # Check the explicitly imported builtins module instead.
    return hasattr(builtins, s)
||||||
|
|
||||||
def combine_range(lines, start, end):
|
def combine_range(lines, start, end):
|
||||||
'Join content from a range of lines between start and end'
|
'Join content from a range of lines between start and end'
|
||||||
|
@ -21,9 +25,7 @@ def combine_range(lines, start, end):
|
||||||
|
|
||||||
def analyze_python(source):
|
def analyze_python(source):
|
||||||
'''Generate and classify chunks of Python for syntax highlighting.
|
'''Generate and classify chunks of Python for syntax highlighting.
|
||||||
Yields tuples in the form: (leadin_text, category, categorized_text).
|
Yields tuples in the form: (category, categorized_text).
|
||||||
The final tuple has empty strings for the category and categorized text.
|
|
||||||
|
|
||||||
'''
|
'''
|
||||||
lines = source.splitlines(True)
|
lines = source.splitlines(True)
|
||||||
lines.append('')
|
lines.append('')
|
||||||
|
@ -37,7 +39,7 @@ def analyze_python(source):
|
||||||
kind = ''
|
kind = ''
|
||||||
if tok_type == tokenize.COMMENT:
|
if tok_type == tokenize.COMMENT:
|
||||||
kind = 'comment'
|
kind = 'comment'
|
||||||
elif tok_type == tokenize.OP and tok_str[:1] not in '{}[](),.:;':
|
elif tok_type == tokenize.OP and tok_str[:1] not in '{}[](),.:;@':
|
||||||
kind = 'operator'
|
kind = 'operator'
|
||||||
elif tok_type == tokenize.STRING:
|
elif tok_type == tokenize.STRING:
|
||||||
kind = 'string'
|
kind = 'string'
|
||||||
|
@ -53,22 +55,20 @@ def analyze_python(source):
|
||||||
elif is_builtin(tok_str) and prev_tok_str != '.':
|
elif is_builtin(tok_str) and prev_tok_str != '.':
|
||||||
kind = 'builtin'
|
kind = 'builtin'
|
||||||
if kind:
|
if kind:
|
||||||
line_upto_token, written = combine_range(lines, written, (srow, scol))
|
text, written = combine_range(lines, written, (srow, scol))
|
||||||
line_thru_token, written = combine_range(lines, written, (erow, ecol))
|
yield '', text
|
||||||
yield line_upto_token, kind, line_thru_token
|
text, written = combine_range(lines, written, (erow, ecol))
|
||||||
|
yield kind, text
|
||||||
line_upto_token, written = combine_range(lines, written, (erow, ecol))
|
line_upto_token, written = combine_range(lines, written, (erow, ecol))
|
||||||
yield line_upto_token, '', ''
|
yield '', line_upto_token
|
||||||
|
|
||||||
#### Raw Output ###########################################
|
#### Raw Output ###########################################
|
||||||
|
|
||||||
def raw_highlight(classified_text):
    'Straight text display of text classifications'
    # Emit one formatted line per classified chunk; chunks with an empty
    # category are labelled 'plain'.  The category is right-aligned in a
    # fixed 15-character field and the text is shown via repr().
    formatted = ['%15s: %r\n' % (category or 'plain', chunk)
                 for category, chunk in classified_text]
    return ''.join(formatted)
|
||||||
|
|
||||||
#### ANSI Output ###########################################
|
#### ANSI Output ###########################################
|
||||||
|
@ -88,9 +88,9 @@ def ansi_highlight(classified_text, colors=default_ansi):
|
||||||
'Add syntax highlighting to source code using ANSI escape sequences'
|
'Add syntax highlighting to source code using ANSI escape sequences'
|
||||||
# http://en.wikipedia.org/wiki/ANSI_escape_code
|
# http://en.wikipedia.org/wiki/ANSI_escape_code
|
||||||
result = []
|
result = []
|
||||||
for line_upto_token, kind, line_thru_token in classified_text:
|
for kind, text in classified_text:
|
||||||
opener, closer = colors.get(kind, ('', ''))
|
opener, closer = colors.get(kind, ('', ''))
|
||||||
result += [line_upto_token, opener, line_thru_token, closer]
|
result += [opener, text, closer]
|
||||||
return ''.join(result)
|
return ''.join(result)
|
||||||
|
|
||||||
#### HTML Output ###########################################
|
#### HTML Output ###########################################
|
||||||
|
@ -98,16 +98,13 @@ def ansi_highlight(classified_text, colors=default_ansi):
|
||||||
def html_highlight(classified_text,opener='<pre class="python">\n', closer='</pre>\n'):
    'Convert classified text to an HTML fragment'
    # cgi.escape() was deprecated in Python 3.2 and removed in 3.8 (the
    # whole cgi module is gone in 3.13).  html.escape(quote=False) produces
    # byte-identical output for &, < and >; keep a cgi fallback in the same
    # try/except-ImportError style this file already uses for builtins.
    try:
        from html import escape as _escape        # Python 3.2+
    except ImportError:
        from cgi import escape as _escape         # legacy fallback
    result = [opener]
    for kind, text in classified_text:
        # Categorized chunks are wrapped in a CSS-classed span; plain
        # chunks are escaped and emitted as-is.
        if kind:
            result.append('<span class="%s">' % kind)
        result.append(_escape(text, quote=False))
        if kind:
            result.append('</span>')
    result.append(closer)
    return ''.join(result)
|
||||||
|
|
||||||
default_css = {
|
default_css = {
|
||||||
|
@ -188,15 +185,12 @@ def latex_highlight(classified_text, title = 'python',
|
||||||
document = default_latex_document):
|
document = default_latex_document):
|
||||||
'Create a complete LaTeX document with colorized source code'
|
'Create a complete LaTeX document with colorized source code'
|
||||||
result = []
|
result = []
|
||||||
for line_upto_token, kind, line_thru_token in classified_text:
|
for kind, text in classified_text:
|
||||||
if kind:
|
if kind:
|
||||||
result += [latex_escape(line_upto_token),
|
result.append(r'{\color{%s}' % colors[kind])
|
||||||
r'{\color{%s}' % colors[kind],
|
result.append(latex_escape(text))
|
||||||
latex_escape(line_thru_token),
|
if kind:
|
||||||
'}']
|
result.append('}')
|
||||||
else:
|
|
||||||
result += [latex_escape(line_upto_token),
|
|
||||||
latex_escape(line_thru_token)]
|
|
||||||
return default_latex_document % dict(title=title, body=''.join(result))
|
return default_latex_document % dict(title=title, body=''.join(result))
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue