Mirror of https://github.com/python/cpython.git
Synced 2025-07-24 19:54:21 +00:00

commit d3f467ac74
parent f7db42fe8c

Update README, remove obsolete scripts.

9 changed files with 62 additions and 993 deletions
@@ -1,68 +1,65 @@
This directory contains a collection of executable Python scripts that
are useful while building, extending or managing Python. Some (e.g.,
dutree or lll) are also generally useful UNIX tools.
This directory contains a collection of executable Python scripts that are
useful while building, extending or managing Python. Some (e.g., dutree or lll)
are also generally useful UNIX tools.

See also the Demo/scripts directory!

analyze_dxp.py          Analyzes the result of sys.getdxp()
byext.py                Print lines/words/chars stats of files by extension
byteyears.py            Print product of a file's size and age
checkappend.py          Search for multi-argument .append() calls
checkpyc.py             Check presence and validity of ".pyc" files
classfix.py             Convert old class syntax to new
cleanfuture.py          Fix reduntant Python __future__ statements
combinerefs.py          A helper for analyzing PYTHONDUMPREFS output.
copytime.py             Copy one file's atime and mtime to another
crlf.py                 Change CRLF line endings to LF (Windows to Unix)
cvsfiles.py             Print a list of files that are under CVS
db2pickle.py            Dump a database file to a pickle
diff.py                 Print file diffs in context, unified, or ndiff formats
dutree.py               Format du(1) output as a tree sorted by size
eptags.py               Create Emacs TAGS file for Python modules
2to3                    Main script for running the 2to3 conversion tool
analyze_dxp.py          Analyzes the result of sys.getdxp()
byext.py                Print lines/words/chars stats of files by extension
byteyears.py            Print product of a file's size and age
checkpyc.py             Check presence and validity of ".pyc" files
cleanfuture.py          Fix redundant Python __future__ statements
combinerefs.py          A helper for analyzing PYTHONDUMPREFS output
copytime.py             Copy one file's atime and mtime to another
crlf.py                 Change CRLF line endings to LF (Windows to Unix)
db2pickle.py            Dump a database file to a pickle
diff.py                 Print file diffs in context, unified, or ndiff formats
dutree.py               Format du(1) output as a tree sorted by size
eptags.py               Create Emacs TAGS file for Python modules
find_recursionlimit.py  Find the maximum recursion limit on this machine
finddiv.py              A grep-like tool that looks for division operators
findlinksto.py          Recursively find symbolic links to a given path prefix
findnocoding.py         Find source files which need an encoding declaration
fixcid.py               Massive identifier substitution on C source files
fixdiv.py               Tool to fix division operators.
fixheader.py            Add some cpp magic to a C include file
fixnotice.py            Fix the copyright notice in source files
fixps.py                Fix Python scripts' first line (if #!)
ftpmirror.py            FTP mirror script
google.py               Open a webbrowser with Google
gprof2html.py           Transform gprof(1) output into useful HTML
h2py.py                 Translate #define's into Python assignments
idle                    Main program to start IDLE
ifdef.py                Remove #if(n)def groups from C sources
lfcr.py                 Change LF line endings to CRLF (Unix to Windows)
linktree.py             Make a copy of a tree with links to original files
lll.py                  Find and list symbolic links in current directory
logmerge.py             Consolidate CVS/RCS logs read from stdin
mailerdaemon.py         parse error messages from mailer daemons (Sjoerd&Jack)
md5sum.py               Print MD5 checksums of argument files.
methfix.py              Fix old method syntax def f(self, (a1, ..., aN)):
mkreal.py               Turn a symbolic link into a real file or directory
ndiff.py                Intelligent diff between text files (Tim Peters)
nm2def.py               Create a template for PC/python_nt.def (Marc Lemburg)
objgraph.py             Print object graph from nm output on a library
parseentities.py        Utility for parsing HTML entity definitions
pathfix.py              Change #!/usr/local/bin/python into something else
pdeps.py                Print dependencies between Python modules
pickle2db.py            Load a pickle generated by db2pickle.py to a database
pindent.py              Indent Python code, giving block-closing comments
ptags.py                Create vi tags file for Python modules
pydoc                   Python documentation browser.
pysource.py             Find Python source files
redemo.py               Basic regular expression demonstration facility
reindent.py             Change .py files to use 4-space indents.
rgrep.py                Reverse grep through a file (useful for big logfiles)
serve.py                Small wsgiref-based web server, used in make serve in Doc
setup.py                Install all scripts listed here
suff.py                 Sort a list of files by suffix
svneol.py               Sets svn:eol-style on all files in directory
texcheck.py             Validate Python LaTeX formatting (Raymond Hettinger)
texi2html.py            Convert GNU texinfo files into HTML
treesync.py             Synchronize source trees (very ideosyncratic)
untabify.py             Replace tabs with spaces in argument files
which.py                Find a program in $PATH
xxci.py                 Wrapper for rcsdiff and ci
finddiv.py              A grep-like tool that looks for division operators
findlinksto.py          Recursively find symbolic links to a given path prefix
findnocoding.py         Find source files which need an encoding declaration
fixcid.py               Massive identifier substitution on C source files
fixdiv.py               Tool to fix division operators.
fixheader.py            Add some cpp magic to a C include file
fixnotice.py            Fix the copyright notice in source files
fixps.py                Fix Python scripts' first line (if #!)
ftpmirror.py            FTP mirror script
google.py               Open a webbrowser with Google
gprof2html.py           Transform gprof(1) output into useful HTML
h2py.py                 Translate #define's into Python assignments
idle3                   Main program to start IDLE
ifdef.py                Remove #if(n)def groups from C sources
lfcr.py                 Change LF line endings to CRLF (Unix to Windows)
linktree.py             Make a copy of a tree with links to original files
lll.py                  Find and list symbolic links in current directory
mailerdaemon.py         Parse error messages from mailer daemons (Sjoerd&Jack)
make_ctype.py           Generate ctype.h replacement in stringobject.c
md5sum.py               Print MD5 checksums of argument files
mkreal.py               Turn a symbolic link into a real file or directory
ndiff.py                Intelligent diff between text files (Tim Peters)
nm2def.py               Create a template for PC/python_nt.def (Marc Lemburg)
objgraph.py             Print object graph from nm output on a library
parseentities.py        Utility for parsing HTML entity definitions
patchcheck.py           Perform common checks and cleanup before committing
pathfix.py              Change #!/usr/local/bin/python into something else
pdeps.py                Print dependencies between Python modules
pickle2db.py            Load a pickle generated by db2pickle.py to a database
pindent.py              Indent Python code, giving block-closing comments
ptags.py                Create vi tags file for Python modules
pydoc3                  Python documentation browser
pysource.py             Find Python source files
redemo.py               Basic regular expression demonstration facility
reindent.py             Change .py files to use 4-space indents
reindent-rst.py         Fix-up reStructuredText file whitespace
rgrep.py                Reverse grep through a file (useful for big logfiles)
serve.py                Small wsgiref-based web server, used in make serve in Doc
suff.py                 Sort a list of files by suffix
svneol.py               Set svn:eol-style on all files in directory
texi2html.py            Convert GNU texinfo files into HTML
treesync.py             Synchronize source trees (very idiosyncratic)
untabify.py             Replace tabs with spaces in argument files
win_add2path.py         Add Python to the search path on Windows
which.py                Find a program in $PATH

@@ -1,4 +1,4 @@
#! /usr/bin/env python3.0
#! /usr/bin/env python3

"""Show file statistics by extension."""

@@ -1,169 +0,0 @@
#! /usr/bin/env python3

# Released to the public domain, by Tim Peters, 28 February 2000.

"""checkappend.py -- search for multi-argument .append() calls.

Usage: specify one or more file or directory paths:
    checkappend [-v] file_or_dir [file_or_dir] ...

Each file_or_dir is checked for multi-argument .append() calls. When
a directory, all .py files in the directory, and recursively in its
subdirectories, are checked.

Use -v for status msgs. Use -vv for more status msgs.

In the absence of -v, the only output is pairs of the form

    filename(linenumber):
    line containing the suspicious append

Note that this finds multi-argument append calls regardless of whether
they're attached to list objects. If a module defines a class with an
append method that takes more than one argument, calls to that method
will be listed.

Note that this will not find multi-argument list.append calls made via a
bound method object. For example, this is not caught:

    somelist = []
    push = somelist.append
    push(1, 2, 3)
"""

__version__ = 1, 0, 0

import os
import sys
import getopt
import tokenize

verbose = 0

def errprint(*args):
    msg = ' '.join(args)
    sys.stderr.write(msg)
    sys.stderr.write("\n")

def main():
    args = sys.argv[1:]
    global verbose
    try:
        opts, args = getopt.getopt(sys.argv[1:], "v")
    except getopt.error as msg:
        errprint(str(msg) + "\n\n" + __doc__)
        return
    for opt, optarg in opts:
        if opt == '-v':
            verbose = verbose + 1
    if not args:
        errprint(__doc__)
        return
    for arg in args:
        check(arg)

def check(file):
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("%r: listing directory" % (file,))
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if ((os.path.isdir(fullname) and
                 not os.path.islink(fullname))
                or os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    try:
        f = open(file)
    except IOError as msg:
        errprint("%r: I/O Error: %s" % (file, msg))
        return

    if verbose > 1:
        print("checking %r ..." % (file,))

    ok = AppendChecker(file, f).run()
    if verbose and ok:
        print("%r: Clean bill of health." % (file,))

[FIND_DOT,
 FIND_APPEND,
 FIND_LPAREN,
 FIND_COMMA,
 FIND_STMT] = range(5)

class AppendChecker:
    def __init__(self, fname, file):
        self.fname = fname
        self.file = file
        self.state = FIND_DOT
        self.nerrors = 0

    def run(self):
        try:
            tokens = tokenize.generate_tokens(self.file.readline)
            for _token in tokens:
                self.tokeneater(*_token)
        except tokenize.TokenError as msg:
            errprint("%r: Token Error: %s" % (self.fname, msg))
            self.nerrors = self.nerrors + 1
        return self.nerrors == 0

    def tokeneater(self, type, token, start, end, line,
                   NEWLINE=tokenize.NEWLINE,
                   JUNK=(tokenize.COMMENT, tokenize.NL),
                   OP=tokenize.OP,
                   NAME=tokenize.NAME):

        state = self.state

        if type in JUNK:
            pass

        elif state is FIND_DOT:
            if type is OP and token == ".":
                state = FIND_APPEND

        elif state is FIND_APPEND:
            if type is NAME and token == "append":
                self.line = line
                self.lineno = start[0]
                state = FIND_LPAREN
            else:
                state = FIND_DOT

        elif state is FIND_LPAREN:
            if type is OP and token == "(":
                self.level = 1
                state = FIND_COMMA
            else:
                state = FIND_DOT

        elif state is FIND_COMMA:
            if type is OP:
                if token in ("(", "{", "["):
                    self.level = self.level + 1
                elif token in (")", "}", "]"):
                    self.level = self.level - 1
                    if self.level == 0:
                        state = FIND_DOT
                elif token == "," and self.level == 1:
                    self.nerrors = self.nerrors + 1
                    print("%s(%d):\n%s" % (self.fname, self.lineno,
                                           self.line))
                    # don't gripe about this stmt again
                    state = FIND_STMT

        elif state is FIND_STMT:
            if type is NEWLINE:
                state = FIND_DOT

        else:
            raise SystemError("unknown internal state '%r'" % (state,))

        self.state = state

if __name__ == '__main__':
    main()
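(Illustrative note, not part of the diff.) checkappend.py targeted a pre-Python 2.0 idiom in which list.append() quietly accepted several arguments and stored them as a single tuple. A minimal sketch of the pattern the checker reported, the portable replacement, and the bound-method blind spot its docstring mentions:

items = []

# Pre-2.0 spelling the checker flagged -- on any modern Python it raises TypeError:
#   items.append(1, 2, 3)

# Portable spelling: append one explicit tuple.
items.append((1, 2, 3))

# Blind spot noted in the docstring: a bound method hides the ".append(" pattern.
push = items.append
push((4, 5, 6))          # fine; push(4, 5, 6) would neither be reported nor work

print(items)             # [(1, 2, 3), (4, 5, 6)]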
@@ -1,190 +0,0 @@
#! /usr/bin/env python3

# This script is obsolete -- it is kept for historical purposes only.
#
# Fix Python source files to use the new class definition syntax, i.e.,
# the syntax used in Python versions before 0.9.8:
#       class C() = base(), base(), ...: ...
# is changed to the current syntax:
#       class C(base, base, ...): ...
#
# The script uses heuristics to find class definitions that usually
# work but occasionally can fail; carefully check the output!
#
# Command line arguments are files or directories to be processed.
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
# arguments). Of course, the original file is kept as a back-up
# (with a "~" attached to its name).
#
# Changes made are reported to stdout in a diff-like format.
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
# and uses regular expressions. Also note several subtleties like
# preserving the file's mode and avoiding to even write a temp file
# when no changes are needed for a file.
#
# NB: by changing only the function fixline() you can turn this
# into a program for a different change to Python programs...

import sys
import re
import os
from stat import *

err = sys.stderr.write
dbg = err
rep = sys.stdout.write

def main():
    bad = 0
    if not sys.argv[1:]: # No arguments
        err('usage: ' + sys.argv[0] + ' file-or-directory ...\n')
        sys.exit(2)
    for arg in sys.argv[1:]:
        if os.path.isdir(arg):
            if recursedown(arg): bad = 1
        elif os.path.islink(arg):
            err(arg + ': will not process symbolic links\n')
            bad = 1
        else:
            if fix(arg): bad = 1
    sys.exit(bad)

ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
def ispython(name):
    return ispythonprog.match(name) >= 0

def recursedown(dirname):
    dbg('recursedown(%r)\n' % (dirname,))
    bad = 0
    try:
        names = os.listdir(dirname)
    except os.error as msg:
        err('%s: cannot list directory: %r\n' % (dirname, msg))
        return 1
    names.sort()
    subdirs = []
    for name in names:
        if name in (os.curdir, os.pardir): continue
        fullname = os.path.join(dirname, name)
        if os.path.islink(fullname): pass
        elif os.path.isdir(fullname):
            subdirs.append(fullname)
        elif ispython(name):
            if fix(fullname): bad = 1
    for fullname in subdirs:
        if recursedown(fullname): bad = 1
    return bad

def fix(filename):
##  dbg('fix(%r)\n' % (filename,))
    try:
        f = open(filename, 'r')
    except IOError as msg:
        err('%s: cannot open: %r\n' % (filename, msg))
        return 1
    head, tail = os.path.split(filename)
    tempname = os.path.join(head, '@' + tail)
    g = None
    # If we find a match, we rewind the file and start over but
    # now copy everything to a temp file.
    lineno = 0
    while 1:
        line = f.readline()
        if not line: break
        lineno = lineno + 1
        while line[-2:] == '\\\n':
            nextline = f.readline()
            if not nextline: break
            line = line + nextline
            lineno = lineno + 1
        newline = fixline(line)
        if newline != line:
            if g is None:
                try:
                    g = open(tempname, 'w')
                except IOError as msg:
                    f.close()
                    err('%s: cannot create: %r\n' % (tempname, msg))
                    return 1
                f.seek(0)
                lineno = 0
                rep(filename + ':\n')
                continue # restart from the beginning
            rep(repr(lineno) + '\n')
            rep('< ' + line)
            rep('> ' + newline)
        if g is not None:
            g.write(newline)

    # End of file
    f.close()
    if not g: return 0 # No changes

    # Finishing touch -- move files

    # First copy the file's mode to the temp file
    try:
        statbuf = os.stat(filename)
        os.chmod(tempname, statbuf[ST_MODE] & 0o7777)
    except os.error as msg:
        err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
    # Then make a backup of the original file as filename~
    try:
        os.rename(filename, filename + '~')
    except os.error as msg:
        err('%s: warning: backup failed (%r)\n' % (filename, msg))
    # Now move the temp file to the original file
    try:
        os.rename(tempname, filename)
    except os.error as msg:
        err('%s: rename failed (%r)\n' % (filename, msg))
        return 1
    # Return succes
    return 0

# This expression doesn't catch *all* class definition headers,
# but it's pretty darn close.
classexpr = '^([ \t]*class +[a-zA-Z0-9_]+) *( *) *((=.*)?):'
classprog = re.compile(classexpr)

# Expressions for finding base class expressions.
baseexpr = '^ *(.*) *( *) *$'
baseprog = re.compile(baseexpr)

def fixline(line):
    if classprog.match(line) < 0: # No 'class' keyword -- no change
        return line

    (a0, b0), (a1, b1), (a2, b2) = classprog.regs[:3]
    # a0, b0 = Whole match (up to ':')
    # a1, b1 = First subexpression (up to classname)
    # a2, b2 = Second subexpression (=.*)
    head = line[:b1]
    tail = line[b0:] # Unmatched rest of line

    if a2 == b2: # No base classes -- easy case
        return head + ':' + tail

    # Get rid of leading '='
    basepart = line[a2+1:b2]

    # Extract list of base expressions
    bases = basepart.split(',')

    # Strip trailing '()' from each base expression
    for i in range(len(bases)):
        if baseprog.match(bases[i]) >= 0:
            x1, y1 = baseprog.regs[1]
            bases[i] = bases[i][x1:y1]

    # Join the bases back again and build the new line
    basepart = ', '.join(bases)

    return head + '(' + basepart + '):' + tail

if __name__ == '__main__':
    main()
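(Illustrative note, not part of the diff.) The rewrite classfix.py performed, shown on a hypothetical class. The pre-0.9.8 header is kept as a comment because it is a syntax error in any modern Python:

class Base1: pass
class Base2: pass

# Header form classfix.py searched for (Python before 0.9.8):
#   class C() = Base1(), Base2():
#       ...

# Header form it produced (current syntax):
class C(Base1, Base2):
    pass

print([cls.__name__ for cls in C.__bases__])   # ['Base1', 'Base2']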
@@ -1,72 +0,0 @@
#! /usr/bin/env python3

"""Print a list of files that are mentioned in CVS directories.

Usage: cvsfiles.py [-n file] [directory] ...

If the '-n file' option is given, only files under CVS that are newer
than the given file are printed; by default, all files under CVS are
printed. As a special case, if a file does not exist, it is always
printed.
"""

import os
import sys
import stat
import getopt

cutofftime = 0

def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "n:")
    except getopt.error as msg:
        print(msg)
        print(__doc__, end=' ')
        return 1
    global cutofftime
    newerfile = None
    for o, a in opts:
        if o == '-n':
            cutofftime = getmtime(a)
    if args:
        for arg in args:
            process(arg)
    else:
        process(".")

def process(dir):
    cvsdir = 0
    subdirs = []
    names = os.listdir(dir)
    for name in names:
        fullname = os.path.join(dir, name)
        if name == "CVS":
            cvsdir = fullname
        else:
            if os.path.isdir(fullname):
                if not os.path.islink(fullname):
                    subdirs.append(fullname)
    if cvsdir:
        entries = os.path.join(cvsdir, "Entries")
        for e in open(entries).readlines():
            words = e.split('/')
            if words[0] == '' and words[1:]:
                name = words[1]
                fullname = os.path.join(dir, name)
                if cutofftime and getmtime(fullname) <= cutofftime:
                    pass
                else:
                    print(fullname)
    for sub in subdirs:
        process(sub)

def getmtime(filename):
    try:
        st = os.stat(filename)
    except os.error:
        return 0
    return st[stat.ST_MTIME]

if __name__ == '__main__':
    main()
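(Illustrative note, not part of the diff.) cvsfiles.py took the file name from the second '/'-separated field of each CVS/Entries record. A standalone sketch of that parse, using a made-up Entries line:

entry = "/README/1.5/Mon Apr 19 12:00:00 2004//\n"   # invented sample record

words = entry.split('/')
if words[0] == '' and words[1:]:
    print(words[1])   # -> README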
@@ -1,185 +0,0 @@
#! /usr/bin/env python3

"""Consolidate a bunch of CVS or RCS logs read from stdin.

Input should be the output of a CVS or RCS logging command, e.g.

    cvs log -rrelease14:

which dumps all log messages from release1.4 upwards (assuming that
release 1.4 was tagged with tag 'release14'). Note the trailing
colon!

This collects all the revision records and outputs them sorted by date
rather than by file, collapsing duplicate revision record, i.e.,
records with the same message for different files.

The -t option causes it to truncate (discard) the last revision log
entry; this is useful when using something like the above cvs log
command, which shows the revisions including the given tag, while you
probably want everything *since* that tag.

The -r option reverses the output (oldest first; the default is oldest
last).

The -b tag option restricts the output to *only* checkin messages
belonging to the given branch tag. The form -b HEAD restricts the
output to checkin messages belonging to the CVS head (trunk). (It
produces some output if tag is a non-branch tag, but this output is
not very useful.)

-h prints this message and exits.

XXX This code was created by reverse engineering CVS 1.9 and RCS 5.7
from their output.
"""

import sys, errno, getopt, re

sep1 = '='*77 + '\n'                    # file separator
sep2 = '-'*28 + '\n'                    # revision separator

def main():
    """Main program"""
    truncate_last = 0
    reverse = 0
    branch = None
    opts, args = getopt.getopt(sys.argv[1:], "trb:h")
    for o, a in opts:
        if o == '-t':
            truncate_last = 1
        elif o == '-r':
            reverse = 1
        elif o == '-b':
            branch = a
        elif o == '-h':
            print(__doc__)
            sys.exit(0)
    database = []
    while 1:
        chunk = read_chunk(sys.stdin)
        if not chunk:
            break
        records = digest_chunk(chunk, branch)
        if truncate_last:
            del records[-1]
        database[len(database):] = records
    database.sort()
    if not reverse:
        database.reverse()
    format_output(database)

def read_chunk(fp):
    """Read a chunk -- data for one file, ending with sep1.

    Split the chunk in parts separated by sep2.

    """
    chunk = []
    lines = []
    while 1:
        line = fp.readline()
        if not line:
            break
        if line == sep1:
            if lines:
                chunk.append(lines)
            break
        if line == sep2:
            if lines:
                chunk.append(lines)
                lines = []
        else:
            lines.append(line)
    return chunk

def digest_chunk(chunk, branch=None):
    """Digest a chunk -- extract working file name and revisions"""
    lines = chunk[0]
    key = 'Working file:'
    keylen = len(key)
    for line in lines:
        if line[:keylen] == key:
            working_file = line[keylen:].strip()
            break
    else:
        working_file = None
    if branch is None:
        pass
    elif branch == "HEAD":
        branch = re.compile(r"^\d+\.\d+$")
    else:
        revisions = {}
        key = 'symbolic names:\n'
        found = 0
        for line in lines:
            if line == key:
                found = 1
            elif found:
                if line[0] in '\t ':
                    tag, rev = line.split()
                    if tag[-1] == ':':
                        tag = tag[:-1]
                    revisions[tag] = rev
                else:
                    found = 0
        rev = revisions.get(branch)
        branch = re.compile(r"^<>$") # <> to force a mismatch by default
        if rev:
            if rev.find('.0.') >= 0:
                rev = rev.replace('.0.', '.')
            branch = re.compile(r"^" + re.escape(rev) + r"\.\d+$")
    records = []
    for lines in chunk[1:]:
        revline = lines[0]
        dateline = lines[1]
        text = lines[2:]
        words = dateline.split()
        author = None
        if len(words) >= 3 and words[0] == 'date:':
            dateword = words[1]
            timeword = words[2]
            if timeword[-1:] == ';':
                timeword = timeword[:-1]
            date = dateword + ' ' + timeword
            if len(words) >= 5 and words[3] == 'author:':
                author = words[4]
                if author[-1:] == ';':
                    author = author[:-1]
        else:
            date = None
            text.insert(0, revline)
        words = revline.split()
        if len(words) >= 2 and words[0] == 'revision':
            rev = words[1]
        else:
            # No 'revision' line -- weird...
            rev = None
            text.insert(0, revline)
        if branch:
            if rev is None or not branch.match(rev):
                continue
        records.append((date, working_file, rev, author, text))
    return records

def format_output(database):
    prevtext = None
    prev = []
    database.append((None, None, None, None, None)) # Sentinel
    for (date, working_file, rev, author, text) in database:
        if text != prevtext:
            if prev:
                print(sep2, end=' ')
                for (p_date, p_working_file, p_rev, p_author) in prev:
                    print(p_date, p_author, p_working_file, p_rev)
                sys.stdout.writelines(prevtext)
            prev = []
        prev.append((date, working_file, rev, author))
        prevtext = text

if __name__ == '__main__':
    try:
        main()
    except IOError as e:
        if e.errno != errno.EPIPE:
            raise
@@ -1,171 +0,0 @@
#! /usr/bin/env python3

# Fix Python source files to avoid using
#       def method(self, (arg1, ..., argn)):
# instead of the more rational
#       def method(self, arg1, ..., argn):
#
# Command line arguments are files or directories to be processed.
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
# arguments). Of course, the original file is kept as a back-up
# (with a "~" attached to its name).
# It complains about binaries (files containing null bytes)
# and about files that are ostensibly not Python files: if the first
# line starts with '#!' and does not contain the string 'python'.
#
# Changes made are reported to stdout in a diff-like format.
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
# and uses regular expressions. Also note several subtleties like
# preserving the file's mode and avoiding to even write a temp file
# when no changes are needed for a file.
#
# NB: by changing only the function fixline() you can turn this
# into a program for a different change to Python programs...

import sys
import re
import os
from stat import *

err = sys.stderr.write
dbg = err
rep = sys.stdout.write

def main():
    bad = 0
    if not sys.argv[1:]: # No arguments
        err('usage: ' + sys.argv[0] + ' file-or-directory ...\n')
        sys.exit(2)
    for arg in sys.argv[1:]:
        if os.path.isdir(arg):
            if recursedown(arg): bad = 1
        elif os.path.islink(arg):
            err(arg + ': will not process symbolic links\n')
            bad = 1
        else:
            if fix(arg): bad = 1
    sys.exit(bad)

ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
def ispython(name):
    return ispythonprog.match(name) >= 0

def recursedown(dirname):
    dbg('recursedown(%r)\n' % (dirname,))
    bad = 0
    try:
        names = os.listdir(dirname)
    except os.error as msg:
        err('%s: cannot list directory: %r\n' % (dirname, msg))
        return 1
    names.sort()
    subdirs = []
    for name in names:
        if name in (os.curdir, os.pardir): continue
        fullname = os.path.join(dirname, name)
        if os.path.islink(fullname): pass
        elif os.path.isdir(fullname):
            subdirs.append(fullname)
        elif ispython(name):
            if fix(fullname): bad = 1
    for fullname in subdirs:
        if recursedown(fullname): bad = 1
    return bad

def fix(filename):
##  dbg('fix(%r)\n' % (filename,))
    try:
        f = open(filename, 'r')
    except IOError as msg:
        err('%s: cannot open: %r\n' % (filename, msg))
        return 1
    head, tail = os.path.split(filename)
    tempname = os.path.join(head, '@' + tail)
    g = None
    # If we find a match, we rewind the file and start over but
    # now copy everything to a temp file.
    lineno = 0
    while 1:
        line = f.readline()
        if not line: break
        lineno = lineno + 1
        if g is None and '\0' in line:
            # Check for binary files
            err(filename + ': contains null bytes; not fixed\n')
            f.close()
            return 1
        if lineno == 1 and g is None and line[:2] == '#!':
            # Check for non-Python scripts
            words = line[2:].split()
            if words and re.search('[pP]ython', words[0]) < 0:
                msg = filename + ': ' + words[0]
                msg = msg + ' script; not fixed\n'
                err(msg)
                f.close()
                return 1
        while line[-2:] == '\\\n':
            nextline = f.readline()
            if not nextline: break
            line = line + nextline
            lineno = lineno + 1
        newline = fixline(line)
        if newline != line:
            if g is None:
                try:
                    g = open(tempname, 'w')
                except IOError as msg:
                    f.close()
                    err('%s: cannot create: %r\n' % (tempname, msg))
                    return 1
                f.seek(0)
                lineno = 0
                rep(filename + ':\n')
                continue # restart from the beginning
            rep(repr(lineno) + '\n')
            rep('< ' + line)
            rep('> ' + newline)
        if g is not None:
            g.write(newline)

    # End of file
    f.close()
    if not g: return 0 # No changes

    # Finishing touch -- move files

    # First copy the file's mode to the temp file
    try:
        statbuf = os.stat(filename)
        os.chmod(tempname, statbuf[ST_MODE] & 0o7777)
    except os.error as msg:
        err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
    # Then make a backup of the original file as filename~
    try:
        os.rename(filename, filename + '~')
    except os.error as msg:
        err('%s: warning: backup failed (%r)\n' % (filename, msg))
    # Now move the temp file to the original file
    try:
        os.rename(tempname, filename)
    except os.error as msg:
        err('%s: rename failed (%r)\n' % (filename, msg))
        return 1
    # Return succes
    return 0


fixpat = '^[ \t]+def +[a-zA-Z0-9_]+ *( *self *, *(( *(.*) *)) *) *:'
fixprog = re.compile(fixpat)

def fixline(line):
    if fixprog.match(line) >= 0:
        (a, b), (c, d) = fixprog.regs[1:3]
        line = line[:a] + line[c:d] + line[b:]
    return line

if __name__ == '__main__':
    main()
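(Illustrative note, not part of the diff.) methfix.py stripped the parenthesized tuple parameter from method headers. A small sketch of the before and after, plus the explicit unpacking used when callers really do pass a single tuple:

# Form methfix.py rewrote (a syntax error in Python 3, so shown as a comment):
#   def move(self, (x, y)):
#       self.x, self.y = x, y

class Point:
    def move(self, x, y):          # header as methfix.py rewrote it
        self.x, self.y = x, y

    def move_to(self, pos):        # modern idiom when one tuple is passed
        self.x, self.y = pos

p = Point()
p.move(1, 2)
p.move_to((3, 4))
print(p.x, p.y)                    # 3 4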
@@ -1,20 +0,0 @@
from distutils.core import setup

if __name__ == '__main__':
    setup(
        scripts=[
            'byteyears.py',
            'checkpyc.py',
            'copytime.py',
            'crlf.py',
            'dutree.py',
            'ftpmirror.py',
            'h2py.py',
            'lfcr.py',
            '../i18n/pygettext.py',
            'logmerge.py',
            '../../Lib/tabnanny.py',
            '../../Lib/timeit.py',
            'untabify.py',
            ],
        )
@@ -1,121 +0,0 @@
#! /usr/bin/env python3

# xxci
#
# check in files for which rcsdiff returns nonzero exit status

import sys
import os
from stat import *
import fnmatch

EXECMAGIC = '\001\140\000\010'

MAXSIZE = 200*1024 # Files this big must be binaries and are skipped.

def getargs():
    args = sys.argv[1:]
    if args:
        return args
    print('No arguments, checking almost *, in "ls -t" order')
    list = []
    for file in os.listdir(os.curdir):
        if not skipfile(file):
            list.append((getmtime(file), file))
    list.sort()
    if not list:
        print('Nothing to do -- exit 1')
        sys.exit(1)
    list.sort()
    list.reverse()
    for mtime, file in list: args.append(file)
    return args

def getmtime(file):
    try:
        st = os.stat(file)
        return st[ST_MTIME]
    except os.error:
        return -1

badnames = ['tags', 'TAGS', 'xyzzy', 'nohup.out', 'core']
badprefixes = ['.', ',', '@', '#', 'o.']
badsuffixes = \
        ['~', '.a', '.o', '.old', '.bak', '.orig', '.new', '.prev', '.not', \
         '.pyc', '.fdc', '.rgb', '.elc', ',v']
ignore = []

def setup():
    ignore[:] = badnames
    for p in badprefixes:
        ignore.append(p + '*')
    for p in badsuffixes:
        ignore.append('*' + p)
    try:
        f = open('.xxcign', 'r')
    except IOError:
        return
    ignore[:] = ignore + f.read().split()

def skipfile(file):
    for p in ignore:
        if fnmatch.fnmatch(file, p): return 1
    try:
        st = os.lstat(file)
    except os.error:
        return 1 # Doesn't exist -- skip it
    # Skip non-plain files.
    if not S_ISREG(st[ST_MODE]): return 1
    # Skip huge files -- probably binaries.
    if st[ST_SIZE] >= MAXSIZE: return 1
    # Skip executables
    try:
        data = open(file, 'r').read(len(EXECMAGIC))
        if data == EXECMAGIC: return 1
    except:
        pass
    return 0

def badprefix(file):
    for bad in badprefixes:
        if file[:len(bad)] == bad: return 1
    return 0

def badsuffix(file):
    for bad in badsuffixes:
        if file[-len(bad):] == bad: return 1
    return 0

def go(args):
    for file in args:
        print(file + ':')
        if differing(file):
            showdiffs(file)
            if askyesno('Check in ' + file + ' ? '):
                sts = os.system('rcs -l ' + file) # ignored
                sts = os.system('ci -l ' + file)

def differing(file):
    cmd = 'co -p ' + file + ' 2>/dev/null | cmp -s - ' + file
    sts = os.system(cmd)
    return sts != 0

def showdiffs(file):
    cmd = 'rcsdiff ' + file + ' 2>&1 | ${PAGER-more}'
    sts = os.system(cmd)

def raw_input(prompt):
    sys.stdout.write(prompt)
    sys.stdout.flush()
    return sys.stdin.readline()

def askyesno(prompt):
    s = input(prompt)
    return s in ['y', 'yes']

if __name__ == '__main__':
    try:
        setup()
        go(getargs())
    except KeyboardInterrupt:
        print('[Intr]')