mirror of
https://github.com/python/cpython.git
synced 2025-10-03 05:35:59 +00:00
Whitespace normalization.
This commit is contained in:
parent
752d3f557e
commit
88869f9787
16 changed files with 1168 additions and 1170 deletions
|
@ -73,16 +73,16 @@ ConfigParser -- responsible for for parsing a list of
|
||||||
1, only)
|
1, only)
|
||||||
|
|
||||||
remove_section(section)
|
remove_section(section)
|
||||||
remove the given file section and all its options
|
remove the given file section and all its options
|
||||||
|
|
||||||
remove_option(section, option)
|
remove_option(section, option)
|
||||||
remove the given option from the given section
|
remove the given option from the given section
|
||||||
|
|
||||||
set(section, option, value)
|
set(section, option, value)
|
||||||
set the given option
|
set the given option
|
||||||
|
|
||||||
write(fp)
|
write(fp)
|
||||||
write the configuration state in .ini format
|
write the configuration state in .ini format
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
@ -94,7 +94,7 @@ DEFAULTSECT = "DEFAULT"
|
||||||
MAX_INTERPOLATION_DEPTH = 10
|
MAX_INTERPOLATION_DEPTH = 10
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# exception classes
|
# exception classes
|
||||||
class Error(Exception):
|
class Error(Exception):
|
||||||
def __init__(self, msg=''):
|
def __init__(self, msg=''):
|
||||||
|
@ -166,7 +166,7 @@ class MissingSectionHeaderError(ParsingError):
|
||||||
self.line = line
|
self.line = line
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class ConfigParser:
|
class ConfigParser:
|
||||||
def __init__(self, defaults=None):
|
def __init__(self, defaults=None):
|
||||||
self.__sections = {}
|
self.__sections = {}
|
||||||
|
@ -217,7 +217,7 @@ class ConfigParser:
|
||||||
|
|
||||||
def read(self, filenames):
|
def read(self, filenames):
|
||||||
"""Read and parse a filename or a list of filenames.
|
"""Read and parse a filename or a list of filenames.
|
||||||
|
|
||||||
Files that cannot be opened are silently ignored; this is
|
Files that cannot be opened are silently ignored; this is
|
||||||
designed so that you can specify a list of potential
|
designed so that you can specify a list of potential
|
||||||
configuration file locations (e.g. current directory, user's
|
configuration file locations (e.g. current directory, user's
|
||||||
|
@ -285,7 +285,7 @@ class ConfigParser:
|
||||||
|
|
||||||
# do the string interpolation
|
# do the string interpolation
|
||||||
value = rawval # Make it a pretty variable name
|
value = rawval # Make it a pretty variable name
|
||||||
depth = 0
|
depth = 0
|
||||||
while depth < 10: # Loop through this until it's done
|
while depth < 10: # Loop through this until it's done
|
||||||
depth = depth + 1
|
depth = depth + 1
|
||||||
if string.find(value, "%(") >= 0:
|
if string.find(value, "%(") >= 0:
|
||||||
|
@ -298,7 +298,7 @@ class ConfigParser:
|
||||||
if value.find("%(") >= 0:
|
if value.find("%(") >= 0:
|
||||||
raise InterpolationDepthError(option, section, rawval)
|
raise InterpolationDepthError(option, section, rawval)
|
||||||
return value
|
return value
|
||||||
|
|
||||||
def __get(self, section, conv, option):
|
def __get(self, section, conv, option):
|
||||||
return conv(self.get(section, option))
|
return conv(self.get(section, option))
|
||||||
|
|
||||||
|
|
|
@ -3,9 +3,9 @@
|
||||||
|
|
||||||
####
|
####
|
||||||
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
|
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
|
||||||
#
|
#
|
||||||
# All Rights Reserved
|
# All Rights Reserved
|
||||||
#
|
#
|
||||||
# Permission to use, copy, modify, and distribute this software
|
# Permission to use, copy, modify, and distribute this software
|
||||||
# and its documentation for any purpose and without fee is hereby
|
# and its documentation for any purpose and without fee is hereby
|
||||||
# granted, provided that the above copyright notice appear in all
|
# granted, provided that the above copyright notice appear in all
|
||||||
|
@ -13,8 +13,8 @@
|
||||||
# notice appear in supporting documentation, and that the name of
|
# notice appear in supporting documentation, and that the name of
|
||||||
# Timothy O'Malley not be used in advertising or publicity
|
# Timothy O'Malley not be used in advertising or publicity
|
||||||
# pertaining to distribution of the software without specific, written
|
# pertaining to distribution of the software without specific, written
|
||||||
# prior permission.
|
# prior permission.
|
||||||
#
|
#
|
||||||
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
|
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
|
||||||
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||||
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
|
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
|
||||||
|
@ -22,11 +22,11 @@
|
||||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
||||||
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
||||||
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||||
# PERFORMANCE OF THIS SOFTWARE.
|
# PERFORMANCE OF THIS SOFTWARE.
|
||||||
#
|
#
|
||||||
####
|
####
|
||||||
#
|
#
|
||||||
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
|
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
|
||||||
# by Timothy O'Malley <timo@alum.mit.edu>
|
# by Timothy O'Malley <timo@alum.mit.edu>
|
||||||
#
|
#
|
||||||
# Cookie.py is a Python module for the handling of HTTP
|
# Cookie.py is a Python module for the handling of HTTP
|
||||||
|
@ -116,7 +116,7 @@ attribute.
|
||||||
Set-Cookie: oreo="doublestuff"; Path=/;
|
Set-Cookie: oreo="doublestuff"; Path=/;
|
||||||
|
|
||||||
Each dictionary element has a 'value' attribute, which gives you
|
Each dictionary element has a 'value' attribute, which gives you
|
||||||
back the value associated with the key.
|
back the value associated with the key.
|
||||||
|
|
||||||
>>> C = Cookie.SmartCookie()
|
>>> C = Cookie.SmartCookie()
|
||||||
>>> C["twix"] = "none for you"
|
>>> C["twix"] = "none for you"
|
||||||
|
@ -148,7 +148,7 @@ the value to a string, when the values are set dictionary-style.
|
||||||
Set-Cookie: number=7;
|
Set-Cookie: number=7;
|
||||||
Set-Cookie: string=seven;
|
Set-Cookie: string=seven;
|
||||||
|
|
||||||
|
|
||||||
SerialCookie
|
SerialCookie
|
||||||
|
|
||||||
The SerialCookie expects that all values should be serialized using
|
The SerialCookie expects that all values should be serialized using
|
||||||
|
@ -214,7 +214,7 @@ Finis.
|
||||||
|
|
||||||
#
|
#
|
||||||
# Import our required modules
|
# Import our required modules
|
||||||
#
|
#
|
||||||
import string, sys
|
import string, sys
|
||||||
from UserDict import UserDict
|
from UserDict import UserDict
|
||||||
|
|
||||||
|
@ -242,7 +242,7 @@ class CookieError(Exception):
|
||||||
# into a 4 character sequence: a forward-slash followed by the
|
# into a 4 character sequence: a forward-slash followed by the
|
||||||
# three-digit octal equivalent of the character. Any '\' or '"' is
|
# three-digit octal equivalent of the character. Any '\' or '"' is
|
||||||
# quoted with a preceeding '\' slash.
|
# quoted with a preceeding '\' slash.
|
||||||
#
|
#
|
||||||
# These are taken from RFC2068 and RFC2109.
|
# These are taken from RFC2068 and RFC2109.
|
||||||
# _LegalChars is the list of chars which don't require "'s
|
# _LegalChars is the list of chars which don't require "'s
|
||||||
# _Translator hash-table for fast quoting
|
# _Translator hash-table for fast quoting
|
||||||
|
@ -319,7 +319,7 @@ def _quote(str, LegalChars=_LegalChars,
|
||||||
if "" == translate(str, idmap, LegalChars):
|
if "" == translate(str, idmap, LegalChars):
|
||||||
return str
|
return str
|
||||||
else:
|
else:
|
||||||
return '"' + join( map(_Translator.get, str, str), "" ) + '"'
|
return '"' + join( map(_Translator.get, str, str), "" ) + '"'
|
||||||
# end _quote
|
# end _quote
|
||||||
|
|
||||||
|
|
||||||
|
@ -370,7 +370,7 @@ def _unquote(str, join=string.join, atoi=string.atoi):
|
||||||
|
|
||||||
# The _getdate() routine is used to set the expiration time in
|
# The _getdate() routine is used to set the expiration time in
|
||||||
# the cookie's HTTP header. By default, _getdate() returns the
|
# the cookie's HTTP header. By default, _getdate() returns the
|
||||||
# current time in the appropriate "expires" format for a
|
# current time in the appropriate "expires" format for a
|
||||||
# Set-Cookie header. The one optional argument is an offset from
|
# Set-Cookie header. The one optional argument is an offset from
|
||||||
# now, in seconds. For example, an offset of -3600 means "one hour ago".
|
# now, in seconds. For example, an offset of -3600 means "one hour ago".
|
||||||
# The offset may be a floating point number.
|
# The offset may be a floating point number.
|
||||||
|
@ -405,7 +405,7 @@ class Morsel(UserDict):
|
||||||
# RFC 2109 lists these attributes as reserved:
|
# RFC 2109 lists these attributes as reserved:
|
||||||
# path comment domain
|
# path comment domain
|
||||||
# max-age secure version
|
# max-age secure version
|
||||||
#
|
#
|
||||||
# For historical reasons, these attributes are also reserved:
|
# For historical reasons, these attributes are also reserved:
|
||||||
# expires
|
# expires
|
||||||
#
|
#
|
||||||
|
|
|
@ -131,7 +131,7 @@ def month(theyear, themonth, w=0, l=0):
|
||||||
"""Return a month's calendar string (multi-line)."""
|
"""Return a month's calendar string (multi-line)."""
|
||||||
w = max(2, w)
|
w = max(2, w)
|
||||||
l = max(1, l)
|
l = max(1, l)
|
||||||
s = (_center(month_name[themonth] + ' ' + `theyear`,
|
s = (_center(month_name[themonth] + ' ' + `theyear`,
|
||||||
7 * (w + 1) - 1).rstrip() +
|
7 * (w + 1) - 1).rstrip() +
|
||||||
'\n' * l + weekheader(w).rstrip() + '\n' * l)
|
'\n' * l + weekheader(w).rstrip() + '\n' * l)
|
||||||
for aweek in monthcalendar(theyear, themonth):
|
for aweek in monthcalendar(theyear, themonth):
|
||||||
|
@ -167,7 +167,7 @@ def calendar(year, w=0, l=0, c=_spacing):
|
||||||
for q in range(January, January+12, 3):
|
for q in range(January, January+12, 3):
|
||||||
s = (s + '\n' * l +
|
s = (s + '\n' * l +
|
||||||
format3cstring(month_name[q], month_name[q+1], month_name[q+2],
|
format3cstring(month_name[q], month_name[q+1], month_name[q+2],
|
||||||
colwidth, c).rstrip() +
|
colwidth, c).rstrip() +
|
||||||
'\n' * l + header + '\n' * l)
|
'\n' * l + header + '\n' * l)
|
||||||
data = []
|
data = []
|
||||||
height = 0
|
height = 0
|
||||||
|
@ -183,7 +183,7 @@ def calendar(year, w=0, l=0, c=_spacing):
|
||||||
weeks.append('')
|
weeks.append('')
|
||||||
else:
|
else:
|
||||||
weeks.append(week(cal[i], w))
|
weeks.append(week(cal[i], w))
|
||||||
s = s + format3cstring(weeks[0], weeks[1], weeks[2],
|
s = s + format3cstring(weeks[0], weeks[1], weeks[2],
|
||||||
colwidth, c).rstrip() + '\n' * l
|
colwidth, c).rstrip() + '\n' * l
|
||||||
return s[:-l] + '\n'
|
return s[:-l] + '\n'
|
||||||
|
|
||||||
|
|
54
Lib/cgi.py
54
Lib/cgi.py
|
@ -11,13 +11,13 @@ written in Python.
|
||||||
|
|
||||||
# History
|
# History
|
||||||
# -------
|
# -------
|
||||||
#
|
#
|
||||||
# Michael McLay started this module. Steve Majewski changed the
|
# Michael McLay started this module. Steve Majewski changed the
|
||||||
# interface to SvFormContentDict and FormContentDict. The multipart
|
# interface to SvFormContentDict and FormContentDict. The multipart
|
||||||
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
|
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
|
||||||
# Rossum rewrote, reformatted and documented the module and is currently
|
# Rossum rewrote, reformatted and documented the module and is currently
|
||||||
# responsible for its maintenance.
|
# responsible for its maintenance.
|
||||||
#
|
#
|
||||||
|
|
||||||
__version__ = "2.5"
|
__version__ = "2.5"
|
||||||
|
|
||||||
|
@ -104,8 +104,8 @@ def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
|
||||||
environ : environment dictionary; default: os.environ
|
environ : environment dictionary; default: os.environ
|
||||||
|
|
||||||
keep_blank_values: flag indicating whether blank values in
|
keep_blank_values: flag indicating whether blank values in
|
||||||
URL encoded forms should be treated as blank strings.
|
URL encoded forms should be treated as blank strings.
|
||||||
A true value indicates that blanks should be retained as
|
A true value indicates that blanks should be retained as
|
||||||
blank strings. The default false value indicates that
|
blank strings. The default false value indicates that
|
||||||
blank values are to be ignored and treated as if they were
|
blank values are to be ignored and treated as if they were
|
||||||
not included.
|
not included.
|
||||||
|
@ -129,10 +129,10 @@ def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
|
||||||
qs = fp.read(clength)
|
qs = fp.read(clength)
|
||||||
else:
|
else:
|
||||||
qs = '' # Unknown content-type
|
qs = '' # Unknown content-type
|
||||||
if environ.has_key('QUERY_STRING'):
|
if environ.has_key('QUERY_STRING'):
|
||||||
if qs: qs = qs + '&'
|
if qs: qs = qs + '&'
|
||||||
qs = qs + environ['QUERY_STRING']
|
qs = qs + environ['QUERY_STRING']
|
||||||
elif sys.argv[1:]:
|
elif sys.argv[1:]:
|
||||||
if qs: qs = qs + '&'
|
if qs: qs = qs + '&'
|
||||||
qs = qs + sys.argv[1]
|
qs = qs + sys.argv[1]
|
||||||
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
|
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
|
||||||
|
@ -155,8 +155,8 @@ def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
|
||||||
qs: URL-encoded query string to be parsed
|
qs: URL-encoded query string to be parsed
|
||||||
|
|
||||||
keep_blank_values: flag indicating whether blank values in
|
keep_blank_values: flag indicating whether blank values in
|
||||||
URL encoded queries should be treated as blank strings.
|
URL encoded queries should be treated as blank strings.
|
||||||
A true value indicates that blanks should be retained as
|
A true value indicates that blanks should be retained as
|
||||||
blank strings. The default false value indicates that
|
blank strings. The default false value indicates that
|
||||||
blank values are to be ignored and treated as if they were
|
blank values are to be ignored and treated as if they were
|
||||||
not included.
|
not included.
|
||||||
|
@ -188,7 +188,7 @@ def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
|
||||||
|
|
||||||
strict_parsing: flag indicating what to do with parsing errors. If
|
strict_parsing: flag indicating what to do with parsing errors. If
|
||||||
false (the default), errors are silently ignored. If true,
|
false (the default), errors are silently ignored. If true,
|
||||||
errors raise a ValueError exception.
|
errors raise a ValueError exception.
|
||||||
|
|
||||||
Returns a list, as G-d intended.
|
Returns a list, as G-d intended.
|
||||||
"""
|
"""
|
||||||
|
@ -215,17 +215,17 @@ def parse_multipart(fp, pdict):
|
||||||
fp : input file
|
fp : input file
|
||||||
pdict: dictionary containing other parameters of conten-type header
|
pdict: dictionary containing other parameters of conten-type header
|
||||||
|
|
||||||
Returns a dictionary just like parse_qs(): keys are the field names, each
|
Returns a dictionary just like parse_qs(): keys are the field names, each
|
||||||
value is a list of values for that field. This is easy to use but not
|
value is a list of values for that field. This is easy to use but not
|
||||||
much good if you are expecting megabytes to be uploaded -- in that case,
|
much good if you are expecting megabytes to be uploaded -- in that case,
|
||||||
use the FieldStorage class instead which is much more flexible. Note
|
use the FieldStorage class instead which is much more flexible. Note
|
||||||
that content-type is the raw, unparsed contents of the content-type
|
that content-type is the raw, unparsed contents of the content-type
|
||||||
header.
|
header.
|
||||||
|
|
||||||
XXX This does not parse nested multipart parts -- use FieldStorage for
|
XXX This does not parse nested multipart parts -- use FieldStorage for
|
||||||
that.
|
that.
|
||||||
|
|
||||||
XXX This should really be subsumed by FieldStorage altogether -- no
|
XXX This should really be subsumed by FieldStorage altogether -- no
|
||||||
point in having two implementations of the same parsing algorithm.
|
point in having two implementations of the same parsing algorithm.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
@ -409,8 +409,8 @@ class FieldStorage:
|
||||||
environ : environment dictionary; default: os.environ
|
environ : environment dictionary; default: os.environ
|
||||||
|
|
||||||
keep_blank_values: flag indicating whether blank values in
|
keep_blank_values: flag indicating whether blank values in
|
||||||
URL encoded forms should be treated as blank strings.
|
URL encoded forms should be treated as blank strings.
|
||||||
A true value indicates that blanks should be retained as
|
A true value indicates that blanks should be retained as
|
||||||
blank strings. The default false value indicates that
|
blank strings. The default false value indicates that
|
||||||
blank values are to be ignored and treated as if they were
|
blank values are to be ignored and treated as if they were
|
||||||
not included.
|
not included.
|
||||||
|
@ -707,7 +707,7 @@ class FieldStorage:
|
||||||
"""
|
"""
|
||||||
import tempfile
|
import tempfile
|
||||||
return tempfile.TemporaryFile("w+b")
|
return tempfile.TemporaryFile("w+b")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Backwards Compatibility Classes
|
# Backwards Compatibility Classes
|
||||||
|
@ -744,8 +744,8 @@ class SvFormContentDict(FormContentDict):
|
||||||
|
|
||||||
"""
|
"""
|
||||||
def __getitem__(self, key):
|
def __getitem__(self, key):
|
||||||
if len(self.dict[key]) > 1:
|
if len(self.dict[key]) > 1:
|
||||||
raise IndexError, 'expecting a single value'
|
raise IndexError, 'expecting a single value'
|
||||||
return self.dict[key][0]
|
return self.dict[key][0]
|
||||||
def getlist(self, key):
|
def getlist(self, key):
|
||||||
return self.dict[key]
|
return self.dict[key]
|
||||||
|
@ -766,7 +766,7 @@ class SvFormContentDict(FormContentDict):
|
||||||
|
|
||||||
|
|
||||||
class InterpFormContentDict(SvFormContentDict):
|
class InterpFormContentDict(SvFormContentDict):
|
||||||
"""This class is present for backwards compatibility only."""
|
"""This class is present for backwards compatibility only."""
|
||||||
def __getitem__(self, key):
|
def __getitem__(self, key):
|
||||||
v = SvFormContentDict.__getitem__(self, key)
|
v = SvFormContentDict.__getitem__(self, key)
|
||||||
if v[0] in string.digits + '+-.':
|
if v[0] in string.digits + '+-.':
|
||||||
|
@ -794,7 +794,7 @@ class InterpFormContentDict(SvFormContentDict):
|
||||||
|
|
||||||
|
|
||||||
class FormContent(FormContentDict):
|
class FormContent(FormContentDict):
|
||||||
"""This class is present for backwards compatibility only."""
|
"""This class is present for backwards compatibility only."""
|
||||||
def values(self, key):
|
def values(self, key):
|
||||||
if self.dict.has_key(key) :return self.dict[key]
|
if self.dict.has_key(key) :return self.dict[key]
|
||||||
else: return None
|
else: return None
|
||||||
|
@ -882,7 +882,7 @@ def print_environ(environ=os.environ):
|
||||||
print "<DL>"
|
print "<DL>"
|
||||||
for key in keys:
|
for key in keys:
|
||||||
print "<DT>", escape(key), "<DD>", escape(environ[key])
|
print "<DT>", escape(key), "<DD>", escape(environ[key])
|
||||||
print "</DL>"
|
print "</DL>"
|
||||||
print
|
print
|
||||||
|
|
||||||
def print_form(form):
|
def print_form(form):
|
||||||
|
@ -982,5 +982,5 @@ def escape(s, quote=None):
|
||||||
# ===============
|
# ===============
|
||||||
|
|
||||||
# Call test() when this file is run as a script (not imported as a module)
|
# Call test() when this file is run as a script (not imported as a module)
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
test()
|
test()
|
||||||
|
|
15
Lib/chunk.py
15
Lib/chunk.py
|
@ -18,7 +18,7 @@ The size field (a 32-bit value, encoded using big-endian byte order)
|
||||||
gives the size of the whole chunk, including the 8-byte header.
|
gives the size of the whole chunk, including the 8-byte header.
|
||||||
|
|
||||||
Usually an IFF-type file consists of one or more chunks. The proposed
|
Usually an IFF-type file consists of one or more chunks. The proposed
|
||||||
usage of the Chunk class defined here is to instantiate an instance at
|
usage of the Chunk class defined here is to instantiate an instance at
|
||||||
the start of each chunk and read from the instance until it reaches
|
the start of each chunk and read from the instance until it reaches
|
||||||
the end, after which a new instance can be instantiated. At the end
|
the end, after which a new instance can be instantiated. At the end
|
||||||
of the file, creating a new instance will fail with a EOFError
|
of the file, creating a new instance will fail with a EOFError
|
||||||
|
@ -44,7 +44,7 @@ getname() (returns the name (ID) of the chunk)
|
||||||
|
|
||||||
The __init__ method has one required argument, a file-like object
|
The __init__ method has one required argument, a file-like object
|
||||||
(including a chunk instance), and one optional argument, a flag which
|
(including a chunk instance), and one optional argument, a flag which
|
||||||
specifies whether or not chunks are aligned on 2-byte boundaries. The
|
specifies whether or not chunks are aligned on 2-byte boundaries. The
|
||||||
default is 1, i.e. aligned.
|
default is 1, i.e. aligned.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
@ -52,7 +52,7 @@ class Chunk:
|
||||||
def __init__(self, file, align = 1, bigendian = 1, inclheader = 0):
|
def __init__(self, file, align = 1, bigendian = 1, inclheader = 0):
|
||||||
import struct
|
import struct
|
||||||
self.closed = 0
|
self.closed = 0
|
||||||
self.align = align # whether to align to word (2-byte) boundaries
|
self.align = align # whether to align to word (2-byte) boundaries
|
||||||
if bigendian:
|
if bigendian:
|
||||||
strflag = '>'
|
strflag = '>'
|
||||||
else:
|
else:
|
||||||
|
@ -97,7 +97,7 @@ class Chunk:
|
||||||
"""Seek to specified position into the chunk.
|
"""Seek to specified position into the chunk.
|
||||||
Default position is 0 (start of chunk).
|
Default position is 0 (start of chunk).
|
||||||
If the file is not seekable, this will result in an error.
|
If the file is not seekable, this will result in an error.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if self.closed:
|
if self.closed:
|
||||||
raise ValueError, "I/O operation on closed file"
|
raise ValueError, "I/O operation on closed file"
|
||||||
|
@ -121,7 +121,7 @@ class Chunk:
|
||||||
"""Read at most size bytes from the chunk.
|
"""Read at most size bytes from the chunk.
|
||||||
If size is omitted or negative, read until the end
|
If size is omitted or negative, read until the end
|
||||||
of the chunk.
|
of the chunk.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if self.closed:
|
if self.closed:
|
||||||
raise ValueError, "I/O operation on closed file"
|
raise ValueError, "I/O operation on closed file"
|
||||||
|
@ -130,7 +130,7 @@ class Chunk:
|
||||||
if size < 0:
|
if size < 0:
|
||||||
size = self.chunksize - self.size_read
|
size = self.chunksize - self.size_read
|
||||||
if size > self.chunksize - self.size_read:
|
if size > self.chunksize - self.size_read:
|
||||||
size = self.chunksize - self.size_read
|
size = self.chunksize - self.size_read
|
||||||
data = self.file.read(size)
|
data = self.file.read(size)
|
||||||
self.size_read = self.size_read + len(data)
|
self.size_read = self.size_read + len(data)
|
||||||
if self.size_read == self.chunksize and \
|
if self.size_read == self.chunksize and \
|
||||||
|
@ -145,7 +145,7 @@ class Chunk:
|
||||||
If you are not interested in the contents of the chunk,
|
If you are not interested in the contents of the chunk,
|
||||||
this method should be called so that the file points to
|
this method should be called so that the file points to
|
||||||
the start of the next chunk.
|
the start of the next chunk.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if self.closed:
|
if self.closed:
|
||||||
raise ValueError, "I/O operation on closed file"
|
raise ValueError, "I/O operation on closed file"
|
||||||
|
@ -165,4 +165,3 @@ class Chunk:
|
||||||
dummy = self.read(n)
|
dummy = self.read(n)
|
||||||
if not dummy:
|
if not dummy:
|
||||||
raise EOFError
|
raise EOFError
|
||||||
|
|
||||||
|
|
|
@ -25,19 +25,19 @@ except ImportError,why:
|
||||||
BOM = struct.pack('=H',0xFEFF)
|
BOM = struct.pack('=H',0xFEFF)
|
||||||
#
|
#
|
||||||
BOM_BE = BOM32_BE = '\376\377'
|
BOM_BE = BOM32_BE = '\376\377'
|
||||||
# corresponds to Unicode U+FEFF in UTF-16 on big endian
|
# corresponds to Unicode U+FEFF in UTF-16 on big endian
|
||||||
# platforms == ZERO WIDTH NO-BREAK SPACE
|
# platforms == ZERO WIDTH NO-BREAK SPACE
|
||||||
BOM_LE = BOM32_LE = '\377\376'
|
BOM_LE = BOM32_LE = '\377\376'
|
||||||
# corresponds to Unicode U+FFFE in UTF-16 on little endian
|
# corresponds to Unicode U+FFFE in UTF-16 on little endian
|
||||||
# platforms == defined as being an illegal Unicode character
|
# platforms == defined as being an illegal Unicode character
|
||||||
|
|
||||||
#
|
#
|
||||||
# 64-bit Byte Order Marks
|
# 64-bit Byte Order Marks
|
||||||
#
|
#
|
||||||
BOM64_BE = '\000\000\376\377'
|
BOM64_BE = '\000\000\376\377'
|
||||||
# corresponds to Unicode U+0000FEFF in UCS-4
|
# corresponds to Unicode U+0000FEFF in UCS-4
|
||||||
BOM64_LE = '\377\376\000\000'
|
BOM64_LE = '\377\376\000\000'
|
||||||
# corresponds to Unicode U+0000FFFE in UCS-4
|
# corresponds to Unicode U+0000FFFE in UCS-4
|
||||||
|
|
||||||
|
|
||||||
### Codec base classes (defining the API)
|
### Codec base classes (defining the API)
|
||||||
|
@ -547,7 +547,7 @@ def make_identity_dict(rng):
|
||||||
|
|
||||||
Return a dictionary where elements of the rng sequence are
|
Return a dictionary where elements of the rng sequence are
|
||||||
mapped to themselves.
|
mapped to themselves.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
res = {}
|
res = {}
|
||||||
for i in rng:
|
for i in rng:
|
||||||
|
|
|
@ -1,9 +1,9 @@
|
||||||
"""Execute shell commands via os.popen() and return status, output.
|
"""Execute shell commands via os.popen() and return status, output.
|
||||||
|
|
||||||
Interface summary:
|
Interface summary:
|
||||||
|
|
||||||
import commands
|
import commands
|
||||||
|
|
||||||
outtext = commands.getoutput(cmd)
|
outtext = commands.getoutput(cmd)
|
||||||
(exitstatus, outtext) = commands.getstatusoutput(cmd)
|
(exitstatus, outtext) = commands.getstatusoutput(cmd)
|
||||||
outtext = commands.getstatus(file) # returns output of "ls -ld file"
|
outtext = commands.getstatus(file) # returns output of "ls -ld file"
|
||||||
|
@ -11,7 +11,7 @@ Interface summary:
|
||||||
A trailing newline is removed from the output string.
|
A trailing newline is removed from the output string.
|
||||||
|
|
||||||
Encapsulates the basic operation:
|
Encapsulates the basic operation:
|
||||||
|
|
||||||
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
|
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
|
||||||
text = pipe.read()
|
text = pipe.read()
|
||||||
sts = pipe.close()
|
sts = pipe.close()
|
||||||
|
|
356
Lib/copy.py
356
Lib/copy.py
|
@ -2,10 +2,10 @@
|
||||||
|
|
||||||
Interface summary:
|
Interface summary:
|
||||||
|
|
||||||
import copy
|
import copy
|
||||||
|
|
||||||
x = copy.copy(y) # make a shallow copy of y
|
x = copy.copy(y) # make a shallow copy of y
|
||||||
x = copy.deepcopy(y) # make a deep copy of y
|
x = copy.deepcopy(y) # make a deep copy of y
|
||||||
|
|
||||||
For module specific errors, copy.error is raised.
|
For module specific errors, copy.error is raised.
|
||||||
|
|
||||||
|
@ -53,8 +53,8 @@ __getstate__() and __setstate__(). See the documentation for module
|
||||||
import types
|
import types
|
||||||
|
|
||||||
class Error(Exception):
|
class Error(Exception):
|
||||||
pass
|
pass
|
||||||
error = Error # backward compatibility
|
error = Error # backward compatibility
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from org.python.core import PyStringMap
|
from org.python.core import PyStringMap
|
||||||
|
@ -62,28 +62,28 @@ except ImportError:
|
||||||
PyStringMap = None
|
PyStringMap = None
|
||||||
|
|
||||||
def copy(x):
|
def copy(x):
|
||||||
"""Shallow copy operation on arbitrary Python objects.
|
"""Shallow copy operation on arbitrary Python objects.
|
||||||
|
|
||||||
See the module's __doc__ string for more info.
|
See the module's __doc__ string for more info.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
try:
|
try:
|
||||||
copierfunction = _copy_dispatch[type(x)]
|
copierfunction = _copy_dispatch[type(x)]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
try:
|
try:
|
||||||
copier = x.__copy__
|
copier = x.__copy__
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
raise error, \
|
raise error, \
|
||||||
"un(shallow)copyable object of type %s" % type(x)
|
"un(shallow)copyable object of type %s" % type(x)
|
||||||
y = copier()
|
y = copier()
|
||||||
else:
|
else:
|
||||||
y = copierfunction(x)
|
y = copierfunction(x)
|
||||||
return y
|
return y
|
||||||
|
|
||||||
_copy_dispatch = d = {}
|
_copy_dispatch = d = {}
|
||||||
|
|
||||||
def _copy_atomic(x):
|
def _copy_atomic(x):
|
||||||
return x
|
return x
|
||||||
d[types.NoneType] = _copy_atomic
|
d[types.NoneType] = _copy_atomic
|
||||||
d[types.IntType] = _copy_atomic
|
d[types.IntType] = _copy_atomic
|
||||||
d[types.LongType] = _copy_atomic
|
d[types.LongType] = _copy_atomic
|
||||||
|
@ -91,78 +91,78 @@ d[types.FloatType] = _copy_atomic
|
||||||
d[types.StringType] = _copy_atomic
|
d[types.StringType] = _copy_atomic
|
||||||
d[types.UnicodeType] = _copy_atomic
|
d[types.UnicodeType] = _copy_atomic
|
||||||
try:
|
try:
|
||||||
d[types.CodeType] = _copy_atomic
|
d[types.CodeType] = _copy_atomic
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
pass
|
pass
|
||||||
d[types.TypeType] = _copy_atomic
|
d[types.TypeType] = _copy_atomic
|
||||||
d[types.XRangeType] = _copy_atomic
|
d[types.XRangeType] = _copy_atomic
|
||||||
d[types.ClassType] = _copy_atomic
|
d[types.ClassType] = _copy_atomic
|
||||||
|
|
||||||
def _copy_list(x):
|
def _copy_list(x):
|
||||||
return x[:]
|
return x[:]
|
||||||
d[types.ListType] = _copy_list
|
d[types.ListType] = _copy_list
|
||||||
|
|
||||||
def _copy_tuple(x):
|
def _copy_tuple(x):
|
||||||
return x[:]
|
return x[:]
|
||||||
d[types.TupleType] = _copy_tuple
|
d[types.TupleType] = _copy_tuple
|
||||||
|
|
||||||
def _copy_dict(x):
|
def _copy_dict(x):
|
||||||
return x.copy()
|
return x.copy()
|
||||||
d[types.DictionaryType] = _copy_dict
|
d[types.DictionaryType] = _copy_dict
|
||||||
if PyStringMap is not None:
|
if PyStringMap is not None:
|
||||||
d[PyStringMap] = _copy_dict
|
d[PyStringMap] = _copy_dict
|
||||||
|
|
||||||
def _copy_inst(x):
|
def _copy_inst(x):
|
||||||
if hasattr(x, '__copy__'):
|
if hasattr(x, '__copy__'):
|
||||||
return x.__copy__()
|
return x.__copy__()
|
||||||
if hasattr(x, '__getinitargs__'):
|
if hasattr(x, '__getinitargs__'):
|
||||||
args = x.__getinitargs__()
|
args = x.__getinitargs__()
|
||||||
y = apply(x.__class__, args)
|
y = apply(x.__class__, args)
|
||||||
else:
|
else:
|
||||||
y = _EmptyClass()
|
y = _EmptyClass()
|
||||||
y.__class__ = x.__class__
|
y.__class__ = x.__class__
|
||||||
if hasattr(x, '__getstate__'):
|
if hasattr(x, '__getstate__'):
|
||||||
state = x.__getstate__()
|
state = x.__getstate__()
|
||||||
else:
|
else:
|
||||||
state = x.__dict__
|
state = x.__dict__
|
||||||
if hasattr(y, '__setstate__'):
|
if hasattr(y, '__setstate__'):
|
||||||
y.__setstate__(state)
|
y.__setstate__(state)
|
||||||
else:
|
else:
|
||||||
y.__dict__.update(state)
|
y.__dict__.update(state)
|
||||||
return y
|
return y
|
||||||
d[types.InstanceType] = _copy_inst
|
d[types.InstanceType] = _copy_inst
|
||||||
|
|
||||||
del d
|
del d
|
||||||
|
|
||||||
def deepcopy(x, memo = None):
|
def deepcopy(x, memo = None):
|
||||||
"""Deep copy operation on arbitrary Python objects.
|
"""Deep copy operation on arbitrary Python objects.
|
||||||
|
|
||||||
See the module's __doc__ string for more info.
|
See the module's __doc__ string for more info.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if memo is None:
|
if memo is None:
|
||||||
memo = {}
|
memo = {}
|
||||||
d = id(x)
|
d = id(x)
|
||||||
if memo.has_key(d):
|
if memo.has_key(d):
|
||||||
return memo[d]
|
return memo[d]
|
||||||
try:
|
try:
|
||||||
copierfunction = _deepcopy_dispatch[type(x)]
|
copierfunction = _deepcopy_dispatch[type(x)]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
try:
|
try:
|
||||||
copier = x.__deepcopy__
|
copier = x.__deepcopy__
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
raise error, \
|
raise error, \
|
||||||
"un-deep-copyable object of type %s" % type(x)
|
"un-deep-copyable object of type %s" % type(x)
|
||||||
y = copier(memo)
|
y = copier(memo)
|
||||||
else:
|
else:
|
||||||
y = copierfunction(x, memo)
|
y = copierfunction(x, memo)
|
||||||
memo[d] = y
|
memo[d] = y
|
||||||
return y
|
return y
|
||||||
|
|
||||||
_deepcopy_dispatch = d = {}
|
_deepcopy_dispatch = d = {}
|
||||||
|
|
||||||
def _deepcopy_atomic(x, memo):
|
def _deepcopy_atomic(x, memo):
|
||||||
return x
|
return x
|
||||||
d[types.NoneType] = _deepcopy_atomic
|
d[types.NoneType] = _deepcopy_atomic
|
||||||
d[types.IntType] = _deepcopy_atomic
|
d[types.IntType] = _deepcopy_atomic
|
||||||
d[types.LongType] = _deepcopy_atomic
|
d[types.LongType] = _deepcopy_atomic
|
||||||
|
@ -174,81 +174,81 @@ d[types.TypeType] = _deepcopy_atomic
|
||||||
d[types.XRangeType] = _deepcopy_atomic
|
d[types.XRangeType] = _deepcopy_atomic
|
||||||
|
|
||||||
def _deepcopy_list(x, memo):
|
def _deepcopy_list(x, memo):
|
||||||
y = []
|
y = []
|
||||||
memo[id(x)] = y
|
memo[id(x)] = y
|
||||||
for a in x:
|
for a in x:
|
||||||
y.append(deepcopy(a, memo))
|
y.append(deepcopy(a, memo))
|
||||||
return y
|
return y
|
||||||
d[types.ListType] = _deepcopy_list
|
d[types.ListType] = _deepcopy_list
|
||||||
|
|
||||||
def _deepcopy_tuple(x, memo):
|
def _deepcopy_tuple(x, memo):
|
||||||
y = []
|
y = []
|
||||||
for a in x:
|
for a in x:
|
||||||
y.append(deepcopy(a, memo))
|
y.append(deepcopy(a, memo))
|
||||||
d = id(x)
|
d = id(x)
|
||||||
try:
|
try:
|
||||||
return memo[d]
|
return memo[d]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
pass
|
pass
|
||||||
for i in range(len(x)):
|
for i in range(len(x)):
|
||||||
if x[i] is not y[i]:
|
if x[i] is not y[i]:
|
||||||
y = tuple(y)
|
y = tuple(y)
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
y = x
|
y = x
|
||||||
memo[d] = y
|
memo[d] = y
|
||||||
return y
|
return y
|
||||||
d[types.TupleType] = _deepcopy_tuple
|
d[types.TupleType] = _deepcopy_tuple
|
||||||
|
|
||||||
def _deepcopy_dict(x, memo):
|
def _deepcopy_dict(x, memo):
|
||||||
y = {}
|
y = {}
|
||||||
memo[id(x)] = y
|
memo[id(x)] = y
|
||||||
for key in x.keys():
|
for key in x.keys():
|
||||||
y[deepcopy(key, memo)] = deepcopy(x[key], memo)
|
y[deepcopy(key, memo)] = deepcopy(x[key], memo)
|
||||||
return y
|
return y
|
||||||
d[types.DictionaryType] = _deepcopy_dict
|
d[types.DictionaryType] = _deepcopy_dict
|
||||||
if PyStringMap is not None:
|
if PyStringMap is not None:
|
||||||
d[PyStringMap] = _deepcopy_dict
|
d[PyStringMap] = _deepcopy_dict
|
||||||
|
|
||||||
def _keep_alive(x, memo):
|
def _keep_alive(x, memo):
|
||||||
"""Keeps a reference to the object x in the memo.
|
"""Keeps a reference to the object x in the memo.
|
||||||
|
|
||||||
Because we remember objects by their id, we have
|
Because we remember objects by their id, we have
|
||||||
to assure that possibly temporary objects are kept
|
to assure that possibly temporary objects are kept
|
||||||
alive by referencing them.
|
alive by referencing them.
|
||||||
We store a reference at the id of the memo, which should
|
We store a reference at the id of the memo, which should
|
||||||
normally not be used unless someone tries to deepcopy
|
normally not be used unless someone tries to deepcopy
|
||||||
the memo itself...
|
the memo itself...
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
memo[id(memo)].append(x)
|
memo[id(memo)].append(x)
|
||||||
except KeyError:
|
except KeyError:
|
||||||
# aha, this is the first one :-)
|
# aha, this is the first one :-)
|
||||||
memo[id(memo)]=[x]
|
memo[id(memo)]=[x]
|
||||||
|
|
||||||
def _deepcopy_inst(x, memo):
|
def _deepcopy_inst(x, memo):
|
||||||
if hasattr(x, '__deepcopy__'):
|
if hasattr(x, '__deepcopy__'):
|
||||||
return x.__deepcopy__(memo)
|
return x.__deepcopy__(memo)
|
||||||
if hasattr(x, '__getinitargs__'):
|
if hasattr(x, '__getinitargs__'):
|
||||||
args = x.__getinitargs__()
|
args = x.__getinitargs__()
|
||||||
_keep_alive(args, memo)
|
_keep_alive(args, memo)
|
||||||
args = deepcopy(args, memo)
|
args = deepcopy(args, memo)
|
||||||
y = apply(x.__class__, args)
|
y = apply(x.__class__, args)
|
||||||
else:
|
else:
|
||||||
y = _EmptyClass()
|
y = _EmptyClass()
|
||||||
y.__class__ = x.__class__
|
y.__class__ = x.__class__
|
||||||
memo[id(x)] = y
|
memo[id(x)] = y
|
||||||
if hasattr(x, '__getstate__'):
|
if hasattr(x, '__getstate__'):
|
||||||
state = x.__getstate__()
|
state = x.__getstate__()
|
||||||
_keep_alive(state, memo)
|
_keep_alive(state, memo)
|
||||||
else:
|
else:
|
||||||
state = x.__dict__
|
state = x.__dict__
|
||||||
state = deepcopy(state, memo)
|
state = deepcopy(state, memo)
|
||||||
if hasattr(y, '__setstate__'):
|
if hasattr(y, '__setstate__'):
|
||||||
y.__setstate__(state)
|
y.__setstate__(state)
|
||||||
else:
|
else:
|
||||||
y.__dict__.update(state)
|
y.__dict__.update(state)
|
||||||
return y
|
return y
|
||||||
d[types.InstanceType] = _deepcopy_inst
|
d[types.InstanceType] = _deepcopy_inst
|
||||||
|
|
||||||
del d
|
del d
|
||||||
|
@ -260,57 +260,57 @@ class _EmptyClass:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def _test():
|
def _test():
|
||||||
l = [None, 1, 2L, 3.14, 'xyzzy', (1, 2L), [3.14, 'abc'],
|
l = [None, 1, 2L, 3.14, 'xyzzy', (1, 2L), [3.14, 'abc'],
|
||||||
{'abc': 'ABC'}, (), [], {}]
|
{'abc': 'ABC'}, (), [], {}]
|
||||||
l1 = copy(l)
|
l1 = copy(l)
|
||||||
print l1==l
|
print l1==l
|
||||||
l1 = map(copy, l)
|
l1 = map(copy, l)
|
||||||
print l1==l
|
print l1==l
|
||||||
l1 = deepcopy(l)
|
l1 = deepcopy(l)
|
||||||
print l1==l
|
print l1==l
|
||||||
class C:
|
class C:
|
||||||
def __init__(self, arg=None):
|
def __init__(self, arg=None):
|
||||||
self.a = 1
|
self.a = 1
|
||||||
self.arg = arg
|
self.arg = arg
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
import sys
|
import sys
|
||||||
file = sys.argv[0]
|
file = sys.argv[0]
|
||||||
else:
|
else:
|
||||||
file = __file__
|
file = __file__
|
||||||
self.fp = open(file)
|
self.fp = open(file)
|
||||||
self.fp.close()
|
self.fp.close()
|
||||||
def __getstate__(self):
|
def __getstate__(self):
|
||||||
return {'a': self.a, 'arg': self.arg}
|
return {'a': self.a, 'arg': self.arg}
|
||||||
def __setstate__(self, state):
|
def __setstate__(self, state):
|
||||||
for key in state.keys():
|
for key in state.keys():
|
||||||
setattr(self, key, state[key])
|
setattr(self, key, state[key])
|
||||||
def __deepcopy__(self, memo = None):
|
def __deepcopy__(self, memo = None):
|
||||||
new = self.__class__(deepcopy(self.arg, memo))
|
new = self.__class__(deepcopy(self.arg, memo))
|
||||||
new.a = self.a
|
new.a = self.a
|
||||||
return new
|
return new
|
||||||
c = C('argument sketch')
|
c = C('argument sketch')
|
||||||
l.append(c)
|
l.append(c)
|
||||||
l2 = copy(l)
|
l2 = copy(l)
|
||||||
print l == l2
|
print l == l2
|
||||||
print l
|
print l
|
||||||
print l2
|
print l2
|
||||||
l2 = deepcopy(l)
|
l2 = deepcopy(l)
|
||||||
print l == l2
|
print l == l2
|
||||||
print l
|
print l
|
||||||
print l2
|
print l2
|
||||||
l.append({l[1]: l, 'xyz': l[2]})
|
l.append({l[1]: l, 'xyz': l[2]})
|
||||||
l3 = copy(l)
|
l3 = copy(l)
|
||||||
import repr
|
import repr
|
||||||
print map(repr.repr, l)
|
print map(repr.repr, l)
|
||||||
print map(repr.repr, l1)
|
print map(repr.repr, l1)
|
||||||
print map(repr.repr, l2)
|
print map(repr.repr, l2)
|
||||||
print map(repr.repr, l3)
|
print map(repr.repr, l3)
|
||||||
l3 = deepcopy(l)
|
l3 = deepcopy(l)
|
||||||
import repr
|
import repr
|
||||||
print map(repr.repr, l)
|
print map(repr.repr, l)
|
||||||
print map(repr.repr, l1)
|
print map(repr.repr, l1)
|
||||||
print map(repr.repr, l2)
|
print map(repr.repr, l2)
|
||||||
print map(repr.repr, l3)
|
print map(repr.repr, l3)
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
_test()
|
_test()
|
||||||
|
|
318
Lib/dis.py
318
Lib/dis.py
|
@ -5,115 +5,115 @@ import string
|
||||||
import types
|
import types
|
||||||
|
|
||||||
def dis(x=None):
|
def dis(x=None):
|
||||||
"""Disassemble classes, methods, functions, or code.
|
"""Disassemble classes, methods, functions, or code.
|
||||||
|
|
||||||
With no argument, disassemble the last traceback.
|
With no argument, disassemble the last traceback.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if not x:
|
if not x:
|
||||||
distb()
|
distb()
|
||||||
return
|
return
|
||||||
if type(x) is types.InstanceType:
|
if type(x) is types.InstanceType:
|
||||||
x = x.__class__
|
x = x.__class__
|
||||||
if hasattr(x, '__dict__'):
|
if hasattr(x, '__dict__'):
|
||||||
items = x.__dict__.items()
|
items = x.__dict__.items()
|
||||||
items.sort()
|
items.sort()
|
||||||
for name, x1 in items:
|
for name, x1 in items:
|
||||||
if type(x1) in (types.MethodType,
|
if type(x1) in (types.MethodType,
|
||||||
types.FunctionType,
|
types.FunctionType,
|
||||||
types.CodeType):
|
types.CodeType):
|
||||||
print "Disassembly of %s:" % name
|
print "Disassembly of %s:" % name
|
||||||
try:
|
try:
|
||||||
dis(x1)
|
dis(x1)
|
||||||
except TypeError, msg:
|
except TypeError, msg:
|
||||||
print "Sorry:", msg
|
print "Sorry:", msg
|
||||||
print
|
print
|
||||||
else:
|
else:
|
||||||
if hasattr(x, 'im_func'):
|
if hasattr(x, 'im_func'):
|
||||||
x = x.im_func
|
x = x.im_func
|
||||||
if hasattr(x, 'func_code'):
|
if hasattr(x, 'func_code'):
|
||||||
x = x.func_code
|
x = x.func_code
|
||||||
if hasattr(x, 'co_code'):
|
if hasattr(x, 'co_code'):
|
||||||
disassemble(x)
|
disassemble(x)
|
||||||
else:
|
else:
|
||||||
raise TypeError, \
|
raise TypeError, \
|
||||||
"don't know how to disassemble %s objects" % \
|
"don't know how to disassemble %s objects" % \
|
||||||
type(x).__name__
|
type(x).__name__
|
||||||
|
|
||||||
def distb(tb=None):
|
def distb(tb=None):
|
||||||
"""Disassemble a traceback (default: last traceback)."""
|
"""Disassemble a traceback (default: last traceback)."""
|
||||||
if not tb:
|
if not tb:
|
||||||
try:
|
try:
|
||||||
tb = sys.last_traceback
|
tb = sys.last_traceback
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
raise RuntimeError, "no last traceback to disassemble"
|
raise RuntimeError, "no last traceback to disassemble"
|
||||||
while tb.tb_next: tb = tb.tb_next
|
while tb.tb_next: tb = tb.tb_next
|
||||||
disassemble(tb.tb_frame.f_code, tb.tb_lasti)
|
disassemble(tb.tb_frame.f_code, tb.tb_lasti)
|
||||||
|
|
||||||
def disassemble(co, lasti=-1):
|
def disassemble(co, lasti=-1):
|
||||||
"""Disassemble a code object."""
|
"""Disassemble a code object."""
|
||||||
code = co.co_code
|
code = co.co_code
|
||||||
labels = findlabels(code)
|
labels = findlabels(code)
|
||||||
n = len(code)
|
n = len(code)
|
||||||
i = 0
|
i = 0
|
||||||
extended_arg = 0
|
extended_arg = 0
|
||||||
while i < n:
|
while i < n:
|
||||||
c = code[i]
|
c = code[i]
|
||||||
op = ord(c)
|
op = ord(c)
|
||||||
if op == SET_LINENO and i > 0: print # Extra blank line
|
if op == SET_LINENO and i > 0: print # Extra blank line
|
||||||
if i == lasti: print '-->',
|
if i == lasti: print '-->',
|
||||||
else: print ' ',
|
else: print ' ',
|
||||||
if i in labels: print '>>',
|
if i in labels: print '>>',
|
||||||
else: print ' ',
|
else: print ' ',
|
||||||
print string.rjust(`i`, 4),
|
print string.rjust(`i`, 4),
|
||||||
print string.ljust(opname[op], 20),
|
print string.ljust(opname[op], 20),
|
||||||
i = i+1
|
i = i+1
|
||||||
if op >= HAVE_ARGUMENT:
|
if op >= HAVE_ARGUMENT:
|
||||||
oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
|
oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
|
||||||
extended_arg = 0
|
extended_arg = 0
|
||||||
i = i+2
|
i = i+2
|
||||||
if op == EXTENDED_ARG:
|
if op == EXTENDED_ARG:
|
||||||
extended_arg = oparg*65536L
|
extended_arg = oparg*65536L
|
||||||
print string.rjust(`oparg`, 5),
|
print string.rjust(`oparg`, 5),
|
||||||
if op in hasconst:
|
if op in hasconst:
|
||||||
print '(' + `co.co_consts[oparg]` + ')',
|
print '(' + `co.co_consts[oparg]` + ')',
|
||||||
elif op in hasname:
|
elif op in hasname:
|
||||||
print '(' + co.co_names[oparg] + ')',
|
print '(' + co.co_names[oparg] + ')',
|
||||||
elif op in hasjrel:
|
elif op in hasjrel:
|
||||||
print '(to ' + `i + oparg` + ')',
|
print '(to ' + `i + oparg` + ')',
|
||||||
elif op in haslocal:
|
elif op in haslocal:
|
||||||
print '(' + co.co_varnames[oparg] + ')',
|
print '(' + co.co_varnames[oparg] + ')',
|
||||||
elif op in hascompare:
|
elif op in hascompare:
|
||||||
print '(' + cmp_op[oparg] + ')',
|
print '(' + cmp_op[oparg] + ')',
|
||||||
print
|
print
|
||||||
|
|
||||||
disco = disassemble # XXX For backwards compatibility
|
disco = disassemble # XXX For backwards compatibility
|
||||||
|
|
||||||
def findlabels(code):
|
def findlabels(code):
|
||||||
"""Detect all offsets in a byte code which are jump targets.
|
"""Detect all offsets in a byte code which are jump targets.
|
||||||
|
|
||||||
Return the list of offsets.
|
Return the list of offsets.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
labels = []
|
labels = []
|
||||||
n = len(code)
|
n = len(code)
|
||||||
i = 0
|
i = 0
|
||||||
while i < n:
|
while i < n:
|
||||||
c = code[i]
|
c = code[i]
|
||||||
op = ord(c)
|
op = ord(c)
|
||||||
i = i+1
|
i = i+1
|
||||||
if op >= HAVE_ARGUMENT:
|
if op >= HAVE_ARGUMENT:
|
||||||
oparg = ord(code[i]) + ord(code[i+1])*256
|
oparg = ord(code[i]) + ord(code[i+1])*256
|
||||||
i = i+2
|
i = i+2
|
||||||
label = -1
|
label = -1
|
||||||
if op in hasjrel:
|
if op in hasjrel:
|
||||||
label = i+oparg
|
label = i+oparg
|
||||||
elif op in hasjabs:
|
elif op in hasjabs:
|
||||||
label = oparg
|
label = oparg
|
||||||
if label >= 0:
|
if label >= 0:
|
||||||
if label not in labels:
|
if label not in labels:
|
||||||
labels.append(label)
|
labels.append(label)
|
||||||
return labels
|
return labels
|
||||||
|
|
||||||
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
|
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
|
||||||
'is not', 'exception match', 'BAD')
|
'is not', 'exception match', 'BAD')
|
||||||
|
@ -129,19 +129,19 @@ opname = [''] * 256
|
||||||
for op in range(256): opname[op] = '<' + `op` + '>'
|
for op in range(256): opname[op] = '<' + `op` + '>'
|
||||||
|
|
||||||
def def_op(name, op):
|
def def_op(name, op):
|
||||||
opname[op] = name
|
opname[op] = name
|
||||||
|
|
||||||
def name_op(name, op):
|
def name_op(name, op):
|
||||||
opname[op] = name
|
opname[op] = name
|
||||||
hasname.append(op)
|
hasname.append(op)
|
||||||
|
|
||||||
def jrel_op(name, op):
|
def jrel_op(name, op):
|
||||||
opname[op] = name
|
opname[op] = name
|
||||||
hasjrel.append(op)
|
hasjrel.append(op)
|
||||||
|
|
||||||
def jabs_op(name, op):
|
def jabs_op(name, op):
|
||||||
opname[op] = name
|
opname[op] = name
|
||||||
hasjabs.append(op)
|
hasjabs.append(op)
|
||||||
|
|
||||||
# Instruction opcodes for compiled code
|
# Instruction opcodes for compiled code
|
||||||
|
|
||||||
|
@ -219,49 +219,49 @@ def_op('POP_BLOCK', 87)
|
||||||
def_op('END_FINALLY', 88)
|
def_op('END_FINALLY', 88)
|
||||||
def_op('BUILD_CLASS', 89)
|
def_op('BUILD_CLASS', 89)
|
||||||
|
|
||||||
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
|
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
|
||||||
|
|
||||||
name_op('STORE_NAME', 90) # Index in name list
|
name_op('STORE_NAME', 90) # Index in name list
|
||||||
name_op('DELETE_NAME', 91) # ""
|
name_op('DELETE_NAME', 91) # ""
|
||||||
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
|
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
|
||||||
|
|
||||||
name_op('STORE_ATTR', 95) # Index in name list
|
name_op('STORE_ATTR', 95) # Index in name list
|
||||||
name_op('DELETE_ATTR', 96) # ""
|
name_op('DELETE_ATTR', 96) # ""
|
||||||
name_op('STORE_GLOBAL', 97) # ""
|
name_op('STORE_GLOBAL', 97) # ""
|
||||||
name_op('DELETE_GLOBAL', 98) # ""
|
name_op('DELETE_GLOBAL', 98) # ""
|
||||||
def_op('DUP_TOPX', 99) # number of items to duplicate
|
def_op('DUP_TOPX', 99) # number of items to duplicate
|
||||||
def_op('LOAD_CONST', 100) # Index in const list
|
def_op('LOAD_CONST', 100) # Index in const list
|
||||||
hasconst.append(100)
|
hasconst.append(100)
|
||||||
name_op('LOAD_NAME', 101) # Index in name list
|
name_op('LOAD_NAME', 101) # Index in name list
|
||||||
def_op('BUILD_TUPLE', 102) # Number of tuple items
|
def_op('BUILD_TUPLE', 102) # Number of tuple items
|
||||||
def_op('BUILD_LIST', 103) # Number of list items
|
def_op('BUILD_LIST', 103) # Number of list items
|
||||||
def_op('BUILD_MAP', 104) # Always zero for now
|
def_op('BUILD_MAP', 104) # Always zero for now
|
||||||
name_op('LOAD_ATTR', 105) # Index in name list
|
name_op('LOAD_ATTR', 105) # Index in name list
|
||||||
def_op('COMPARE_OP', 106) # Comparison operator
|
def_op('COMPARE_OP', 106) # Comparison operator
|
||||||
hascompare.append(106)
|
hascompare.append(106)
|
||||||
name_op('IMPORT_NAME', 107) # Index in name list
|
name_op('IMPORT_NAME', 107) # Index in name list
|
||||||
name_op('IMPORT_FROM', 108) # Index in name list
|
name_op('IMPORT_FROM', 108) # Index in name list
|
||||||
|
|
||||||
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
|
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
|
||||||
jrel_op('JUMP_IF_FALSE', 111) # ""
|
jrel_op('JUMP_IF_FALSE', 111) # ""
|
||||||
jrel_op('JUMP_IF_TRUE', 112) # ""
|
jrel_op('JUMP_IF_TRUE', 112) # ""
|
||||||
jabs_op('JUMP_ABSOLUTE', 113) # Target byte offset from beginning of code
|
jabs_op('JUMP_ABSOLUTE', 113) # Target byte offset from beginning of code
|
||||||
jrel_op('FOR_LOOP', 114) # Number of bytes to skip
|
jrel_op('FOR_LOOP', 114) # Number of bytes to skip
|
||||||
|
|
||||||
name_op('LOAD_GLOBAL', 116) # Index in name list
|
name_op('LOAD_GLOBAL', 116) # Index in name list
|
||||||
|
|
||||||
jrel_op('SETUP_LOOP', 120) # Distance to target address
|
jrel_op('SETUP_LOOP', 120) # Distance to target address
|
||||||
jrel_op('SETUP_EXCEPT', 121) # ""
|
jrel_op('SETUP_EXCEPT', 121) # ""
|
||||||
jrel_op('SETUP_FINALLY', 122) # ""
|
jrel_op('SETUP_FINALLY', 122) # ""
|
||||||
|
|
||||||
def_op('LOAD_FAST', 124) # Local variable number
|
def_op('LOAD_FAST', 124) # Local variable number
|
||||||
haslocal.append(124)
|
haslocal.append(124)
|
||||||
def_op('STORE_FAST', 125) # Local variable number
|
def_op('STORE_FAST', 125) # Local variable number
|
||||||
haslocal.append(125)
|
haslocal.append(125)
|
||||||
def_op('DELETE_FAST', 126) # Local variable number
|
def_op('DELETE_FAST', 126) # Local variable number
|
||||||
haslocal.append(126)
|
haslocal.append(126)
|
||||||
|
|
||||||
def_op('SET_LINENO', 127) # Current line number
|
def_op('SET_LINENO', 127) # Current line number
|
||||||
SET_LINENO = 127
|
SET_LINENO = 127
|
||||||
|
|
||||||
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
|
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
|
||||||
|
@ -273,31 +273,31 @@ def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
|
||||||
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
|
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
|
||||||
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
|
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
|
||||||
|
|
||||||
def_op('EXTENDED_ARG', 143)
|
def_op('EXTENDED_ARG', 143)
|
||||||
EXTENDED_ARG = 143
|
EXTENDED_ARG = 143
|
||||||
|
|
||||||
def _test():
|
def _test():
|
||||||
"""Simple test program to disassemble a file."""
|
"""Simple test program to disassemble a file."""
|
||||||
if sys.argv[1:]:
|
if sys.argv[1:]:
|
||||||
if sys.argv[2:]:
|
if sys.argv[2:]:
|
||||||
sys.stderr.write("usage: python dis.py [-|file]\n")
|
sys.stderr.write("usage: python dis.py [-|file]\n")
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
fn = sys.argv[1]
|
fn = sys.argv[1]
|
||||||
if not fn or fn == "-":
|
if not fn or fn == "-":
|
||||||
fn = None
|
fn = None
|
||||||
else:
|
else:
|
||||||
fn = None
|
fn = None
|
||||||
if not fn:
|
if not fn:
|
||||||
f = sys.stdin
|
f = sys.stdin
|
||||||
else:
|
else:
|
||||||
f = open(fn)
|
f = open(fn)
|
||||||
source = f.read()
|
source = f.read()
|
||||||
if fn:
|
if fn:
|
||||||
f.close()
|
f.close()
|
||||||
else:
|
else:
|
||||||
fn = "<stdin>"
|
fn = "<stdin>"
|
||||||
code = compile(source, fn, "exec")
|
code = compile(source, fn, "exec")
|
||||||
dis(code)
|
dis(code)
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
_test()
|
_test()
|
||||||
|
|
|
@ -10,8 +10,8 @@ def normcase(s):
|
||||||
backslashes.
|
backslashes.
|
||||||
Other normalizations (such as optimizing '../' away) are not allowed
|
Other normalizations (such as optimizing '../' away) are not allowed
|
||||||
(this is done by normpath).
|
(this is done by normpath).
|
||||||
Previously, this version mapped invalid consecutive characters to a
|
Previously, this version mapped invalid consecutive characters to a
|
||||||
single '_', but this has been removed. This functionality should
|
single '_', but this has been removed. This functionality should
|
||||||
possibly be added as a new function."""
|
possibly be added as a new function."""
|
||||||
|
|
||||||
return s.replace("/", "\\").lower()
|
return s.replace("/", "\\").lower()
|
||||||
|
|
212
Lib/dumbdbm.py
212
Lib/dumbdbm.py
|
@ -28,117 +28,117 @@ _open = __builtin__.open

_BLOCKSIZE = 512

error = IOError                         # For anydbm

class _Database:

    def __init__(self, file):
        self._dirfile = file + '.dir'
        self._datfile = file + '.dat'
        self._bakfile = file + '.bak'
        # Mod by Jack: create data file if needed
        try:
            f = _open(self._datfile, 'r')
        except IOError:
            f = _open(self._datfile, 'w')
        f.close()
        self._update()

    def _update(self):
        self._index = {}
        try:
            f = _open(self._dirfile)
        except IOError:
            pass
        else:
            while 1:
                line = f.readline().rstrip()
                if not line: break
                key, (pos, siz) = eval(line)
                self._index[key] = (pos, siz)
            f.close()

    def _commit(self):
        try: _os.unlink(self._bakfile)
        except _os.error: pass
        try: _os.rename(self._dirfile, self._bakfile)
        except _os.error: pass
        f = _open(self._dirfile, 'w')
        for key, (pos, siz) in self._index.items():
            f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
        f.close()

    def __getitem__(self, key):
        pos, siz = self._index[key]     # may raise KeyError
        f = _open(self._datfile, 'rb')
        f.seek(pos)
        dat = f.read(siz)
        f.close()
        return dat

    def _addval(self, val):
        f = _open(self._datfile, 'rb+')
        f.seek(0, 2)
        pos = int(f.tell())
## Does not work under MW compiler
##              pos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
##              f.seek(pos)
        npos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
        f.write('\0'*(npos-pos))
        pos = npos

        f.write(val)
        f.close()
        return (pos, len(val))

    def _setval(self, pos, val):
        f = _open(self._datfile, 'rb+')
        f.seek(pos)
        f.write(val)
        f.close()
        return (pos, len(val))

    def _addkey(self, key, (pos, siz)):
        self._index[key] = (pos, siz)
        f = _open(self._dirfile, 'a')
        f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
        f.close()

    def __setitem__(self, key, val):
        if not type(key) == type('') == type(val):
            raise TypeError, "keys and values must be strings"
        if not self._index.has_key(key):
            (pos, siz) = self._addval(val)
            self._addkey(key, (pos, siz))
        else:
            pos, siz = self._index[key]
            oldblocks = (siz + _BLOCKSIZE - 1) / _BLOCKSIZE
            newblocks = (len(val) + _BLOCKSIZE - 1) / _BLOCKSIZE
            if newblocks <= oldblocks:
                pos, siz = self._setval(pos, val)
                self._index[key] = pos, siz
            else:
                pos, siz = self._addval(val)
                self._index[key] = pos, siz

    def __delitem__(self, key):
        del self._index[key]
        self._commit()

    def keys(self):
        return self._index.keys()

    def has_key(self, key):
        return self._index.has_key(key)

    def __len__(self):
        return len(self._index)

    def close(self):
        self._index = None
        self._datfile = self._dirfile = self._bakfile = None


def open(file, flag = None, mode = None):
    # flag, mode arguments are currently ignored
    return _Database(file)

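For context, a minimal usage sketch of the module as a whole; the database name is illustrative, and on Python 3 the module lives at dbm.dumb rather than dumbdbm:

    import dumbdbm                     # Python 2 name; dbm.dumb on Python 3

    db = dumbdbm.open("example")       # creates example.dat / example.dir on demand
    db["spam"] = "eggs"                # keys and values must be strings
    print(db["spam"])                  # reads back via the in-memory index
    del db["spam"]                     # deletions rewrite the .dir index (_commit)
    db.close()
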
@ -291,9 +291,9 @@ def cmpfiles(a, b, common, shallow=1, use_statcache=0):

# Compare two files.
# Return:
#       0 for equal
#       1 for different
#       2 for funny cases (can't stat, etc.)
#
def _cmp(a, b, sh, st):
    try:

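A sketch of how cmpfiles() reports those three outcomes; the directory and file names are placeholders:

    import filecmp

    match, mismatch, errors = filecmp.cmpfiles("dir_a", "dir_b", ["a.txt", "b.txt"])
    print(match)      # names in common that compare equal
    print(mismatch)   # names in common that differ
    print(errors)     # names that could not be compared (missing, unreadable, ...)
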
136  Lib/fnmatch.py
@ -15,75 +15,75 @@ import re

_cache = {}

def fnmatch(name, pat):
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    *       matches everything
    ?       matches any single character
    [seq]   matches any character in seq
    [!seq]  matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you don't want this, use fnmatchcase(FILENAME, PATTERN).
    """

    import os
    name = os.path.normcase(name)
    pat = os.path.normcase(pat)
    return fnmatchcase(name, pat)

def fnmatchcase(name, pat):
    """Test whether FILENAME matches PATTERN, including case.

    This is a version of fnmatch() which doesn't case-normalize
    its arguments.
    """

    if not _cache.has_key(pat):
        res = translate(pat)
        _cache[pat] = re.compile(res)
    return _cache[pat].match(name) is not None

def translate(pat):
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.
    """

    i, n = 0, len(pat)
    res = ''
    while i < n:
        c = pat[i]
        i = i+1
        if c == '*':
            res = res + '.*'
        elif c == '?':
            res = res + '.'
        elif c == '[':
            j = i
            if j < n and pat[j] == '!':
                j = j+1
            if j < n and pat[j] == ']':
                j = j+1
            while j < n and pat[j] != ']':
                j = j+1
            if j >= n:
                res = res + '\\['
            else:
                stuff = pat[i:j]
                i = j+1
                if stuff[0] == '!':
                    stuff = '[^' + stuff[1:] + ']'
                elif stuff == '^'*len(stuff):
                    stuff = '\\^'
                else:
                    while stuff[0] == '^':
                        stuff = stuff[1:] + stuff[0]
                    stuff = '[' + stuff + ']'
                res = res + stuff
        else:
            res = res + re.escape(c)
    return res + "$"

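A small sketch of the pattern syntax documented above; the file names are invented for illustration:

    import fnmatch

    print(fnmatch.fnmatch("report.TXT", "*.txt"))             # True on case-insensitive platforms, False elsewhere
    print(fnmatch.fnmatchcase("data1.csv", "data[!0]*.csv"))  # True: '1' matches [!0]
    print(fnmatch.translate("*.py"))                          # the regular expression a pattern turns into
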
@ -9,13 +9,13 @@ controlled via formatter objects are horizontal alignment, font, and left

margin indentations.  A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well.  Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.

Writer objects encapsulate device interfaces.  Abstract devices, such as
file formats, are supported as well as physical devices.  The provided
implementations all work with abstract devices.  The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""

import string

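For context, a minimal sketch of the formatter/writer pairing the docstring describes, using the module's own DumbWriter; note the formatter module was removed in Python 3.10, so this reflects the older API:

    import formatter

    writer = formatter.DumbWriter()              # simplest concrete "device": plain text to stdout
    fmt = formatter.AbstractFormatter(writer)    # the formatter drives the writer
    fmt.add_flowing_data("Formatter objects reflow this text to the writer's column width. " * 3)
    fmt.end_paragraph(1)                         # an irreversible event: a paragraph break
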
@ -138,4 +138,3 @@ def test():

            print x, fix(x, digs), sci(x, digs)
    except (EOFError, KeyboardInterrupt):
        pass

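A sketch of the fix()/sci() helpers this test loop prints; fpformat was dropped in Python 3, and the sample values are arbitrary:

    from fpformat import fix, sci

    print(fix(3.14159, 2))    # '3.14' -- fixed-point with 2 digits after the decimal point
    print(sci(31415.9, 2))    # scientific notation with a 2-digit fraction, e.g. '3.14e+004'
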
1162  Lib/ftplib.py
File diff suppressed because it is too large