Mirror of https://github.com/python/cpython.git (synced 2025-10-09 16:34:44 +00:00)
Issue #22831: Use "with" to avoid possible fd leaks.
This commit is contained in:
parent ae2d667ae8
commit 46ba6c8563
10 changed files with 107 additions and 120 deletions
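Every hunk in this commit applies the same mechanical transformation: an explicit open()/close() pair becomes a with-statement, so the file descriptor is released even if the body raises or returns early. A minimal illustrative sketch of the before/after shape (not code from any of the patched files):

import io

# Before: if g.write() raises, neither close() runs and both
# descriptors stay open until the objects are garbage-collected.
def copy_manual(src, dst):
    f = io.open(src, 'rb')
    g = io.open(dst, 'wb')
    g.write(f.read())
    g.close()
    f.close()

# After: the context managers close both files on every exit path,
# which is the pattern this commit applies throughout the stdlib.
def copy_with(src, dst):
    with io.open(src, 'rb') as f, io.open(dst, 'wb') as g:
        g.write(f.read())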
@@ -229,14 +229,13 @@ def binhex(inp, out):
     finfo = getfileinfo(inp)
     ofp = BinHex(finfo, out)
 
-    ifp = io.open(inp, 'rb')
-    # XXXX Do textfile translation on non-mac systems
-    while True:
-        d = ifp.read(128000)
-        if not d: break
-        ofp.write(d)
-    ofp.close_data()
-    ifp.close()
+    with io.open(inp, 'rb') as ifp:
+        # XXXX Do textfile translation on non-mac systems
+        while True:
+            d = ifp.read(128000)
+            if not d: break
+            ofp.write(d)
+        ofp.close_data()
 
     ifp = openrsrc(inp, 'rb')
     while True:
@@ -449,13 +448,12 @@ def hexbin(inp, out):
     if not out:
         out = ifp.FName
 
-    ofp = io.open(out, 'wb')
-    # XXXX Do translation on non-mac systems
-    while True:
-        d = ifp.read(128000)
-        if not d: break
-        ofp.write(d)
-    ofp.close()
+    with io.open(out, 'wb') as ofp:
+        # XXXX Do translation on non-mac systems
+        while True:
+            d = ifp.read(128000)
+            if not d: break
+            ofp.write(d)
     ifp.close_data()
 
     d = ifp.read_rsrc(128000)
@@ -294,9 +294,8 @@ class Hook:
         (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
 
         try:
-            file = os.fdopen(fd, 'w')
-            file.write(doc)
-            file.close()
+            with os.fdopen(fd, 'w') as file:
+                file.write(doc)
             msg = '%s contains the description of this error.' % path
         except:
             msg = 'Tried to save traceback to %s, but failed.' % path
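The hunk above (the Hook class) wraps the file object returned by os.fdopen() around the descriptor from tempfile.mkstemp(); closing that object also closes the underlying OS descriptor, so the with-block covers both. A hedged standalone sketch of the same shape, using a hypothetical save_report() helper:

import os
import tempfile

# Hypothetical helper mirroring the Hook hunk: mkstemp() returns a raw
# OS descriptor, os.fdopen() wraps it, and leaving the with-block
# closes the wrapper and the descriptor even if write() raises.
def save_report(doc, logdir=None, suffix='.txt'):
    fd, path = tempfile.mkstemp(suffix=suffix, dir=logdir)
    try:
        with os.fdopen(fd, 'w') as file:
            file.write(doc)
        return '%s contains the description of this error.' % path
    except OSError:
        return 'Tried to save traceback to %s, but failed.' % path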
@@ -153,9 +153,9 @@ def whichdb(filename):
     except OSError:
         return None
 
-    # Read the start of the file -- the magic number
-    s16 = f.read(16)
-    f.close()
+    with f:
+        # Read the start of the file -- the magic number
+        s16 = f.read(16)
     s = s16[0:4]
 
     # Return "" if not at least 4 bytes
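In whichdb() the file was already opened inside a try/except, so the hunk uses the file object itself as the context manager instead of re-opening anything. A small illustrative sketch with a hypothetical read_magic() helper:

import io

def read_magic(filename):
    # open() stays inside try/except so an OSError here can be handled
    # separately from read errors.
    try:
        f = io.open(filename, "rb")
    except OSError:
        return None
    # An already-open file object is its own context manager:
    # "with f:" simply closes it when the block exits.
    with f:
        return f.read(16)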
@@ -1999,7 +1999,6 @@ class MozillaCookieJar(FileCookieJar):
 
         magic = f.readline()
         if not self.magic_re.search(magic):
-            f.close()
             raise LoadError(
                 "%r does not look like a Netscape format cookies file" %
                 filename)
@@ -163,40 +163,39 @@ def libc_ver(executable=sys.executable, lib='', version='',
     # here to work around problems with Cygwin not being
     # able to open symlinks for reading
     executable = os.path.realpath(executable)
-    f = open(executable, 'rb')
-    binary = f.read(chunksize)
-    pos = 0
-    while 1:
-        if b'libc' in binary or b'GLIBC' in binary:
-            m = _libc_search.search(binary, pos)
-        else:
-            m = None
-        if not m:
-            binary = f.read(chunksize)
-            if not binary:
-                break
-            pos = 0
-            continue
-        libcinit, glibc, glibcversion, so, threads, soversion = [
-            s.decode('latin1') if s is not None else s
-            for s in m.groups()]
-        if libcinit and not lib:
-            lib = 'libc'
-        elif glibc:
-            if lib != 'glibc':
-                lib = 'glibc'
-                version = glibcversion
-            elif glibcversion > version:
-                version = glibcversion
-        elif so:
-            if lib != 'glibc':
-                lib = 'libc'
-                if soversion and soversion > version:
-                    version = soversion
-                if threads and version[-len(threads):] != threads:
-                    version = version + threads
-        pos = m.end()
-    f.close()
+    with open(executable, 'rb') as f:
+        binary = f.read(chunksize)
+        pos = 0
+        while 1:
+            if b'libc' in binary or b'GLIBC' in binary:
+                m = _libc_search.search(binary, pos)
+            else:
+                m = None
+            if not m:
+                binary = f.read(chunksize)
+                if not binary:
+                    break
+                pos = 0
+                continue
+            libcinit, glibc, glibcversion, so, threads, soversion = [
+                s.decode('latin1') if s is not None else s
+                for s in m.groups()]
+            if libcinit and not lib:
+                lib = 'libc'
+            elif glibc:
+                if lib != 'glibc':
+                    lib = 'glibc'
+                    version = glibcversion
+                elif glibcversion > version:
+                    version = glibcversion
+            elif so:
+                if lib != 'glibc':
+                    lib = 'libc'
+                    if soversion and soversion > version:
+                        version = soversion
+                    if threads and version[-len(threads):] != threads:
+                        version = version + threads
+            pos = m.end()
     return lib, version
 
 def _dist_try_harder(distname, version, id):
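In libc_ver() the with-block now encloses a scanning loop that can leave via break or continue; the context manager closes the binary on any of those exits, whereas the old trailing f.close() was skipped entirely if read() or the regex search raised. An illustrative sketch with a hypothetical scan_for_marker() helper (marker and chunksize are made-up parameters):

def scan_for_marker(path, marker=b'libc', chunksize=16384):
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(chunksize)
            if not chunk:
                break              # normal end of file, f still gets closed
            if marker in chunk:
                return True        # early return also closes f
    return False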
@@ -1639,9 +1639,8 @@ def writedoc(thing, forceload=0):
     try:
         object, name = resolve(thing, forceload)
         page = html.page(describe(object), html.document(object, name))
-        file = open(name + '.html', 'w', encoding='utf-8')
-        file.write(page)
-        file.close()
+        with open(name + '.html', 'w', encoding='utf-8') as file:
+            file.write(page)
         print('wrote', name + '.html')
     except (ImportError, ErrorDuringImport) as value:
         print(value)
@@ -182,8 +182,8 @@ if __name__ == "__main__":
         items = sorted(d)
         for item in items:
             f.write("#define %s_%s %d\n" % (prefix, item, item))
-    f = open("sre_constants.h", "w")
-    f.write("""\
+    with open("sre_constants.h", "w") as f:
+        f.write("""\
 /*
  * Secret Labs' Regular Expression Engine
  *
@@ -199,25 +199,24 @@ if __name__ == "__main__":
 
 """)
 
-    f.write("#define SRE_MAGIC %d\n" % MAGIC)
+        f.write("#define SRE_MAGIC %d\n" % MAGIC)
 
-    dump(f, OPCODES, "SRE_OP")
-    dump(f, ATCODES, "SRE")
-    dump(f, CHCODES, "SRE")
+        dump(f, OPCODES, "SRE_OP")
+        dump(f, ATCODES, "SRE")
+        dump(f, CHCODES, "SRE")
 
-    f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
-    f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
-    f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
-    f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
-    f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
-    f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
-    f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
-    f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG)
-    f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII)
+        f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
+        f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
+        f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
+        f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
+        f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
+        f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
+        f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
+        f.write("#define SRE_FLAG_DEBUG %d\n" % SRE_FLAG_DEBUG)
+        f.write("#define SRE_FLAG_ASCII %d\n" % SRE_FLAG_ASCII)
 
-    f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
-    f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
-    f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
+        f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
+        f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
+        f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
 
-    f.close()
     print("done")
Lib/token.py (12 lines changed)
@@ -97,8 +97,8 @@ def _main():
     except OSError as err:
         sys.stdout.write("I/O error: %s\n" % str(err))
         sys.exit(1)
-    lines = fp.read().split("\n")
-    fp.close()
+    with fp:
+        lines = fp.read().split("\n")
     prog = re.compile(
         "#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
         re.IGNORECASE)
@@ -116,8 +116,8 @@ def _main():
     except OSError as err:
         sys.stderr.write("I/O error: %s\n" % str(err))
         sys.exit(2)
-    format = fp.read().split("\n")
-    fp.close()
+    with fp:
+        format = fp.read().split("\n")
     try:
         start = format.index("#--start constants--") + 1
         end = format.index("#--end constants--")
@@ -133,8 +133,8 @@ def _main():
     except OSError as err:
         sys.stderr.write("I/O error: %s\n" % str(err))
         sys.exit(4)
-    fp.write("\n".join(format))
-    fp.close()
+    with fp:
+        fp.write("\n".join(format))
 
 
 if __name__ == "__main__":
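The three token.py hunks share one shape: open() stays inside try/except so an I/O failure can be reported and turned into an exit code, and the following "with fp:" block takes over responsibility for closing. A hedged sketch with a hypothetical read_lines() helper:

import sys

def read_lines(filename):
    try:
        fp = open(filename)
    except OSError as err:
        sys.stderr.write("I/O error: %s\n" % str(err))
        sys.exit(1)
    # From here on the with-block owns fp and closes it even if
    # read() fails.
    with fp:
        return fp.read().split("\n")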
Lib/trace.py (40 lines changed)
@@ -232,8 +232,8 @@ class CoverageResults:
         if self.infile:
             # Try to merge existing counts file.
             try:
-                counts, calledfuncs, callers = \
-                        pickle.load(open(self.infile, 'rb'))
+                with open(self.infile, 'rb') as f:
+                    counts, calledfuncs, callers = pickle.load(f)
                 self.update(self.__class__(counts, calledfuncs, callers))
             except (OSError, EOFError, ValueError) as err:
                 print(("Skipping counts file %r: %s"
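The CoverageResults hunk above replaces pickle.load(open(...)), where the anonymous file object was only closed whenever the garbage collector got to it, with a named file in a with-block. A hedged standalone sketch using a hypothetical load_counts() helper:

import pickle

def load_counts(infile):
    try:
        # The file handed to pickle.load() is closed as soon as the
        # with-block ends, not at some later GC cycle.
        with open(infile, 'rb') as f:
            counts, calledfuncs, callers = pickle.load(f)
    except (OSError, EOFError, ValueError) as err:
        print("Skipping counts file %r: %s" % (infile, err))
        return None
    return counts, calledfuncs, callers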
@@ -361,26 +361,26 @@ class CoverageResults:
 
         n_lines = 0
         n_hits = 0
-        for lineno, line in enumerate(lines, 1):
-            # do the blank/comment match to try to mark more lines
-            # (help the reader find stuff that hasn't been covered)
-            if lineno in lines_hit:
-                outfile.write("%5d: " % lines_hit[lineno])
-                n_hits += 1
-                n_lines += 1
-            elif rx_blank.match(line):
-                outfile.write("       ")
-            else:
-                # lines preceded by no marks weren't hit
-                # Highlight them if so indicated, unless the line contains
-                # #pragma: NO COVER
-                if lineno in lnotab and not PRAGMA_NOCOVER in line:
-                    outfile.write(">>>>>> ")
-                    n_lines += 1
-                else:
-                    outfile.write("       ")
-            outfile.write(line.expandtabs(8))
-        outfile.close()
+        with outfile:
+            for lineno, line in enumerate(lines, 1):
+                # do the blank/comment match to try to mark more lines
+                # (help the reader find stuff that hasn't been covered)
+                if lineno in lines_hit:
+                    outfile.write("%5d: " % lines_hit[lineno])
+                    n_hits += 1
+                    n_lines += 1
+                elif rx_blank.match(line):
+                    outfile.write("       ")
+                else:
+                    # lines preceded by no marks weren't hit
+                    # Highlight them if so indicated, unless the line contains
+                    # #pragma: NO COVER
+                    if lineno in lnotab and not PRAGMA_NOCOVER in line:
+                        outfile.write(">>>>>> ")
+                        n_lines += 1
+                    else:
+                        outfile.write("       ")
+                outfile.write(line.expandtabs(8))
 
         return n_hits, n_lines
 
@@ -1010,12 +1010,9 @@ def gzip_encode(data):
     if not gzip:
         raise NotImplementedError
     f = BytesIO()
-    gzf = gzip.GzipFile(mode="wb", fileobj=f, compresslevel=1)
-    gzf.write(data)
-    gzf.close()
-    encoded = f.getvalue()
-    f.close()
-    return encoded
+    with gzip.GzipFile(mode="wb", fileobj=f, compresslevel=1) as gzf:
+        gzf.write(data)
+    return f.getvalue()
 
 ##
 # Decode a string using the gzip content encoding such as specified by the
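In gzip_encode() the with-block guarantees that the GzipFile is closed before getvalue() runs; close() is what flushes the gzip trailer (CRC and length) into the underlying buffer, so the returned bytes are a complete stream. The BytesIO itself holds no OS descriptor, so dropping the old f.close() loses nothing. A sketch with a hypothetical compress() helper:

import gzip
from io import BytesIO

def compress(data):
    buf = BytesIO()
    with gzip.GzipFile(mode="wb", fileobj=buf, compresslevel=1) as gzf:
        gzf.write(data)                # compressed bytes land in buf
    # gzf is closed here, so buf now also contains the gzip trailer.
    return buf.getvalue()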
@@ -1036,17 +1033,14 @@ def gzip_decode(data, max_decode=20971520):
     """
     if not gzip:
         raise NotImplementedError
-    f = BytesIO(data)
-    gzf = gzip.GzipFile(mode="rb", fileobj=f)
-    try:
-        if max_decode < 0: # no limit
-            decoded = gzf.read()
-        else:
-            decoded = gzf.read(max_decode + 1)
-    except OSError:
-        raise ValueError("invalid data")
-    f.close()
-    gzf.close()
+    with gzip.GzipFile(mode="rb", fileobj=BytesIO(data)) as gzf:
+        try:
+            if max_decode < 0: # no limit
+                decoded = gzf.read()
+            else:
+                decoded = gzf.read(max_decode + 1)
+        except OSError:
+            raise ValueError("invalid data")
     if max_decode >= 0 and len(decoded) > max_decode:
         raise ValueError("max gzipped payload length exceeded")
     return decoded
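A quick round-trip check of the two helpers patched above, assuming these are the gzip_encode()/gzip_decode() pair exposed by xmlrpc.client:

from xmlrpc.client import gzip_encode, gzip_decode

payload = b"x" * 10000
blob = gzip_encode(payload)            # GzipFile closed by its with-block
assert gzip_decode(blob) == payload    # decodes and enforces max_decode
assert len(blob) < len(payload)        # highly compressible input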