mirror of
https://github.com/python/cpython.git
synced 2025-07-10 04:45:36 +00:00

Merged revisions 46490-46494,46496,46498,46500,46506,46521,46538,46558,46563-46567,46570-46571,46583,46593,46595-46598,46604,46606,46609-46753 via svnmerge from svn+ssh://pythondev@svn.python.org/python/trunk ........ r46610 | martin.v.loewis | 2006-06-03 09:42:26 +0200 (Sat, 03 Jun 2006) | 2 lines Updated version (win32-icons2.zip) from #1490384. ........ r46612 | andrew.kuchling | 2006-06-03 20:09:41 +0200 (Sat, 03 Jun 2006) | 1 line [Bug #1472084] Fix description of do_tag ........ r46614 | andrew.kuchling | 2006-06-03 20:33:35 +0200 (Sat, 03 Jun 2006) | 1 line [Bug #1475554] Strengthen text to say 'must' instead of 'should' ........ r46616 | andrew.kuchling | 2006-06-03 20:41:28 +0200 (Sat, 03 Jun 2006) | 1 line [Bug #1441864] Clarify description of 'data' argument ........ r46617 | andrew.kuchling | 2006-06-03 20:43:24 +0200 (Sat, 03 Jun 2006) | 1 line Minor rewording ........ r46619 | andrew.kuchling | 2006-06-03 21:02:35 +0200 (Sat, 03 Jun 2006) | 9 lines [Bug #1497414] _self is a reserved word in the WATCOM 10.6 C compiler. Fix by renaming the variable. In a different module, Neal fixed it by renaming _self to self. There's already a variable named 'self' here, so I used selfptr. (I'm committing this on a Mac without Tk, but it's a simple search-and-replace. <crosses fingers>, so I'll watch the buildbots and see what happens.) ........ r46621 | fredrik.lundh | 2006-06-03 23:56:05 +0200 (Sat, 03 Jun 2006) | 5 lines "_self" is a said to be a reserved word in Watcom C 10.6. I'm not sure that's really standard compliant behaviour, but I guess we have to fix that anyway... ........ r46622 | andrew.kuchling | 2006-06-04 00:44:42 +0200 (Sun, 04 Jun 2006) | 1 line Update readme ........ r46623 | andrew.kuchling | 2006-06-04 00:59:23 +0200 (Sun, 04 Jun 2006) | 1 line Drop 0 parameter ........ r46624 | andrew.kuchling | 2006-06-04 00:59:59 +0200 (Sun, 04 Jun 2006) | 1 line Some code tidying; use curses.wrapper ........ 
r46625 | andrew.kuchling | 2006-06-04 01:02:15 +0200 (Sun, 04 Jun 2006) | 1 line Use True; value returned from main is unused ........ r46626 | andrew.kuchling | 2006-06-04 01:07:21 +0200 (Sun, 04 Jun 2006) | 1 line Use true division, and the True value ........ r46627 | andrew.kuchling | 2006-06-04 01:09:58 +0200 (Sun, 04 Jun 2006) | 1 line Docstring fix; use True ........ r46628 | andrew.kuchling | 2006-06-04 01:15:56 +0200 (Sun, 04 Jun 2006) | 1 line Put code in a main() function; loosen up the spacing to match current code style ........ r46629 | andrew.kuchling | 2006-06-04 01:39:07 +0200 (Sun, 04 Jun 2006) | 1 line Use functions; modernize code ........ r46630 | andrew.kuchling | 2006-06-04 01:43:22 +0200 (Sun, 04 Jun 2006) | 1 line This demo requires Medusa (not just asyncore); remove it ........ r46631 | andrew.kuchling | 2006-06-04 01:46:36 +0200 (Sun, 04 Jun 2006) | 2 lines Remove xmlrpc demo -- it duplicates the SimpleXMLRPCServer module. ........ r46632 | andrew.kuchling | 2006-06-04 01:47:22 +0200 (Sun, 04 Jun 2006) | 1 line Remove xmlrpc/ directory ........ r46633 | andrew.kuchling | 2006-06-04 01:51:21 +0200 (Sun, 04 Jun 2006) | 1 line Remove dangling reference ........ r46634 | andrew.kuchling | 2006-06-04 01:59:36 +0200 (Sun, 04 Jun 2006) | 1 line Add more whitespace; use a better socket name ........ r46635 | tim.peters | 2006-06-04 03:22:53 +0200 (Sun, 04 Jun 2006) | 2 lines Whitespace normalization. ........ r46637 | tim.peters | 2006-06-04 05:26:02 +0200 (Sun, 04 Jun 2006) | 16 lines In a PYMALLOC_DEBUG build obmalloc adds extra debugging info to each allocated block. This was using 4 bytes for each such piece of info regardless of platform. This didn't really matter before (proof: no bug reports, and the debug-build obmalloc would have assert-failed if it was ever asked for a chunk of memory >= 2**32 bytes), since container indices were plain ints. 
But after the Py_ssize_t changes, it's at least theoretically possible to allocate a list or string whose guts exceed 2**32 bytes, and the PYMALLOC_DEBUG routines would fail then (having only 4 bytes to record the originally requested size). Now we use sizeof(size_t) bytes for each of a PYMALLOC_DEBUG build's extra debugging fields. This won't make any difference on 32-bit boxes, but will add 16 bytes to each allocation in a debug build on a 64-bit box. ........ r46638 | tim.peters | 2006-06-04 05:38:04 +0200 (Sun, 04 Jun 2006) | 4 lines _PyObject_DebugMalloc(): The return value should add 2*sizeof(size_t) now, not 8. This probably accounts for current disasters on the 64-bit buildbot slaves. ........ r46639 | neal.norwitz | 2006-06-04 08:19:31 +0200 (Sun, 04 Jun 2006) | 1 line SF #1499797, Fix for memory leak in WindowsError_str ........ r46640 | andrew.macintyre | 2006-06-04 14:31:09 +0200 (Sun, 04 Jun 2006) | 2 lines Patch #1454481: Make thread stack size runtime tunable. ........ r46641 | andrew.macintyre | 2006-06-04 14:59:59 +0200 (Sun, 04 Jun 2006) | 2 lines clean up function declarations to conform to PEP-7 style. ........ r46642 | martin.blais | 2006-06-04 15:49:49 +0200 (Sun, 04 Jun 2006) | 15 lines Fixes in struct and socket from merge reviews. - Following Guido's comments, renamed * pack_to -> pack_into * recv_buf -> recv_into * recvfrom_buf -> recvfrom_into - Made fixes to _struct.c according to Neal Norwitz comments on the checkins list. - Converted some ints into the appropriate -- I hope -- ssize_t and size_t. ........ r46643 | ronald.oussoren | 2006-06-04 16:05:28 +0200 (Sun, 04 Jun 2006) | 3 lines "Import" LDFLAGS in Mac/OSX/Makefile.in to ensure pythonw gets build with the right compiler flags. ........ r46644 | ronald.oussoren | 2006-06-04 16:24:59 +0200 (Sun, 04 Jun 2006) | 2 lines Drop Mac wrappers for the WASTE library. ........ 
r46645 | tim.peters | 2006-06-04 17:49:07 +0200 (Sun, 04 Jun 2006) | 3 lines s_methods[]: Stop compiler warnings by casting s_unpack_from to PyCFunction. ........ r46646 | george.yoshida | 2006-06-04 19:04:12 +0200 (Sun, 04 Jun 2006) | 2 lines Remove a redundant word ........ r46647 | george.yoshida | 2006-06-04 19:17:25 +0200 (Sun, 04 Jun 2006) | 2 lines Markup fix ........ r46648 | martin.v.loewis | 2006-06-04 21:36:28 +0200 (Sun, 04 Jun 2006) | 2 lines Patch #1359618: Speed-up charmap encoder. ........ r46649 | georg.brandl | 2006-06-04 23:46:16 +0200 (Sun, 04 Jun 2006) | 3 lines Repair refleaks in unicodeobject. ........ r46650 | georg.brandl | 2006-06-04 23:56:52 +0200 (Sun, 04 Jun 2006) | 4 lines Patch #1346214: correctly optimize away "if 0"-style stmts (thanks to Neal for review) ........ r46651 | georg.brandl | 2006-06-05 00:15:37 +0200 (Mon, 05 Jun 2006) | 2 lines Bug #1500293: fix memory leaks in _subprocess module. ........ r46654 | tim.peters | 2006-06-05 01:43:53 +0200 (Mon, 05 Jun 2006) | 2 lines Whitespace normalization. ........ r46655 | tim.peters | 2006-06-05 01:52:47 +0200 (Mon, 05 Jun 2006) | 16 lines Revert revisions: 46640 Patch #1454481: Make thread stack size runtime tunable. 46647 Markup fix The first is causing many buildbots to fail test runs, and there are multiple causes with seemingly no immediate prospects for repairing them. See python-dev discussion. Note that a branch can (and should) be created for resolving these problems, like svn copy svn+ssh://svn.python.org/python/trunk -r46640 svn+ssh://svn.python.org/python/branches/NEW_BRANCH followed by merging rev 46647 to the new branch. ........ r46656 | andrew.kuchling | 2006-06-05 02:08:09 +0200 (Mon, 05 Jun 2006) | 1 line Mention second encoding speedup ........ 
r46657 | gregory.p.smith | 2006-06-05 02:31:01 +0200 (Mon, 05 Jun 2006) | 7 lines bugfix: when log_archive was called with the DB_ARCH_REMOVE flag present in BerkeleyDB >= 4.2 it tried to construct a list out of an uninitialized char **log_list. feature: export the DB_ARCH_REMOVE flag by name in the module on BerkeleyDB >= 4.2. ........ r46658 | gregory.p.smith | 2006-06-05 02:33:35 +0200 (Mon, 05 Jun 2006) | 5 lines fix a bug in the previous commit. don't leak empty list on error return and fix the additional rare (out of memory only) bug that it was supposed to fix of not freeing log_list when the python allocator failed. ........ r46660 | tim.peters | 2006-06-05 02:55:26 +0200 (Mon, 05 Jun 2006) | 9 lines "Flat is better than nested." Move the long-winded, multiply-nested -R support out of runtest() and into some module-level helper functions. This makes runtest() and the -R code easier to follow. That in turn allowed seeing some opportunities for code simplification, and made it obvious that reglog.txt never got closed. ........ r46661 | hyeshik.chang | 2006-06-05 02:59:54 +0200 (Mon, 05 Jun 2006) | 3 lines Fix a potentially invalid memory access of CJKCodecs' shift-jis decoder. (found by Neal Norwitz) ........ r46663 | gregory.p.smith | 2006-06-05 03:39:52 +0200 (Mon, 05 Jun 2006) | 3 lines * support DBEnv.log_stat() method on BerkeleyDB >= 4.0 [patch #1494885] ........ r46664 | tim.peters | 2006-06-05 03:43:03 +0200 (Mon, 05 Jun 2006) | 3 lines Remove doctest.testmod's deprecated (in 2.4) `isprivate` argument. A lot of hair went into supporting that! ........ r46665 | tim.peters | 2006-06-05 03:47:24 +0200 (Mon, 05 Jun 2006) | 2 lines Whitespace normalization. ........ r46666 | tim.peters | 2006-06-05 03:48:21 +0200 (Mon, 05 Jun 2006) | 2 lines Make doctest news more accurate. ........ r46667 | gregory.p.smith | 2006-06-05 03:56:15 +0200 (Mon, 05 Jun 2006) | 3 lines * support DBEnv.lsn_reset() method on BerkeleyDB >= 4.4 [patch #1494902] ........ 
r46668 | gregory.p.smith | 2006-06-05 04:02:25 +0200 (Mon, 05 Jun 2006) | 3 lines mention the just committed bsddb changes ........ r46671 | gregory.p.smith | 2006-06-05 19:38:04 +0200 (Mon, 05 Jun 2006) | 3 lines * add support for DBSequence objects [patch #1466734] ........ r46672 | gregory.p.smith | 2006-06-05 20:20:07 +0200 (Mon, 05 Jun 2006) | 3 lines forgot to add this file in previous commit ........ r46673 | tim.peters | 2006-06-05 20:36:12 +0200 (Mon, 05 Jun 2006) | 2 lines Whitespace normalization. ........ r46674 | tim.peters | 2006-06-05 20:36:54 +0200 (Mon, 05 Jun 2006) | 2 lines Add missing svn:eol-style property to text files. ........ r46675 | gregory.p.smith | 2006-06-05 20:48:21 +0200 (Mon, 05 Jun 2006) | 4 lines * fix DBCursor.pget() bug with keyword argument names when no data= is supplied [SF pybsddb bug #1477863] ........ r46676 | andrew.kuchling | 2006-06-05 21:05:32 +0200 (Mon, 05 Jun 2006) | 1 line Remove use of Trove name, which isn't very helpful to users ........ r46677 | andrew.kuchling | 2006-06-05 21:08:25 +0200 (Mon, 05 Jun 2006) | 1 line [Bug #1470026] Include link to list of classifiers ........ r46679 | tim.peters | 2006-06-05 22:48:49 +0200 (Mon, 05 Jun 2006) | 10 lines Access _struct attributes directly instead of mucking with getattr. string_reverse(): Simplify. assertRaises(): Raise TestFailed on failure. test_unpack_from(), test_pack_into(), test_pack_into_fn(): never use `assert` to test for an expected result (it doesn't test anything when Python is run with -O). ........ r46680 | tim.peters | 2006-06-05 22:49:27 +0200 (Mon, 05 Jun 2006) | 2 lines Add missing svn:eol-style property to text files. ........ r46681 | gregory.p.smith | 2006-06-06 01:38:06 +0200 (Tue, 06 Jun 2006) | 3 lines add depends = ['md5.h'] to the _md5 module extension for correctness sake. ........ 
r46682 | brett.cannon | 2006-06-06 01:51:55 +0200 (Tue, 06 Jun 2006) | 4 lines Add 3 more bytes to a buffer to cover constants in string and null byte on top of 10 possible digits for an int. Closes bug #1501223. ........ r46684 | gregory.p.smith | 2006-06-06 01:59:37 +0200 (Tue, 06 Jun 2006) | 5 lines - bsddb: the __len__ method of a DB object has been fixed to return correct results. It could previously incorrectly return 0 in some cases. Fixes SF bug 1493322 (pybsddb bug 1184012). ........ r46686 | tim.peters | 2006-06-06 02:25:07 +0200 (Tue, 06 Jun 2006) | 7 lines _PySys_Init(): It's rarely a good idea to size a buffer to the exact maximum size someone guesses is needed. In this case, if we're really worried about extreme integers, then "cp%d" can actually need 14 bytes (2 for "cp" + 1 for \0 at the end + 11 for -(2**31-1)). So reserve 128 bytes instead -- nothing is actually saved by making a stack-local buffer tiny. ........ r46687 | neal.norwitz | 2006-06-06 09:22:08 +0200 (Tue, 06 Jun 2006) | 1 line Remove unused variable (and stop compiler warning) ........ r46688 | neal.norwitz | 2006-06-06 09:23:01 +0200 (Tue, 06 Jun 2006) | 1 line Fix a bunch of parameter strings ........ r46689 | thomas.heller | 2006-06-06 13:34:33 +0200 (Tue, 06 Jun 2006) | 6 lines Convert CFieldObject tp_members to tp_getset, since there is no structmember typecode for Py_ssize_t fields. This should fix some of the errors on the PPC64 debian machine (64-bit, big endian). Assigning to readonly fields now raises AttributeError instead of TypeError, so the testcase has to be changed as well. ........ r46690 | thomas.heller | 2006-06-06 13:54:32 +0200 (Tue, 06 Jun 2006) | 1 line Damn - the sentinel was missing. And fix another silly mistake. ........ r46691 | martin.blais | 2006-06-06 14:46:55 +0200 (Tue, 06 Jun 2006) | 13 lines Normalized a few cases of whitespace in function declarations. Found them using:: find . 
-name '*.py' | while read i ; do grep 'def[^(]*( ' $i /dev/null ; done find . -name '*.py' | while read i ; do grep ' ):' $i /dev/null ; done (I was doing this all over my own code anyway, because I'd been using spaces in all defs, so I thought I'd make a run on the Python code as well. If you need to do such fixes in your own code, you can use xx-rename or parenregu.el within emacs.) ........ r46693 | thomas.heller | 2006-06-06 17:34:18 +0200 (Tue, 06 Jun 2006) | 1 line Specify argtypes for all test functions. Maybe that helps on strange ;-) architectures ........ r46694 | tim.peters | 2006-06-06 17:50:17 +0200 (Tue, 06 Jun 2006) | 5 lines BSequence_set_range(): Rev 46688 ("Fix a bunch of parameter strings") changed this function's signature seemingly by mistake, which is causing buildbots to fail test_bsddb3. Restored the pre-46688 signature. ........ r46695 | tim.peters | 2006-06-06 17:52:35 +0200 (Tue, 06 Jun 2006) | 4 lines On python-dev Thomas Heller said these were committed by mistake in rev 46693, so reverting this part of rev 46693. ........ r46696 | andrew.kuchling | 2006-06-06 19:10:41 +0200 (Tue, 06 Jun 2006) | 1 line Fix comment typo ........ r46697 | brett.cannon | 2006-06-06 20:08:16 +0200 (Tue, 06 Jun 2006) | 2 lines Fix coding style guide bug. ........ r46698 | thomas.heller | 2006-06-06 20:50:46 +0200 (Tue, 06 Jun 2006) | 2 lines Add a hack so that foreign functions returning float now do work on 64-bit big endian platforms. ........ r46699 | thomas.heller | 2006-06-06 21:25:13 +0200 (Tue, 06 Jun 2006) | 3 lines Use the same big-endian hack as in _ctypes/callproc.c for callback functions. This fixes the callback function tests that return float. ........ 
r46700 | ronald.oussoren | 2006-06-06 21:50:24 +0200 (Tue, 06 Jun 2006) | 5 lines * Ensure that "make altinstall" works when the tree was configured with --enable-framework * Also for --enable-framework: allow users to use --prefix to specify the location of the compatibility symlinks (such as /usr/local/bin/python) ........ r46701 | ronald.oussoren | 2006-06-06 21:56:00 +0200 (Tue, 06 Jun 2006) | 3 lines A quick hack to ensure the right key-bindings for IDLE on osx: install patched configuration files during a framework install. ........ r46702 | tim.peters | 2006-06-07 03:04:59 +0200 (Wed, 07 Jun 2006) | 4 lines dash_R_cleanup(): Clear filecmp._cache. This accounts for different results across -R runs (at least on Windows) of test_filecmp. ........ r46705 | tim.peters | 2006-06-07 08:57:51 +0200 (Wed, 07 Jun 2006) | 17 lines SF patch 1501987: Remove randomness from test_exceptions, from ?iga Seilnacht (sorry about the name, but Firefox on my box can't display the first character of the name -- the SF "Unix name" is zseil). This appears to cure the oddball intermittent leaks across runs when running test_exceptions under -R. I'm not sure why, but I'm too sleepy to care ;-) The thrust of the SF patch was to remove randomness in the pickle protocol used. I changed the patch to use range(pickle.HIGHEST_PROTOCOL + 1), to try both pickle and cPickle, and randomly mucked with other test lines to put statements on their own lines. Not a bugfix candidate (this is fiddling new-in-2.5 code). ........ r46706 | andrew.kuchling | 2006-06-07 15:55:33 +0200 (Wed, 07 Jun 2006) | 1 line Add an SQLite introduction, taken from the 'What's New' text ........ r46708 | andrew.kuchling | 2006-06-07 19:02:52 +0200 (Wed, 07 Jun 2006) | 1 line Mention other placeholders ........ r46709 | andrew.kuchling | 2006-06-07 19:03:46 +0200 (Wed, 07 Jun 2006) | 1 line Add an item; also, escape % ........ 
r46710 | andrew.kuchling | 2006-06-07 19:04:01 +0200 (Wed, 07 Jun 2006) | 1 line Mention other placeholders ........ r46716 | ronald.oussoren | 2006-06-07 20:57:44 +0200 (Wed, 07 Jun 2006) | 2 lines Move Mac/OSX/Tools one level up ........ r46717 | ronald.oussoren | 2006-06-07 20:58:01 +0200 (Wed, 07 Jun 2006) | 2 lines Move Mac/OSX/PythonLauncher one level up ........ r46718 | ronald.oussoren | 2006-06-07 20:58:42 +0200 (Wed, 07 Jun 2006) | 2 lines mv Mac/OSX/BuildScript one level up ........ r46719 | ronald.oussoren | 2006-06-07 21:02:03 +0200 (Wed, 07 Jun 2006) | 2 lines Move Mac/OSX/* one level up ........ r46720 | ronald.oussoren | 2006-06-07 21:06:01 +0200 (Wed, 07 Jun 2006) | 2 lines And the last bit: move IDLE one level up and adjust makefiles ........ r46723 | ronald.oussoren | 2006-06-07 21:38:53 +0200 (Wed, 07 Jun 2006) | 4 lines - Patch the correct version of python in the Info.plists at build time, instead of relying on a maintainer to update them before releases. - Remove the now empty Mac/OSX directory ........ r46727 | ronald.oussoren | 2006-06-07 22:18:44 +0200 (Wed, 07 Jun 2006) | 7 lines * If BuildApplet.py is used as an applet it starts with a version of sys.exutable that isn't usuable on an #!-line. That results in generated applets that don't actually work. Work around this problem by resetting sys.executable. * argvemulator.py didn't work on intel macs. This patch fixes this (bug #1491468) ........ r46728 | tim.peters | 2006-06-07 22:40:06 +0200 (Wed, 07 Jun 2006) | 2 lines Whitespace normalization. ........ r46729 | tim.peters | 2006-06-07 22:40:54 +0200 (Wed, 07 Jun 2006) | 2 lines Add missing svn:eol-style property to text files. ........ r46730 | thomas.heller | 2006-06-07 22:43:06 +0200 (Wed, 07 Jun 2006) | 7 lines Fix for foreign functions returning small structures on 64-bit big endian machines. Should fix the remaininf failure in the PPC64 Debian buildbot. 
Thanks to Matthias Klose for providing access to a machine to debug and test this. ........ r46731 | brett.cannon | 2006-06-07 23:48:17 +0200 (Wed, 07 Jun 2006) | 2 lines Clarify documentation for bf_getcharbuffer. ........ r46735 | neal.norwitz | 2006-06-08 07:12:45 +0200 (Thu, 08 Jun 2006) | 1 line Fix a refleak in recvfrom_into ........ r46736 | gregory.p.smith | 2006-06-08 07:17:08 +0200 (Thu, 08 Jun 2006) | 9 lines - bsddb: the bsddb.dbtables Modify method now raises the proper error and aborts the db transaction safely when a modifier callback fails. Fixes SF python patch/bug #1408584. Also cleans up the bsddb.dbtables docstrings since thats the only documentation that exists for that unadvertised module. (people really should really just use sqlite3) ........ r46737 | gregory.p.smith | 2006-06-08 07:38:11 +0200 (Thu, 08 Jun 2006) | 4 lines * Turn the deadlock situation described in SF bug #775414 into a DBDeadLockError exception. * add the test case for my previous dbtables commit. ........ r46738 | gregory.p.smith | 2006-06-08 07:39:54 +0200 (Thu, 08 Jun 2006) | 2 lines pasted set_lk_detect line in wrong spot in previous commit. fixed. passes tests this time. ........ r46739 | armin.rigo | 2006-06-08 12:56:24 +0200 (Thu, 08 Jun 2006) | 6 lines (arre, arigo) SF bug #1350060 Give a consistent behavior for comparison and hashing of method objects (both user- and built-in methods). Now compares the 'self' recursively. The hash was already asking for the hash of 'self'. ........ r46740 | andrew.kuchling | 2006-06-08 13:56:44 +0200 (Thu, 08 Jun 2006) | 1 line Typo fix ........ r46741 | georg.brandl | 2006-06-08 14:45:01 +0200 (Thu, 08 Jun 2006) | 2 lines Bug #1502750: Fix getargs "i" format to use LONG_MIN and LONG_MAX for bounds checking. ........ r46743 | georg.brandl | 2006-06-08 14:54:13 +0200 (Thu, 08 Jun 2006) | 2 lines Bug #1502728: Correctly link against librt library on HP-UX. ........ 
r46745 | georg.brandl | 2006-06-08 14:55:47 +0200 (Thu, 08 Jun 2006) | 3 lines Add news for recent bugfix. ........ r46746 | georg.brandl | 2006-06-08 15:31:07 +0200 (Thu, 08 Jun 2006) | 4 lines Argh. "integer" is a very confusing word ;) Actually, checking for INT_MAX and INT_MIN is correct since the format code explicitly handles a C "int". ........ r46748 | nick.coghlan | 2006-06-08 15:54:49 +0200 (Thu, 08 Jun 2006) | 1 line Add functools.update_wrapper() and functools.wraps() as described in PEP 356 ........ r46751 | georg.brandl | 2006-06-08 16:50:21 +0200 (Thu, 08 Jun 2006) | 4 lines Bug #1502805: don't alias file.__exit__ to file.close since the latter can return something that's true. ........ r46752 | georg.brandl | 2006-06-08 16:50:53 +0200 (Thu, 08 Jun 2006) | 3 lines Convert test_file to unittest. ........
2471 lines
62 KiB
C
2471 lines
62 KiB
C
/* File object implementation */
|
|
|
|
#define PY_SSIZE_T_CLEAN
|
|
#include "Python.h"
|
|
#include "structmember.h"
|
|
|
|
#ifndef DONT_HAVE_SYS_TYPES_H
|
|
#include <sys/types.h>
|
|
#endif /* DONT_HAVE_SYS_TYPES_H */
|
|
|
|
#ifdef MS_WINDOWS
|
|
#define fileno _fileno
|
|
/* can simulate truncate with Win32 API functions; see file_truncate */
|
|
#define HAVE_FTRUNCATE
|
|
#define WIN32_LEAN_AND_MEAN
|
|
#include <windows.h>
|
|
#endif
|
|
|
|
#ifdef _MSC_VER
|
|
/* Need GetVersion to see if on NT so safe to use _wfopen */
|
|
#define WIN32_LEAN_AND_MEAN
|
|
#include <windows.h>
|
|
#endif /* _MSC_VER */
|
|
|
|
#if defined(PYOS_OS2) && defined(PYCC_GCC)
|
|
#include <io.h>
|
|
#endif
|
|
|
|
#define BUF(v) PyString_AS_STRING((PyStringObject *)v)
|
|
|
|
#ifndef DONT_HAVE_ERRNO_H
|
|
#include <errno.h>
|
|
#endif
|
|
|
|
#ifdef HAVE_GETC_UNLOCKED
|
|
#define GETC(f) getc_unlocked(f)
|
|
#define FLOCKFILE(f) flockfile(f)
|
|
#define FUNLOCKFILE(f) funlockfile(f)
|
|
#else
|
|
#define GETC(f) getc(f)
|
|
#define FLOCKFILE(f)
|
|
#define FUNLOCKFILE(f)
|
|
#endif
|
|
|
|
/* Bits in f_newlinetypes */
|
|
#define NEWLINE_UNKNOWN 0 /* No newline seen, yet */
|
|
#define NEWLINE_CR 1 /* \r newline seen */
|
|
#define NEWLINE_LF 2 /* \n newline seen */
|
|
#define NEWLINE_CRLF 4 /* \r\n newline seen */
|
|
|
|
#ifdef __cplusplus
|
|
extern "C" {
|
|
#endif
|
|
|
|
FILE *
|
|
PyFile_AsFile(PyObject *f)
|
|
{
|
|
if (f == NULL || !PyFile_Check(f))
|
|
return NULL;
|
|
else
|
|
return ((PyFileObject *)f)->f_fp;
|
|
}
|
|
|
|
/* Return the file object's name attribute (a borrowed reference), or
   NULL when the argument is NULL or is not a file object. */
PyObject *
PyFile_Name(PyObject *f)
{
    if (f != NULL && PyFile_Check(f))
        return ((PyFileObject *)f)->f_name;
    return NULL;
}
|
|
|
|
/* On Unix, fopen will succeed for directories.
|
|
In Python, there should be no file objects referring to
|
|
directories, so we need a check. */
|
|
|
|
/* Reject file objects whose underlying descriptor refers to a directory
   (Unix fopen() happily opens directories).  Returns f unchanged when the
   check passes or cannot be performed; returns NULL with IOError(EISDIR)
   set when f's stream is open on a directory. */
static PyFileObject*
dircheck(PyFileObject* f)
{
#if defined(HAVE_FSTAT) && defined(S_IFDIR) && defined(EISDIR)
    struct stat buf;
    if (f->f_fp == NULL)
        return f;
    /* fstat the already-open descriptor; a failed fstat is treated as
       "not a directory" rather than an error. */
    if (fstat(fileno(f->f_fp), &buf) == 0 &&
        S_ISDIR(buf.st_mode)) {
#ifdef HAVE_STRERROR
        char *msg = strerror(EISDIR);
#else
        char *msg = "Is a directory";
#endif
        /* Build IOError(EISDIR, msg) explicitly so the exception carries
           both the errno and the message, like PyErr_SetFromErrno would. */
        PyObject *exc = PyObject_CallFunction(PyExc_IOError, "(is)",
                                              EISDIR, msg);
        PyErr_SetObject(PyExc_IOError, exc);
        Py_XDECREF(exc);
        return NULL;
    }
#endif
    return f;
}
|
|
|
|
|
|
/* Initialize the fields of a freshly created PyFileObject from an open
   stream.  On success returns f (as PyObject*); on failure returns NULL
   with an exception set (out of memory, or fp refers to a directory).
   Does NOT steal the reference to `name`.  Note that even on failure the
   name/mode/encoding slots are left in a consistent (possibly NULL)
   state so file_dealloc's Py_XDECREFs remain safe. */
static PyObject *
fill_file_fields(PyFileObject *f, FILE *fp, PyObject *name, char *mode,
                 int (*close)(FILE *))
{
    assert(f != NULL);
    assert(PyFile_Check(f));
    assert(f->f_fp == NULL);

    /* Drop whatever tp_new installed before overwriting the slots. */
    Py_DECREF(f->f_name);
    Py_DECREF(f->f_mode);
    Py_DECREF(f->f_encoding);

    Py_INCREF (name);
    f->f_name = name;

    f->f_mode = PyString_FromString(mode);

    f->f_close = close;
    f->f_softspace = 0;
    f->f_binary = strchr(mode,'b') != NULL;
    f->f_buf = NULL;
    /* 'U' selects universal-newline translation (PEP 278). */
    f->f_univ_newline = (strchr(mode, 'U') != NULL);
    f->f_newlinetypes = NEWLINE_UNKNOWN;
    f->f_skipnextlf = 0;
    Py_INCREF(Py_None);
    f->f_encoding = Py_None;

    /* PyString_FromString may have failed; the MemoryError it set is
       propagated to the caller. */
    if (f->f_name == NULL || f->f_mode == NULL)
        return NULL;
    f->f_fp = fp;
    /* Refuse streams opened on directories (sets IOError on failure). */
    f = dircheck(f);
    return (PyObject *) f;
}
|
|
|
|
/* check for known incorrect mode strings - problem is, platforms are
|
|
free to accept any mode characters they like and are supposed to
|
|
ignore stuff they don't understand... write or append mode with
|
|
universal newline support is expressly forbidden by PEP 278.
|
|
Additionally, remove the 'U' from the mode string as platforms
|
|
won't know what it is. */
|
|
/* zero return is kewl - one is un-kewl */
|
|
static int
|
|
sanitize_the_mode(char *mode)
|
|
{
|
|
char *upos;
|
|
size_t len = strlen(mode);
|
|
|
|
if (!len) {
|
|
PyErr_SetString(PyExc_ValueError, "empty mode string");
|
|
return 1;
|
|
}
|
|
|
|
upos = strchr(mode, 'U');
|
|
if (upos) {
|
|
memmove(upos, upos+1, len-(upos-mode)); /* incl null char */
|
|
|
|
if (mode[0] == 'w' || mode[0] == 'a') {
|
|
PyErr_Format(PyExc_ValueError, "universal newline "
|
|
"mode can only be used with modes "
|
|
"starting with 'r'");
|
|
return 1;
|
|
}
|
|
|
|
if (mode[0] != 'r') {
|
|
memmove(mode+1, mode, strlen(mode)+1);
|
|
mode[0] = 'r';
|
|
}
|
|
|
|
if (!strchr(mode, 'b')) {
|
|
memmove(mode+2, mode+1, strlen(mode));
|
|
mode[1] = 'b';
|
|
}
|
|
} else if (mode[0] != 'r' && mode[0] != 'w' && mode[0] != 'a') {
|
|
PyErr_Format(PyExc_ValueError, "mode string must begin with "
|
|
"one of 'r', 'w', 'a' or 'U', not '%.200s'", mode);
|
|
return 1;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* Open the stream for a file object whose fields have already been set
   up (f->f_fp must still be NULL).  `name` and `mode` are the values the
   user passed; on Windows a Unicode f->f_name takes precedence and is
   opened via _wfopen.  Returns f on success; NULL with an exception set
   on failure (bad mode, restricted mode, OS error, or directory). */
static PyObject *
open_the_file(PyFileObject *f, char *name, char *mode)
{
    char *newmode;
    assert(f != NULL);
    assert(PyFile_Check(f));
#ifdef MS_WINDOWS
    /* windows ignores the passed name in order to support Unicode */
    assert(f->f_name != NULL);
#else
    assert(name != NULL);
#endif
    assert(mode != NULL);
    assert(f->f_fp == NULL);

    /* probably need to replace 'U' by 'rb'; sanitize_the_mode may
       insert up to two characters, hence the +3 (incl. NUL). */
    newmode = PyMem_MALLOC(strlen(mode) + 3);
    if (!newmode) {
        PyErr_NoMemory();
        return NULL;
    }
    strcpy(newmode, mode);

    if (sanitize_the_mode(newmode)) {
        f = NULL;
        goto cleanup;
    }

    /* rexec.py can't stop a user from getting the file() constructor --
       all they have to do is get *any* file object f, and then do
       type(f).  Here we prevent them from doing damage with it. */
    if (PyEval_GetRestricted()) {
        PyErr_SetString(PyExc_IOError,
        "file() constructor not accessible in restricted mode");
        f = NULL;
        goto cleanup;
    }
    /* Clear errno so a later 0 reliably means "fopen set nothing". */
    errno = 0;

#ifdef MS_WINDOWS
    if (PyUnicode_Check(f->f_name)) {
        PyObject *wmode;
        /* The (already sanitized) mode is pure ASCII by construction. */
        wmode = PyUnicode_DecodeASCII(newmode, strlen(newmode), NULL);
        if (f->f_name && wmode) {
            Py_BEGIN_ALLOW_THREADS
            /* PyUnicode_AS_UNICODE OK without thread
               lock as it is a simple dereference. */
            f->f_fp = _wfopen(PyUnicode_AS_UNICODE(f->f_name),
                              PyUnicode_AS_UNICODE(wmode));
            Py_END_ALLOW_THREADS
        }
        Py_XDECREF(wmode);
    }
#endif
    /* Fall back to the narrow-char path when _wfopen wasn't used or
       failed and a C-string name is available. */
    if (NULL == f->f_fp && NULL != name) {
        Py_BEGIN_ALLOW_THREADS
        f->f_fp = fopen(name, newmode);
        Py_END_ALLOW_THREADS
    }

    if (f->f_fp == NULL) {
#ifdef _MSC_VER
        /* MSVC 6 (Microsoft) leaves errno at 0 for bad mode strings,
         * across all Windows flavors.  When it sets EINVAL varies
         * across Windows flavors, the exact conditions aren't
         * documented, and the answer lies in the OS's implementation
         * of Win32's CreateFile function (whose source is secret).
         * Seems the best we can do is map EINVAL to ENOENT.
         */
        if (errno == 0)    /* bad mode string */
            errno = EINVAL;
        else if (errno == EINVAL) /* unknown, but not a mode string */
            errno = ENOENT;
#endif
        /* Report the user's original mode, not the sanitized copy. */
        if (errno == EINVAL)
            PyErr_Format(PyExc_IOError, "invalid mode: %s",
                         mode);
        else
            PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, f->f_name);
        f = NULL;
    }
    if (f != NULL)
        f = dircheck(f);

cleanup:
    PyMem_FREE(newmode);

    return (PyObject *)f;
}
|
|
|
|
PyObject *
|
|
PyFile_FromFile(FILE *fp, char *name, char *mode, int (*close)(FILE *))
|
|
{
|
|
PyFileObject *f = (PyFileObject *)PyFile_Type.tp_new(&PyFile_Type,
|
|
NULL, NULL);
|
|
if (f != NULL) {
|
|
PyObject *o_name = PyString_FromString(name);
|
|
if (fill_file_fields(f, fp, o_name, mode, close) == NULL) {
|
|
Py_DECREF(f);
|
|
f = NULL;
|
|
}
|
|
Py_DECREF(o_name);
|
|
}
|
|
return (PyObject *) f;
|
|
}
|
|
|
|
PyObject *
|
|
PyFile_FromString(char *name, char *mode)
|
|
{
|
|
extern int fclose(FILE *);
|
|
PyFileObject *f;
|
|
|
|
f = (PyFileObject *)PyFile_FromFile((FILE *)NULL, name, mode, fclose);
|
|
if (f != NULL) {
|
|
if (open_the_file(f, name, mode) == NULL) {
|
|
Py_DECREF(f);
|
|
f = NULL;
|
|
}
|
|
}
|
|
return (PyObject *)f;
|
|
}
|
|
|
|
void
|
|
PyFile_SetBufSize(PyObject *f, int bufsize)
|
|
{
|
|
PyFileObject *file = (PyFileObject *)f;
|
|
if (bufsize >= 0) {
|
|
int type;
|
|
switch (bufsize) {
|
|
case 0:
|
|
type = _IONBF;
|
|
break;
|
|
#ifdef HAVE_SETVBUF
|
|
case 1:
|
|
type = _IOLBF;
|
|
bufsize = BUFSIZ;
|
|
break;
|
|
#endif
|
|
default:
|
|
type = _IOFBF;
|
|
#ifndef HAVE_SETVBUF
|
|
bufsize = BUFSIZ;
|
|
#endif
|
|
break;
|
|
}
|
|
fflush(file->f_fp);
|
|
if (type == _IONBF) {
|
|
PyMem_Free(file->f_setbuf);
|
|
file->f_setbuf = NULL;
|
|
} else {
|
|
file->f_setbuf = (char *)PyMem_Realloc(file->f_setbuf,
|
|
bufsize);
|
|
}
|
|
#ifdef HAVE_SETVBUF
|
|
setvbuf(file->f_fp, file->f_setbuf, type, bufsize);
|
|
#else /* !HAVE_SETVBUF */
|
|
setbuf(file->f_fp, file->f_setbuf);
|
|
#endif /* !HAVE_SETVBUF */
|
|
}
|
|
}
|
|
|
|
/* Set the encoding used to output Unicode strings.
|
|
Returh 1 on success, 0 on failure. */
|
|
|
|
int
|
|
PyFile_SetEncoding(PyObject *f, const char *enc)
|
|
{
|
|
PyFileObject *file = (PyFileObject*)f;
|
|
PyObject *str = PyString_FromString(enc);
|
|
if (!str)
|
|
return 0;
|
|
Py_DECREF(file->f_encoding);
|
|
file->f_encoding = str;
|
|
return 1;
|
|
}
|
|
|
|
/* Raise ValueError for an operation attempted on a closed file.
   Always returns NULL so callers can write "return err_closed();". */
static PyObject *
err_closed(void)
{
    PyErr_SetString(PyExc_ValueError, "I/O operation on closed file");
    return NULL;
}
|
|
|
|
/* Refuse regular file I/O if there's data in the iteration-buffer.
|
|
* Mixing them would cause data to arrive out of order, as the read*
|
|
* methods don't use the iteration buffer. */
|
|
/* Raise ValueError because data is sitting in the iteration read-ahead
   buffer and a read* call would return it out of order.  Always returns
   NULL so callers can write "return err_iterbuffered();". */
static PyObject *
err_iterbuffered(void)
{
    PyErr_SetString(PyExc_ValueError,
        "Mixing iteration and read methods would lose data");
    return NULL;
}
|
|
|
|
static void drop_readahead(PyFileObject *);
|
|
|
|
/* Methods */
|
|
|
|
/* tp_dealloc for file objects: close the stream (reporting -- but not
   raising -- close errors on stderr, since exceptions can't propagate
   from a destructor), release the setvbuf buffer and read-ahead buffer,
   drop the attribute references, and free the object. */
static void
file_dealloc(PyFileObject *f)
{
    int sts = 0;
    if (f->weakreflist != NULL)
        PyObject_ClearWeakRefs((PyObject *) f);
    if (f->f_fp != NULL && f->f_close != NULL) {
        /* Closing can block on flushing buffered output. */
        Py_BEGIN_ALLOW_THREADS
        sts = (*f->f_close)(f->f_fp);
        Py_END_ALLOW_THREADS
        if (sts == EOF)
#ifdef HAVE_STRERROR
            PySys_WriteStderr("close failed: [Errno %d] %s\n", errno, strerror(errno));
#else
            PySys_WriteStderr("close failed: [Errno %d]\n", errno);
#endif
    }
    PyMem_Free(f->f_setbuf);
    /* XDECREF: slots may be NULL if construction failed part-way. */
    Py_XDECREF(f->f_name);
    Py_XDECREF(f->f_mode);
    Py_XDECREF(f->f_encoding);
    drop_readahead(f);
    f->ob_type->tp_free((PyObject *)f);
}
|
|
|
|
static PyObject *
|
|
file_repr(PyFileObject *f)
|
|
{
|
|
if (PyUnicode_Check(f->f_name)) {
|
|
#ifdef Py_USING_UNICODE
|
|
PyObject *ret = NULL;
|
|
PyObject *name;
|
|
name = PyUnicode_AsUnicodeEscapeString(f->f_name);
|
|
ret = PyString_FromFormat("<%s file u'%s', mode '%s' at %p>",
|
|
f->f_fp == NULL ? "closed" : "open",
|
|
PyString_AsString(name),
|
|
PyString_AsString(f->f_mode),
|
|
f);
|
|
Py_XDECREF(name);
|
|
return ret;
|
|
#endif
|
|
} else {
|
|
return PyString_FromFormat("<%s file '%s', mode '%s' at %p>",
|
|
f->f_fp == NULL ? "closed" : "open",
|
|
PyString_AsString(f->f_name),
|
|
PyString_AsString(f->f_mode),
|
|
f);
|
|
}
|
|
}
|
|
|
|
static PyObject *
|
|
file_close(PyFileObject *f)
|
|
{
|
|
int sts = 0;
|
|
if (f->f_fp != NULL) {
|
|
if (f->f_close != NULL) {
|
|
Py_BEGIN_ALLOW_THREADS
|
|
errno = 0;
|
|
sts = (*f->f_close)(f->f_fp);
|
|
Py_END_ALLOW_THREADS
|
|
}
|
|
f->f_fp = NULL;
|
|
}
|
|
PyMem_Free(f->f_setbuf);
|
|
f->f_setbuf = NULL;
|
|
if (sts == EOF)
|
|
return PyErr_SetFromErrno(PyExc_IOError);
|
|
if (sts != 0)
|
|
return PyInt_FromLong((long)sts);
|
|
Py_INCREF(Py_None);
|
|
return Py_None;
|
|
}
|
|
|
|
|
|
/* Our very own off_t-like type, 64-bit if possible */
/* Selection order: without large-file support plain off_t suffices;
   with it, prefer a 64-bit off_t, then fall back to a 64-bit fpos_t. */
#if !defined(HAVE_LARGEFILE_SUPPORT)
typedef off_t Py_off_t;
#elif SIZEOF_OFF_T >= 8
typedef off_t Py_off_t;
#elif SIZEOF_FPOS_T >= 8
typedef fpos_t Py_off_t;
#else
#error "Large file support, but neither off_t nor fpos_t is large enough."
#endif
|
|
|
|
|
|
/* a portable fseek() function
   return 0 on success, non-zero on failure (with errno set) */
static int
_portable_fseek(FILE *fp, Py_off_t offset, int whence)
{
#if !defined(HAVE_LARGEFILE_SUPPORT)
	return fseek(fp, offset, whence);
#elif defined(HAVE_FSEEKO) && SIZEOF_OFF_T >= 8
	return fseeko(fp, offset, whence);
#elif defined(HAVE_FSEEK64)
	return fseek64(fp, offset, whence);
#elif defined(__BEOS__)
	return _fseek(fp, offset, whence);
#elif SIZEOF_FPOS_T >= 8
	/* lacking a 64-bit capable fseek(), use a 64-bit capable fsetpos()
	   and fgetpos() to implement fseek()*/
	/* NOTE(review): this assumes fpos_t is a plain integral offset on
	   this platform -- fsetpos() is fed an arithmetic result below. */
	fpos_t pos;
	switch (whence) {
	case SEEK_END:
		/* Convert SEEK_END into an absolute position by first
		   moving the real file pointer to the end. */
#ifdef MS_WINDOWS
		fflush(fp);
		if (_lseeki64(fileno(fp), 0, 2) == -1)
			return -1;
#else
		if (fseek(fp, 0, SEEK_END) != 0)
			return -1;
#endif
		/* fall through */
	case SEEK_CUR:
		/* Make the offset absolute by adding the current position. */
		if (fgetpos(fp, &pos) != 0)
			return -1;
		offset += pos;
		break;
	/* case SEEK_SET: break; */
	}
	return fsetpos(fp, &offset);
#else
#error "Large file support, but no way to fseek."
#endif
}
|
|
|
|
|
|
/* a portable ftell() function
   Return -1 on failure with errno set appropriately, current file
   position on success */
static Py_off_t
_portable_ftell(FILE* fp)
{
#if !defined(HAVE_LARGEFILE_SUPPORT)
	return ftell(fp);
#elif defined(HAVE_FTELLO) && SIZEOF_OFF_T >= 8
	return ftello(fp);
#elif defined(HAVE_FTELL64)
	return ftell64(fp);
#elif SIZEOF_FPOS_T >= 8
	/* No 64-bit ftell() variant: read the position via fgetpos(),
	   relying on fpos_t being integral on this platform. */
	fpos_t pos;
	if (fgetpos(fp, &pos) != 0)
		return -1;
	return pos;
#else
#error "Large file support, but no way to ftell."
#endif
}
|
|
|
|
|
|
/* file.seek(offset[, whence]): reposition the stream.  Discards the
   readahead buffer and the pending-\n state used by universal-newline
   translation, since both describe the old position.  Returns None. */
static PyObject *
file_seek(PyFileObject *f, PyObject *args)
{
	int whence;
	int ret;
	Py_off_t offset;
	PyObject *offobj;

	if (f->f_fp == NULL)
		return err_closed();
	/* Seeking invalidates any buffered iteration data. */
	drop_readahead(f);
	whence = 0;
	if (!PyArg_ParseTuple(args, "O|i:seek", &offobj, &whence))
		return NULL;
	/* Accept either an int or (with large-file support) a long. */
#if !defined(HAVE_LARGEFILE_SUPPORT)
	offset = PyInt_AsLong(offobj);
#else
	offset = PyLong_Check(offobj) ?
		PyLong_AsLongLong(offobj) : PyInt_AsLong(offobj);
#endif
	if (PyErr_Occurred())
		return NULL;

	Py_BEGIN_ALLOW_THREADS
	errno = 0;
	ret = _portable_fseek(f->f_fp, offset, whence);
	Py_END_ALLOW_THREADS

	if (ret != 0) {
		PyErr_SetFromErrno(PyExc_IOError);
		clearerr(f->f_fp);
		return NULL;
	}
	/* A pending CRLF half-translation no longer applies after a seek. */
	f->f_skipnextlf = 0;
	Py_INCREF(Py_None);
	return Py_None;
}
|
|
|
|
|
|
#ifdef HAVE_FTRUNCATE
|
|
static PyObject *
|
|
file_truncate(PyFileObject *f, PyObject *args)
|
|
{
|
|
Py_off_t newsize;
|
|
PyObject *newsizeobj = NULL;
|
|
Py_off_t initialpos;
|
|
int ret;
|
|
|
|
if (f->f_fp == NULL)
|
|
return err_closed();
|
|
if (!PyArg_UnpackTuple(args, "truncate", 0, 1, &newsizeobj))
|
|
return NULL;
|
|
|
|
/* Get current file position. If the file happens to be open for
|
|
* update and the last operation was an input operation, C doesn't
|
|
* define what the later fflush() will do, but we promise truncate()
|
|
* won't change the current position (and fflush() *does* change it
|
|
* then at least on Windows). The easiest thing is to capture
|
|
* current pos now and seek back to it at the end.
|
|
*/
|
|
Py_BEGIN_ALLOW_THREADS
|
|
errno = 0;
|
|
initialpos = _portable_ftell(f->f_fp);
|
|
Py_END_ALLOW_THREADS
|
|
if (initialpos == -1)
|
|
goto onioerror;
|
|
|
|
/* Set newsize to current postion if newsizeobj NULL, else to the
|
|
* specified value.
|
|
*/
|
|
if (newsizeobj != NULL) {
|
|
#if !defined(HAVE_LARGEFILE_SUPPORT)
|
|
newsize = PyInt_AsLong(newsizeobj);
|
|
#else
|
|
newsize = PyLong_Check(newsizeobj) ?
|
|
PyLong_AsLongLong(newsizeobj) :
|
|
PyInt_AsLong(newsizeobj);
|
|
#endif
|
|
if (PyErr_Occurred())
|
|
return NULL;
|
|
}
|
|
else /* default to current position */
|
|
newsize = initialpos;
|
|
|
|
/* Flush the stream. We're mixing stream-level I/O with lower-level
|
|
* I/O, and a flush may be necessary to synch both platform views
|
|
* of the current file state.
|
|
*/
|
|
Py_BEGIN_ALLOW_THREADS
|
|
errno = 0;
|
|
ret = fflush(f->f_fp);
|
|
Py_END_ALLOW_THREADS
|
|
if (ret != 0)
|
|
goto onioerror;
|
|
|
|
#ifdef MS_WINDOWS
|
|
/* MS _chsize doesn't work if newsize doesn't fit in 32 bits,
|
|
so don't even try using it. */
|
|
{
|
|
HANDLE hFile;
|
|
|
|
/* Have to move current pos to desired endpoint on Windows. */
|
|
Py_BEGIN_ALLOW_THREADS
|
|
errno = 0;
|
|
ret = _portable_fseek(f->f_fp, newsize, SEEK_SET) != 0;
|
|
Py_END_ALLOW_THREADS
|
|
if (ret)
|
|
goto onioerror;
|
|
|
|
/* Truncate. Note that this may grow the file! */
|
|
Py_BEGIN_ALLOW_THREADS
|
|
errno = 0;
|
|
hFile = (HANDLE)_get_osfhandle(fileno(f->f_fp));
|
|
ret = hFile == (HANDLE)-1;
|
|
if (ret == 0) {
|
|
ret = SetEndOfFile(hFile) == 0;
|
|
if (ret)
|
|
errno = EACCES;
|
|
}
|
|
Py_END_ALLOW_THREADS
|
|
if (ret)
|
|
goto onioerror;
|
|
}
|
|
#else
|
|
Py_BEGIN_ALLOW_THREADS
|
|
errno = 0;
|
|
ret = ftruncate(fileno(f->f_fp), newsize);
|
|
Py_END_ALLOW_THREADS
|
|
if (ret != 0)
|
|
goto onioerror;
|
|
#endif /* !MS_WINDOWS */
|
|
|
|
/* Restore original file position. */
|
|
Py_BEGIN_ALLOW_THREADS
|
|
errno = 0;
|
|
ret = _portable_fseek(f->f_fp, initialpos, SEEK_SET) != 0;
|
|
Py_END_ALLOW_THREADS
|
|
if (ret)
|
|
goto onioerror;
|
|
|
|
Py_INCREF(Py_None);
|
|
return Py_None;
|
|
|
|
onioerror:
|
|
PyErr_SetFromErrno(PyExc_IOError);
|
|
clearerr(f->f_fp);
|
|
return NULL;
|
|
}
|
|
#endif /* HAVE_FTRUNCATE */
|
|
|
|
/* file.tell(): report the current position.  With universal newlines,
   if we stopped between the \r and \n of a CRLF pair (f_skipnextlf),
   peek ahead and count the \n as consumed so the reported position is
   consistent with what read() would return next. */
static PyObject *
file_tell(PyFileObject *f)
{
	Py_off_t pos;

	if (f->f_fp == NULL)
		return err_closed();
	Py_BEGIN_ALLOW_THREADS
	errno = 0;
	pos = _portable_ftell(f->f_fp);
	Py_END_ALLOW_THREADS
	if (pos == -1) {
		PyErr_SetFromErrno(PyExc_IOError);
		clearerr(f->f_fp);
		return NULL;
	}
	if (f->f_skipnextlf) {
		int c;
		c = GETC(f->f_fp);
		if (c == '\n') {
			/* Consume the deferred \n of a CRLF pair. */
			pos++;
			f->f_skipnextlf = 0;
		} else if (c != EOF) ungetc(c, f->f_fp);
	}
#if !defined(HAVE_LARGEFILE_SUPPORT)
	return PyInt_FromLong(pos);
#else
	return PyLong_FromLongLong(pos);
#endif
}
|
|
|
|
static PyObject *
|
|
file_fileno(PyFileObject *f)
|
|
{
|
|
if (f->f_fp == NULL)
|
|
return err_closed();
|
|
return PyInt_FromLong((long) fileno(f->f_fp));
|
|
}
|
|
|
|
static PyObject *
|
|
file_flush(PyFileObject *f)
|
|
{
|
|
int res;
|
|
|
|
if (f->f_fp == NULL)
|
|
return err_closed();
|
|
Py_BEGIN_ALLOW_THREADS
|
|
errno = 0;
|
|
res = fflush(f->f_fp);
|
|
Py_END_ALLOW_THREADS
|
|
if (res != 0) {
|
|
PyErr_SetFromErrno(PyExc_IOError);
|
|
clearerr(f->f_fp);
|
|
return NULL;
|
|
}
|
|
Py_INCREF(Py_None);
|
|
return Py_None;
|
|
}
|
|
|
|
static PyObject *
|
|
file_isatty(PyFileObject *f)
|
|
{
|
|
long res;
|
|
if (f->f_fp == NULL)
|
|
return err_closed();
|
|
Py_BEGIN_ALLOW_THREADS
|
|
res = isatty((int)fileno(f->f_fp));
|
|
Py_END_ALLOW_THREADS
|
|
return PyBool_FromLong(res);
|
|
}
|
|
|
|
|
|
/* SMALLCHUNK: initial read-buffer increment -- the platform BUFSIZ,
   but never less than 8192 bytes. */
#if BUFSIZ < 8192
#define SMALLCHUNK 8192
#else
#define SMALLCHUNK BUFSIZ
#endif

/* BIGCHUNK: point at which new_buffersize() stops doubling and grows
   linearly instead; kept small when int is only 16 bits. */
#if SIZEOF_INT < 4
#define BIGCHUNK (512 * 32)
#else
#define BIGCHUNK (512 * 1024)
#endif
|
|
|
|
/* Pick the next buffer size for an unbounded read: if fstat() can tell
   us how much of the file remains, request exactly that (+1 so growth
   is noticed); otherwise double up to BIGCHUNK, then grow linearly. */
static size_t
new_buffersize(PyFileObject *f, size_t currentsize)
{
#ifdef HAVE_FSTAT
	off_t pos, end;
	struct stat st;
	if (fstat(fileno(f->f_fp), &st) == 0) {
		end = st.st_size;
		/* The following is not a bug: we really need to call lseek()
		   *and* ftell().  The reason is that some stdio libraries
		   mistakenly flush their buffer when ftell() is called and
		   the lseek() call it makes fails, thereby throwing away
		   data that cannot be recovered in any way.  To avoid this,
		   we first test lseek(), and only call ftell() if lseek()
		   works.  We can't use the lseek() value either, because we
		   need to take the amount of buffered data into account.
		   (Yet another reason why stdio stinks. :-) */
		pos = lseek(fileno(f->f_fp), 0L, SEEK_CUR);
		if (pos >= 0) {
			pos = ftell(f->f_fp);
		}
		if (pos < 0)
			clearerr(f->f_fp);
		if (end > pos && pos >= 0)
			return currentsize + end - pos + 1;
		/* Add 1 so if the file were to grow we'd notice. */
	}
#endif
	/* No size information: grow geometrically, capped at BIGCHUNK
	   steps to bound worst-case over-allocation. */
	if (currentsize > SMALLCHUNK) {
		/* Keep doubling until we reach BIGCHUNK;
		   then keep adding BIGCHUNK. */
		if (currentsize <= BIGCHUNK)
			return currentsize + currentsize;
		else
			return currentsize + BIGCHUNK;
	}
	return currentsize + SMALLCHUNK;
}
|
|
|
|
/* BLOCKED_ERRNO(x): true iff errno value x means "operation would
   block" on a non-blocking stream.  EWOULDBLOCK and EAGAIN may be the
   same value, different values, or individually absent -- hence the
   cascade. */
#if defined(EWOULDBLOCK) && defined(EAGAIN) && EWOULDBLOCK != EAGAIN
#define BLOCKED_ERRNO(x) ((x) == EWOULDBLOCK || (x) == EAGAIN)
#else
#ifdef EWOULDBLOCK
#define BLOCKED_ERRNO(x) ((x) == EWOULDBLOCK)
#else
#ifdef EAGAIN
#define BLOCKED_ERRNO(x) ((x) == EAGAIN)
#else
#define BLOCKED_ERRNO(x) 0
#endif
#endif
#endif
|
|
|
|
/* file.read([size]): read up to `size` bytes, or the whole remaining
   file when size is negative.  Builds the result directly into a
   string object, growing it via new_buffersize() for unbounded reads.
   On a non-blocking stream, partial data already read is returned
   rather than discarded when EAGAIN/EWOULDBLOCK strikes. */
static PyObject *
file_read(PyFileObject *f, PyObject *args)
{
	long bytesrequested = -1;
	size_t bytesread, buffersize, chunksize;
	PyObject *v;

	if (f->f_fp == NULL)
		return err_closed();
	/* refuse to mix with f.next() */
	if (f->f_buf != NULL &&
	    (f->f_bufend - f->f_bufptr) > 0 &&
	    f->f_buf[0] != '\0')
		return err_iterbuffered();
	if (!PyArg_ParseTuple(args, "|l:read", &bytesrequested))
		return NULL;
	if (bytesrequested < 0)
		buffersize = new_buffersize(f, (size_t)0);
	else
		buffersize = bytesrequested;
	if (buffersize > PY_SSIZE_T_MAX) {
		PyErr_SetString(PyExc_OverflowError,
	"requested number of bytes is more than a Python string can hold");
		return NULL;
	}
	v = PyString_FromStringAndSize((char *)NULL, buffersize);
	if (v == NULL)
		return NULL;
	bytesread = 0;
	for (;;) {
		Py_BEGIN_ALLOW_THREADS
		errno = 0;
		/* Read straight into the string's buffer, translating
		   newlines when universal-newline mode is active. */
		chunksize = Py_UniversalNewlineFread(BUF(v) + bytesread,
			  buffersize - bytesread, f->f_fp, (PyObject *)f);
		Py_END_ALLOW_THREADS
		if (chunksize == 0) {
			if (!ferror(f->f_fp))
				break;		/* clean EOF */
			clearerr(f->f_fp);
			/* When in non-blocking mode, data shouldn't
			 * be discarded if a blocking signal was
			 * received. That will also happen if
			 * chunksize != 0, but bytesread < buffersize. */
			if (bytesread > 0 && BLOCKED_ERRNO(errno))
				break;
			PyErr_SetFromErrno(PyExc_IOError);
			Py_DECREF(v);
			return NULL;
		}
		bytesread += chunksize;
		if (bytesread < buffersize) {
			/* Short read: EOF or would-block; stop here. */
			clearerr(f->f_fp);
			break;
		}
		if (bytesrequested < 0) {
			/* Unbounded read: enlarge and continue. */
			buffersize = new_buffersize(f, buffersize);
			if (_PyString_Resize(&v, buffersize) < 0)
				return NULL;
		} else {
			/* Got what was requested. */
			break;
		}
	}
	/* Trim the string down to the bytes actually read. */
	if (bytesread != buffersize)
		_PyString_Resize(&v, bytesread);
	return v;
}
|
|
|
|
static PyObject *
|
|
file_readinto(PyFileObject *f, PyObject *args)
|
|
{
|
|
char *ptr;
|
|
Py_ssize_t ntodo;
|
|
Py_ssize_t ndone, nnow;
|
|
|
|
if (f->f_fp == NULL)
|
|
return err_closed();
|
|
if (!f->f_binary) {
|
|
PyErr_SetString(PyExc_TypeError,
|
|
"readinto() requires binary mode");
|
|
return NULL;
|
|
}
|
|
/* refuse to mix with f.next() */
|
|
if (f->f_buf != NULL &&
|
|
(f->f_bufend - f->f_bufptr) > 0 &&
|
|
f->f_buf[0] != '\0')
|
|
return err_iterbuffered();
|
|
if (!PyArg_ParseTuple(args, "w#", &ptr, &ntodo))
|
|
return NULL;
|
|
ndone = 0;
|
|
while (ntodo > 0) {
|
|
Py_BEGIN_ALLOW_THREADS
|
|
errno = 0;
|
|
nnow = Py_UniversalNewlineFread(ptr+ndone, ntodo, f->f_fp,
|
|
(PyObject *)f);
|
|
Py_END_ALLOW_THREADS
|
|
if (nnow == 0) {
|
|
if (!ferror(f->f_fp))
|
|
break;
|
|
PyErr_SetFromErrno(PyExc_IOError);
|
|
clearerr(f->f_fp);
|
|
return NULL;
|
|
}
|
|
ndone += nnow;
|
|
ntodo -= nnow;
|
|
}
|
|
return PyInt_FromLong((long)ndone);
|
|
}
|
|
|
|
/**************************************************************************
|
|
Routine to get next line using platform fgets().
|
|
|
|
Under MSVC 6:
|
|
|
|
+ MS threadsafe getc is very slow (multiple layers of function calls before+
|
|
after each character, to lock+unlock the stream).
|
|
+ The stream-locking functions are MS-internal -- can't access them from user
|
|
code.
|
|
+ There's nothing Tim could find in the MS C or platform SDK libraries that
|
|
can worm around this.
|
|
+ MS fgets locks/unlocks only once per line; it's the only hook we have.
|
|
|
|
So we use fgets for speed(!), despite that it's painful.
|
|
|
|
MS realloc is also slow.
|
|
|
|
Reports from other platforms on this method vs getc_unlocked (which MS doesn't
|
|
have):
|
|
Linux a wash
|
|
Solaris a wash
|
|
Tru64 Unix getline_via_fgets significantly faster
|
|
|
|
CAUTION: The C std isn't clear about this: in those cases where fgets
|
|
writes something into the buffer, can it write into any position beyond the
|
|
required trailing null byte? MSVC 6 fgets does not, and no platform is (yet)
|
|
known on which it does; and it would be a strange way to code fgets. Still,
|
|
getline_via_fgets may not work correctly if it does. The std test
|
|
test_bufio.py should fail if platform fgets() routinely writes beyond the
|
|
trailing null byte. #define DONT_USE_FGETS_IN_GETLINE to disable this code.
|
|
**************************************************************************/
|
|
|
|
/* Use this routine if told to, or by default on non-get_unlocked()
|
|
* platforms unless told not to. Yikes! Let's spell that out:
|
|
* On a platform with getc_unlocked():
|
|
* By default, use getc_unlocked().
|
|
* If you want to use fgets() instead, #define USE_FGETS_IN_GETLINE.
|
|
* On a platform without getc_unlocked():
|
|
* By default, use fgets().
|
|
* If you don't want to use fgets(), #define DONT_USE_FGETS_IN_GETLINE.
|
|
*/
|
|
#if !defined(USE_FGETS_IN_GETLINE) && !defined(HAVE_GETC_UNLOCKED)
|
|
#define USE_FGETS_IN_GETLINE
|
|
#endif
|
|
|
|
#if defined(DONT_USE_FGETS_IN_GETLINE) && defined(USE_FGETS_IN_GETLINE)
|
|
#undef USE_FGETS_IN_GETLINE
|
|
#endif
|
|
|
|
#ifdef USE_FGETS_IN_GETLINE
|
|
static PyObject*
|
|
getline_via_fgets(FILE *fp)
|
|
{
|
|
/* INITBUFSIZE is the maximum line length that lets us get away with the fast
|
|
* no-realloc, one-fgets()-call path. Boosting it isn't free, because we have
|
|
* to fill this much of the buffer with a known value in order to figure out
|
|
* how much of the buffer fgets() overwrites. So if INITBUFSIZE is larger
|
|
* than "most" lines, we waste time filling unused buffer slots. 100 is
|
|
* surely adequate for most peoples' email archives, chewing over source code,
|
|
* etc -- "regular old text files".
|
|
* MAXBUFSIZE is the maximum line length that lets us get away with the less
|
|
* fast (but still zippy) no-realloc, two-fgets()-call path. See above for
|
|
* cautions about boosting that. 300 was chosen because the worst real-life
|
|
* text-crunching job reported on Python-Dev was a mail-log crawler where over
|
|
* half the lines were 254 chars.
|
|
*/
|
|
#define INITBUFSIZE 100
|
|
#define MAXBUFSIZE 300
|
|
char* p; /* temp */
|
|
char buf[MAXBUFSIZE];
|
|
PyObject* v; /* the string object result */
|
|
char* pvfree; /* address of next free slot */
|
|
char* pvend; /* address one beyond last free slot */
|
|
size_t nfree; /* # of free buffer slots; pvend-pvfree */
|
|
size_t total_v_size; /* total # of slots in buffer */
|
|
size_t increment; /* amount to increment the buffer */
|
|
|
|
/* Optimize for normal case: avoid _PyString_Resize if at all
|
|
* possible via first reading into stack buffer "buf".
|
|
*/
|
|
total_v_size = INITBUFSIZE; /* start small and pray */
|
|
pvfree = buf;
|
|
for (;;) {
|
|
Py_BEGIN_ALLOW_THREADS
|
|
pvend = buf + total_v_size;
|
|
nfree = pvend - pvfree;
|
|
memset(pvfree, '\n', nfree);
|
|
assert(nfree < INT_MAX); /* Should be atmost MAXBUFSIZE */
|
|
p = fgets(pvfree, (int)nfree, fp);
|
|
Py_END_ALLOW_THREADS
|
|
|
|
if (p == NULL) {
|
|
clearerr(fp);
|
|
if (PyErr_CheckSignals())
|
|
return NULL;
|
|
v = PyString_FromStringAndSize(buf, pvfree - buf);
|
|
return v;
|
|
}
|
|
/* fgets read *something* */
|
|
p = memchr(pvfree, '\n', nfree);
|
|
if (p != NULL) {
|
|
/* Did the \n come from fgets or from us?
|
|
* Since fgets stops at the first \n, and then writes
|
|
* \0, if it's from fgets a \0 must be next. But if
|
|
* that's so, it could not have come from us, since
|
|
* the \n's we filled the buffer with have only more
|
|
* \n's to the right.
|
|
*/
|
|
if (p+1 < pvend && *(p+1) == '\0') {
|
|
/* It's from fgets: we win! In particular,
|
|
* we haven't done any mallocs yet, and can
|
|
* build the final result on the first try.
|
|
*/
|
|
++p; /* include \n from fgets */
|
|
}
|
|
else {
|
|
/* Must be from us: fgets didn't fill the
|
|
* buffer and didn't find a newline, so it
|
|
* must be the last and newline-free line of
|
|
* the file.
|
|
*/
|
|
assert(p > pvfree && *(p-1) == '\0');
|
|
--p; /* don't include \0 from fgets */
|
|
}
|
|
v = PyString_FromStringAndSize(buf, p - buf);
|
|
return v;
|
|
}
|
|
/* yuck: fgets overwrote all the newlines, i.e. the entire
|
|
* buffer. So this line isn't over yet, or maybe it is but
|
|
* we're exactly at EOF. If we haven't already, try using the
|
|
* rest of the stack buffer.
|
|
*/
|
|
assert(*(pvend-1) == '\0');
|
|
if (pvfree == buf) {
|
|
pvfree = pvend - 1; /* overwrite trailing null */
|
|
total_v_size = MAXBUFSIZE;
|
|
}
|
|
else
|
|
break;
|
|
}
|
|
|
|
/* The stack buffer isn't big enough; malloc a string object and read
|
|
* into its buffer.
|
|
*/
|
|
total_v_size = MAXBUFSIZE << 1;
|
|
v = PyString_FromStringAndSize((char*)NULL, (int)total_v_size);
|
|
if (v == NULL)
|
|
return v;
|
|
/* copy over everything except the last null byte */
|
|
memcpy(BUF(v), buf, MAXBUFSIZE-1);
|
|
pvfree = BUF(v) + MAXBUFSIZE - 1;
|
|
|
|
/* Keep reading stuff into v; if it ever ends successfully, break
|
|
* after setting p one beyond the end of the line. The code here is
|
|
* very much like the code above, except reads into v's buffer; see
|
|
* the code above for detailed comments about the logic.
|
|
*/
|
|
for (;;) {
|
|
Py_BEGIN_ALLOW_THREADS
|
|
pvend = BUF(v) + total_v_size;
|
|
nfree = pvend - pvfree;
|
|
memset(pvfree, '\n', nfree);
|
|
assert(nfree < INT_MAX);
|
|
p = fgets(pvfree, (int)nfree, fp);
|
|
Py_END_ALLOW_THREADS
|
|
|
|
if (p == NULL) {
|
|
clearerr(fp);
|
|
if (PyErr_CheckSignals()) {
|
|
Py_DECREF(v);
|
|
return NULL;
|
|
}
|
|
p = pvfree;
|
|
break;
|
|
}
|
|
p = memchr(pvfree, '\n', nfree);
|
|
if (p != NULL) {
|
|
if (p+1 < pvend && *(p+1) == '\0') {
|
|
/* \n came from fgets */
|
|
++p;
|
|
break;
|
|
}
|
|
/* \n came from us; last line of file, no newline */
|
|
assert(p > pvfree && *(p-1) == '\0');
|
|
--p;
|
|
break;
|
|
}
|
|
/* expand buffer and try again */
|
|
assert(*(pvend-1) == '\0');
|
|
increment = total_v_size >> 2; /* mild exponential growth */
|
|
total_v_size += increment;
|
|
if (total_v_size > PY_SSIZE_T_MAX) {
|
|
PyErr_SetString(PyExc_OverflowError,
|
|
"line is longer than a Python string can hold");
|
|
Py_DECREF(v);
|
|
return NULL;
|
|
}
|
|
if (_PyString_Resize(&v, (int)total_v_size) < 0)
|
|
return NULL;
|
|
/* overwrite the trailing null byte */
|
|
pvfree = BUF(v) + (total_v_size - increment - 1);
|
|
}
|
|
if (BUF(v) + total_v_size != p)
|
|
_PyString_Resize(&v, p - BUF(v));
|
|
return v;
|
|
#undef INITBUFSIZE
|
|
#undef MAXBUFSIZE
|
|
}
|
|
#endif /* ifdef USE_FGETS_IN_GETLINE */
|
|
|
|
/* Internal routine to get a line.
|
|
Size argument interpretation:
|
|
> 0: max length;
|
|
<= 0: read arbitrary line
|
|
*/
|
|
|
|
/* Internal routine to get a line.
   Size argument interpretation:
   > 0: max length;
   <= 0: read arbitrary line

   Reads characters one at a time (under the stream lock), translating
   \r and \r\n to \n when universal-newline mode is on, and records
   which newline conventions were seen in f->f_newlinetypes.  Grows the
   result string geometrically for unbounded reads. */
static PyObject *
get_line(PyFileObject *f, int n)
{
	FILE *fp = f->f_fp;
	int c;
	char *buf, *end;
	size_t total_v_size;	/* total # of slots in buffer */
	size_t used_v_size;	/* # used slots in buffer */
	size_t increment;       /* amount to increment the buffer */
	PyObject *v;
	/* Work on local copies of the newline state; written back after
	   each locked chunk so the object stays consistent. */
	int newlinetypes = f->f_newlinetypes;
	int skipnextlf = f->f_skipnextlf;
	int univ_newline = f->f_univ_newline;

#if defined(USE_FGETS_IN_GETLINE)
	/* Fast path: unbounded read, no newline translation needed. */
	if (n <= 0 && !univ_newline )
		return getline_via_fgets(fp);
#endif
	total_v_size = n > 0 ? n : 100;
	v = PyString_FromStringAndSize((char *)NULL, total_v_size);
	if (v == NULL)
		return NULL;
	buf = BUF(v);
	end = buf + total_v_size;

	for (;;) {
		Py_BEGIN_ALLOW_THREADS
		FLOCKFILE(fp);
		if (univ_newline) {
			c = 'x'; /* Shut up gcc warning */
			while ( buf != end && (c = GETC(fp)) != EOF ) {
				if (skipnextlf ) {
					skipnextlf = 0;
					if (c == '\n') {
						/* Seeing a \n here with
						 * skipnextlf true means we
						 * saw a \r before.
						 */
						newlinetypes |= NEWLINE_CRLF;
						c = GETC(fp);
						if (c == EOF) break;
					} else {
						newlinetypes |= NEWLINE_CR;
					}
				}
				if (c == '\r') {
					/* Translate \r (and a possible
					   following \n, next iteration)
					   into a single \n. */
					skipnextlf = 1;
					c = '\n';
				} else if ( c == '\n')
					newlinetypes |= NEWLINE_LF;
				*buf++ = c;
				if (c == '\n') break;
			}
			if ( c == EOF && skipnextlf )
				newlinetypes |= NEWLINE_CR;
		} else /* If not universal newlines use the normal loop */
			while ((c = GETC(fp)) != EOF &&
			       (*buf++ = c) != '\n' &&
				buf != end)
				;
		FUNLOCKFILE(fp);
		Py_END_ALLOW_THREADS
		f->f_newlinetypes = newlinetypes;
		f->f_skipnextlf = skipnextlf;
		if (c == '\n')
			break;
		if (c == EOF) {
			if (ferror(fp)) {
				PyErr_SetFromErrno(PyExc_IOError);
				clearerr(fp);
				Py_DECREF(v);
				return NULL;
			}
			clearerr(fp);
			if (PyErr_CheckSignals()) {
				Py_DECREF(v);
				return NULL;
			}
			break;
		}
		/* Must be because buf == end */
		if (n > 0)
			break;	/* bounded read: stop at the size limit */
		used_v_size = total_v_size;
		increment = total_v_size >> 2; /* mild exponential growth */
		total_v_size += increment;
		if (total_v_size > PY_SSIZE_T_MAX) {
			PyErr_SetString(PyExc_OverflowError,
			    "line is longer than a Python string can hold");
			Py_DECREF(v);
			return NULL;
		}
		if (_PyString_Resize(&v, total_v_size) < 0)
			return NULL;
		buf = BUF(v) + used_v_size;
		end = BUF(v) + total_v_size;
	}

	/* Trim the result down to what was actually read. */
	used_v_size = buf - BUF(v);
	if (used_v_size != total_v_size)
		_PyString_Resize(&v, used_v_size);
	return v;
}
|
|
|
|
/* External C interface */
|
|
|
|
/* External C API: read one line from any file-like object.
   n == 0: read an arbitrary-length line;
   n  > 0: read at most n bytes;
   n  < 0: like 0, but additionally strip the trailing newline and
           raise EOFError on an empty read.
   Real file objects use the fast get_line(); anything else goes
   through its readline() method. */
PyObject *
PyFile_GetLine(PyObject *f, int n)
{
	PyObject *result;

	if (f == NULL) {
		PyErr_BadInternalCall();
		return NULL;
	}

	if (PyFile_Check(f)) {
		PyFileObject *fo = (PyFileObject *)f;
		if (fo->f_fp == NULL)
			return err_closed();
		/* refuse to mix with f.next() */
		if (fo->f_buf != NULL &&
		    (fo->f_bufend - fo->f_bufptr) > 0 &&
		    fo->f_buf[0] != '\0')
			return err_iterbuffered();
		result = get_line(fo, n);
	}
	else {
		/* Duck-typed object: call its readline() method. */
		PyObject *reader;
		PyObject *args;

		reader = PyObject_GetAttrString(f, "readline");
		if (reader == NULL)
			return NULL;
		if (n <= 0)
			args = PyTuple_New(0);
		else
			args = Py_BuildValue("(i)", n);
		if (args == NULL) {
			Py_DECREF(reader);
			return NULL;
		}
		result = PyEval_CallObject(reader, args);
		Py_DECREF(reader);
		Py_DECREF(args);
		if (result != NULL && !PyString_Check(result) &&
		    !PyUnicode_Check(result)) {
			Py_DECREF(result);
			result = NULL;
			PyErr_SetString(PyExc_TypeError,
				   "object.readline() returned non-string");
		}
	}

	/* n < 0: strip the trailing newline and turn EOF into EOFError. */
	if (n < 0 && result != NULL && PyString_Check(result)) {
		char *s = PyString_AS_STRING(result);
		Py_ssize_t len = PyString_GET_SIZE(result);
		if (len == 0) {
			Py_DECREF(result);
			result = NULL;
			PyErr_SetString(PyExc_EOFError,
					"EOF when reading a line");
		}
		else if (s[len-1] == '\n') {
			/* Resize in place when we hold the only reference;
			   otherwise build a stripped copy. */
			if (result->ob_refcnt == 1)
				_PyString_Resize(&result, len-1);
			else {
				PyObject *v;
				v = PyString_FromStringAndSize(s, len-1);
				Py_DECREF(result);
				result = v;
			}
		}
	}
#ifdef Py_USING_UNICODE
	/* Same post-processing for a unicode result. */
	if (n < 0 && result != NULL && PyUnicode_Check(result)) {
		Py_UNICODE *s = PyUnicode_AS_UNICODE(result);
		Py_ssize_t len = PyUnicode_GET_SIZE(result);
		if (len == 0) {
			Py_DECREF(result);
			result = NULL;
			PyErr_SetString(PyExc_EOFError,
					"EOF when reading a line");
		}
		else if (s[len-1] == '\n') {
			if (result->ob_refcnt == 1)
				PyUnicode_Resize(&result, len-1);
			else {
				PyObject *v;
				v = PyUnicode_FromUnicode(s, len-1);
				Py_DECREF(result);
				result = v;
			}
		}
	}
#endif
	return result;
}
|
|
|
|
/* Python method */
|
|
|
|
static PyObject *
|
|
file_readline(PyFileObject *f, PyObject *args)
|
|
{
|
|
int n = -1;
|
|
|
|
if (f->f_fp == NULL)
|
|
return err_closed();
|
|
/* refuse to mix with f.next() */
|
|
if (f->f_buf != NULL &&
|
|
(f->f_bufend - f->f_bufptr) > 0 &&
|
|
f->f_buf[0] != '\0')
|
|
return err_iterbuffered();
|
|
if (!PyArg_ParseTuple(args, "|i:readline", &n))
|
|
return NULL;
|
|
if (n == 0)
|
|
return PyString_FromString("");
|
|
if (n < 0)
|
|
n = 0;
|
|
return get_line(f, n);
|
|
}
|
|
|
|
/* file.readlines([sizehint]): read lines into a list.  Data is read in
   large chunks into a stack buffer (spilling into a growable string
   object for very long lines); complete lines are split off into the
   list and the trailing fragment is carried over to the next chunk.
   With a positive sizehint, reading stops after roughly that many
   bytes, and any dangling partial line is completed via get_line(). */
static PyObject *
file_readlines(PyFileObject *f, PyObject *args)
{
	long sizehint = 0;
	PyObject *list;
	PyObject *line;
	char small_buffer[SMALLCHUNK];
	char *buffer = small_buffer;
	size_t buffersize = SMALLCHUNK;
	PyObject *big_buffer = NULL;
	size_t nfilled = 0;	/* bytes of a partial line carried over */
	size_t nread;
	size_t totalread = 0;
	char *p, *q, *end;
	int err;
	int shortread = 0;	/* last fread was short: next one is EOF */

	if (f->f_fp == NULL)
		return err_closed();
	/* refuse to mix with f.next() */
	if (f->f_buf != NULL &&
	    (f->f_bufend - f->f_bufptr) > 0 &&
	    f->f_buf[0] != '\0')
		return err_iterbuffered();
	if (!PyArg_ParseTuple(args, "|l:readlines", &sizehint))
		return NULL;
	if ((list = PyList_New(0)) == NULL)
		return NULL;
	for (;;) {
		if (shortread)
			nread = 0;
		else {
			Py_BEGIN_ALLOW_THREADS
			errno = 0;
			nread = Py_UniversalNewlineFread(buffer+nfilled,
				buffersize-nfilled, f->f_fp, (PyObject *)f);
			Py_END_ALLOW_THREADS
			shortread = (nread < buffersize-nfilled);
		}
		if (nread == 0) {
			sizehint = 0;
			if (!ferror(f->f_fp))
				break;		/* clean EOF */
			PyErr_SetFromErrno(PyExc_IOError);
			clearerr(f->f_fp);
		  error:
			Py_DECREF(list);
			list = NULL;
			goto cleanup;
		}
		totalread += nread;
		p = (char *)memchr(buffer+nfilled, '\n', nread);
		if (p == NULL) {
			/* Need a larger buffer to fit this line */
			nfilled += nread;
			buffersize *= 2;
			if (buffersize > PY_SSIZE_T_MAX) {
				PyErr_SetString(PyExc_OverflowError,
				"line is longer than a Python string can hold");
				goto error;
			}
			if (big_buffer == NULL) {
				/* Create the big buffer */
				big_buffer = PyString_FromStringAndSize(
					NULL, buffersize);
				if (big_buffer == NULL)
					goto error;
				buffer = PyString_AS_STRING(big_buffer);
				memcpy(buffer, small_buffer, nfilled);
			}
			else {
				/* Grow the big buffer */
				if ( _PyString_Resize(&big_buffer, buffersize) < 0 )
					goto error;
				buffer = PyString_AS_STRING(big_buffer);
			}
			continue;
		}
		end = buffer+nfilled+nread;
		q = buffer;
		do {
			/* Process complete lines */
			p++;
			line = PyString_FromStringAndSize(q, p-q);
			if (line == NULL)
				goto error;
			err = PyList_Append(list, line);
			Py_DECREF(line);
			if (err != 0)
				goto error;
			q = p;
			p = (char *)memchr(q, '\n', end-q);
		} while (p != NULL);
		/* Move the remaining incomplete line to the start */
		nfilled = end-q;
		memmove(buffer, q, nfilled);
		if (sizehint > 0)
			if (totalread >= (size_t)sizehint)
				break;
	}
	if (nfilled != 0) {
		/* Partial last line */
		line = PyString_FromStringAndSize(buffer, nfilled);
		if (line == NULL)
			goto error;
		if (sizehint > 0) {
			/* Need to complete the last line */
			PyObject *rest = get_line(f, 0);
			if (rest == NULL) {
				Py_DECREF(line);
				goto error;
			}
			PyString_Concat(&line, rest);
			Py_DECREF(rest);
			if (line == NULL)
				goto error;
		}
		err = PyList_Append(list, line);
		Py_DECREF(line);
		if (err != 0)
			goto error;
	}
  cleanup:
	Py_XDECREF(big_buffer);
	return list;
}
|
|
|
|
static PyObject *
|
|
file_write(PyFileObject *f, PyObject *args)
|
|
{
|
|
char *s;
|
|
Py_ssize_t n, n2;
|
|
if (f->f_fp == NULL)
|
|
return err_closed();
|
|
if (!PyArg_ParseTuple(args, f->f_binary ? "s#" : "t#", &s, &n))
|
|
return NULL;
|
|
f->f_softspace = 0;
|
|
Py_BEGIN_ALLOW_THREADS
|
|
errno = 0;
|
|
n2 = fwrite(s, 1, n, f->f_fp);
|
|
Py_END_ALLOW_THREADS
|
|
if (n2 != n) {
|
|
PyErr_SetFromErrno(PyExc_IOError);
|
|
clearerr(f->f_fp);
|
|
return NULL;
|
|
}
|
|
Py_INCREF(Py_None);
|
|
return Py_None;
|
|
}
|
|
|
|
/* file.writelines(sequence): write every item of a sequence of strings.
   Items are gathered CHUNKSIZE at a time into a private list, coerced
   to strings if necessary, then written with the GIL released; see the
   strategy comment below. */
static PyObject *
file_writelines(PyFileObject *f, PyObject *seq)
{
#define CHUNKSIZE 1000
	PyObject *list, *line;
	PyObject *it;	/* iter(seq) */
	PyObject *result;
	int index, islist;
	Py_ssize_t i, j, nwritten, len;

	assert(seq != NULL);
	if (f->f_fp == NULL)
		return err_closed();

	result = NULL;
	list = NULL;
	islist = PyList_Check(seq);
	if (islist)
		it = NULL;
	else {
		it = PyObject_GetIter(seq);
		if (it == NULL) {
			PyErr_SetString(PyExc_TypeError,
				"writelines() requires an iterable argument");
			return NULL;
		}
		/* From here on, fail by going to error, to reclaim "it". */
		list = PyList_New(CHUNKSIZE);
		if (list == NULL)
			goto error;
	}

	/* Strategy: slurp CHUNKSIZE lines into a private list,
	   checking that they are all strings, then write that list
	   without holding the interpreter lock, then come back for more. */
	for (index = 0; ; index += CHUNKSIZE) {
		if (islist) {
			Py_XDECREF(list);
			list = PyList_GetSlice(seq, index, index+CHUNKSIZE);
			if (list == NULL)
				goto error;
			j = PyList_GET_SIZE(list);
		}
		else {
			for (j = 0; j < CHUNKSIZE; j++) {
				line = PyIter_Next(it);
				if (line == NULL) {
					if (PyErr_Occurred())
						goto error;
					break;
				}
				PyList_SetItem(list, j, line);
			}
		}
		if (j == 0)
			break;

		/* Check that all entries are indeed strings. If not,
		   apply the same rules as for file.write() and
		   convert the results to strings. This is slow, but
		   seems to be the only way since all conversion APIs
		   could potentially execute Python code. */
		for (i = 0; i < j; i++) {
			PyObject *v = PyList_GET_ITEM(list, i);
			if (!PyString_Check(v)) {
			    	const char *buffer;
				/* Binary mode also accepts raw buffers. */
			    	if (((f->f_binary &&
				      PyObject_AsReadBuffer(v,
					      (const void**)&buffer,
							    &len)) ||
				     PyObject_AsCharBuffer(v,
							   &buffer,
							   &len))) {
					PyErr_SetString(PyExc_TypeError,
			"writelines() argument must be a sequence of strings");
					goto error;
				}
				line = PyString_FromStringAndSize(buffer,
								  len);
				if (line == NULL)
					goto error;
				Py_DECREF(v);
				PyList_SET_ITEM(list, i, line);
			}
		}

		/* Since we are releasing the global lock, the
		   following code may *not* execute Python code. */
		Py_BEGIN_ALLOW_THREADS
		f->f_softspace = 0;
		errno = 0;
		for (i = 0; i < j; i++) {
		    	line = PyList_GET_ITEM(list, i);
			len = PyString_GET_SIZE(line);
			nwritten = fwrite(PyString_AS_STRING(line),
					  1, len, f->f_fp);
			if (nwritten != len) {
				/* Reacquire the GIL before touching the
				   exception machinery. */
				Py_BLOCK_THREADS
				PyErr_SetFromErrno(PyExc_IOError);
				clearerr(f->f_fp);
				goto error;
			}
		}
		Py_END_ALLOW_THREADS

		if (j < CHUNKSIZE)
			break;
	}

	Py_INCREF(Py_None);
	result = Py_None;
  error:
	Py_XDECREF(list);
	Py_XDECREF(it);
	return result;
#undef CHUNKSIZE
}
|
|
|
|
static PyObject *
|
|
file_self(PyFileObject *f)
|
|
{
|
|
if (f->f_fp == NULL)
|
|
return err_closed();
|
|
Py_INCREF(f);
|
|
return (PyObject *)f;
|
|
}
|
|
|
|
static PyObject *
|
|
file_exit(PyFileObject *f, PyObject *args)
|
|
{
|
|
PyObject *ret = file_close(f);
|
|
if (!ret)
|
|
/* If error occurred, pass through */
|
|
return NULL;
|
|
Py_DECREF(ret);
|
|
/* We cannot return the result of close since a true
|
|
* value will be interpreted as "yes, swallow the
|
|
* exception if one was raised inside the with block". */
|
|
Py_RETURN_NONE;
|
|
}
|
|
|
|
/* Docstrings for the file methods (become __doc__ via file_methods[]). */

PyDoc_STRVAR(readline_doc,
"readline([size]) -> next line from the file, as a string.\n"
"\n"
"Retain newline. A non-negative size argument limits the maximum\n"
"number of bytes to return (an incomplete line may be returned then).\n"
"Return an empty string at EOF.");

PyDoc_STRVAR(read_doc,
"read([size]) -> read at most size bytes, returned as a string.\n"
"\n"
"If the size argument is negative or omitted, read until EOF is reached.\n"
"Notice that when in non-blocking mode, less data than what was requested\n"
"may be returned, even if no size parameter was given.");

PyDoc_STRVAR(write_doc,
"write(str) -> None. Write string str to file.\n"
"\n"
"Note that due to buffering, flush() or close() may be needed before\n"
"the file on disk reflects the data written.");
|
|
|
|
PyDoc_STRVAR(fileno_doc,
|
|
"fileno() -> integer \"file descriptor\".\n"
|
|
"\n"
|
|
"This is needed for lower-level file interfaces, such os.read().");
|
|
|
|
PyDoc_STRVAR(seek_doc,
|
|
"seek(offset[, whence]) -> None. Move to new file position.\n"
|
|
"\n"
|
|
"Argument offset is a byte count. Optional argument whence defaults to\n"
|
|
"0 (offset from start of file, offset should be >= 0); other values are 1\n"
|
|
"(move relative to current position, positive or negative), and 2 (move\n"
|
|
"relative to end of file, usually negative, although many platforms allow\n"
|
|
"seeking beyond the end of a file). If the file is opened in text mode,\n"
|
|
"only offsets returned by tell() are legal. Use of other offsets causes\n"
|
|
"undefined behavior."
|
|
"\n"
|
|
"Note that not all file objects are seekable.");
|
|
|
|
/* More method docstrings; truncate_doc exists only when the platform
   provides ftruncate() (matching the conditional file_truncate method). */
#ifdef HAVE_FTRUNCATE
PyDoc_STRVAR(truncate_doc,
"truncate([size]) -> None. Truncate the file to at most size bytes.\n"
"\n"
"Size defaults to the current file position, as returned by tell().");
#endif

PyDoc_STRVAR(tell_doc,
"tell() -> current file position, an integer (may be a long integer).");

PyDoc_STRVAR(readinto_doc,
"readinto() -> Undocumented. Don't use this; it may go away.");

PyDoc_STRVAR(readlines_doc,
"readlines([size]) -> list of strings, each a line from the file.\n"
"\n"
"Call readline() repeatedly and return a list of the lines so read.\n"
"The optional size argument, if given, is an approximate bound on the\n"
"total number of bytes in the lines returned.");

PyDoc_STRVAR(writelines_doc,
"writelines(sequence_of_strings) -> None. Write the strings to the file.\n"
"\n"
"Note that newlines are not added. The sequence can be any iterable object\n"
"producing strings. This is equivalent to calling write() for each string.");

PyDoc_STRVAR(flush_doc,
"flush() -> None. Flush the internal I/O buffer.");

PyDoc_STRVAR(close_doc,
"close() -> None or (perhaps) an integer. Close the file.\n"
"\n"
"Sets data attribute .closed to True. A closed file cannot be used for\n"
"further I/O operations. close() may be called more than once without\n"
"error. Some kinds of file objects (for example, opened by popen())\n"
"may return an exit status upon closing.");

PyDoc_STRVAR(isatty_doc,
"isatty() -> true or false. True if the file is connected to a tty device.");

PyDoc_STRVAR(enter_doc,
"__enter__() -> self.");

PyDoc_STRVAR(exit_doc,
"__exit__(*excinfo) -> None. Closes the file.");
|
|
|
|
/* Method table for the file type (installed as tp_methods). */
static PyMethodDef file_methods[] = {
    {"readline",  (PyCFunction)file_readline, METH_VARARGS, readline_doc},
    {"read",      (PyCFunction)file_read, METH_VARARGS, read_doc},
    {"write",     (PyCFunction)file_write, METH_VARARGS, write_doc},
    {"fileno",    (PyCFunction)file_fileno, METH_NOARGS, fileno_doc},
    {"seek",      (PyCFunction)file_seek, METH_VARARGS, seek_doc},
#ifdef HAVE_FTRUNCATE
    {"truncate",  (PyCFunction)file_truncate, METH_VARARGS, truncate_doc},
#endif
    {"tell",      (PyCFunction)file_tell, METH_NOARGS, tell_doc},
    {"readinto",  (PyCFunction)file_readinto, METH_VARARGS, readinto_doc},
    {"readlines", (PyCFunction)file_readlines,METH_VARARGS, readlines_doc},
    {"writelines",(PyCFunction)file_writelines, METH_O, writelines_doc},
    {"flush",     (PyCFunction)file_flush, METH_NOARGS, flush_doc},
    {"close",     (PyCFunction)file_close, METH_NOARGS, close_doc},
    {"isatty",    (PyCFunction)file_isatty, METH_NOARGS, isatty_doc},
    /* __enter__ returns the file itself; __exit__ closes it. */
    {"__enter__", (PyCFunction)file_self, METH_NOARGS, enter_doc},
    {"__exit__",  (PyCFunction)file_exit, METH_VARARGS, exit_doc},
    {NULL,        NULL} /* sentinel */
};
|
|
|
|
/* Struct-member attribute table (tp_members).  softspace is the only
   writable slot; mode/name/encoding are read-only (RO). */
#define OFF(x) offsetof(PyFileObject, x)

static PyMemberDef file_memberlist[] = {
    {"softspace", T_INT,    OFF(f_softspace), 0,
     "flag indicating that a space needs to be printed; used by print"},
    {"mode",      T_OBJECT, OFF(f_mode),      RO,
     "file mode ('r', 'U', 'w', 'a', possibly with 'b' or '+' added)"},
    {"name",      T_OBJECT, OFF(f_name),      RO,
     "file name"},
    {"encoding",  T_OBJECT, OFF(f_encoding),  RO,
     "file encoding"},
    /* getattr(f, "closed") is implemented without this table */
    {NULL} /* Sentinel */
};
|
|
|
|
static PyObject *
|
|
get_closed(PyFileObject *f, void *closure)
|
|
{
|
|
return PyBool_FromLong((long)(f->f_fp == 0));
|
|
}
|
|
/* Getter for the "newlines" attribute.
 *
 * f_newlinetypes is a bitmask of NEWLINE_CR / NEWLINE_LF / NEWLINE_CRLF.
 * Returns None if nothing seen yet, a single string for one kind of
 * newline, or a tuple listing every kind observed so far. */
static PyObject *
get_newlines(PyFileObject *f, void *closure)
{
    switch (f->f_newlinetypes) {
    case NEWLINE_UNKNOWN:
        Py_INCREF(Py_None);
        return Py_None;
    case NEWLINE_CR:
        return PyString_FromString("\r");
    case NEWLINE_LF:
        return PyString_FromString("\n");
    case NEWLINE_CR|NEWLINE_LF:
        return Py_BuildValue("(ss)", "\r", "\n");
    case NEWLINE_CRLF:
        return PyString_FromString("\r\n");
    case NEWLINE_CR|NEWLINE_CRLF:
        return Py_BuildValue("(ss)", "\r", "\r\n");
    case NEWLINE_LF|NEWLINE_CRLF:
        return Py_BuildValue("(ss)", "\n", "\r\n");
    case NEWLINE_CR|NEWLINE_LF|NEWLINE_CRLF:
        return Py_BuildValue("(sss)", "\r", "\n", "\r\n");
    default:
        /* Should be unreachable: all bitmask combinations are covered. */
        PyErr_Format(PyExc_SystemError,
                     "Unknown newlines value 0x%x\n",
                     f->f_newlinetypes);
        return NULL;
    }
}
|
|
|
|
/* Computed attributes (tp_getset): both are read-only getters. */
static PyGetSetDef file_getsetlist[] = {
    {"closed", (getter)get_closed, NULL, "True if the file is closed"},
    {"newlines", (getter)get_newlines, NULL,
     "end-of-line convention used in this file"},
    {0},
};
|
|
|
|
static void
|
|
drop_readahead(PyFileObject *f)
|
|
{
|
|
if (f->f_buf != NULL) {
|
|
PyMem_Free(f->f_buf);
|
|
f->f_buf = NULL;
|
|
}
|
|
}
|
|
|
|
/* Make sure that file has a readahead buffer with at least one byte
   (unless at EOF) and no more than bufsize.  Returns negative value on
   error, will set MemoryError if bufsize bytes cannot be allocated. */
static int
readahead(PyFileObject *f, int bufsize)
{
    Py_ssize_t chunksize;

    if (f->f_buf != NULL) {
        if( (f->f_bufend - f->f_bufptr) >= 1)
            return 0;           /* at least one byte still buffered */
        else
            drop_readahead(f);  /* exhausted: refill below */
    }
    if ((f->f_buf = (char *)PyMem_Malloc(bufsize)) == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    /* The fread itself runs without the GIL; no Python code here. */
    Py_BEGIN_ALLOW_THREADS
    errno = 0;
    chunksize = Py_UniversalNewlineFread(
        f->f_buf, bufsize, f->f_fp, (PyObject *)f);
    Py_END_ALLOW_THREADS
    if (chunksize == 0) {
        /* Zero bytes: distinguish genuine EOF from a read error. */
        if (ferror(f->f_fp)) {
            PyErr_SetFromErrno(PyExc_IOError);
            clearerr(f->f_fp);
            drop_readahead(f);
            return -1;
        }
    }
    f->f_bufptr = f->f_buf;
    f->f_bufend = f->f_buf + chunksize;
    return 0;
}
|
|
|
|
/* Used by file_iternext.  The returned string will start with 'skip'
   uninitialized bytes followed by the remainder of the line. Don't be
   horrified by the recursive call: maximum recursion depth is limited by
   logarithmic buffer growth to about 50 even when reading a 1gb line. */

static PyStringObject *
readahead_get_line_skip(PyFileObject *f, int skip, int bufsize)
{
    PyStringObject* s;
    char *bufptr;
    char *buf;
    Py_ssize_t len;

    if (f->f_buf == NULL)
        if (readahead(f, bufsize) < 0)
            return NULL;

    len = f->f_bufend - f->f_bufptr;
    if (len == 0)
        /* EOF: the caller's 'skip' bytes are all there is. */
        return (PyStringObject *)
            PyString_FromStringAndSize(NULL, skip);
    bufptr = (char *)memchr(f->f_bufptr, '\n', len);
    if (bufptr != NULL) {
        /* Complete line within the current buffer. */
        bufptr++;               /* Count the '\n' */
        len = bufptr - f->f_bufptr;
        s = (PyStringObject *)
            PyString_FromStringAndSize(NULL, skip+len);
        if (s == NULL)
            return NULL;
        memcpy(PyString_AS_STRING(s)+skip, f->f_bufptr, len);
        f->f_bufptr = bufptr;
        if (bufptr == f->f_bufend)
            drop_readahead(f);
    } else {
        /* No newline in buffer: take ownership of the buffer, recurse
           with a ~25% larger bufsize, then copy our part in front of
           the recursively-read remainder. */
        bufptr = f->f_bufptr;
        buf = f->f_buf;
        f->f_buf = NULL;        /* Force new readahead buffer */
        assert(skip+len < INT_MAX);
        s = readahead_get_line_skip(
            f, (int)(skip+len), bufsize + (bufsize>>2) );
        if (s == NULL) {
            PyMem_Free(buf);
            return NULL;
        }
        memcpy(PyString_AS_STRING(s)+skip, bufptr, len);
        PyMem_Free(buf);
    }
    return s;
}
|
|
|
|
/* A larger buffer size may actually decrease performance. */
|
|
#define READAHEAD_BUFSIZE 8192
|
|
|
|
static PyObject *
|
|
file_iternext(PyFileObject *f)
|
|
{
|
|
PyStringObject* l;
|
|
|
|
if (f->f_fp == NULL)
|
|
return err_closed();
|
|
|
|
l = readahead_get_line_skip(f, 0, READAHEAD_BUFSIZE);
|
|
if (l == NULL || PyString_GET_SIZE(l) == 0) {
|
|
Py_XDECREF(l);
|
|
return NULL;
|
|
}
|
|
return (PyObject *)l;
|
|
}
|
|
|
|
|
|
/* tp_new for the file type: allocate an instance and give f_name/f_mode
 * placeholder values so other code never sees NULL there.  The actual
 * opening happens later in file_init. */
static PyObject *
file_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    PyObject *self;
    /* Shared, lazily-created placeholder string; deliberately immortal. */
    static PyObject *not_yet_string;

    assert(type != NULL && type->tp_alloc != NULL);

    if (not_yet_string == NULL) {
        not_yet_string = PyString_FromString("<uninitialized file>");
        if (not_yet_string == NULL)
            return NULL;
    }

    self = type->tp_alloc(type, 0);
    if (self != NULL) {
        /* Always fill in the name and mode, so that nobody else
           needs to special-case NULLs there. */
        Py_INCREF(not_yet_string);
        ((PyFileObject *)self)->f_name = not_yet_string;
        Py_INCREF(not_yet_string);
        ((PyFileObject *)self)->f_mode = not_yet_string;
        Py_INCREF(Py_None);
        ((PyFileObject *)self)->f_encoding = Py_None;
        ((PyFileObject *)self)->weakreflist = NULL;
    }
    return self;
}
|
|
|
|
/* tp_init for the file type: file(name[, mode[, buffering]]).
 *
 * Re-initializing an already-open file closes it first.  On Windows NT
 * a unicode filename is tried first via the wide-char API; otherwise the
 * arguments are parsed twice: once with "et" to get an encoded char*
 * path for open_the_file(), once with "O" to keep the original name
 * object for the .name attribute.  Returns 0 on success, -1 on error. */
static int
file_init(PyObject *self, PyObject *args, PyObject *kwds)
{
    PyFileObject *foself = (PyFileObject *)self;
    int ret = 0;
    static char *kwlist[] = {"name", "mode", "buffering", 0};
    char *name = NULL;
    char *mode = "r";
    int bufsize = -1;
    int wideargument = 0;

    assert(PyFile_Check(self));
    if (foself->f_fp != NULL) {
        /* Have to close the existing file first. */
        PyObject *closeresult = file_close(foself);
        if (closeresult == NULL)
            return -1;
        Py_DECREF(closeresult);
    }

#ifdef Py_WIN_WIDE_FILENAMES
    if (GetVersion() < 0x80000000) { /* On NT, so wide API available */
        PyObject *po;
        if (PyArg_ParseTupleAndKeywords(args, kwds, "U|si:file",
                                        kwlist, &po, &mode, &bufsize)) {
            wideargument = 1;
            if (fill_file_fields(foself, NULL, po, mode,
                                 fclose) == NULL)
                goto Error;
        } else {
            /* Drop the argument parsing error as narrow
               strings are also valid. */
            PyErr_Clear();
        }
    }
#endif

    if (!wideargument) {
        PyObject *o_name;

        if (!PyArg_ParseTupleAndKeywords(args, kwds, "et|si:file", kwlist,
                                         Py_FileSystemDefaultEncoding,
                                         &name,
                                         &mode, &bufsize))
            return -1;

        /* We parse again to get the name as a PyObject */
        if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|si:file",
                                         kwlist, &o_name, &mode,
                                         &bufsize))
            return -1;

        if (fill_file_fields(foself, NULL, o_name, mode,
                             fclose) == NULL)
            goto Error;
    }
    /* name is NULL on the wide-argument path; open_the_file handles it. */
    if (open_the_file(foself, name, mode) == NULL)
        goto Error;
    foself->f_setbuf = NULL;
    PyFile_SetBufSize(self, bufsize);
    goto Done;

Error:
    ret = -1;
    /* fall through */
Done:
    PyMem_Free(name); /* free the encoded string */
    return ret;
}
|
|
|
|
/* Type docstring, built from two adjacent PyDoc_STR string literals. */
PyDoc_VAR(file_doc) =
PyDoc_STR(
"file(name[, mode[, buffering]]) -> file object\n"
"\n"
"Open a file. The mode can be 'r', 'w' or 'a' for reading (default),\n"
"writing or appending. The file will be created if it doesn't exist\n"
"when opened for writing or appending; it will be truncated when\n"
"opened for writing. Add a 'b' to the mode for binary files.\n"
"Add a '+' to the mode to allow simultaneous reading and writing.\n"
"If the buffering argument is given, 0 means unbuffered, 1 means line\n"
"buffered, and larger numbers specify the buffer size.\n"
)
PyDoc_STR(
"Add a 'U' to mode to open the file for input with universal newline\n"
"support. Any line ending in the input file will be seen as a '\\n'\n"
"in Python. Also, a file so opened gains the attribute 'newlines';\n"
"the value for this attribute is one of None (no newline read yet),\n"
"'\\r', '\\n', '\\r\\n' or a tuple containing all the newline types seen.\n"
"\n"
"'U' cannot be combined with 'w' or '+' mode.\n"
);
|
|
|
|
/* The file type object.  Files are their own iterators (tp_iter is
   file_self) and support weak references via the weakreflist slot. */
PyTypeObject PyFile_Type = {
    PyObject_HEAD_INIT(&PyType_Type)
    0,
    "file",
    sizeof(PyFileObject),
    0,
    (destructor)file_dealloc,               /* tp_dealloc */
    0,                                      /* tp_print */
    0,                                      /* tp_getattr */
    0,                                      /* tp_setattr */
    0,                                      /* tp_compare */
    (reprfunc)file_repr,                    /* tp_repr */
    0,                                      /* tp_as_number */
    0,                                      /* tp_as_sequence */
    0,                                      /* tp_as_mapping */
    0,                                      /* tp_hash */
    0,                                      /* tp_call */
    0,                                      /* tp_str */
    PyObject_GenericGetAttr,                /* tp_getattro */
    /* softspace is writable:  we must supply tp_setattro */
    PyObject_GenericSetAttr,                /* tp_setattro */
    0,                                      /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_WEAKREFS, /* tp_flags */
    file_doc,                               /* tp_doc */
    0,                                      /* tp_traverse */
    0,                                      /* tp_clear */
    0,                                      /* tp_richcompare */
    offsetof(PyFileObject, weakreflist),    /* tp_weaklistoffset */
    (getiterfunc)file_self,                 /* tp_iter */
    (iternextfunc)file_iternext,            /* tp_iternext */
    file_methods,                           /* tp_methods */
    file_memberlist,                        /* tp_members */
    file_getsetlist,                        /* tp_getset */
    0,                                      /* tp_base */
    0,                                      /* tp_dict */
    0,                                      /* tp_descr_get */
    0,                                      /* tp_descr_set */
    0,                                      /* tp_dictoffset */
    file_init,                              /* tp_init */
    PyType_GenericAlloc,                    /* tp_alloc */
    file_new,                               /* tp_new */
    PyObject_Del,                           /* tp_free */
};
|
|
|
|
/* Interface for the 'soft space' between print items. */
|
|
|
|
/* Interface for the 'soft space' between print items.
 *
 * Sets the softspace flag on f to newflag and returns the previous
 * value.  Real file objects use the struct field directly; arbitrary
 * file-like objects go through their "softspace" attribute, with all
 * attribute errors deliberately swallowed (best-effort semantics).
 * f may be NULL, in which case nothing happens and 0 is returned. */
int
PyFile_SoftSpace(PyObject *f, int newflag)
{
    long oldflag = 0;
    if (f == NULL) {
        /* Do nothing */
    }
    else if (PyFile_Check(f)) {
        oldflag = ((PyFileObject *)f)->f_softspace;
        ((PyFileObject *)f)->f_softspace = newflag;
    }
    else {
        PyObject *v;
        v = PyObject_GetAttrString(f, "softspace");
        if (v == NULL)
            PyErr_Clear();
        else {
            if (PyInt_Check(v))
                oldflag = PyInt_AsLong(v);
            assert(oldflag < INT_MAX);
            Py_DECREF(v);
        }
        v = PyInt_FromLong((long)newflag);
        if (v == NULL)
            PyErr_Clear();
        else {
            if (PyObject_SetAttrString(f, "softspace", v) != 0)
                PyErr_Clear();
            Py_DECREF(v);
        }
    }
    return (int)oldflag;
}
|
|
|
|
/* Interfaces to write objects/strings to file-like objects */
|
|
|
|
/* Interfaces to write objects/strings to file-like objects */

/* Write object v to file-like object f.  With Py_PRINT_RAW in flags,
 * str()-like output is used; otherwise repr().  Real file objects go
 * through PyObject_Print (encoding unicode with the file's encoding
 * first, when set); anything else is written via its write() method.
 * Returns 0 on success, -1 with an exception set on failure. */
int
PyFile_WriteObject(PyObject *v, PyObject *f, int flags)
{
    PyObject *writer, *value, *args, *result;
    if (f == NULL) {
        PyErr_SetString(PyExc_TypeError, "writeobject with NULL file");
        return -1;
    }
    else if (PyFile_Check(f)) {
        FILE *fp = PyFile_AsFile(f);
#ifdef Py_USING_UNICODE
        PyObject *enc = ((PyFileObject*)f)->f_encoding;
        /* NOTE: intentionally shadows the outer PyObject *result;
           PyObject_Print returns an int status. */
        int result;
#endif
        if (fp == NULL) {
            err_closed();
            return -1;
        }
#ifdef Py_USING_UNICODE
        if ((flags & Py_PRINT_RAW) &&
            PyUnicode_Check(v) && enc != Py_None) {
            /* Encode unicode with the file's declared encoding. */
            char *cenc = PyString_AS_STRING(enc);
            value = PyUnicode_AsEncodedString(v, cenc, "strict");
            if (value == NULL)
                return -1;
        } else {
            value = v;
            Py_INCREF(value);
        }
        result = PyObject_Print(value, fp, flags);
        Py_DECREF(value);
        return result;
#else
        return PyObject_Print(v, fp, flags);
#endif
    }
    /* Generic path: look up and call f.write(...). */
    writer = PyObject_GetAttrString(f, "write");
    if (writer == NULL)
        return -1;
    if (flags & Py_PRINT_RAW) {
        if (PyUnicode_Check(v)) {
            /* Pass unicode through; write() may handle it natively. */
            value = v;
            Py_INCREF(value);
        } else
            value = PyObject_Str(v);
    }
    else
        value = PyObject_Repr(v);
    if (value == NULL) {
        Py_DECREF(writer);
        return -1;
    }
    args = PyTuple_Pack(1, value);
    if (args == NULL) {
        Py_DECREF(value);
        Py_DECREF(writer);
        return -1;
    }
    result = PyEval_CallObject(writer, args);
    Py_DECREF(args);
    Py_DECREF(value);
    Py_DECREF(writer);
    if (result == NULL)
        return -1;
    Py_DECREF(result);
    return 0;
}
|
|
|
|
/* Write the C string s to file-like object f.
 *
 * Real file objects get a direct fputs(); other objects go through
 * PyFile_WriteObject with Py_PRINT_RAW.  If an exception is already
 * pending, nothing is attempted (return -1).  Returns 0 on success. */
int
PyFile_WriteString(const char *s, PyObject *f)
{
    if (f == NULL) {
        /* Should be caused by a pre-existing error */
        if (!PyErr_Occurred())
            PyErr_SetString(PyExc_SystemError,
                            "null file for PyFile_WriteString");
        return -1;
    }
    else if (PyFile_Check(f)) {
        FILE *fp = PyFile_AsFile(f);
        if (fp == NULL) {
            err_closed();
            return -1;
        }
        /* NOTE(review): fputs return value is not checked here, so a
           write error on a real file object is silently ignored. */
        fputs(s, fp);
        return 0;
    }
    else if (!PyErr_Occurred()) {
        PyObject *v = PyString_FromString(s);
        int err;
        if (v == NULL)
            return -1;
        err = PyFile_WriteObject(v, f, Py_PRINT_RAW);
        Py_DECREF(v);
        return err;
    }
    else
        return -1;
}
|
|
|
|
/* Try to get a file-descriptor from a Python object. If the object
|
|
is an integer or long integer, its value is returned. If not, the
|
|
object's fileno() method is called if it exists; the method must return
|
|
an integer or long integer, which is returned as the file descriptor value.
|
|
-1 is returned on failure.
|
|
*/
|
|
|
|
/* Try to get a file descriptor from o: accept an int/long directly, or
 * call o.fileno() and require an int/long result.  Negative descriptors
 * raise ValueError.  Returns the descriptor, or -1 with an exception
 * set on failure. */
int PyObject_AsFileDescriptor(PyObject *o)
{
    int fd;
    PyObject *meth;

    if (PyInt_Check(o)) {
        fd = PyInt_AsLong(o);
    }
    else if (PyLong_Check(o)) {
        /* NOTE: silently truncates to int on overflow of PyLong_AsLong. */
        fd = PyLong_AsLong(o);
    }
    else if ((meth = PyObject_GetAttrString(o, "fileno")) != NULL)
    {
        PyObject *fno = PyEval_CallObject(meth, NULL);
        Py_DECREF(meth);
        if (fno == NULL)
            return -1;

        if (PyInt_Check(fno)) {
            fd = PyInt_AsLong(fno);
            Py_DECREF(fno);
        }
        else if (PyLong_Check(fno)) {
            fd = PyLong_AsLong(fno);
            Py_DECREF(fno);
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                            "fileno() returned a non-integer");
            Py_DECREF(fno);
            return -1;
        }
    }
    else {
        /* GetAttrString already set AttributeError; replace it with a
           clearer TypeError. */
        PyErr_SetString(PyExc_TypeError,
                        "argument must be an int, or have a fileno() method.");
        return -1;
    }

    if (fd < 0) {
        PyErr_Format(PyExc_ValueError,
                     "file descriptor cannot be a negative integer (%i)",
                     fd);
        return -1;
    }
    return fd;
}
|
|
|
|
/* From here on we need access to the real fgets and fread */
|
|
#undef fgets
|
|
#undef fread
|
|
|
|
/*
|
|
** Py_UniversalNewlineFgets is an fgets variation that understands
|
|
** all of \r, \n and \r\n conventions.
|
|
** The stream should be opened in binary mode.
|
|
** If fobj is NULL the routine always does newline conversion, and
|
|
** it may peek one char ahead to gobble the second char in \r\n.
|
|
** If fobj is non-NULL it must be a PyFileObject. In this case there
|
|
** is no readahead but instead a flag is used to skip a following
|
|
** \n on the next read. Also, if the file is open in binary mode
|
|
** the whole conversion is skipped. Finally, the routine keeps track of
|
|
** the different types of newlines seen.
|
|
** Note that we need no error handling: fgets() treats error and eof
|
|
** identically.
|
|
*/
|
|
/* fgets() variant that translates \r and \r\n line endings to \n and
 * records which newline kinds were seen on the (optional) PyFileObject.
 * See the comment block above for the full contract.  Returns buf, or
 * NULL on EOF-with-no-data / non-file fobj (errno = ENXIO). */
char *
Py_UniversalNewlineFgets(char *buf, int n, FILE *stream, PyObject *fobj)
{
    char *p = buf;
    int c;
    int newlinetypes = 0;
    int skipnextlf = 0;
    int univ_newline = 1;

    if (fobj) {
        if (!PyFile_Check(fobj)) {
            errno = ENXIO;      /* What can you do... */
            return NULL;
        }
        univ_newline = ((PyFileObject *)fobj)->f_univ_newline;
        if ( !univ_newline )
            /* Not a 'U'-mode file: plain fgets semantics. */
            return fgets(buf, n, stream);
        /* Resume the newline state machine from the file object. */
        newlinetypes = ((PyFileObject *)fobj)->f_newlinetypes;
        skipnextlf = ((PyFileObject *)fobj)->f_skipnextlf;
    }
    FLOCKFILE(stream);
    c = 'x'; /* Shut up gcc warning */
    while (--n > 0 && (c = GETC(stream)) != EOF ) {
        if (skipnextlf ) {
            skipnextlf = 0;
            if (c == '\n') {
                /* Seeing a \n here with skipnextlf true
                ** means we saw a \r before.
                */
                newlinetypes |= NEWLINE_CRLF;
                c = GETC(stream);
                if (c == EOF) break;
            } else {
                /*
                ** Note that c == EOF also brings us here,
                ** so we're okay if the last char in the file
                ** is a CR.
                */
                newlinetypes |= NEWLINE_CR;
            }
        }
        if (c == '\r') {
            /* A \r is translated into a \n, and we skip
            ** an adjacent \n, if any. We don't set the
            ** newlinetypes flag until we've seen the next char.
            */
            skipnextlf = 1;
            c = '\n';
        } else if ( c == '\n') {
            newlinetypes |= NEWLINE_LF;
        }
        *p++ = c;
        if (c == '\n') break;
    }
    if ( c == EOF && skipnextlf )
        newlinetypes |= NEWLINE_CR;
    FUNLOCKFILE(stream);
    *p = '\0';
    if (fobj) {
        /* Persist the state machine for the next call. */
        ((PyFileObject *)fobj)->f_newlinetypes = newlinetypes;
        ((PyFileObject *)fobj)->f_skipnextlf = skipnextlf;
    } else if ( skipnextlf ) {
        /* If we have no file object we cannot save the
        ** skipnextlf flag. We have to readahead, which
        ** will cause a pause if we're reading from an
        ** interactive stream, but that is very unlikely
        ** unless we're doing something silly like
        ** execfile("/dev/tty").
        */
        c = GETC(stream);
        if ( c != '\n' )
            ungetc(c, stream);
    }
    if (p == buf)
        return NULL;
    return buf;
}
|
|
|
|
/*
|
|
** Py_UniversalNewlineFread is an fread variation that understands
|
|
** all of \r, \n and \r\n conventions.
|
|
** The stream should be opened in binary mode.
|
|
** fobj must be a PyFileObject. In this case there
|
|
** is no readahead but instead a flag is used to skip a following
|
|
** \n on the next read. Also, if the file is open in binary mode
|
|
** the whole conversion is skipped. Finally, the routine keeps track of
|
|
** the different types of newlines seen.
|
|
*/
|
|
/* fread() variant with the same universal-newline translation as
 * Py_UniversalNewlineFgets; fobj must be a PyFileObject (else errno is
 * set to ENXIO and 0 returned).  Returns the number of translated
 * bytes stored in buf. */
size_t
Py_UniversalNewlineFread(char *buf, size_t n,
                         FILE *stream, PyObject *fobj)
{
    char *dst = buf;
    PyFileObject *f = (PyFileObject *)fobj;
    int newlinetypes, skipnextlf;

    assert(buf != NULL);
    assert(stream != NULL);

    if (!fobj || !PyFile_Check(fobj)) {
        errno = ENXIO;          /* What can you do... */
        return 0;
    }
    if (!f->f_univ_newline)
        /* Not a 'U'-mode file: plain fread semantics. */
        return fread(buf, 1, n, stream);
    newlinetypes = f->f_newlinetypes;
    skipnextlf = f->f_skipnextlf;
    /* Invariant:  n is the number of bytes remaining to be filled
     * in the buffer.
     */
    while (n) {
        size_t nread;
        int shortread;
        /* Translate in place: src reads the raw bytes, dst writes the
           (possibly shorter) translated bytes behind it. */
        char *src = dst;

        nread = fread(dst, 1, n, stream);
        assert(nread <= n);
        if (nread == 0)
            break;

        n -= nread; /* assuming 1 byte out for each in; will adjust */
        shortread = n != 0;     /* true iff EOF or error */
        while (nread--) {
            char c = *src++;
            if (c == '\r') {
                /* Save as LF and set flag to skip next LF. */
                *dst++ = '\n';
                skipnextlf = 1;
            }
            else if (skipnextlf && c == '\n') {
                /* Skip LF, and remember we saw CR LF. */
                skipnextlf = 0;
                newlinetypes |= NEWLINE_CRLF;
                ++n;            /* dropped a byte: one more slot free */
            }
            else {
                /* Normal char to be stored in buffer.  Also
                 * update the newlinetypes flag if either this
                 * is an LF or the previous char was a CR.
                 */
                if (c == '\n')
                    newlinetypes |= NEWLINE_LF;
                else if (skipnextlf)
                    newlinetypes |= NEWLINE_CR;
                *dst++ = c;
                skipnextlf = 0;
            }
        }
        if (shortread) {
            /* If this is EOF, update type flags. */
            if (skipnextlf && feof(stream))
                newlinetypes |= NEWLINE_CR;
            break;
        }
    }
    /* Persist the newline state machine on the file object. */
    f->f_newlinetypes = newlinetypes;
    f->f_skipnextlf = skipnextlf;
    return dst - buf;
}
|
|
|
|
#ifdef __cplusplus
|
|
}
|
|
#endif
|