Mirror of https://github.com/python/cpython.git
Issue #9205: concurrent.futures.ProcessPoolExecutor now detects killed
children and raises BrokenProcessPool in such a situation. Previously it
would reliably freeze/deadlock.

commit dd69649660 (parent 4a5e5de03f)
8 changed files with 587 additions and 107 deletions
@@ -169,6 +169,12 @@ to a :class:`ProcessPoolExecutor` will result in deadlock.
    of at most *max_workers* processes. If *max_workers* is ``None`` or not
    given, it will default to the number of processors on the machine.
 
+   .. versionchanged:: 3.3
+      When one of the worker processes terminates abruptly, a
+      :exc:`BrokenProcessPool` error is now raised. Previously, behaviour
+      was undefined but operations on the executor or its futures would often
+      freeze or deadlock.
+
 
 .. _processpoolexecutor-example:
 
@@ -369,3 +375,16 @@ Module Functions
    :pep:`3148` -- futures - execute computations asynchronously
       The proposal which described this feature for inclusion in the Python
      standard library.
+
+
+Exception classes
+-----------------
+
+.. exception:: BrokenProcessPool
+
+   Derived from :exc:`RuntimeError`, this exception class is raised when
+   one of the workers of a :class:`ProcessPoolExecutor` has terminated
+   in a non-clean fashion (for example, if it was killed from the outside).
+
+   .. versionadded:: 3.3
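For orientation, here is a minimal sketch of the behaviour documented above (an illustrative script, not part of the commit; it touches the private `_processes` mapping only to pick a worker to kill, the same trick the new test further down uses):

    import time
    from concurrent.futures import ProcessPoolExecutor
    from concurrent.futures.process import BrokenProcessPool

    if __name__ == '__main__':
        executor = ProcessPoolExecutor(max_workers=2)
        future = executor.submit(time.sleep, 3)
        # Kill one worker while the future is still pending/running
        # (private attribute, used here purely for demonstration).
        next(iter(executor._processes.values())).terminate()
        try:
            future.result()
        except BrokenProcessPool:
            # Before this commit the call above would typically hang forever.
            print("pool is broken; pending futures fail instead of deadlocking")
        # Further submissions are refused as well.
        try:
            executor.submit(pow, 2, 8)
        except BrokenProcessPool:
            print("submit() also raises BrokenProcessPool")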
@@ -46,10 +46,11 @@ Process #1..n:
 __author__ = 'Brian Quinlan (brian@sweetapp.com)'
 
 import atexit
+import os
 from concurrent.futures import _base
 import queue
 import multiprocessing
-from multiprocessing.queues import SimpleQueue
+from multiprocessing.queues import SimpleQueue, SentinelReady
 import threading
 import weakref
 
@@ -122,7 +123,7 @@ def _process_worker(call_queue, result_queue):
         call_item = call_queue.get(block=True)
         if call_item is None:
             # Wake up queue management thread
-            result_queue.put(None)
+            result_queue.put(os.getpid())
             return
         try:
             r = call_item.fn(*call_item.args, **call_item.kwargs)
@@ -194,29 +195,63 @@ def _queue_management_worker(executor_reference,
         result_queue: A multiprocessing.Queue of _ResultItems generated by the
             process workers.
     """
-    nb_shutdown_processes = 0
-    def shutdown_one_process():
-        """Tell a worker to terminate, which will in turn wake us again"""
-        nonlocal nb_shutdown_processes
-        call_queue.put(None)
-        nb_shutdown_processes += 1
+
+    def shutdown_worker():
+        # This is an upper bound
+        nb_children_alive = sum(p.is_alive() for p in processes.values())
+        for i in range(0, nb_children_alive):
+            call_queue.put(None)
+        # If .join() is not called on the created processes then
+        # some multiprocessing.Queue methods may deadlock on Mac OS
+        # X.
+        for p in processes.values():
+            p.join()
+
     while True:
         _add_call_item_to_queue(pending_work_items,
                                 work_ids_queue,
                                 call_queue)
 
-        result_item = result_queue.get()
-        if result_item is not None:
-            work_item = pending_work_items[result_item.work_id]
-            del pending_work_items[result_item.work_id]
-
-            if result_item.exception:
-                work_item.future.set_exception(result_item.exception)
-            else:
-                work_item.future.set_result(result_item.result)
-            continue
-        # If we come here, we either got a timeout or were explicitly woken up.
-        # In either case, check whether we should start shutting down.
+        sentinels = [p.sentinel for p in processes.values()]
+        assert sentinels
+        try:
+            result_item = result_queue.get(sentinels=sentinels)
+        except SentinelReady as e:
+            # Mark the process pool broken so that submits fail right now.
+            executor = executor_reference()
+            if executor is not None:
+                executor._broken = True
+                executor._shutdown_thread = True
+                del executor
+            # All futures in flight must be marked failed
+            for work_id, work_item in pending_work_items.items():
+                work_item.future.set_exception(
+                    BrokenProcessPool(
+                        "A process in the process pool was "
+                        "terminated abruptly while the future was "
+                        "running or pending."
+                    ))
+            pending_work_items.clear()
+            # Terminate remaining workers forcibly: the queues or their
+            # locks may be in a dirty state and block forever.
+            for p in processes.values():
+                p.terminate()
+            for p in processes.values():
+                p.join()
+            return
+        if isinstance(result_item, int):
+            # Clean shutdown of a worker using its PID
+            # (avoids marking the executor broken)
+            del processes[result_item]
+        elif result_item is not None:
+            work_item = pending_work_items.pop(result_item.work_id, None)
+            # work_item can be None if another process terminated (see above)
+            if work_item is not None:
+                if result_item.exception:
+                    work_item.future.set_exception(result_item.exception)
+                else:
+                    work_item.future.set_result(result_item.result)
+        # Check whether we should start shutting down.
         executor = executor_reference()
         # No more work items can be added if:
         # - The interpreter is shutting down OR
@@ -226,17 +261,11 @@ def _queue_management_worker(executor_reference,
             # Since no new work items can be added, it is safe to shutdown
             # this thread if there are no pending work items.
             if not pending_work_items:
-                while nb_shutdown_processes < len(processes):
-                    shutdown_one_process()
-                # If .join() is not called on the created processes then
-                # some multiprocessing.Queue methods may deadlock on Mac OS
-                # X.
-                for p in processes:
-                    p.join()
+                shutdown_worker()
                 return
             else:
                 # Start shutting down by telling a process it can exit.
-                shutdown_one_process()
+                call_queue.put(None)
         del executor
 
 _system_limits_checked = False
@@ -264,6 +293,14 @@ def _check_system_limits():
     _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
     raise NotImplementedError(_system_limited)
 
+
+class BrokenProcessPool(RuntimeError):
+    """
+    Raised when a process in a ProcessPoolExecutor terminated abruptly
+    while a future was in the running state.
+    """
+
+
 class ProcessPoolExecutor(_base.Executor):
     def __init__(self, max_workers=None):
         """Initializes a new ProcessPoolExecutor instance.
@@ -288,11 +325,13 @@ class ProcessPoolExecutor(_base.Executor):
         self._result_queue = SimpleQueue()
         self._work_ids = queue.Queue()
         self._queue_management_thread = None
-        self._processes = set()
+        # Map of pids to processes
+        self._processes = {}
 
         # Shutdown is a two-step process.
         self._shutdown_thread = False
         self._shutdown_lock = threading.Lock()
+        self._broken = False
         self._queue_count = 0
         self._pending_work_items = {}
 
@@ -302,6 +341,8 @@ class ProcessPoolExecutor(_base.Executor):
         def weakref_cb(_, q=self._result_queue):
             q.put(None)
         if self._queue_management_thread is None:
+            # Start the processes so that their sentinels are known.
+            self._adjust_process_count()
             self._queue_management_thread = threading.Thread(
                     target=_queue_management_worker,
                     args=(weakref.ref(self, weakref_cb),
@@ -321,10 +362,13 @@ class ProcessPoolExecutor(_base.Executor):
                     args=(self._call_queue,
                           self._result_queue))
             p.start()
-            self._processes.add(p)
+            self._processes[p.pid] = p
 
     def submit(self, fn, *args, **kwargs):
         with self._shutdown_lock:
+            if self._broken:
+                raise BrokenProcessPool('A child process terminated '
+                    'abruptly, the process pool is not usable anymore')
             if self._shutdown_thread:
                 raise RuntimeError('cannot schedule new futures after shutdown')
 
@@ -338,7 +382,6 @@ class ProcessPoolExecutor(_base.Executor):
             self._result_queue.put(None)
 
             self._start_queue_management_thread()
-            self._adjust_process_count()
             return f
     submit.__doc__ = _base.Executor.submit.__doc__
 
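The detection above leans on the ``sentinel`` attribute that multiprocessing.Process gained in Issue #12040 (see the NEWS hunk further down): a handle that becomes ready as soon as the process ends. A minimal Unix-only sketch of that idea, independent of the executor code (illustrative, not part of this commit):

    import multiprocessing
    import select
    import time

    def worker():
        time.sleep(60)

    if __name__ == '__main__':
        p = multiprocessing.Process(target=worker)
        p.start()
        p.terminate()                       # simulate an abrupt death
        # On Unix the sentinel is a file descriptor, so plain select() works;
        # the queue-management thread does the equivalent via result_queue.get().
        ready, _, _ = select.select([p.sentinel], [], [], 5.0)
        print("worker died" if ready else "worker still alive")
        p.join()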
@@ -48,14 +48,18 @@ import itertools
 
 import _multiprocessing
 from multiprocessing import current_process, AuthenticationError, BufferTooShort
-from multiprocessing.util import get_temp_dir, Finalize, sub_debug, debug
+from multiprocessing.util import (
+    get_temp_dir, Finalize, sub_debug, debug, _eintr_retry)
 try:
     from _multiprocessing import win32
+    from _subprocess import WAIT_OBJECT_0, WAIT_TIMEOUT, INFINITE
 except ImportError:
     if sys.platform == 'win32':
         raise
     win32 = None
 
+_select = _eintr_retry(select.select)
+
 #
 #
 #
@@ -118,6 +122,15 @@ def address_type(address):
     else:
         raise ValueError('address type of %r unrecognized' % address)
 
+
+class SentinelReady(Exception):
+    """
+    Raised when a sentinel is ready when polling.
+    """
+    def __init__(self, *args):
+        Exception.__init__(self, *args)
+        self.sentinels = args[0]
+
 #
 # Connection classes
 #
@@ -253,19 +266,17 @@ class _ConnectionBase:
                             (offset + size) // itemsize])
         return size
 
-    def recv(self):
+    def recv(self, sentinels=None):
         """Receive a (picklable) object"""
         self._check_closed()
         self._check_readable()
-        buf = self._recv_bytes()
+        buf = self._recv_bytes(sentinels=sentinels)
         return pickle.loads(buf.getbuffer())
 
     def poll(self, timeout=0.0):
         """Whether there is any input available to be read"""
         self._check_closed()
         self._check_readable()
-        if timeout < 0.0:
-            timeout = None
         return self._poll(timeout)
 
 
@@ -274,61 +285,88 @@ if win32:
     class PipeConnection(_ConnectionBase):
         """
         Connection class based on a Windows named pipe.
+        Overlapped I/O is used, so the handles must have been created
+        with FILE_FLAG_OVERLAPPED.
         """
+        _buffered = b''
 
         def _close(self):
             win32.CloseHandle(self._handle)
 
         def _send_bytes(self, buf):
-            nwritten = win32.WriteFile(self._handle, buf)
+            overlapped = win32.WriteFile(self._handle, buf, overlapped=True)
+            nwritten, complete = overlapped.GetOverlappedResult(True)
+            assert complete
             assert nwritten == len(buf)
 
-        def _recv_bytes(self, maxsize=None):
+        def _recv_bytes(self, maxsize=None, sentinels=()):
+            if sentinels:
+                self._poll(-1.0, sentinels)
             buf = io.BytesIO()
-            bufsize = 512
-            if maxsize is not None:
-                bufsize = min(bufsize, maxsize)
-            try:
-                firstchunk, complete = win32.ReadFile(self._handle, bufsize)
-            except IOError as e:
-                if e.errno == win32.ERROR_BROKEN_PIPE:
-                    raise EOFError
-                raise
-            lenfirstchunk = len(firstchunk)
-            buf.write(firstchunk)
-            if complete:
-                return buf
+            firstchunk = self._buffered
+            if firstchunk:
+                lenfirstchunk = len(firstchunk)
+                buf.write(firstchunk)
+                self._buffered = b''
+            else:
+                # A reasonable size for the first chunk transfer
+                bufsize = 128
+                if maxsize is not None and maxsize < bufsize:
+                    bufsize = maxsize
+                try:
+                    overlapped = win32.ReadFile(self._handle, bufsize, overlapped=True)
+                    lenfirstchunk, complete = overlapped.GetOverlappedResult(True)
+                    firstchunk = overlapped.getbuffer()
+                    assert lenfirstchunk == len(firstchunk)
+                except IOError as e:
+                    if e.errno == win32.ERROR_BROKEN_PIPE:
+                        raise EOFError
+                    raise
+                buf.write(firstchunk)
+                if complete:
+                    return buf
             navail, nleft = win32.PeekNamedPipe(self._handle)
             if maxsize is not None and lenfirstchunk + nleft > maxsize:
                 return None
-            lastchunk, complete = win32.ReadFile(self._handle, nleft)
-            assert complete
-            buf.write(lastchunk)
+            if nleft > 0:
+                overlapped = win32.ReadFile(self._handle, nleft, overlapped=True)
+                res, complete = overlapped.GetOverlappedResult(True)
+                assert res == nleft
+                assert complete
+                buf.write(overlapped.getbuffer())
             return buf
 
-        def _poll(self, timeout):
+        def _poll(self, timeout, sentinels=()):
+            # Fast non-blocking path
            navail, nleft = win32.PeekNamedPipe(self._handle)
             if navail > 0:
                 return True
             elif timeout == 0.0:
                 return False
-            # Setup a polling loop (translated straight from old
-            # pipe_connection.c)
+            # Blocking: use overlapped I/O
             if timeout < 0.0:
-                deadline = None
+                timeout = INFINITE
             else:
-                deadline = time.time() + timeout
-            delay = 0.001
-            max_delay = 0.02
-            while True:
-                time.sleep(delay)
-                navail, nleft = win32.PeekNamedPipe(self._handle)
-                if navail > 0:
-                    return True
-                if deadline and time.time() > deadline:
-                    return False
-                if delay < max_delay:
-                    delay += 0.001
+                timeout = int(timeout * 1000 + 0.5)
+            overlapped = win32.ReadFile(self._handle, 1, overlapped=True)
+            try:
+                handles = [overlapped.event]
+                handles += sentinels
+                res = win32.WaitForMultipleObjects(handles, False, timeout)
+            finally:
+                # Always cancel overlapped I/O in the same thread
+                # (because CancelIoEx() appears only in Vista)
+                overlapped.cancel()
+            if res == WAIT_TIMEOUT:
+                return False
+            idx = res - WAIT_OBJECT_0
+            if idx == 0:
+                # I/O was successful, store received data
+                overlapped.GetOverlappedResult(True)
+                self._buffered += overlapped.getbuffer()
+                return True
+            assert 0 < idx < len(handles)
+            raise SentinelReady([handles[idx]])
 
 
     class Connection(_ConnectionBase):
@@ -357,11 +395,18 @@ class Connection(_ConnectionBase):
                 break
             buf = buf[n:]
 
-    def _recv(self, size, read=_read):
+    def _recv(self, size, sentinels=(), read=_read):
         buf = io.BytesIO()
+        handle = self._handle
+        if sentinels:
+            handles = [handle] + sentinels
         remaining = size
         while remaining > 0:
-            chunk = read(self._handle, remaining)
+            if sentinels:
+                r = _select(handles, [], [])[0]
+                if handle not in r:
+                    raise SentinelReady(r)
+            chunk = read(handle, remaining)
             n = len(chunk)
             if n == 0:
                 if remaining == size:
@@ -381,15 +426,17 @@ class Connection(_ConnectionBase):
         if n > 0:
             self._send(buf)
 
-    def _recv_bytes(self, maxsize=None):
-        buf = self._recv(4)
+    def _recv_bytes(self, maxsize=None, sentinels=()):
+        buf = self._recv(4, sentinels)
         size, = struct.unpack("=i", buf.getvalue())
         if maxsize is not None and size > maxsize:
             return None
-        return self._recv(size)
+        return self._recv(size, sentinels)
 
     def _poll(self, timeout):
-        r = select.select([self._handle], [], [], timeout)[0]
+        if timeout < 0.0:
+            timeout = None
+        r = _select([self._handle], [], [], timeout)[0]
         return bool(r)
 
 
@@ -495,23 +542,21 @@ else:
             obsize, ibsize = 0, BUFSIZE
 
         h1 = win32.CreateNamedPipe(
-            address, openmode,
+            address, openmode | win32.FILE_FLAG_OVERLAPPED,
             win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
             win32.PIPE_WAIT,
             1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL
             )
         h2 = win32.CreateFile(
-            address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
+            address, access, 0, win32.NULL, win32.OPEN_EXISTING,
+            win32.FILE_FLAG_OVERLAPPED, win32.NULL
             )
         win32.SetNamedPipeHandleState(
             h2, win32.PIPE_READMODE_MESSAGE, None, None
             )
 
-        try:
-            win32.ConnectNamedPipe(h1, win32.NULL)
-        except WindowsError as e:
-            if e.args[0] != win32.ERROR_PIPE_CONNECTED:
-                raise
+        overlapped = win32.ConnectNamedPipe(h1, overlapped=True)
+        overlapped.GetOverlappedResult(True)
 
         c1 = PipeConnection(h1, writable=duplex)
         c2 = PipeConnection(h2, readable=duplex)
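A small sketch of how the new (private) ``sentinels`` parameter behaves at the Connection level, assuming the Unix code path above: ``recv()`` normally blocks for a message, but if one of the given sentinel handles becomes ready first, ``SentinelReady`` is raised instead of blocking forever (illustrative only, not part of the commit):

    import time
    from multiprocessing import Pipe, Process
    from multiprocessing.connection import SentinelReady

    def child(conn):
        time.sleep(60)          # never sends anything back

    if __name__ == '__main__':
        parent_conn, child_conn = Pipe()
        p = Process(target=child, args=(child_conn,))
        p.start()
        p.terminate()           # the child dies without ever writing
        try:
            parent_conn.recv(sentinels=[p.sentinel])
        except SentinelReady as e:
            print("woken by sentinel(s):", e.sentinels)
        p.join()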
@@ -35,6 +35,7 @@
 import os
 import sys
 import signal
+import select
 
 from multiprocessing import util, process
 
@@ -44,7 +44,7 @@ import weakref
 
 from queue import Empty, Full
 import _multiprocessing
-from multiprocessing import Pipe
+from multiprocessing.connection import Pipe, SentinelReady
 from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition
 from multiprocessing.util import debug, info, Finalize, register_after_fork
 from multiprocessing.forking import assert_spawning
@@ -372,10 +372,10 @@ class SimpleQueue(object):
     def _make_methods(self):
         recv = self._reader.recv
         racquire, rrelease = self._rlock.acquire, self._rlock.release
-        def get():
+        def get(*, sentinels=None):
             racquire()
             try:
-                return recv()
+                return recv(sentinels)
             finally:
                 rrelease()
         self.get = get
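This keyword-only ``sentinels`` argument is what the executor's queue-management thread passes in; a condensed sketch of that call pattern, using the private API introduced above (for illustration only):

    from multiprocessing.queues import SentinelReady

    def wait_for_result(result_queue, processes):
        # `processes` is the executor's pid -> Process mapping.
        sentinels = [p.sentinel for p in processes.values()]
        try:
            return result_queue.get(sentinels=sentinels)
        except SentinelReady:
            # At least one worker died abruptly; the caller marks the pool broken.
            return None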
@@ -19,7 +19,7 @@ import unittest
 from concurrent import futures
 from concurrent.futures._base import (
     PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
-import concurrent.futures.process
+from concurrent.futures.process import BrokenProcessPool
 
 
 def create_future(state=PENDING, exception=None, result=None):
@@ -154,7 +154,7 @@ class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
         processes = self.executor._processes
         self.executor.shutdown()
 
-        for p in processes:
+        for p in processes.values():
             p.join()
 
     def test_context_manager_shutdown(self):
@@ -163,7 +163,7 @@ class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
             self.assertEqual(list(e.map(abs, range(-5, 5))),
                              [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
 
-        for p in processes:
+        for p in processes.values():
             p.join()
 
     def test_del_shutdown(self):
@@ -174,7 +174,7 @@ class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
         del executor
 
         queue_management_thread.join()
-        for p in processes:
+        for p in processes.values():
             p.join()
 
 class WaitTests(unittest.TestCase):
@@ -381,7 +381,17 @@ class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest):
 
 
 class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest):
-    pass
+    def test_killed_child(self):
+        # When a child process is abruptly terminated, the whole pool gets
+        # "broken".
+        futures = [self.executor.submit(time.sleep, 3)]
+        # Get one of the processes, and terminate (kill) it
+        p = next(iter(self.executor._processes.values()))
+        p.terminate()
+        for fut in futures:
+            self.assertRaises(BrokenProcessPool, fut.result)
+        # Submitting other jobs fails as well.
+        self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
 
 
 class FutureTests(unittest.TestCase):
@@ -187,6 +187,10 @@ Core and Builtins
 Library
 -------
 
+- Issue #9205: concurrent.futures.ProcessPoolExecutor now detects killed
+  children and raises BrokenProcessPool in such a situation. Previously it
+  would reliably freeze/deadlock.
+
 - Issue #12040: Expose a new attribute ``sentinel`` on instances of
   :class:`multiprocessing.Process`. Also, fix Process.join() to not use
   polling anymore, when given a timeout.
@@ -12,10 +12,223 @@
 #define WIN32_FUNCTION(func) \
     {#func, (PyCFunction)win32_ ## func, METH_VARARGS | METH_STATIC, ""}
 
+#define WIN32_KWARGS_FUNCTION(func) \
+    {#func, (PyCFunction)win32_ ## func, METH_VARARGS | METH_KEYWORDS | METH_STATIC, ""}
+
 #define WIN32_CONSTANT(fmt, con) \
     PyDict_SetItemString(Win32Type.tp_dict, #con, Py_BuildValue(fmt, con))
 
 
+/* Grab CancelIoEx dynamically from kernel32 */
+static int has_CancelIoEx = -1;
+static BOOL (CALLBACK *Py_CancelIoEx)(HANDLE, LPOVERLAPPED);
+
+static int
+check_CancelIoEx()
+{
+    if (has_CancelIoEx == -1)
+    {
+        HINSTANCE hKernel32 = GetModuleHandle("KERNEL32");
+        * (FARPROC *) &Py_CancelIoEx = GetProcAddress(hKernel32,
+                                                      "CancelIoEx");
+        has_CancelIoEx = (Py_CancelIoEx != NULL);
+    }
+    return has_CancelIoEx;
+}
+
+
+/*
+ * A Python object wrapping an OVERLAPPED structure and other useful data
+ * for overlapped I/O
+ */
+
+typedef struct {
+    PyObject_HEAD
+    OVERLAPPED overlapped;
+    /* For convenience, we store the file handle too */
+    HANDLE handle;
+    /* Whether there's I/O in flight */
+    int pending;
+    /* Whether I/O completed successfully */
+    int completed;
+    /* Buffer used for reading (optional) */
+    PyObject *read_buffer;
+    /* Buffer used for writing (optional) */
+    Py_buffer write_buffer;
+} OverlappedObject;
+
+static void
+overlapped_dealloc(OverlappedObject *self)
+{
+    int err = GetLastError();
+    if (self->pending) {
+        if (check_CancelIoEx())
+            Py_CancelIoEx(self->handle, &self->overlapped);
+        else {
+            PyErr_SetString(PyExc_RuntimeError,
+                            "I/O operations still in flight while destroying "
+                            "Overlapped object, the process may crash");
+            PyErr_WriteUnraisable(NULL);
+        }
+    }
+    CloseHandle(self->overlapped.hEvent);
+    SetLastError(err);
+    if (self->write_buffer.obj)
+        PyBuffer_Release(&self->write_buffer);
+    Py_CLEAR(self->read_buffer);
+    PyObject_Del(self);
+}
+
+static PyObject *
+overlapped_GetOverlappedResult(OverlappedObject *self, PyObject *waitobj)
+{
+    int wait;
+    BOOL res;
+    DWORD transferred = 0;
+
+    wait = PyObject_IsTrue(waitobj);
+    if (wait < 0)
+        return NULL;
+    Py_BEGIN_ALLOW_THREADS
+    res = GetOverlappedResult(self->handle, &self->overlapped, &transferred,
+                              wait != 0);
+    Py_END_ALLOW_THREADS
+
+    if (!res) {
+        int err = GetLastError();
+        if (err == ERROR_IO_INCOMPLETE)
+            Py_RETURN_NONE;
+        if (err != ERROR_MORE_DATA) {
+            self->pending = 0;
+            return PyErr_SetExcFromWindowsErr(PyExc_IOError, err);
+        }
+    }
+    self->pending = 0;
+    self->completed = 1;
+    if (self->read_buffer) {
+        assert(PyBytes_CheckExact(self->read_buffer));
+        if (_PyBytes_Resize(&self->read_buffer, transferred))
+            return NULL;
+    }
+    return Py_BuildValue("lN", (long) transferred, PyBool_FromLong(res));
+}
+
+static PyObject *
+overlapped_getbuffer(OverlappedObject *self)
+{
+    PyObject *res;
+    if (!self->completed) {
+        PyErr_SetString(PyExc_ValueError,
+                        "can't get read buffer before GetOverlappedResult() "
+                        "signals the operation completed");
+        return NULL;
+    }
+    res = self->read_buffer ? self->read_buffer : Py_None;
+    Py_INCREF(res);
+    return res;
+}
+
+static PyObject *
+overlapped_cancel(OverlappedObject *self)
+{
+    BOOL res = TRUE;
+
+    if (self->pending) {
+        Py_BEGIN_ALLOW_THREADS
+        if (check_CancelIoEx())
+            res = Py_CancelIoEx(self->handle, &self->overlapped);
+        else
+            res = CancelIo(self->handle);
+        Py_END_ALLOW_THREADS
+    }
+
+    /* CancelIoEx returns ERROR_NOT_FOUND if the I/O completed in-between */
+    if (!res && GetLastError() != ERROR_NOT_FOUND)
+        return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0);
+    self->pending = 0;
+    Py_RETURN_NONE;
+}
+
+static PyMethodDef overlapped_methods[] = {
+    {"GetOverlappedResult", (PyCFunction) overlapped_GetOverlappedResult,
+                            METH_O, NULL},
+    {"getbuffer", (PyCFunction) overlapped_getbuffer, METH_NOARGS, NULL},
+    {"cancel", (PyCFunction) overlapped_cancel, METH_NOARGS, NULL},
+    {NULL}
+};
+
+static PyMemberDef overlapped_members[] = {
+    {"event", T_HANDLE,
+     offsetof(OverlappedObject, overlapped) + offsetof(OVERLAPPED, hEvent),
+     READONLY, "overlapped event handle"},
+    {NULL}
+};
+
+PyTypeObject OverlappedType = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    /* tp_name           */ "_multiprocessing.win32.Overlapped",
+    /* tp_basicsize      */ sizeof(OverlappedObject),
+    /* tp_itemsize       */ 0,
+    /* tp_dealloc        */ (destructor) overlapped_dealloc,
+    /* tp_print          */ 0,
+    /* tp_getattr        */ 0,
+    /* tp_setattr        */ 0,
+    /* tp_reserved       */ 0,
+    /* tp_repr           */ 0,
+    /* tp_as_number      */ 0,
+    /* tp_as_sequence    */ 0,
+    /* tp_as_mapping     */ 0,
+    /* tp_hash           */ 0,
+    /* tp_call           */ 0,
+    /* tp_str            */ 0,
+    /* tp_getattro       */ 0,
+    /* tp_setattro       */ 0,
+    /* tp_as_buffer      */ 0,
+    /* tp_flags          */ Py_TPFLAGS_DEFAULT,
+    /* tp_doc            */ "OVERLAPPED structure wrapper",
+    /* tp_traverse       */ 0,
+    /* tp_clear          */ 0,
+    /* tp_richcompare    */ 0,
+    /* tp_weaklistoffset */ 0,
+    /* tp_iter           */ 0,
+    /* tp_iternext       */ 0,
+    /* tp_methods        */ overlapped_methods,
+    /* tp_members        */ overlapped_members,
+    /* tp_getset         */ 0,
+    /* tp_base           */ 0,
+    /* tp_dict           */ 0,
+    /* tp_descr_get      */ 0,
+    /* tp_descr_set      */ 0,
+    /* tp_dictoffset     */ 0,
+    /* tp_init           */ 0,
+    /* tp_alloc          */ 0,
+    /* tp_new            */ 0,
+};
+
+static OverlappedObject *
+new_overlapped(HANDLE handle)
+{
+    OverlappedObject *self;
+
+    self = PyObject_New(OverlappedObject, &OverlappedType);
+    if (!self)
+        return NULL;
+    self->handle = handle;
+    self->read_buffer = NULL;
+    self->pending = 0;
+    self->completed = 0;
+    memset(&self->overlapped, 0, sizeof(OVERLAPPED));
+    memset(&self->write_buffer, 0, sizeof(Py_buffer));
+    /* Manual reset, initially non-signalled */
+    self->overlapped.hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
+    return self;
+}
+
+
+/*
+ * Module functions
+ */
+
 static PyObject *
 win32_CloseHandle(PyObject *self, PyObject *args)
 {
@@ -36,20 +249,44 @@ win32_CloseHandle(PyObject *self, PyObject *args)
 }
 
 static PyObject *
-win32_ConnectNamedPipe(PyObject *self, PyObject *args)
+win32_ConnectNamedPipe(PyObject *self, PyObject *args, PyObject *kwds)
 {
     HANDLE hNamedPipe;
-    LPOVERLAPPED lpOverlapped;
+    int use_overlapped = 0;
     BOOL success;
+    OverlappedObject *overlapped = NULL;
+    static char *kwlist[] = {"handle", "overlapped", NULL};
 
-    if (!PyArg_ParseTuple(args, F_HANDLE F_POINTER,
-                          &hNamedPipe, &lpOverlapped))
+    if (!PyArg_ParseTupleAndKeywords(args, kwds,
+                                     F_HANDLE "|i", kwlist,
+                                     &hNamedPipe, &use_overlapped))
         return NULL;
 
+    if (use_overlapped) {
+        overlapped = new_overlapped(hNamedPipe);
+        if (!overlapped)
+            return NULL;
+    }
+
     Py_BEGIN_ALLOW_THREADS
-    success = ConnectNamedPipe(hNamedPipe, lpOverlapped);
+    success = ConnectNamedPipe(hNamedPipe,
+                               overlapped ? &overlapped->overlapped : NULL);
     Py_END_ALLOW_THREADS
 
+    if (overlapped) {
+        int err = GetLastError();
+        /* Overlapped ConnectNamedPipe never returns a success code */
+        assert(success == 0);
+        if (err == ERROR_IO_PENDING)
+            overlapped->pending = 1;
+        else if (err == ERROR_PIPE_CONNECTED)
+            SetEvent(overlapped->overlapped.hEvent);
+        else {
+            Py_DECREF(overlapped);
+            return PyErr_SetFromWindowsErr(err);
+        }
+        return (PyObject *) overlapped;
+    }
     if (!success)
         return PyErr_SetFromWindowsErr(0);
 
@@ -280,46 +517,109 @@ win32_send(PyObject *self, PyObject *args)
 }
 
 static PyObject *
-win32_WriteFile(PyObject *self, PyObject *args)
+win32_WriteFile(PyObject *self, PyObject *args, PyObject *kwds)
 {
     HANDLE handle;
-    Py_buffer buf;
+    Py_buffer _buf, *buf;
+    PyObject *bufobj;
     int written;
     BOOL ret;
+    int use_overlapped = 0;
+    OverlappedObject *overlapped = NULL;
+    static char *kwlist[] = {"handle", "buffer", "overlapped", NULL};
 
-    if (!PyArg_ParseTuple(args, F_HANDLE "y*:WriteFile" , &handle, &buf))
+    /* First get handle and use_overlapped to know which Py_buffer to use */
+    if (!PyArg_ParseTupleAndKeywords(args, kwds,
+                                     F_HANDLE "O|i:WriteFile", kwlist,
+                                     &handle, &bufobj, &use_overlapped))
        return NULL;
 
+    if (use_overlapped) {
+        overlapped = new_overlapped(handle);
+        if (!overlapped)
+            return NULL;
+        buf = &overlapped->write_buffer;
+    }
+    else
+        buf = &_buf;
+
+    if (!PyArg_Parse(bufobj, "y*", buf)) {
+        Py_XDECREF(overlapped);
+        return NULL;
+    }
+
     Py_BEGIN_ALLOW_THREADS
-    ret = WriteFile(handle, buf.buf, buf.len, &written, NULL);
+    ret = WriteFile(handle, buf->buf, buf->len, &written,
+                    overlapped ? &overlapped->overlapped : NULL);
     Py_END_ALLOW_THREADS
 
-    PyBuffer_Release(&buf);
+    if (overlapped) {
+        int err = GetLastError();
+        if (!ret) {
+            if (err == ERROR_IO_PENDING)
+                overlapped->pending = 1;
+            else {
+                Py_DECREF(overlapped);
+                return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0);
+            }
+        }
+        return (PyObject *) overlapped;
+    }
+
+    PyBuffer_Release(buf);
     if (!ret)
         return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0);
     return PyLong_FromLong(written);
 }
 
 static PyObject *
-win32_ReadFile(PyObject *self, PyObject *args)
+win32_ReadFile(PyObject *self, PyObject *args, PyObject *kwds)
 {
     HANDLE handle;
     int size;
     DWORD nread;
     PyObject *buf;
     BOOL ret;
+    int use_overlapped = 0;
+    OverlappedObject *overlapped = NULL;
+    static char *kwlist[] = {"handle", "size", "overlapped", NULL};
 
-    if (!PyArg_ParseTuple(args, F_HANDLE "i:ReadFile" , &handle, &size))
+    if (!PyArg_ParseTupleAndKeywords(args, kwds,
+                                     F_HANDLE "i|i:ReadFile", kwlist,
+                                     &handle, &size, &use_overlapped))
         return NULL;
 
     buf = PyBytes_FromStringAndSize(NULL, size);
     if (!buf)
         return NULL;
+    if (use_overlapped) {
+        overlapped = new_overlapped(handle);
+        if (!overlapped) {
+            Py_DECREF(buf);
+            return NULL;
+        }
+        /* Steals reference to buf */
+        overlapped->read_buffer = buf;
+    }
 
     Py_BEGIN_ALLOW_THREADS
-    ret = ReadFile(handle, PyBytes_AS_STRING(buf), size, &nread, NULL);
+    ret = ReadFile(handle, PyBytes_AS_STRING(buf), size, &nread,
+                   overlapped ? &overlapped->overlapped : NULL);
     Py_END_ALLOW_THREADS
 
+    if (overlapped) {
+        int err = GetLastError();
+        if (!ret) {
+            if (err == ERROR_IO_PENDING)
+                overlapped->pending = 1;
+            else if (err != ERROR_MORE_DATA) {
+                Py_DECREF(overlapped);
+                return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0);
+            }
+        }
+        return (PyObject *) overlapped;
+    }
+
     if (!ret && GetLastError() != ERROR_MORE_DATA) {
         Py_DECREF(buf);
         return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0);
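From Python, the overlapped flavour of these private wrappers is driven roughly as follows (a sketch of the calling convention used by PipeConnection above; ``_multiprocessing.win32`` is an internal, Windows-only module and this helper is hypothetical):

    from _multiprocessing import win32

    def overlapped_read(handle, size):
        # ReadFile(..., overlapped=True) returns an Overlapped object at once.
        ov = win32.ReadFile(handle, size, overlapped=True)
        try:
            # Block until the I/O completes (alternatively, wait on ov.event
            # together with other handles via win32.WaitForMultipleObjects).
            nread, complete = ov.GetOverlappedResult(True)
            return ov.getbuffer(), complete
        finally:
            ov.cancel()   # no-op if the I/O already completed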
@@ -373,19 +673,71 @@ win32_PeekNamedPipe(PyObject *self, PyObject *args)
     }
 }
 
+static PyObject *
+win32_WaitForMultipleObjects(PyObject* self, PyObject* args)
+{
+    DWORD result;
+    PyObject *handle_seq;
+    HANDLE handles[MAXIMUM_WAIT_OBJECTS];
+    Py_ssize_t nhandles, i;
+    int wait_flag;
+    int milliseconds = INFINITE;
+
+    if (!PyArg_ParseTuple(args, "Oi|i:WaitForMultipleObjects",
+                          &handle_seq, &wait_flag, &milliseconds))
+        return NULL;
+
+    if (!PySequence_Check(handle_seq)) {
+        PyErr_Format(PyExc_TypeError,
+                     "sequence type expected, got '%s'",
+                     Py_TYPE(handle_seq)->tp_doc);
+        return NULL;
+    }
+    nhandles = PySequence_Length(handle_seq);
+    if (nhandles == -1)
+        return NULL;
+    if (nhandles < 0 || nhandles >= MAXIMUM_WAIT_OBJECTS) {
+        PyErr_Format(PyExc_ValueError,
+                     "need at most %zd handles, got a sequence of length %zd",
+                     MAXIMUM_WAIT_OBJECTS, nhandles);
+        return NULL;
+    }
+    for (i = 0; i < nhandles; i++) {
+        HANDLE h;
+        PyObject *v = PySequence_GetItem(handle_seq, i);
+        if (v == NULL)
+            return NULL;
+        if (!PyArg_Parse(v, F_HANDLE, &h))
+            return NULL;
+        handles[i] = h;
+    }
+
+    Py_BEGIN_ALLOW_THREADS
+    result = WaitForMultipleObjects((DWORD) nhandles, handles,
+                                    (BOOL) wait_flag, (DWORD) milliseconds);
+    Py_END_ALLOW_THREADS
+
+    if (result == WAIT_FAILED)
+        return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0);
+
+    return PyLong_FromLong((int) result);
+}
+
+
 static PyMethodDef win32_methods[] = {
     WIN32_FUNCTION(CloseHandle),
     WIN32_FUNCTION(GetLastError),
     WIN32_FUNCTION(OpenProcess),
     WIN32_FUNCTION(ExitProcess),
-    WIN32_FUNCTION(ConnectNamedPipe),
+    WIN32_KWARGS_FUNCTION(ConnectNamedPipe),
     WIN32_FUNCTION(CreateFile),
     WIN32_FUNCTION(CreateNamedPipe),
-    WIN32_FUNCTION(ReadFile),
+    WIN32_KWARGS_FUNCTION(ReadFile),
     WIN32_FUNCTION(PeekNamedPipe),
     WIN32_FUNCTION(SetNamedPipeHandleState),
+    WIN32_FUNCTION(WaitForMultipleObjects),
     WIN32_FUNCTION(WaitNamedPipe),
-    WIN32_FUNCTION(WriteFile),
+    WIN32_KWARGS_FUNCTION(WriteFile),
     WIN32_FUNCTION(closesocket),
     WIN32_FUNCTION(recv),
     WIN32_FUNCTION(send),
@@ -407,12 +759,18 @@ create_win32_namespace(void)
         return NULL;
     Py_INCREF(&Win32Type);
 
+    if (PyType_Ready(&OverlappedType) < 0)
+        return NULL;
+    PyDict_SetItemString(Win32Type.tp_dict, "Overlapped",
+                         (PyObject *) &OverlappedType);
+
     WIN32_CONSTANT(F_DWORD, ERROR_ALREADY_EXISTS);
     WIN32_CONSTANT(F_DWORD, ERROR_BROKEN_PIPE);
     WIN32_CONSTANT(F_DWORD, ERROR_NO_SYSTEM_RESOURCES);
     WIN32_CONSTANT(F_DWORD, ERROR_PIPE_BUSY);
     WIN32_CONSTANT(F_DWORD, ERROR_PIPE_CONNECTED);
     WIN32_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT);
+    WIN32_CONSTANT(F_DWORD, FILE_FLAG_OVERLAPPED);
     WIN32_CONSTANT(F_DWORD, GENERIC_READ);
     WIN32_CONSTANT(F_DWORD, GENERIC_WRITE);
     WIN32_CONSTANT(F_DWORD, INFINITE);