Mirror of https://github.com/python/cpython.git, synced 2025-07-19 17:25:54 +00:00
gh-110693: Pending Calls Machinery Cleanups (gh-118296)

This does some cleanup in preparation for later changes.

parent d5df25268b
commit 09c2947581

7 changed files with 319 additions and 117 deletions
@@ -84,15 +84,15 @@ update_eval_breaker_for_thread(PyInterpreterState *interp, PyThreadState *tstate
         return;
 #endif
 
-    int32_t calls_to_do = _Py_atomic_load_int32_relaxed(
-        &interp->ceval.pending.calls_to_do);
-    if (calls_to_do) {
+    int32_t npending = _Py_atomic_load_int32_relaxed(
+        &interp->ceval.pending.npending);
+    if (npending) {
         _Py_set_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
     }
     else if (_Py_IsMainThread()) {
-        calls_to_do = _Py_atomic_load_int32_relaxed(
-            &_PyRuntime.ceval.pending_mainthread.calls_to_do);
-        if (calls_to_do) {
+        npending = _Py_atomic_load_int32_relaxed(
+            &_PyRuntime.ceval.pending_mainthread.npending);
+        if (npending) {
             _Py_set_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
         }
     }
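This hunk renames the queue counter from `calls_to_do` to `npending`: a relaxed atomic load of the count decides whether to raise the `_PY_CALLS_TO_DO_BIT` eval-breaker bit. For readers unfamiliar with the eval-breaker pattern, here is a minimal self-contained C11 sketch of the producer/consumer shape; the names, types, and memory orderings are illustrative, not CPython's:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CALLS_TO_DO_BIT (1u << 0)

/* Stand-ins for CPython's per-thread eval_breaker word and the queue's
   npending counter. */
static _Atomic uint32_t eval_breaker = 0;
static _Atomic int32_t npending = 0;

/* Producer side: count the queued call, then raise the breaker bit. */
static void schedule_call(void)
{
    atomic_fetch_add_explicit(&npending, 1, memory_order_relaxed);
    atomic_fetch_or_explicit(&eval_breaker, CALLS_TO_DO_BIT,
                             memory_order_release);
}

/* Consumer side: one cheap load on the hot path; only branch into the
   slow path when a bit is set. Single consumer, as in the eval loop. */
static void check_eval_breaker(void)
{
    uint32_t bits = atomic_load_explicit(&eval_breaker, memory_order_acquire);
    if (bits & CALLS_TO_DO_BIT) {
        atomic_fetch_and_explicit(&eval_breaker, ~CALLS_TO_DO_BIT,
                                  memory_order_relaxed);
        while (atomic_load_explicit(&npending, memory_order_relaxed) > 0) {
            atomic_fetch_sub_explicit(&npending, 1, memory_order_relaxed);
            puts("ran one pending call");
        }
    }
}

int main(void)
{
    schedule_call();
    schedule_call();
    check_eval_breaker();  /* prints twice */
    return 0;
}
```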
@@ -624,6 +624,34 @@ PyEval_RestoreThread(PyThreadState *tstate)
 }
 
 
+void
+_PyEval_SignalReceived(void)
+{
+    _Py_set_eval_breaker_bit(_PyRuntime.main_tstate, _PY_SIGNALS_PENDING_BIT);
+}
+
+
+#ifndef Py_GIL_DISABLED
+static void
+signal_active_thread(PyInterpreterState *interp, uintptr_t bit)
+{
+    struct _gil_runtime_state *gil = interp->ceval.gil;
+
+    // If a thread from the targeted interpreter is holding the GIL, signal
+    // that thread. Otherwise, the next thread to run from the targeted
+    // interpreter will have its bit set as part of taking the GIL.
+    MUTEX_LOCK(gil->mutex);
+    if (_Py_atomic_load_int_relaxed(&gil->locked)) {
+        PyThreadState *holder = (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder);
+        if (holder->interp == interp) {
+            _Py_set_eval_breaker_bit(holder, bit);
+        }
+    }
+    MUTEX_UNLOCK(gil->mutex);
+}
+#endif
+
+
 /* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
    signal handlers or Mac I/O completion routines) can schedule calls
    to a function to be called synchronously.
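`signal_active_thread()` (hoisted earlier in the file by this hunk) only signals a thread if it both currently holds the GIL and belongs to the targeted interpreter; otherwise the next acquirer of the GIL picks the bit up itself. A rough pthreads analogue of that "inspect the holder under the lock" shape, with entirely hypothetical types and names:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct thread_info;

/* Stand-in for the GIL runtime state. */
struct lock_state {
    pthread_mutex_t mutex;            /* protects locked and last_holder */
    int locked;
    struct thread_info *last_holder;  /* thread currently holding the lock */
};

struct thread_info {
    int interp_id;                    /* which "interpreter" it belongs to */
    _Atomic uint32_t eval_breaker;    /* per-thread flag word */
};

/* Set `bit` on the current holder only if it belongs to the target
   interpreter; otherwise do nothing and trust the next acquirer of the
   lock to set its own bit, as the comment in the diff describes. */
static void signal_active_thread_sketch(struct lock_state *gil,
                                        int target_interp, uint32_t bit)
{
    pthread_mutex_lock(&gil->mutex);
    if (gil->locked && gil->last_holder != NULL
            && gil->last_holder->interp_id == target_interp) {
        atomic_fetch_or(&gil->last_holder->eval_breaker, bit);
    }
    pthread_mutex_unlock(&gil->mutex);
}
```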
@@ -646,29 +674,31 @@ PyEval_RestoreThread(PyThreadState *tstate)
    threadstate.
 */
 
-void
-_PyEval_SignalReceived(void)
-{
-    _Py_set_eval_breaker_bit(_PyRuntime.main_tstate, _PY_SIGNALS_PENDING_BIT);
-}
-
 /* Push one item onto the queue while holding the lock. */
 static int
 _push_pending_call(struct _pending_calls *pending,
                    _Py_pending_call_func func, void *arg, int flags)
 {
-    int i = pending->last;
-    int j = (i + 1) % NPENDINGCALLS;
-    if (j == pending->first) {
-        return -1; /* Queue full */
+    if (pending->npending == pending->max) {
+        return _Py_ADD_PENDING_FULL;
     }
+    assert(pending->npending < pending->max);
+
+    int i = pending->next;
+    assert(pending->calls[i].func == NULL);
+
     pending->calls[i].func = func;
     pending->calls[i].arg = arg;
     pending->calls[i].flags = flags;
-    pending->last = j;
-    assert(pending->calls_to_do < NPENDINGCALLS);
-    _Py_atomic_add_int32(&pending->calls_to_do, 1);
-    return 0;
+
+    assert(pending->npending < PENDINGCALLSARRAYSIZE);
+    _Py_atomic_add_int32(&pending->npending, 1);
+
+    pending->next = (i + 1) % PENDINGCALLSARRAYSIZE;
+    assert(pending->next != pending->first
+           || pending->npending == pending->max);
+
+    return _Py_ADD_PENDING_SUCCESS;
 }
 
 static int
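The new `_push_pending_call()` drops the classic "keep one slot empty" ring buffer (where `j == first` meant full, wasting a slot and conflating full with empty) in favor of an explicit `npending` count checked against `pending->max`, and it reports fullness with a distinct result code instead of a bare `-1`. A standalone sketch of that data structure, single-threaded for brevity (CPython guards it with `pending->mutex` and keeps `npending` atomic so it can be read without the lock); all names here are illustrative:

```c
#include <assert.h>
#include <stddef.h>

#define ARRAYSIZE 32

typedef int (*call_func)(void *);

struct call { call_func func; void *arg; int flags; };

struct pending_queue {
    struct call calls[ARRAYSIZE];
    int first;     /* index of the oldest queued call */
    int next;      /* index where the next call will be stored */
    int npending;  /* number of queued calls */
    int max;       /* capacity in use, <= ARRAYSIZE */
};

enum push_result { PUSH_SUCCESS = 0, PUSH_FULL = -1 };

static enum push_result
push_call(struct pending_queue *q, call_func func, void *arg, int flags)
{
    if (q->npending == q->max) {
        return PUSH_FULL;            /* distinct code; caller decides */
    }
    int i = q->next;
    assert(q->calls[i].func == NULL);  /* empty slots stay zeroed */
    q->calls[i] = (struct call){func, arg, flags};
    q->npending++;
    q->next = (i + 1) % ARRAYSIZE;   /* counting makes full != empty */
    return PUSH_SUCCESS;
}
```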
@@ -676,8 +706,9 @@ _next_pending_call(struct _pending_calls *pending,
                    int (**func)(void *), void **arg, int *flags)
 {
     int i = pending->first;
-    if (i == pending->last) {
+    if (pending->npending == 0) {
         /* Queue empty */
+        assert(i == pending->next);
         assert(pending->calls[i].func == NULL);
         return -1;
     }
@@ -695,38 +726,18 @@ _pop_pending_call(struct _pending_calls *pending,
     int i = _next_pending_call(pending, func, arg, flags);
     if (i >= 0) {
         pending->calls[i] = (struct _pending_call){0};
-        pending->first = (i + 1) % NPENDINGCALLS;
-        assert(pending->calls_to_do > 0);
-        _Py_atomic_add_int32(&pending->calls_to_do, -1);
+        pending->first = (i + 1) % PENDINGCALLSARRAYSIZE;
+        assert(pending->npending > 0);
+        _Py_atomic_add_int32(&pending->npending, -1);
     }
 }
 
-#ifndef Py_GIL_DISABLED
-static void
-signal_active_thread(PyInterpreterState *interp, uintptr_t bit)
-{
-    struct _gil_runtime_state *gil = interp->ceval.gil;
-
-    // If a thread from the targeted interpreter is holding the GIL, signal
-    // that thread. Otherwise, the next thread to run from the targeted
-    // interpreter will have its bit set as part of taking the GIL.
-    MUTEX_LOCK(gil->mutex);
-    if (_Py_atomic_load_int_relaxed(&gil->locked)) {
-        PyThreadState *holder = (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder);
-        if (holder->interp == interp) {
-            _Py_set_eval_breaker_bit(holder, bit);
-        }
-    }
-    MUTEX_UNLOCK(gil->mutex);
-}
-#endif
-
 /* This implementation is thread-safe. It allows
    scheduling to be made from any thread, and even from an executing
    callback.
 */
 
-int
+_Py_add_pending_call_result
 _PyEval_AddPendingCall(PyInterpreterState *interp,
                        _Py_pending_call_func func, void *arg, int flags)
 {
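The matching pop side uses the same count for the emptiness test and clears the slot so the push-side NULL-func assertion keeps holding. Continuing the sketch above (same hypothetical types):

```c
/* Pop the oldest call, or return -1 if the queue is empty. */
static int
pop_call(struct pending_queue *q, call_func *func, void **arg, int *flags)
{
    if (q->npending == 0) {
        return -1;                   /* queue empty, by count */
    }
    int i = q->first;
    *func = q->calls[i].func;
    *arg = q->calls[i].arg;
    *flags = q->calls[i].flags;
    q->calls[i] = (struct call){0};  /* keep empty slots NULL-func */
    q->first = (i + 1) % ARRAYSIZE;
    q->npending--;
    return 0;
}
```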
@@ -739,7 +750,8 @@ _PyEval_AddPendingCall(PyInterpreterState *interp,
     }
 
     PyMutex_Lock(&pending->mutex);
-    int result = _push_pending_call(pending, func, arg, flags);
+    _Py_add_pending_call_result result =
+        _push_pending_call(pending, func, arg, flags);
     PyMutex_Unlock(&pending->mutex);
 
     if (main_only) {
@@ -762,7 +774,15 @@ Py_AddPendingCall(_Py_pending_call_func func, void *arg)
     /* Legacy users of this API will continue to target the main thread
        (of the main interpreter). */
     PyInterpreterState *interp = _PyInterpreterState_Main();
-    return _PyEval_AddPendingCall(interp, func, arg, _Py_PENDING_MAINTHREADONLY);
+    _Py_add_pending_call_result r =
+        _PyEval_AddPendingCall(interp, func, arg, _Py_PENDING_MAINTHREADONLY);
+    if (r == _Py_ADD_PENDING_FULL) {
+        return -1;
+    }
+    else {
+        assert(r == _Py_ADD_PENDING_SUCCESS);
+        return 0;
+    }
 }
 
 static int
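Note that the public `Py_AddPendingCall()` keeps its documented 0/-1 contract; only the internal `_PyEval_AddPendingCall()` grows the richer `_Py_add_pending_call_result`. A small usage sketch of the public API from embedding or extension code (`my_pending_callback` and `schedule_from_anywhere` are hypothetical names; the API itself is documented and stable):

```c
#include <Python.h>
#include <stdio.h>

/* Runs later in the main interpreter's main thread, with the GIL held.
   Must return 0 on success, -1 with an exception set on failure. */
static int
my_pending_callback(void *arg)
{
    printf("pending call fired: %s\n", (const char *)arg);
    return 0;
}

static void
schedule_from_anywhere(void)
{
    /* Scheduling is safe from any thread. */
    if (Py_AddPendingCall(my_pending_callback, "hello") < 0) {
        /* The queue was full; with this change that is reported as
           _Py_ADD_PENDING_FULL internally and mapped back to -1 here. */
        fprintf(stderr, "pending-call queue full, try again later\n");
    }
}
```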
@@ -782,10 +802,21 @@ handle_signals(PyThreadState *tstate)
 }
 
 static int
-_make_pending_calls(struct _pending_calls *pending)
+_make_pending_calls(struct _pending_calls *pending, int32_t *p_npending)
 {
+    int res = 0;
+    int32_t npending = -1;
+
+    assert(sizeof(pending->max) <= sizeof(size_t)
+           && ((size_t)pending->max) <= Py_ARRAY_LENGTH(pending->calls));
+    int32_t maxloop = pending->maxloop;
+    if (maxloop == 0) {
+        maxloop = pending->max;
+    }
+    assert(maxloop > 0 && maxloop <= pending->max);
+
     /* perform a bounded number of calls, in case of recursion */
-    for (int i=0; i<NPENDINGCALLS; i++) {
+    for (int i=0; i<maxloop; i++) {
         _Py_pending_call_func func = NULL;
         void *arg = NULL;
         int flags = 0;
@@ -793,21 +824,29 @@ _make_pending_calls(struct _pending_calls *pending)
         /* pop one item off the queue while holding the lock */
         PyMutex_Lock(&pending->mutex);
         _pop_pending_call(pending, &func, &arg, &flags);
+        npending = pending->npending;
         PyMutex_Unlock(&pending->mutex);
 
-        /* having released the lock, perform the callback */
+        /* Check if there are any more pending calls. */
         if (func == NULL) {
+            assert(npending == 0);
             break;
         }
-        int res = func(arg);
+
+        /* having released the lock, perform the callback */
+        res = func(arg);
         if ((flags & _Py_PENDING_RAWFREE) && arg != NULL) {
             PyMem_RawFree(arg);
         }
         if (res != 0) {
-            return -1;
+            res = -1;
+            goto finally;
         }
     }
-    return 0;
+
+finally:
+    *p_npending = npending;
+    return res;
 }
 
 static void
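Taken together, the two hunks above turn `_make_pending_calls()` into a bounded drain that also reports how much work is left, so the caller can re-signal instead of looping forever when callbacks keep scheduling more work. The same shape, reduced to the single-threaded sketch types from earlier (`drain_calls` and `p_left` are illustrative; CPython takes `pending->mutex` around each pop):

```c
/* Run at most `maxloop` callbacks and report how many remained.
   Returns 0 on success, -1 if a callback failed. */
static int
drain_calls(struct pending_queue *q, int maxloop, int *p_left)
{
    int res = 0;
    int left = q->npending;
    for (int i = 0; i < maxloop; i++) {
        call_func func = NULL;
        void *arg = NULL;
        int flags = 0;
        if (pop_call(q, &func, &arg, &flags) < 0) {
            left = 0;                /* queue empty */
            break;
        }
        left = q->npending;          /* snapshot taken right after the pop */
        res = func(arg);
        if (res != 0) {
            res = -1;                /* callback failed; stop draining */
            break;
        }
    }
    *p_left = left;                  /* >0 means we hit the maxloop bound */
    return res;
}
```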
@@ -861,26 +900,36 @@ make_pending_calls(PyThreadState *tstate)
        added in-between re-signals */
     unsignal_pending_calls(tstate, interp);
 
-    if (_make_pending_calls(pending) != 0) {
+    int32_t npending;
+    if (_make_pending_calls(pending, &npending) != 0) {
         pending->busy = 0;
         /* There might not be more calls to make, but we play it safe. */
         signal_pending_calls(tstate, interp);
         return -1;
     }
+    if (npending > 0) {
+        /* We hit pending->maxloop. */
+        signal_pending_calls(tstate, interp);
+    }
 
     if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) {
-        if (_make_pending_calls(pending_main) != 0) {
+        if (_make_pending_calls(pending_main, &npending) != 0) {
             pending->busy = 0;
             /* There might not be more calls to make, but we play it safe. */
             signal_pending_calls(tstate, interp);
             return -1;
         }
+        if (npending > 0) {
+            /* We hit pending_main->maxloop. */
+            signal_pending_calls(tstate, interp);
+        }
     }
 
     pending->busy = 0;
     return 0;
 }
 
 
 void
 _Py_set_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit)
 {
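This is the caller's side of the protocol: when a pass stops early (`npending > 0`), `make_pending_calls()` re-signals and lets the eval loop come back around rather than blocking in place. A tiny driver over the earlier sketches showing the same retry shape (`say()` and the queue setup are hypothetical):

```c
#include <stdio.h>

static int say(void *arg) { puts((const char *)arg); return 0; }

int main(void)
{
    struct pending_queue q = { .max = ARRAYSIZE };
    push_call(&q, say, "first", 0);
    push_call(&q, say, "second", 0);

    int left = 0;
    if (drain_calls(&q, /*maxloop=*/1, &left) == 0 && left > 0) {
        /* We hit the bound; a real caller would re-signal here, as
           make_pending_calls() does with signal_pending_calls(). */
        drain_calls(&q, ARRAYSIZE, &left);
    }
    printf("remaining: %d\n", left);  /* prints 0 */
    return 0;
}
```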