bpo-40513: Per-interpreter gil_drop_request (GH-19927)

Move gil_drop_request member from _PyRuntimeState.ceval to
PyInterpreterState.ceval.
This commit is contained in:
Victor Stinner 2020-05-05 16:14:31 +02:00 committed by GitHub
parent 4e01946caf
commit 0b1e3307e2
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 45 additions and 47 deletions

View file

@@ -42,6 +42,8 @@ struct _ceval_state {
/* This single variable consolidates all requests to break out of /* This single variable consolidates all requests to break out of
the fast path in the eval loop. */ the fast path in the eval loop. */
_Py_atomic_int eval_breaker; _Py_atomic_int eval_breaker;
/* Request for dropping the GIL */
_Py_atomic_int gil_drop_request;
struct _pending_calls pending; struct _pending_calls pending;
/* Request for checking signals. */ /* Request for checking signals. */
_Py_atomic_int signals_pending; _Py_atomic_int signals_pending;

View file

@@ -15,8 +15,6 @@ extern "C" {
struct _ceval_runtime_state { struct _ceval_runtime_state {
int recursion_limit; int recursion_limit;
/* Request for dropping the GIL */
_Py_atomic_int gil_drop_request;
struct _gil_runtime_state gil; struct _gil_runtime_state gil;
}; };

View file

@@ -143,77 +143,70 @@ is_tstate_valid(PyThreadState *tstate)
the GIL eventually anyway. */ the GIL eventually anyway. */
static inline void static inline void
COMPUTE_EVAL_BREAKER(PyInterpreterState *interp, COMPUTE_EVAL_BREAKER(PyInterpreterState *interp,
struct _ceval_runtime_state *ceval, struct _ceval_state *ceval)
struct _ceval_state *ceval2)
{ {
_Py_atomic_store_relaxed(&ceval2->eval_breaker, _Py_atomic_store_relaxed(&ceval->eval_breaker,
_Py_atomic_load_relaxed(&ceval->gil_drop_request) _Py_atomic_load_relaxed(&ceval->gil_drop_request)
| (_Py_atomic_load_relaxed(&ceval2->signals_pending) | (_Py_atomic_load_relaxed(&ceval->signals_pending)
&& _Py_ThreadCanHandleSignals(interp)) && _Py_ThreadCanHandleSignals(interp))
| (_Py_atomic_load_relaxed(&ceval2->pending.calls_to_do) | (_Py_atomic_load_relaxed(&ceval->pending.calls_to_do)
&& _Py_ThreadCanHandlePendingCalls()) && _Py_ThreadCanHandlePendingCalls())
| ceval2->pending.async_exc); | ceval->pending.async_exc);
} }
static inline void static inline void
SET_GIL_DROP_REQUEST(PyInterpreterState *interp) SET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{ {
struct _ceval_runtime_state *ceval = &interp->runtime->ceval; struct _ceval_state *ceval = &interp->ceval;
struct _ceval_state *ceval2 = &interp->ceval;
_Py_atomic_store_relaxed(&ceval->gil_drop_request, 1); _Py_atomic_store_relaxed(&ceval->gil_drop_request, 1);
_Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); _Py_atomic_store_relaxed(&ceval->eval_breaker, 1);
} }
static inline void static inline void
RESET_GIL_DROP_REQUEST(PyInterpreterState *interp) RESET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{ {
struct _ceval_runtime_state *ceval = &interp->runtime->ceval; struct _ceval_state *ceval = &interp->ceval;
struct _ceval_state *ceval2 = &interp->ceval;
_Py_atomic_store_relaxed(&ceval->gil_drop_request, 0); _Py_atomic_store_relaxed(&ceval->gil_drop_request, 0);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); COMPUTE_EVAL_BREAKER(interp, ceval);
} }
static inline void static inline void
SIGNAL_PENDING_CALLS(PyInterpreterState *interp) SIGNAL_PENDING_CALLS(PyInterpreterState *interp)
{ {
struct _ceval_runtime_state *ceval = &interp->runtime->ceval; struct _ceval_state *ceval = &interp->ceval;
struct _ceval_state *ceval2 = &interp->ceval; _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 1);
_Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 1); COMPUTE_EVAL_BREAKER(interp, ceval);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
static inline void static inline void
UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp) UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
{ {
struct _ceval_runtime_state *ceval = &interp->runtime->ceval; struct _ceval_state *ceval = &interp->ceval;
struct _ceval_state *ceval2 = &interp->ceval; _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 0);
_Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0); COMPUTE_EVAL_BREAKER(interp, ceval);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
static inline void static inline void
SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp) SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
{ {
struct _ceval_runtime_state *ceval = &interp->runtime->ceval; struct _ceval_state *ceval = &interp->ceval;
struct _ceval_state *ceval2 = &interp->ceval; _Py_atomic_store_relaxed(&ceval->signals_pending, 1);
_Py_atomic_store_relaxed(&ceval2->signals_pending, 1);
/* eval_breaker is not set to 1 if thread_can_handle_signals() is false */ /* eval_breaker is not set to 1 if thread_can_handle_signals() is false */
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); COMPUTE_EVAL_BREAKER(interp, ceval);
} }
static inline void static inline void
UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp) UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
{ {
struct _ceval_runtime_state *ceval = &interp->runtime->ceval; struct _ceval_state *ceval = &interp->ceval;
struct _ceval_state *ceval2 = &interp->ceval; _Py_atomic_store_relaxed(&ceval->signals_pending, 0);
_Py_atomic_store_relaxed(&ceval2->signals_pending, 0); COMPUTE_EVAL_BREAKER(interp, ceval);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
@@ -229,10 +222,9 @@ SIGNAL_ASYNC_EXC(PyInterpreterState *interp)
static inline void static inline void
UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp) UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp)
{ {
struct _ceval_runtime_state *ceval = &interp->runtime->ceval; struct _ceval_state *ceval = &interp->ceval;
struct _ceval_state *ceval2 = &interp->ceval; ceval->pending.async_exc = 0;
ceval2->pending.async_exc = 0; COMPUTE_EVAL_BREAKER(interp, ceval);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
@@ -357,17 +349,19 @@ PyEval_ReleaseLock(void)
{ {
_PyRuntimeState *runtime = &_PyRuntime; _PyRuntimeState *runtime = &_PyRuntime;
PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime); PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
struct _ceval_state *ceval2 = &tstate->interp->ceval;
/* This function must succeed when the current thread state is NULL. /* This function must succeed when the current thread state is NULL.
We therefore avoid PyThreadState_Get() which dumps a fatal error We therefore avoid PyThreadState_Get() which dumps a fatal error
in debug mode. */ in debug mode. */
drop_gil(&runtime->ceval, tstate); drop_gil(&runtime->ceval, ceval2, tstate);
} }
void void
_PyEval_ReleaseLock(PyThreadState *tstate) _PyEval_ReleaseLock(PyThreadState *tstate)
{ {
struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval; struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval;
drop_gil(ceval, tstate); struct _ceval_state *ceval2 = &tstate->interp->ceval;
drop_gil(ceval, ceval2, tstate);
} }
void void
@@ -393,7 +387,9 @@ PyEval_ReleaseThread(PyThreadState *tstate)
if (new_tstate != tstate) { if (new_tstate != tstate) {
Py_FatalError("wrong thread state"); Py_FatalError("wrong thread state");
} }
drop_gil(&runtime->ceval, tstate); struct _ceval_runtime_state *ceval = &runtime->ceval;
struct _ceval_state *ceval2 = &tstate->interp->ceval;
drop_gil(ceval, ceval2, tstate);
} }
#ifdef HAVE_FORK #ifdef HAVE_FORK
@@ -439,13 +435,14 @@ PyThreadState *
PyEval_SaveThread(void) PyEval_SaveThread(void)
{ {
_PyRuntimeState *runtime = &_PyRuntime; _PyRuntimeState *runtime = &_PyRuntime;
struct _ceval_runtime_state *ceval = &runtime->ceval;
PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL); PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL);
ensure_tstate_not_null(__func__, tstate); ensure_tstate_not_null(__func__, tstate);
struct _ceval_runtime_state *ceval = &runtime->ceval;
struct _ceval_state *ceval2 = &tstate->interp->ceval;
assert(gil_created(&ceval->gil)); assert(gil_created(&ceval->gil));
drop_gil(ceval, tstate); drop_gil(ceval, ceval2, tstate);
return tstate; return tstate;
} }
@@ -847,12 +844,12 @@ eval_frame_handle_pending(PyThreadState *tstate)
} }
/* GIL drop request */ /* GIL drop request */
if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) { if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
/* Give another thread a chance */ /* Give another thread a chance */
if (_PyThreadState_Swap(&runtime->gilstate, NULL) != tstate) { if (_PyThreadState_Swap(&runtime->gilstate, NULL) != tstate) {
Py_FatalError("tstate mix-up"); Py_FatalError("tstate mix-up");
} }
drop_gil(ceval, tstate); drop_gil(ceval, ceval2, tstate);
/* Other threads may run now */ /* Other threads may run now */

View file

@@ -141,7 +141,8 @@ static void recreate_gil(struct _gil_runtime_state *gil)
} }
static void static void
drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate) drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2,
PyThreadState *tstate)
{ {
struct _gil_runtime_state *gil = &ceval->gil; struct _gil_runtime_state *gil = &ceval->gil;
if (!_Py_atomic_load_relaxed(&gil->locked)) { if (!_Py_atomic_load_relaxed(&gil->locked)) {
@@ -163,7 +164,7 @@ drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
MUTEX_UNLOCK(gil->mutex); MUTEX_UNLOCK(gil->mutex);
#ifdef FORCE_SWITCHING #ifdef FORCE_SWITCHING
if (_Py_atomic_load_relaxed(&ceval->gil_drop_request) && tstate != NULL) { if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) {
MUTEX_LOCK(gil->switch_mutex); MUTEX_LOCK(gil->switch_mutex);
/* Not switched yet => wait */ /* Not switched yet => wait */
if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate) if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
@@ -226,6 +227,7 @@ take_gil(PyThreadState *tstate)
assert(is_tstate_valid(tstate)); assert(is_tstate_valid(tstate));
PyInterpreterState *interp = tstate->interp; PyInterpreterState *interp = tstate->interp;
struct _ceval_runtime_state *ceval = &interp->runtime->ceval; struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
struct _ceval_state *ceval2 = &interp->ceval;
struct _gil_runtime_state *gil = &ceval->gil; struct _gil_runtime_state *gil = &ceval->gil;
/* Check that _PyEval_InitThreads() was called to create the lock */ /* Check that _PyEval_InitThreads() was called to create the lock */
@@ -289,12 +291,12 @@ _ready:
in take_gil() while the main thread called in take_gil() while the main thread called
wait_for_thread_shutdown() from Py_Finalize(). */ wait_for_thread_shutdown() from Py_Finalize(). */
MUTEX_UNLOCK(gil->mutex); MUTEX_UNLOCK(gil->mutex);
drop_gil(ceval, tstate); drop_gil(ceval, ceval2, tstate);
PyThread_exit_thread(); PyThread_exit_thread();
} }
assert(is_tstate_valid(tstate)); assert(is_tstate_valid(tstate));
if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) { if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
RESET_GIL_DROP_REQUEST(interp); RESET_GIL_DROP_REQUEST(interp);
} }
else { else {
@@ -303,8 +305,7 @@ _ready:
handle signals. handle signals.
Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */ Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
struct _ceval_state *ceval2 = &interp->ceval; COMPUTE_EVAL_BREAKER(interp, ceval2);
COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
} }
/* Don't access tstate if the thread must exit */ /* Don't access tstate if the thread must exit */