gh-124878: Fix race conditions during interpreter finalization (#130649)

The `PyThreadState` structure gains a reference count field to avoid
issues with `PyThreadState` being a dangling pointer to freed memory.
The refcount starts with a value of two: one reference is owned by the
interpreter's linked list of thread states and one reference is owned by
the OS thread. The reference count is decremented when the thread state
is removed from the interpreter's linked list and before the OS thread
calls `PyThread_hang_thread()`. The thread that decrements it to zero
frees the `PyThreadState` memory.
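Below is a minimal, self-contained sketch of the two-owner reference count
scheme described above. The `thread_state_t`, `ts_new()` and `ts_decref()`
names are illustrative only; the actual change adds a `refcount` field to
`_PyThreadStateImpl` and a `decref_threadstate()` helper, shown in the diff
further down.

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    atomic_int refcount;  /* starts at 2: one reference for the interpreter's
                             linked list, one for the OS thread */
} thread_state_t;

static thread_state_t *
ts_new(void)
{
    thread_state_t *ts = calloc(1, sizeof(*ts));
    atomic_store(&ts->refcount, 2);
    return ts;
}

static void
ts_decref(thread_state_t *ts)
{
    /* atomic_fetch_sub() returns the previous value, so seeing 1 means this
       call dropped the last reference and is responsible for freeing. */
    if (atomic_fetch_sub(&ts->refcount, 1) == 1) {
        free(ts);
    }
}

int
main(void)
{
    thread_state_t *ts = ts_new();
    ts_decref(ts);  /* the interpreter removes it from its linked list */
    ts_decref(ts);  /* the OS thread drops its reference before hanging */
    printf("freed after the second decref\n");
    return 0;
}
```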

The `holds_gil` field is moved out of the `_status` bit field to avoid
a data race where one thread calls `PyThreadState_Clear()`, modifying the
`_status` bit field, while the OS thread reads `holds_gil` when
attempting to acquire the GIL.
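To illustrate the race (with stand-in struct names, not the real
`PyThreadState` layout): adjacent bit-field members share a single memory
location, so storing to one member is a read-modify-write of the whole word
and conflicts with an unsynchronized read of its neighbour. Giving
`holds_gil` its own plain `int` removes the shared location.

```c
#include <stdio.h>

/* Before (illustrative): both flags packed into one word. Writing
   `cleared` rewrites the word that also holds `holds_gil`. */
struct packed_status {
    unsigned int cleared   : 1;  /* written by PyThreadState_Clear() */
    unsigned int holds_gil : 1;  /* read on the GIL acquire path */
};

/* After (illustrative): holds_gil is a separate int, so reading it never
   touches the word containing the remaining _status bits. */
struct tstate_like {
    struct {
        unsigned int cleared : 1;
    } _status;
    int holds_gil;
};

int
main(void)
{
    struct tstate_like t = { ._status = { .cleared = 0 }, .holds_gil = 1 };
    /* One thread can now set t._status.cleared while another reads
       t.holds_gil without the two accesses sharing a memory location. */
    printf("holds_gil=%d cleared=%d\n", t.holds_gil, (int)t._status.cleared);
    return 0;
}
```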

The `PyThreadState.state` field now has `_Py_THREAD_SHUTTING_DOWN` as a
possible value, corresponding to the `_PyThreadState_MustExit()` check.
This avoids race conditions in the free threading build when performing
that check.
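A simplified sketch of the new shutdown signalling, using stand-in enum and
function names rather than the real `_PyThreadState_SetShuttingDown()` and
`_PyThreadState_MustExit()` (the actual versions appear in the diff below):
the finalizing thread stores the shutting-down value into each remaining
thread's `state` field, and the "must exit" test becomes a single atomic
load of the thread's own state.

```c
#include <stdatomic.h>
#include <stdio.h>

enum thread_state {
    THREAD_DETACHED,
    THREAD_ATTACHED,
    THREAD_SUSPENDED,
    THREAD_SHUTTING_DOWN   /* stand-in for _Py_THREAD_SHUTTING_DOWN */
};

typedef struct {
    _Atomic int state;
} tstate_t;

/* Finalizing thread: mark a remaining thread state as shutting down. */
static void
set_shutting_down(tstate_t *ts)
{
    atomic_store(&ts->state, THREAD_SHUTTING_DOWN);
}

/* Daemon thread: "must I exit?" is one relaxed load of its own state,
   instead of comparing possibly-dangling finalizing pointers and ids. */
static int
must_exit(tstate_t *ts)
{
    return atomic_load_explicit(&ts->state, memory_order_relaxed)
           == THREAD_SHUTTING_DOWN;
}

int
main(void)
{
    tstate_t ts = { THREAD_ATTACHED };
    set_shutting_down(&ts);
    printf("must_exit=%d\n", must_exit(&ts));  /* prints must_exit=1 */
    return 0;
}
```
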
Sam Gross 2025-03-06 10:38:34 -05:00 committed by GitHub
parent c6dd2348ca
commit 052cb717f5
13 changed files with 109 additions and 81 deletions


@@ -6,8 +6,8 @@
#include "pycore_pyerrors.h" // _PyErr_GetRaisedException()
#include "pycore_pylifecycle.h" // _PyErr_Print()
#include "pycore_pymem.h" // _PyMem_IsPtrFreed()
#include "pycore_pystate.h" // PyThread_hang_thread()
#include "pycore_pystats.h" // _Py_PrintSpecializationStats()
#include "pycore_pythread.h" // PyThread_hang_thread()
/*
Notes about the implementation:
@@ -206,7 +206,7 @@ drop_gil_impl(PyThreadState *tstate, struct _gil_runtime_state *gil)
_Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
_Py_atomic_store_int_relaxed(&gil->locked, 0);
if (tstate != NULL) {
tstate->_status.holds_gil = 0;
tstate->holds_gil = 0;
}
COND_SIGNAL(gil->cond);
MUTEX_UNLOCK(gil->mutex);
@@ -231,7 +231,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate, int final_release)
// Check if we have the GIL before dropping it. tstate will be NULL if
// take_gil() detected that this thread has been destroyed, in which case
// we know we have the GIL.
if (tstate != NULL && !tstate->_status.holds_gil) {
if (tstate != NULL && !tstate->holds_gil) {
return;
}
#endif
@@ -296,15 +296,14 @@ take_gil(PyThreadState *tstate)
thread which called Py_Finalize(), this thread cannot continue.
This code path can be reached by a daemon thread after Py_Finalize()
completes. In this case, tstate is a dangling pointer: points to
PyThreadState freed memory.
completes.
This used to call a *thread_exit API, but that was not safe as it
lacks stack unwinding and local variable destruction important to
C++. gh-87135: The best that can be done is to hang the thread as
the public APIs calling this have no error reporting mechanism (!).
*/
PyThread_hang_thread();
_PyThreadState_HangThread(tstate);
}
assert(_PyThreadState_CheckConsistency(tstate));
@@ -353,7 +352,7 @@ take_gil(PyThreadState *tstate)
}
// gh-87135: hang the thread as *thread_exit() is not a safe
// API. It lacks stack unwind and local variable destruction.
PyThread_hang_thread();
_PyThreadState_HangThread(tstate);
}
assert(_PyThreadState_CheckConsistency(tstate));
@@ -404,11 +403,11 @@ take_gil(PyThreadState *tstate)
/* tstate could be a dangling pointer, so don't pass it to
drop_gil(). */
drop_gil(interp, NULL, 1);
PyThread_hang_thread();
_PyThreadState_HangThread(tstate);
}
assert(_PyThreadState_CheckConsistency(tstate));
tstate->_status.holds_gil = 1;
tstate->holds_gil = 1;
_Py_unset_eval_breaker_bit(tstate, _PY_GIL_DROP_REQUEST_BIT);
update_eval_breaker_for_thread(interp, tstate);
@@ -460,7 +459,7 @@ PyEval_ThreadsInitialized(void)
static inline int
current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
{
int holds_gil = tstate->_status.holds_gil;
int holds_gil = tstate->holds_gil;
// holds_gil is the source of truth; check that last_holder and gil->locked
// are consistent with it.


@@ -2036,18 +2036,23 @@ _Py_Finalize(_PyRuntimeState *runtime)
// XXX Call something like _PyImport_Disable() here?
/* Destroy the state of all threads of the interpreter, except of the
/* Remove the state of all threads of the interpreter, except for the
current thread. In practice, only daemon threads should still be alive,
except if wait_for_thread_shutdown() has been cancelled by CTRL+C.
Clear frames of other threads to call objects destructors. Destructors
will be called in the current Python thread. Since
_PyRuntimeState_SetFinalizing() has been called, no other Python thread
can take the GIL at this point: if they try, they will exit
immediately. We start the world once we are the only thread state left,
We start the world once we are the only thread state left,
before we call destructors. */
PyThreadState *list = _PyThreadState_RemoveExcept(tstate);
for (PyThreadState *p = list; p != NULL; p = p->next) {
_PyThreadState_SetShuttingDown(p);
}
_PyEval_StartTheWorldAll(runtime);
_PyThreadState_DeleteList(list);
/* Clear frames of other threads to call objects destructors. Destructors
will be called in the current Python thread. Since
_PyRuntimeState_SetFinalizing() has been called, no other Python thread
can take the GIL at this point: if they try, they will hang in
_PyThreadState_HangThread. */
_PyThreadState_DeleteList(list, /*is_after_fork=*/0);
/* At this point no Python code should be running at all.
The only thread state left should be the main thread of the main


@@ -1474,6 +1474,15 @@ free_threadstate(_PyThreadStateImpl *tstate)
}
}
static void
decref_threadstate(_PyThreadStateImpl *tstate)
{
if (_Py_atomic_add_ssize(&tstate->refcount, -1) == 1) {
// The last reference to the thread state is gone.
free_threadstate(tstate);
}
}
/* Get the thread state to a minimal consistent state.
Further init happens in pylifecycle.c before it can be used.
All fields not initialized here are expected to be zeroed out,
@@ -1938,8 +1947,12 @@ _PyThreadState_RemoveExcept(PyThreadState *tstate)
// Deletes the thread states in the linked list `list`.
//
// This is intended to be used in conjunction with _PyThreadState_RemoveExcept.
//
// If `is_after_fork` is true, the thread states are immediately freed.
// Otherwise, they are decref'd because they may still be referenced by an
// OS thread.
void
_PyThreadState_DeleteList(PyThreadState *list)
_PyThreadState_DeleteList(PyThreadState *list, int is_after_fork)
{
// The world can't be stopped because we PyThreadState_Clear() can
// call destructors.
@@ -1949,7 +1962,12 @@ _PyThreadState_DeleteList(PyThreadState *list)
for (p = list; p; p = next) {
next = p->next;
PyThreadState_Clear(p);
free_threadstate((_PyThreadStateImpl *)p);
if (is_after_fork) {
free_threadstate((_PyThreadStateImpl *)p);
}
else {
decref_threadstate((_PyThreadStateImpl *)p);
}
}
}
@@ -2082,12 +2100,19 @@ static void
tstate_wait_attach(PyThreadState *tstate)
{
do {
int expected = _Py_THREAD_SUSPENDED;
// Wait until we're switched out of SUSPENDED to DETACHED.
_PyParkingLot_Park(&tstate->state, &expected, sizeof(tstate->state),
/*timeout=*/-1, NULL, /*detach=*/0);
int state = _Py_atomic_load_int_relaxed(&tstate->state);
if (state == _Py_THREAD_SUSPENDED) {
// Wait until we're switched out of SUSPENDED to DETACHED.
_PyParkingLot_Park(&tstate->state, &state, sizeof(tstate->state),
/*timeout=*/-1, NULL, /*detach=*/0);
}
else if (state == _Py_THREAD_SHUTTING_DOWN) {
// We're shutting down, so we can't attach.
_PyThreadState_HangThread(tstate);
}
else {
assert(state == _Py_THREAD_DETACHED);
}
// Once we're back in DETACHED we can re-attach
} while (!tstate_try_attach(tstate));
}
@@ -2118,7 +2143,7 @@ _PyThreadState_Attach(PyThreadState *tstate)
tstate_activate(tstate);
#ifdef Py_GIL_DISABLED
if (_PyEval_IsGILEnabled(tstate) && !tstate->_status.holds_gil) {
if (_PyEval_IsGILEnabled(tstate) && !tstate->holds_gil) {
// The GIL was enabled between our call to _PyEval_AcquireLock()
// and when we attached (the GIL can't go from enabled to disabled
// here because only a thread holding the GIL can disable
@@ -2201,6 +2226,15 @@ _PyThreadState_Suspend(PyThreadState *tstate)
HEAD_UNLOCK(runtime);
}
void
_PyThreadState_SetShuttingDown(PyThreadState *tstate)
{
_Py_atomic_store_int(&tstate->state, _Py_THREAD_SHUTTING_DOWN);
#ifdef Py_GIL_DISABLED
_PyParkingLot_UnparkAll(&tstate->state);
#endif
}
// Decrease stop-the-world counter of remaining number of threads that need to
// pause. If we are the final thread to pause, notify the requesting thread.
static void
@@ -3001,43 +3035,27 @@ _PyThreadState_CheckConsistency(PyThreadState *tstate)
#endif
// Check if a Python thread must exit immediately, rather than taking the GIL
// if Py_Finalize() has been called.
// Check if a Python thread must call _PyThreadState_HangThread(), rather than
// taking the GIL or attaching to the interpreter if Py_Finalize() has been
// called.
//
// When this function is called by a daemon thread after Py_Finalize() has been
// called, the GIL does no longer exist.
//
// tstate can be a dangling pointer (point to freed memory): only tstate value
// is used, the pointer is not deferenced.
// called, the GIL may no longer exist.
//
// tstate must be non-NULL.
int
_PyThreadState_MustExit(PyThreadState *tstate)
{
/* bpo-39877: Access _PyRuntime directly rather than using
tstate->interp->runtime to support calls from Python daemon threads.
After Py_Finalize() has been called, tstate can be a dangling pointer:
point to PyThreadState freed memory. */
unsigned long finalizing_id = _PyRuntimeState_GetFinalizingID(&_PyRuntime);
PyThreadState *finalizing = _PyRuntimeState_GetFinalizing(&_PyRuntime);
if (finalizing == NULL) {
// XXX This isn't completely safe from daemon thraeds,
// since tstate might be a dangling pointer.
finalizing = _PyInterpreterState_GetFinalizing(tstate->interp);
finalizing_id = _PyInterpreterState_GetFinalizingID(tstate->interp);
}
// XXX else check &_PyRuntime._main_interpreter._initial_thread
if (finalizing == NULL) {
return 0;
}
else if (finalizing == tstate) {
return 0;
}
else if (finalizing_id == PyThread_get_thread_ident()) {
/* gh-109793: we must have switched interpreters. */
return 0;
}
return 1;
int state = _Py_atomic_load_int_relaxed(&tstate->state);
return state == _Py_THREAD_SHUTTING_DOWN;
}
void
_PyThreadState_HangThread(PyThreadState *tstate)
{
_PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
decref_threadstate(tstate_impl);
PyThread_hang_thread();
}
/********************/


@@ -241,7 +241,7 @@ _Py_qsbr_unregister(PyThreadState *tstate)
// gh-119369: GIL must be released (if held) to prevent deadlocks, because
// we might not have an active tstate, which means that blocking on PyMutex
// locks will not implicitly release the GIL.
assert(!tstate->_status.holds_gil);
assert(!tstate->holds_gil);
PyMutex_Lock(&shared->mutex);
// NOTE: we must load (or reload) the thread state's qbsr inside the mutex