gh-114940: Add _Py_FOR_EACH_TSTATE_UNLOCKED(), and Friends (gh-127077)

This is a precursor to the actual fix for gh-114940, where we will change these macros to use the new lock.  This change is almost entirely mechanical; the exceptions are the loops in codeobject.c and ceval.c, which now hold the "head" lock.  Note that almost all of the uses of _Py_FOR_EACH_TSTATE_UNLOCKED() here will change to _Py_FOR_EACH_TSTATE_BEGIN() once we add the new per-interpreter lock.
Eric Snow, 2024-11-21 11:08:38 -07:00 (committed by GitHub)
parent bf542f8bb9
commit 9dabace39d
9 changed files with 79 additions and 87 deletions

Include/internal/pycore_pystate.h

@@ -269,6 +269,15 @@ extern int _PyOS_InterruptOccurred(PyThreadState *tstate);
 #define HEAD_UNLOCK(runtime) \
     PyMutex_Unlock(&(runtime)->interpreters.mutex)
 
+#define _Py_FOR_EACH_TSTATE_UNLOCKED(interp, t) \
+    for (PyThreadState *t = interp->threads.head; t; t = t->next)
+
+#define _Py_FOR_EACH_TSTATE_BEGIN(interp, t) \
+    HEAD_LOCK(interp->runtime); \
+    _Py_FOR_EACH_TSTATE_UNLOCKED(interp, t)
+
+#define _Py_FOR_EACH_TSTATE_END(interp) \
+    HEAD_UNLOCK(interp->runtime)
 // Get the configuration of the current interpreter.
 // The caller must hold the GIL.
 // Export for test_peg_generator.
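For orientation, here is roughly what a converted loop expands to under these macros (a sketch of the expansion, not code from the commit):

    /* A loop written as:
     *     _Py_FOR_EACH_TSTATE_BEGIN(interp, t) { ... }
     *     _Py_FOR_EACH_TSTATE_END(interp);
     * expands to approximately: */
    HEAD_LOCK(interp->runtime);
    for (PyThreadState *t = interp->threads.head; t; t = t->next) {
        /* ... loop body ... */
    }
    HEAD_UNLOCK(interp->runtime);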

Objects/codeobject.c

@@ -2895,20 +2895,22 @@ get_indices_in_use(PyInterpreterState *interp, struct flag_set *in_use)
     assert(interp->stoptheworld.world_stopped);
     assert(in_use->flags == NULL);
     int32_t max_index = 0;
-    for (PyThreadState *p = interp->threads.head; p != NULL; p = p->next) {
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
         int32_t idx = ((_PyThreadStateImpl *) p)->tlbc_index;
         if (idx > max_index) {
             max_index = idx;
         }
     }
+    _Py_FOR_EACH_TSTATE_END(interp);
     in_use->size = (size_t) max_index + 1;
     in_use->flags = PyMem_Calloc(in_use->size, sizeof(*in_use->flags));
     if (in_use->flags == NULL) {
         return -1;
     }
-    for (PyThreadState *p = interp->threads.head; p != NULL; p = p->next) {
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
         in_use->flags[((_PyThreadStateImpl *) p)->tlbc_index] = 1;
     }
+    _Py_FOR_EACH_TSTATE_END(interp);
     return 0;
 }
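As a usage sketch (the helper and its name are hypothetical, not part of this commit), the locked form reads like an ordinary loop over an interpreter's thread states:

    /* Hypothetical example: count thread states while holding the
     * "head" lock via the new macros. */
    static Py_ssize_t
    count_tstates(PyInterpreterState *interp)
    {
        Py_ssize_t n = 0;
        _Py_FOR_EACH_TSTATE_BEGIN(interp, t) {
            n++;  /* 't' is the PyThreadState * declared by the macro */
        }
        _Py_FOR_EACH_TSTATE_END(interp);
        return n;
    }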

Objects/object.c

@@ -119,7 +119,7 @@ get_reftotal(PyInterpreterState *interp)
        since we can't determine which interpreter updated it. */
     Py_ssize_t total = REFTOTAL(interp);
 #ifdef Py_GIL_DISABLED
-    for (PyThreadState *p = interp->threads.head; p != NULL; p = p->next) {
+    _Py_FOR_EACH_TSTATE_UNLOCKED(interp, p) {
         /* This may race with other threads modifications to their reftotal */
         _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)p;
         total += _Py_atomic_load_ssize_relaxed(&tstate_impl->reftotal);
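By contrast, _Py_FOR_EACH_TSTATE_UNLOCKED() takes no lock at all, which is why the commit message expects these call sites to move to _Py_FOR_EACH_TSTATE_BEGIN() once the per-interpreter lock lands. A hypothetical sketch in the same spirit as get_reftotal():

    /* Hypothetical example: walk the thread list without the head lock.
     * Only reasonable when the list cannot change underneath us, or when
     * a racy read is acceptable (as with reftotal above). */
    static Py_ssize_t
    sum_reftotals(PyInterpreterState *interp)
    {
        Py_ssize_t total = 0;
        _Py_FOR_EACH_TSTATE_UNLOCKED(interp, t) {
            total += _Py_atomic_load_ssize_relaxed(
                &((_PyThreadStateImpl *)t)->reftotal);
        }
        return total;
    }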

Objects/obmalloc.c

@@ -1439,7 +1439,7 @@ get_mimalloc_allocated_blocks(PyInterpreterState *interp)
 {
     size_t allocated_blocks = 0;
 #ifdef Py_GIL_DISABLED
-    for (PyThreadState *t = interp->threads.head; t != NULL; t = t->next) {
+    _Py_FOR_EACH_TSTATE_UNLOCKED(interp, t) {
         _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)t;
         for (int i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
             mi_heap_t *heap = &tstate->mimalloc.heaps[i];

Python/ceval.c

@@ -296,11 +296,12 @@ Py_SetRecursionLimit(int new_limit)
 {
     PyInterpreterState *interp = _PyInterpreterState_GET();
     interp->ceval.recursion_limit = new_limit;
-    for (PyThreadState *p = interp->threads.head; p != NULL; p = p->next) {
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
         int depth = p->py_recursion_limit - p->py_recursion_remaining;
         p->py_recursion_limit = new_limit;
         p->py_recursion_remaining = new_limit - depth;
     }
+    _Py_FOR_EACH_TSTATE_END(interp);
 }
 
 /* The function _Py_EnterRecursiveCallTstate() only calls _Py_CheckRecursiveCall()

Python/ceval_gil.c

@@ -977,25 +977,19 @@ make_pending_calls(PyThreadState *tstate)
 void
 _Py_set_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit)
 {
-    _PyRuntimeState *runtime = &_PyRuntime;
-
-    HEAD_LOCK(runtime);
-    for (PyThreadState *tstate = interp->threads.head; tstate != NULL; tstate = tstate->next) {
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, tstate) {
         _Py_set_eval_breaker_bit(tstate, bit);
     }
-    HEAD_UNLOCK(runtime);
+    _Py_FOR_EACH_TSTATE_END(interp);
 }
 
 void
 _Py_unset_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit)
 {
-    _PyRuntimeState *runtime = &_PyRuntime;
-
-    HEAD_LOCK(runtime);
-    for (PyThreadState *tstate = interp->threads.head; tstate != NULL; tstate = tstate->next) {
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, tstate) {
         _Py_unset_eval_breaker_bit(tstate, bit);
     }
-    HEAD_UNLOCK(runtime);
+    _Py_FOR_EACH_TSTATE_END(interp);
 }
 
 void
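A possible call-site sketch for these helpers (the surrounding context is hypothetical; _PY_EVAL_PLEASE_STOP_BIT is the bit used by the stop-the-world code later in this commit):

    /* Ask every thread in 'interp' to take the eval-breaker slow path. */
    _Py_set_eval_breaker_bit_all(interp, _PY_EVAL_PLEASE_STOP_BIT);
    /* ... later, withdraw the request. */
    _Py_unset_eval_breaker_bit_all(interp, _PY_EVAL_PLEASE_STOP_BIT);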

Python/gc_free_threading.c

@@ -304,7 +304,7 @@ gc_visit_heaps_lock_held(PyInterpreterState *interp, mi_block_visit_fun *visitor
     Py_ssize_t offset_pre = offset_base + 2 * sizeof(PyObject*);
 
     // visit each thread's heaps for GC objects
-    for (PyThreadState *p = interp->threads.head; p != NULL; p = p->next) {
+    _Py_FOR_EACH_TSTATE_UNLOCKED(interp, p) {
         struct _mimalloc_thread_state *m = &((_PyThreadStateImpl *)p)->mimalloc;
         if (!_Py_atomic_load_int(&m->initialized)) {
             // The thread may not have called tstate_mimalloc_bind() yet.
@@ -374,8 +374,7 @@ gc_visit_stackref(_PyStackRef stackref)
 static void
 gc_visit_thread_stacks(PyInterpreterState *interp)
 {
-    HEAD_LOCK(&_PyRuntime);
-    for (PyThreadState *p = interp->threads.head; p != NULL; p = p->next) {
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
         for (_PyInterpreterFrame *f = p->current_frame; f != NULL; f = f->previous) {
             PyObject *executable = PyStackRef_AsPyObjectBorrow(f->f_executable);
             if (executable == NULL || !PyCode_Check(executable)) {
@@ -390,7 +389,7 @@ gc_visit_thread_stacks(PyInterpreterState *interp)
             }
         }
     }
-    HEAD_UNLOCK(&_PyRuntime);
+    _Py_FOR_EACH_TSTATE_END(interp);
 }
 
 static void
@@ -444,14 +443,13 @@ process_delayed_frees(PyInterpreterState *interp, struct collection_state *state)
     // Merge the queues from other threads into our own queue so that we can
     // process all of the pending delayed free requests at once.
-    HEAD_LOCK(&_PyRuntime);
-    for (PyThreadState *p = interp->threads.head; p != NULL; p = p->next) {
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
         _PyThreadStateImpl *other = (_PyThreadStateImpl *)p;
         if (other != current_tstate) {
             llist_concat(&current_tstate->mem_free_queue, &other->mem_free_queue);
         }
     }
-    HEAD_UNLOCK(&_PyRuntime);
+    _Py_FOR_EACH_TSTATE_END(interp);
 
     _PyMem_ProcessDelayedNoDealloc((PyThreadState *)current_tstate, queue_freed_object, state);
 }
@@ -1234,8 +1232,7 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state,
         state->gcstate->old[i-1].count = 0;
     }
 
-    HEAD_LOCK(&_PyRuntime);
-    for (PyThreadState *p = interp->threads.head; p != NULL; p = p->next) {
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
         _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)p;
 
         // merge per-thread refcount for types into the type's actual refcount
@@ -1244,7 +1241,7 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state,
         // merge refcounts for all queued objects
         merge_queued_objects(tstate, state);
     }
-    HEAD_UNLOCK(&_PyRuntime);
+    _Py_FOR_EACH_TSTATE_END(interp);
 
     process_delayed_frees(interp, state);
@@ -1993,13 +1990,11 @@ PyUnstable_GC_VisitObjects(gcvisitobjects_t callback, void *arg)
 void
 _PyGC_ClearAllFreeLists(PyInterpreterState *interp)
 {
-    HEAD_LOCK(&_PyRuntime);
-    _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)interp->threads.head;
-    while (tstate != NULL) {
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
+        _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)p;
         _PyObject_ClearFreeLists(&tstate->freelists, 0);
-        tstate = (_PyThreadStateImpl *)tstate->base.next;
     }
-    HEAD_UNLOCK(&_PyRuntime);
+    _Py_FOR_EACH_TSTATE_END(interp);
 }
 
 #endif // Py_GIL_DISABLED

Python/instrumentation.c

@@ -1006,13 +1006,10 @@ set_global_version(PyThreadState *tstate, uint32_t version)
 #ifdef Py_GIL_DISABLED
 
     // Set the version on all threads in free-threaded builds.
-    _PyRuntimeState *runtime = &_PyRuntime;
-    HEAD_LOCK(runtime);
-    for (tstate = interp->threads.head; tstate;
-         tstate = PyThreadState_Next(tstate)) {
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, tstate) {
         set_version_raw(&tstate->eval_breaker, version);
     };
-    HEAD_UNLOCK(runtime);
+    _Py_FOR_EACH_TSTATE_END(interp);
 #else
     // Normal builds take the current version from instrumentation_version when
     // attaching a thread, so we only have to set the current thread's version.

Python/pystate.c

@@ -790,18 +790,15 @@ interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
     }
 
     // Clear the current/main thread state last.
-    HEAD_LOCK(runtime);
-    PyThreadState *p = interp->threads.head;
-    HEAD_UNLOCK(runtime);
-    while (p != NULL) {
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, p) {
         // See https://github.com/python/cpython/issues/102126
         // Must be called without HEAD_LOCK held as it can deadlock
         // if any finalizer tries to acquire that lock.
+        HEAD_UNLOCK(runtime);
         PyThreadState_Clear(p);
         HEAD_LOCK(runtime);
-        p = p->next;
-        HEAD_UNLOCK(runtime);
     }
+    _Py_FOR_EACH_TSTATE_END(interp);
 
     if (tstate->interp == interp) {
         /* We fix tstate->_status below when we for sure aren't using it
            (e.g. no longer need the GIL). */
@@ -1801,10 +1798,9 @@ tstate_delete_common(PyThreadState *tstate, int release_gil)
 static void
 zapthreads(PyInterpreterState *interp)
 {
-    PyThreadState *tstate;
     /* No need to lock the mutex here because this should only happen
        when the threads are all really dead (XXX famous last words). */
-    while ((tstate = interp->threads.head) != NULL) {
+    _Py_FOR_EACH_TSTATE_UNLOCKED(interp, tstate) {
         tstate_verify_not_active(tstate);
         tstate_delete_common(tstate, 0);
         free_threadstate((_PyThreadStateImpl *)tstate);
@@ -2161,7 +2157,7 @@ decrement_stoptheworld_countdown(struct _stoptheworld_state *stw)
 }
 
 #ifdef Py_GIL_DISABLED
-// Interpreter for _Py_FOR_EACH_THREAD(). For global stop-the-world events,
+// Interpreter for _Py_FOR_EACH_STW_INTERP(). For global stop-the-world events,
 // we start with the first interpreter and then iterate over all interpreters.
 // For per-interpreter stop-the-world events, we only operate on the one
 // interpreter.
@@ -2176,10 +2172,9 @@ interp_for_stop_the_world(struct _stoptheworld_state *stw)
 // Loops over threads for a stop-the-world event.
 // For global: all threads in all interpreters
 // For per-interpreter: all threads in the interpreter
-#define _Py_FOR_EACH_THREAD(stw, i, t) \
-    for (i = interp_for_stop_the_world((stw)); \
-         i != NULL; i = ((stw->is_global) ? i->next : NULL)) \
-        for (t = i->threads.head; t; t = t->next)
+#define _Py_FOR_EACH_STW_INTERP(stw, i) \
+    for (PyInterpreterState *i = interp_for_stop_the_world((stw)); \
+         i != NULL; i = ((stw->is_global) ? i->next : NULL))
 
 // Try to transition threads atomically from the "detached" state to the
@@ -2188,14 +2183,13 @@ static bool
 park_detached_threads(struct _stoptheworld_state *stw)
 {
     int num_parked = 0;
-    PyInterpreterState *i;
-    PyThreadState *t;
-    _Py_FOR_EACH_THREAD(stw, i, t) {
+    _Py_FOR_EACH_STW_INTERP(stw, i) {
+        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
             int state = _Py_atomic_load_int_relaxed(&t->state);
             if (state == _Py_THREAD_DETACHED) {
                 // Atomically transition to "suspended" if in "detached" state.
-                if (_Py_atomic_compare_exchange_int(&t->state,
-                                                    &state, _Py_THREAD_SUSPENDED)) {
+                if (_Py_atomic_compare_exchange_int(
+                        &t->state, &state, _Py_THREAD_SUSPENDED)) {
                     num_parked++;
                 }
             }
@@ -2203,6 +2197,7 @@ park_detached_threads(struct _stoptheworld_state *stw)
             _Py_set_eval_breaker_bit(t, _PY_EVAL_PLEASE_STOP_BIT);
         }
     }
+    }
     stw->thread_countdown -= num_parked;
     assert(stw->thread_countdown >= 0);
     return num_parked > 0 && stw->thread_countdown == 0;
@@ -2227,14 +2222,14 @@ stop_the_world(struct _stoptheworld_state *stw)
     stw->stop_event = (PyEvent){0};  // zero-initialize (unset)
     stw->requester = _PyThreadState_GET();  // may be NULL
 
-    PyInterpreterState *i;
-    PyThreadState *t;
-    _Py_FOR_EACH_THREAD(stw, i, t) {
+    _Py_FOR_EACH_STW_INTERP(stw, i) {
+        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            if (t != stw->requester) {
                // Count all the other threads (we don't wait on ourself).
                stw->thread_countdown++;
            }
        }
+    }
 
     if (stw->thread_countdown == 0) {
         HEAD_UNLOCK(runtime);
@@ -2273,9 +2268,8 @@ start_the_world(struct _stoptheworld_state *stw)
     stw->requested = 0;
     stw->world_stopped = 0;
     // Switch threads back to the detached state.
-    PyInterpreterState *i;
-    PyThreadState *t;
-    _Py_FOR_EACH_THREAD(stw, i, t) {
+    _Py_FOR_EACH_STW_INTERP(stw, i) {
+        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
            if (t != stw->requester) {
                assert(_Py_atomic_load_int_relaxed(&t->state) ==
                       _Py_THREAD_SUSPENDED);
@@ -2283,6 +2277,7 @@ start_the_world(struct _stoptheworld_state *stw)
            _PyParkingLot_UnparkAll(&t->state);
        }
    }
+    }
     stw->requester = NULL;
     HEAD_UNLOCK(runtime);
     if (stw->is_global) {
@@ -2344,7 +2339,6 @@ _PyEval_StartTheWorld(PyInterpreterState *interp)
 int
 PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
 {
-    _PyRuntimeState *runtime = &_PyRuntime;
     PyInterpreterState *interp = _PyInterpreterState_GET();
 
     /* Although the GIL is held, a few C API functions can be called
@@ -2353,12 +2347,16 @@ PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
      * list of thread states we're traversing, so to prevent that we lock
     * head_mutex for the duration.
     */
-    HEAD_LOCK(runtime);
-    for (PyThreadState *tstate = interp->threads.head; tstate != NULL; tstate = tstate->next) {
-        if (tstate->thread_id != id) {
-            continue;
+    PyThreadState *tstate = NULL;
+    _Py_FOR_EACH_TSTATE_BEGIN(interp, t) {
+        if (t->thread_id == id) {
+            tstate = t;
+            break;
         }
+    }
+    _Py_FOR_EACH_TSTATE_END(interp);
 
+    if (tstate != NULL) {
         /* Tricky: we need to decref the current value
          * (if any) in tstate->async_exc, but that can in turn
          * allow arbitrary Python code to run, including
@@ -2368,14 +2366,12 @@ PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
          */
         Py_XINCREF(exc);
         PyObject *old_exc = _Py_atomic_exchange_ptr(&tstate->async_exc, exc);
-        HEAD_UNLOCK(runtime);
 
         Py_XDECREF(old_exc);
         _Py_set_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
-        return 1;
     }
-    HEAD_UNLOCK(runtime);
-    return 0;
+
+    return tstate != NULL;
 }
 
 //---------------------------------
@@ -2515,8 +2511,7 @@ _PyThread_CurrentFrames(void)
     HEAD_LOCK(runtime);
     PyInterpreterState *i;
     for (i = runtime->interpreters.head; i != NULL; i = i->next) {
-        PyThreadState *t;
-        for (t = i->threads.head; t != NULL; t = t->next) {
+        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
             _PyInterpreterFrame *frame = t->current_frame;
             frame = _PyFrame_GetFirstComplete(frame);
             if (frame == NULL) {
@@ -2581,8 +2576,7 @@ _PyThread_CurrentExceptions(void)
     HEAD_LOCK(runtime);
     PyInterpreterState *i;
     for (i = runtime->interpreters.head; i != NULL; i = i->next) {
-        PyThreadState *t;
-        for (t = i->threads.head; t != NULL; t = t->next) {
+        _Py_FOR_EACH_TSTATE_UNLOCKED(i, t) {
             _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t);
             if (err_info == NULL) {
                 continue;