Mirror of https://github.com/python/cpython.git (synced 2025-10-09 16:34:44 +00:00)
gh-110481: Fix biased reference counting queue initialization. (#117271)
The biased reference counting queue must be initialized from the bound (active) thread because it uses `_Py_ThreadId()` as the key in a hash table.
This commit is contained in:
parent 9a1e55b8c5
commit 8dbfdb2957

2 changed files with 18 additions and 6 deletions:
Python/brc.c (14 lines changed)
Python/pystate.c (10 lines changed)
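For context on the commit message: the inter-thread queue entries live in a small hash table of buckets keyed by the owning thread's id, so the key has to be computed on that thread. A minimal sketch of the idea follows (not CPython's actual code; the bucket count, the modulo hash, and the names are illustrative assumptions):

/* A minimal sketch (not CPython's code) of a thread-id-keyed bucket table. */
#include <stdint.h>

#define NUM_BUCKETS 8                      /* illustrative bucket count */

struct bucket { int placeholder; };        /* stands in for mutex + list */
static struct bucket buckets[NUM_BUCKETS];

/* The key is the id of the thread that OWNS the state.  If the key were
 * computed on a different thread (say, the one that merely created the
 * state object), it would select a different bucket than later lookups
 * made by the owner, and the entry would never be found. */
static struct bucket *
get_bucket(uintptr_t tid)
{
    return &buckets[tid % NUM_BUCKETS];
}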
Python/brc.c

@@ -119,6 +119,8 @@ _Py_brc_merge_refcounts(PyThreadState *tstate)
     struct _brc_thread_state *brc = &((_PyThreadStateImpl *)tstate)->brc;
     struct _brc_bucket *bucket = get_bucket(tstate->interp, brc->tid);
 
+    assert(brc->tid == _Py_ThreadId());
+
     // Append all objects into a local stack. We don't want to hold the lock
     // while calling destructors.
     PyMutex_Lock(&bucket->mutex);
@@ -142,11 +144,12 @@ void
 _Py_brc_init_thread(PyThreadState *tstate)
 {
     struct _brc_thread_state *brc = &((_PyThreadStateImpl *)tstate)->brc;
-    brc->tid = _Py_ThreadId();
+    uintptr_t tid = _Py_ThreadId();
 
     // Add ourself to the hashtable
-    struct _brc_bucket *bucket = get_bucket(tstate->interp, brc->tid);
+    struct _brc_bucket *bucket = get_bucket(tstate->interp, tid);
     PyMutex_Lock(&bucket->mutex);
+    brc->tid = tid;
     llist_insert_tail(&bucket->root, &brc->bucket_node);
     PyMutex_Unlock(&bucket->mutex);
 }
@@ -155,6 +158,13 @@ void
 _Py_brc_remove_thread(PyThreadState *tstate)
 {
     struct _brc_thread_state *brc = &((_PyThreadStateImpl *)tstate)->brc;
+    if (brc->tid == 0) {
+        // The thread state may have been created, but never bound to a native
+        // thread and therefore never added to the hashtable.
+        assert(tstate->_status.bound == 0);
+        return;
+    }
+
     struct _brc_bucket *bucket = get_bucket(tstate->interp, brc->tid);
 
     // We need to fully process any objects to merge before removing ourself
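The brc.c changes above amount to two things: the thread id is captured on the bound thread, and it is stored into brc->tid only while the bucket lock is held and the entry is actually being inserted, so brc->tid == 0 reliably means "never registered" at teardown. A distilled sketch of that pattern using pthreads and simplified names (an illustration, not CPython's API; mutex initialization is omitted for brevity):

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define NUM_BUCKETS 8

struct entry {
    uintptr_t tid;                         /* 0 means "never registered" */
    struct entry *next;
};

struct bucket {
    pthread_mutex_t mutex;
    struct entry *head;
};

static struct bucket table[NUM_BUCKETS];

static struct bucket *bucket_for(uintptr_t tid) { return &table[tid % NUM_BUCKETS]; }

/* Must run on the owning thread: the key is this thread's id. */
static void
register_current_thread(struct entry *e)
{
    uintptr_t tid = (uintptr_t)pthread_self();   /* stand-in for _Py_ThreadId() */
    struct bucket *b = bucket_for(tid);
    pthread_mutex_lock(&b->mutex);
    e->tid = tid;                          /* published only once actually in the table */
    e->next = b->head;
    b->head = e;
    pthread_mutex_unlock(&b->mutex);
}

static void
unregister_thread(struct entry *e)
{
    if (e->tid == 0) {
        return;                            /* created but never bound, so never registered */
    }
    struct bucket *b = bucket_for(e->tid);
    pthread_mutex_lock(&b->mutex);
    for (struct entry **p = &b->head; *p != NULL; p = &(*p)->next) {
        if (*p == e) {
            *p = e->next;
            break;
        }
    }
    pthread_mutex_unlock(&b->mutex);
}

Storing the id only at registration time is what makes the tid == 0 check in _Py_brc_remove_thread() a reliable "never added to the hashtable" test.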
Python/pystate.c

@@ -261,6 +261,12 @@ bind_tstate(PyThreadState *tstate)
     tstate->native_thread_id = PyThread_get_thread_native_id();
 #endif
 
+#ifdef Py_GIL_DISABLED
+    // Initialize biased reference counting inter-thread queue. Note that this
+    // needs to be initialized from the active thread.
+    _Py_brc_init_thread(tstate);
+#endif
+
     // mimalloc state needs to be initialized from the active thread.
     tstate_mimalloc_bind(tstate);
 
@@ -1412,10 +1418,6 @@ init_threadstate(_PyThreadStateImpl *_tstate,
     tstate->what_event = -1;
     tstate->previous_executor = NULL;
 
-#ifdef Py_GIL_DISABLED
-    // Initialize biased reference counting inter-thread queue
-    _Py_brc_init_thread(tstate);
-#endif
     llist_init(&_tstate->mem_free_queue);
 
     if (interp->stoptheworld.requested || _PyRuntime.stoptheworld.requested) {
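The pystate.c changes move _Py_brc_init_thread() from init_threadstate(), which can run on a different thread than the one the state will belong to, into bind_tstate(), which runs on the owning thread. A hypothetical pthreads sketch of why that distinction matters (the names and structure here are illustrative, not CPython's):

#include <assert.h>
#include <pthread.h>
#include <stdint.h>

struct tstate {
    uintptr_t tid;                       /* key into a per-thread hash table */
};

/* May run on any thread (e.g. one creating a thread state on another
 * thread's behalf), so it must not capture a thread id here. */
static void init_threadstate(struct tstate *ts) { ts->tid = 0; }

/* Runs on the thread that will own the state; this is the safe place to
 * initialize anything keyed by the owner's thread id. */
static void bind_tstate(struct tstate *ts) { ts->tid = (uintptr_t)pthread_self(); }

static void *
worker(void *arg)
{
    struct tstate *ts = arg;
    bind_tstate(ts);
    assert(ts->tid == (uintptr_t)pthread_self());   /* key matches the owner */
    return NULL;
}

int
main(void)
{
    struct tstate ts;
    init_threadstate(&ts);                   /* created on the main thread... */
    pthread_t t;
    pthread_create(&t, NULL, worker, &ts);   /* ...but owned by the worker */
    pthread_join(t, NULL);
    return 0;
}

This is also why the diff can add assert(brc->tid == _Py_ThreadId()) to _Py_brc_merge_refcounts(): once the queue is initialized at bind time, the stored key is expected to match the thread performing the merge.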