Mirror of https://github.com/python/cpython.git
[3.13] gh-117657: Fix TSAN race involving import lock (GH-118523) (#120169)
This adds a `_PyRecursiveMutex` type based on `PyMutex` and uses that
for the import lock. This fixes some data races in the free-threaded
build and generally simplifies the import lock code.
(cherry picked from commit e21057b999)
Co-authored-by: Sam Gross <colesbury@gmail.com>
parent 015ddfeca5
commit 517733ce3c
7 changed files with 90 additions and 105 deletions
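
The header change that defines the new type is not among the hunks shown below, so for orientation, here is a sketch of the layout it is assumed to have, inferred from how the diff uses it. The field types are best guesses, not the authoritative definition.

/* Assumed layout of _PyRecursiveMutex, inferred from this diff: a plain
 * PyMutex plus the owning thread id and a recursion level. */
typedef struct {
    PyMutex mutex;              /* underlying non-recursive lock */
    unsigned long long thread;  /* owner id (PyThread_ident_t), 0 when unowned */
    size_t level;               /* extra acquisitions beyond the first */
} _PyRecursiveMutex;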
Python/import.c

@@ -94,11 +94,7 @@ static struct _inittab *inittab_copy = NULL;
     (interp)->imports.import_func
 
 #define IMPORT_LOCK(interp) \
-    (interp)->imports.lock.mutex
-#define IMPORT_LOCK_THREAD(interp) \
-    (interp)->imports.lock.thread
-#define IMPORT_LOCK_LEVEL(interp) \
-    (interp)->imports.lock.level
+    (interp)->imports.lock
 
 #define FIND_AND_LOAD(interp) \
     (interp)->imports.find_and_load
@@ -115,75 +111,15 @@ static struct _inittab *inittab_copy = NULL;
 void
 _PyImport_AcquireLock(PyInterpreterState *interp)
 {
-    unsigned long me = PyThread_get_thread_ident();
-    if (me == PYTHREAD_INVALID_THREAD_ID)
-        return; /* Too bad */
-    if (IMPORT_LOCK(interp) == NULL) {
-        IMPORT_LOCK(interp) = PyThread_allocate_lock();
-        if (IMPORT_LOCK(interp) == NULL)
-            return; /* Nothing much we can do. */
-    }
-    if (IMPORT_LOCK_THREAD(interp) == me) {
-        IMPORT_LOCK_LEVEL(interp)++;
-        return;
-    }
-    if (IMPORT_LOCK_THREAD(interp) != PYTHREAD_INVALID_THREAD_ID ||
-        !PyThread_acquire_lock(IMPORT_LOCK(interp), 0))
-    {
-        PyThreadState *tstate = PyEval_SaveThread();
-        PyThread_acquire_lock(IMPORT_LOCK(interp), WAIT_LOCK);
-        PyEval_RestoreThread(tstate);
-    }
-    assert(IMPORT_LOCK_LEVEL(interp) == 0);
-    IMPORT_LOCK_THREAD(interp) = me;
-    IMPORT_LOCK_LEVEL(interp) = 1;
+    _PyRecursiveMutex_Lock(&IMPORT_LOCK(interp));
 }
 
-int
+void
 _PyImport_ReleaseLock(PyInterpreterState *interp)
 {
-    unsigned long me = PyThread_get_thread_ident();
-    if (me == PYTHREAD_INVALID_THREAD_ID || IMPORT_LOCK(interp) == NULL)
-        return 0; /* Too bad */
-    if (IMPORT_LOCK_THREAD(interp) != me)
-        return -1;
-    IMPORT_LOCK_LEVEL(interp)--;
-    assert(IMPORT_LOCK_LEVEL(interp) >= 0);
-    if (IMPORT_LOCK_LEVEL(interp) == 0) {
-        IMPORT_LOCK_THREAD(interp) = PYTHREAD_INVALID_THREAD_ID;
-        PyThread_release_lock(IMPORT_LOCK(interp));
-    }
-    return 1;
+    _PyRecursiveMutex_Unlock(&IMPORT_LOCK(interp));
 }
 
-#ifdef HAVE_FORK
-/* This function is called from PyOS_AfterFork_Child() to ensure that newly
-   created child processes do not share locks with the parent.
-   We now acquire the import lock around fork() calls but on some platforms
-   (Solaris 9 and earlier? see isue7242) that still left us with problems. */
-PyStatus
-_PyImport_ReInitLock(PyInterpreterState *interp)
-{
-    if (IMPORT_LOCK(interp) != NULL) {
-        if (_PyThread_at_fork_reinit(&IMPORT_LOCK(interp)) < 0) {
-            return _PyStatus_ERR("failed to create a new lock");
-        }
-    }
-
-    if (IMPORT_LOCK_LEVEL(interp) > 1) {
-        /* Forked as a side effect of import */
-        unsigned long me = PyThread_get_thread_ident();
-        PyThread_acquire_lock(IMPORT_LOCK(interp), WAIT_LOCK);
-        IMPORT_LOCK_THREAD(interp) = me;
-        IMPORT_LOCK_LEVEL(interp)--;
-    } else {
-        IMPORT_LOCK_THREAD(interp) = PYTHREAD_INVALID_THREAD_ID;
-        IMPORT_LOCK_LEVEL(interp) = 0;
-    }
-    return _PyStatus_OK();
-}
-#endif
-
 
 /***************/
 /* sys.modules */
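
With the hand-rolled thread/level bookkeeping gone, nested acquisition on the same thread is handled by the mutex itself. A minimal illustration of the new calling pattern follows; it is an editor's sketch, not code from the patch, and it assumes a Py_BUILD_CORE translation unit where the internal pycore_import.h header is available (example_nested_import_lock is a hypothetical function).

#include "Python.h"
#include "pycore_import.h"   /* _PyImport_AcquireLock, _PyImport_ReleaseLock */

/* Hypothetical example: the import lock can now be taken recursively. */
static void
example_nested_import_lock(PyInterpreterState *interp)
{
    _PyImport_AcquireLock(interp);   /* first acquisition locks the mutex */
    _PyImport_AcquireLock(interp);   /* same thread again: only the level grows */

    /* ... code that expects the import lock to be held ... */

    _PyImport_ReleaseLock(interp);   /* level drops back to zero */
    _PyImport_ReleaseLock(interp);   /* owner at level zero: the mutex is released */
}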
@@ -4111,11 +4047,6 @@ _PyImport_FiniCore(PyInterpreterState *interp)
         PyErr_FormatUnraisable("Exception ignored on clearing sys.modules");
     }
 
-    if (IMPORT_LOCK(interp) != NULL) {
-        PyThread_free_lock(IMPORT_LOCK(interp));
-        IMPORT_LOCK(interp) = NULL;
-    }
-
     _PyImport_ClearCore(interp);
 }
 
@@ -4248,8 +4179,7 @@ _imp_lock_held_impl(PyObject *module)
 /*[clinic end generated code: output=8b89384b5e1963fc input=9b088f9b217d9bdf]*/
 {
     PyInterpreterState *interp = _PyInterpreterState_GET();
-    return PyBool_FromLong(
-        IMPORT_LOCK_THREAD(interp) != PYTHREAD_INVALID_THREAD_ID);
+    return PyBool_FromLong(PyMutex_IsLocked(&IMPORT_LOCK(interp).mutex));
 }
 
 /*[clinic input]
@@ -4283,11 +4213,12 @@ _imp_release_lock_impl(PyObject *module)
 /*[clinic end generated code: output=7faab6d0be178b0a input=934fb11516dd778b]*/
 {
     PyInterpreterState *interp = _PyInterpreterState_GET();
-    if (_PyImport_ReleaseLock(interp) < 0) {
+    if (!_PyRecursiveMutex_IsLockedByCurrentThread(&IMPORT_LOCK(interp))) {
         PyErr_SetString(PyExc_RuntimeError,
                         "not holding the import lock");
         return NULL;
     }
+    _PyImport_ReleaseLock(interp);
     Py_RETURN_NONE;
 }
 
Python/lock.c

@@ -366,6 +366,48 @@ _PyOnceFlag_CallOnceSlow(_PyOnceFlag *flag, _Py_once_fn_t *fn, void *arg)
     }
 }
 
+static int
+recursive_mutex_is_owned_by(_PyRecursiveMutex *m, PyThread_ident_t tid)
+{
+    return _Py_atomic_load_ullong_relaxed(&m->thread) == tid;
+}
+
+int
+_PyRecursiveMutex_IsLockedByCurrentThread(_PyRecursiveMutex *m)
+{
+    return recursive_mutex_is_owned_by(m, PyThread_get_thread_ident_ex());
+}
+
+void
+_PyRecursiveMutex_Lock(_PyRecursiveMutex *m)
+{
+    PyThread_ident_t thread = PyThread_get_thread_ident_ex();
+    if (recursive_mutex_is_owned_by(m, thread)) {
+        m->level++;
+        return;
+    }
+    PyMutex_Lock(&m->mutex);
+    _Py_atomic_store_ullong_relaxed(&m->thread, thread);
+    assert(m->level == 0);
+}
+
+void
+_PyRecursiveMutex_Unlock(_PyRecursiveMutex *m)
+{
+    PyThread_ident_t thread = PyThread_get_thread_ident_ex();
+    if (!recursive_mutex_is_owned_by(m, thread)) {
+        Py_FatalError("unlocking a recursive mutex that is not owned by the"
+                      " current thread");
+    }
+    if (m->level > 0) {
+        m->level--;
+        return;
+    }
+    assert(m->level == 0);
+    _Py_atomic_store_ullong_relaxed(&m->thread, 0);
+    PyMutex_Unlock(&m->mutex);
+}
+
 #define _Py_WRITE_LOCKED 1
 #define _PyRWMutex_READER_SHIFT 2
 #define _Py_RWMUTEX_MAX_READERS (UINTPTR_MAX >> _PyRWMutex_READER_SHIFT)
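
A note on the design: the owner field is read with a relaxed atomic load because a thread only ever compares it against its own id. Either the current thread wrote that value itself, or the comparison fails; the acquire and release of the underlying PyMutex provide the ordering for everything the lock protects. As a usage sketch, here is how another internal subsystem could adopt the primitive. This is an editor's illustration, not part of the patch: intern_state and intern_add are hypothetical names, and the snippet assumes a Py_BUILD_CORE build where pycore_lock.h is available.

#include "Python.h"
#include "pycore_lock.h"   /* _PyRecursiveMutex (internal, Py_BUILD_CORE only) */

/* Hypothetical subsystem state. Zero-initialized storage leaves the embedded
 * _PyRecursiveMutex unlocked, with no owner and level 0. */
typedef struct {
    _PyRecursiveMutex lock;
    PyObject *table;
} intern_state;

static int
intern_add(intern_state *st, PyObject *key)
{
    _PyRecursiveMutex_Lock(&st->lock);
    /* If this ends up re-entering intern_add() on the same thread, the
     * recursive lock just bumps its level instead of deadlocking. */
    int rc = PyDict_SetItem(st->table, key, Py_None);
    _PyRecursiveMutex_Unlock(&st->lock);
    return rc;
}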