mirror of
https://github.com/python/cpython.git
synced 2025-11-01 10:45:30 +00:00
gh-116818: Make sys.settrace, sys.setprofile, and monitoring thread-safe (#116775)
Makes sys.settrace, sys.setprofile, and monitoring generally thread-safe. Mostly uses a stop-the-world approach and synchronization around the code object's _co_instrumentation_version. There may be a little extra synchronization around the monitoring data that is required to be TSAN-clean.
This commit is contained in:
parent
b45af00bad
commit
07525c9a85
18 changed files with 530 additions and 63 deletions
|
|
@@ -465,10 +465,16 @@ _Py_atomic_store_ullong_relaxed(unsigned long long *obj,
|
|||
static inline void *
|
||||
_Py_atomic_load_ptr_acquire(const void *obj);
|
||||
|
||||
static inline uintptr_t
|
||||
_Py_atomic_load_uintptr_acquire(const uintptr_t *obj);
|
||||
|
||||
// Stores `*obj = value` (release operation)
|
||||
static inline void
|
||||
_Py_atomic_store_ptr_release(void *obj, void *value);
|
||||
|
||||
static inline void
|
||||
_Py_atomic_store_uintptr_release(uintptr_t *obj, uintptr_t value);
|
||||
|
||||
static inline void
|
||||
_Py_atomic_store_ssize_release(Py_ssize_t *obj, Py_ssize_t value);
|
||||
|
||||
|
|
@@ -491,6 +497,8 @@ static inline Py_ssize_t
|
|||
_Py_atomic_load_ssize_acquire(const Py_ssize_t *obj);
|
||||
|
||||
|
||||
|
||||
|
||||
// --- _Py_atomic_fence ------------------------------------------------------
|
||||
|
||||
// Sequential consistency fence. C11 fences have complex semantics. When
|
||||
|
|
|
|||
|
|
@@ -492,10 +492,18 @@ static inline void *
|
|||
_Py_atomic_load_ptr_acquire(const void *obj)
|
||||
{ return (void *)__atomic_load_n((void **)obj, __ATOMIC_ACQUIRE); }
|
||||
|
||||
static inline uintptr_t
|
||||
_Py_atomic_load_uintptr_acquire(const uintptr_t *obj)
|
||||
{ return (uintptr_t)__atomic_load_n((uintptr_t *)obj, __ATOMIC_ACQUIRE); }
|
||||
|
||||
static inline void
|
||||
_Py_atomic_store_ptr_release(void *obj, void *value)
|
||||
{ __atomic_store_n((void **)obj, value, __ATOMIC_RELEASE); }
|
||||
|
||||
static inline void
|
||||
_Py_atomic_store_uintptr_release(uintptr_t *obj, uintptr_t value)
|
||||
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
|
||||
|
||||
static inline void
|
||||
_Py_atomic_store_int_release(int *obj, int value)
|
||||
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
|
||||
|
|
|
|||
|
|
@@ -914,6 +914,18 @@ _Py_atomic_load_ptr_acquire(const void *obj)
|
|||
#endif
|
||||
}
|
||||
|
||||
static inline uintptr_t
|
||||
_Py_atomic_load_uintptr_acquire(const uintptr_t *obj)
|
||||
{
|
||||
#if defined(_M_X64) || defined(_M_IX86)
|
||||
return *(uintptr_t volatile *)obj;
|
||||
#elif defined(_M_ARM64)
|
||||
return (uintptr_t)__ldar64((unsigned __int64 volatile *)obj);
|
||||
#else
|
||||
# error "no implementation of _Py_atomic_load_uintptr_acquire"
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
_Py_atomic_store_ptr_release(void *obj, void *value)
|
||||
{
|
||||
|
|
@@ -926,6 +938,19 @@ _Py_atomic_store_ptr_release(void *obj, void *value)
|
|||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
_Py_atomic_store_uintptr_release(uintptr_t *obj, uintptr_t value)
|
||||
{
|
||||
#if defined(_M_X64) || defined(_M_IX86)
|
||||
*(uintptr_t volatile *)obj = value;
|
||||
#elif defined(_M_ARM64)
|
||||
_Py_atomic_ASSERT_ARG_TYPE(unsigned __int64);
|
||||
__stlr64((unsigned __int64 volatile *)obj, (unsigned __int64)value);
|
||||
#else
|
||||
# error "no implementation of _Py_atomic_store_uintptr_release"
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
_Py_atomic_store_int_release(int *obj, int value)
|
||||
{
|
||||
|
|
|
|||
|
|
@@ -863,6 +863,14 @@ _Py_atomic_load_ptr_acquire(const void *obj)
|
|||
memory_order_acquire);
|
||||
}
|
||||
|
||||
static inline uintptr_t
|
||||
_Py_atomic_load_uintptr_acquire(const uintptr_t *obj)
|
||||
{
|
||||
_Py_USING_STD;
|
||||
return atomic_load_explicit((const _Atomic(uintptr_t)*)obj,
|
||||
memory_order_acquire);
|
||||
}
|
||||
|
||||
static inline void
|
||||
_Py_atomic_store_ptr_release(void *obj, void *value)
|
||||
{
|
||||
|
|
@@ -871,6 +879,14 @@ _Py_atomic_store_ptr_release(void *obj, void *value)
|
|||
memory_order_release);
|
||||
}
|
||||
|
||||
static inline void
|
||||
_Py_atomic_store_uintptr_release(uintptr_t *obj, uintptr_t value)
|
||||
{
|
||||
_Py_USING_STD;
|
||||
atomic_store_explicit((_Atomic(uintptr_t)*)obj, value,
|
||||
memory_order_release);
|
||||
}
|
||||
|
||||
static inline void
|
||||
_Py_atomic_store_int_release(int *obj, int value)
|
||||
{
|
||||
|
|
|
|||
|
|
@@ -63,6 +63,7 @@ struct _ceval_runtime_state {
|
|||
} perf;
|
||||
/* Pending calls to be made only on the main thread. */
|
||||
struct _pending_calls pending_mainthread;
|
||||
PyMutex sys_trace_profile_mutex;
|
||||
};
|
||||
|
||||
#ifdef PY_HAVE_PERF_TRAMPOLINE
|
||||
|
|
|
|||
|
|
@@ -93,7 +93,7 @@ static inline void _PyObject_GC_SET_SHARED(PyObject *op) {
|
|||
* threads and needs special purpose when freeing due to
|
||||
* the possibility of in-flight lock-free reads occurring.
|
||||
* Objects with this bit that are GC objects will automatically
|
||||
* delay-freed by PyObject_GC_Del. */
|
||||
static inline int _PyObject_GC_IS_SHARED_INLINE(PyObject *op) {
|
||||
return (op->ob_gc_bits & _PyGC_BITS_SHARED_INLINE) != 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -26,20 +26,34 @@ extern "C" {
|
|||
_Py_atomic_load_ssize_relaxed(&value)
|
||||
#define FT_ATOMIC_STORE_PTR(value, new_value) \
|
||||
_Py_atomic_store_ptr(&value, new_value)
|
||||
#define FT_ATOMIC_LOAD_PTR_ACQUIRE(value) \
|
||||
_Py_atomic_load_ptr_acquire(&value)
|
||||
#define FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(value) \
|
||||
_Py_atomic_load_uintptr_acquire(&value)
|
||||
#define FT_ATOMIC_STORE_PTR_RELAXED(value, new_value) \
|
||||
_Py_atomic_store_ptr_relaxed(&value, new_value)
|
||||
#define FT_ATOMIC_STORE_PTR_RELEASE(value, new_value) \
|
||||
_Py_atomic_store_ptr_release(&value, new_value)
|
||||
#define FT_ATOMIC_STORE_UINTPTR_RELEASE(value, new_value) \
|
||||
_Py_atomic_store_uintptr_release(&value, new_value)
|
||||
#define FT_ATOMIC_STORE_SSIZE_RELAXED(value, new_value) \
|
||||
_Py_atomic_store_ssize_relaxed(&value, new_value)
|
||||
#define FT_ATOMIC_STORE_UINT8_RELAXED(value, new_value) \
|
||||
_Py_atomic_store_uint8_relaxed(&value, new_value)
|
||||
|
||||
#else
|
||||
#define FT_ATOMIC_LOAD_PTR(value) value
|
||||
#define FT_ATOMIC_LOAD_SSIZE(value) value
|
||||
#define FT_ATOMIC_LOAD_SSIZE_RELAXED(value) value
|
||||
#define FT_ATOMIC_STORE_PTR(value, new_value) value = new_value
|
||||
#define FT_ATOMIC_LOAD_PTR_ACQUIRE(value) value
|
||||
#define FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(value) value
|
||||
#define FT_ATOMIC_STORE_PTR_RELAXED(value, new_value) value = new_value
|
||||
#define FT_ATOMIC_STORE_PTR_RELEASE(value, new_value) value = new_value
|
||||
#define FT_ATOMIC_STORE_UINTPTR_RELEASE(value, new_value) value = new_value
|
||||
#define FT_ATOMIC_STORE_SSIZE_RELAXED(value, new_value) value = new_value
|
||||
#define FT_ATOMIC_STORE_UINT8_RELAXED(value, new_value) value = new_value
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue