Mirror of https://github.com/python/cpython.git, synced 2025-07-07 19:35:27 +00:00
GH-124715: Move trashcan mechanism into Py_Dealloc (GH-132280)

This commit is contained in:
parent 0f23e84cda
commit 44e4c479fb

26 changed files with 88 additions and 196 deletions
@@ -429,81 +429,14 @@ PyAPI_FUNC(void) _Py_NO_RETURN _PyObject_AssertFailed(
    const char *function);

/* Trashcan mechanism, thanks to Christian Tismer.

When deallocating a container object, it's possible to trigger an unbounded
chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
next" object in the chain to 0. This can easily lead to stack overflows,
especially in threads (which typically have less stack space to work with).

A container object can avoid this by bracketing the body of its tp_dealloc
function with a pair of macros:

static void
mytype_dealloc(mytype *p)
{
    ... declarations go here ...

    PyObject_GC_UnTrack(p);    // must untrack first
    Py_TRASHCAN_BEGIN(p, mytype_dealloc)
    ... The body of the deallocator goes here, including all calls ...
    ... to Py_DECREF on contained objects. ...
    Py_TRASHCAN_END    // there should be no code after this
}

CAUTION: Never return from the middle of the body! If the body needs to
"get out early", put a label immediately before the Py_TRASHCAN_END
call, and goto it. Else the call-depth counter (see below) will stay
above 0 forever, and the trashcan will never get emptied.

How it works: The BEGIN macro increments a call-depth counter. So long
as this counter is small, the body of the deallocator is run directly without
further ado. But if the counter gets large, it instead adds p to a list of
objects to be deallocated later, skips the body of the deallocator, and
resumes execution after the END macro. The tp_dealloc routine then returns
without deallocating anything (and so unbounded call-stack depth is avoided).

When the call stack finishes unwinding again, code generated by the END macro
notices this, and calls another routine to deallocate all the objects that
may have been added to the list of deferred deallocations. In effect, a
chain of N deallocations is broken into (N-1)/(Py_TRASHCAN_HEADROOM-1) pieces,
with the call stack never exceeding a depth of Py_TRASHCAN_HEADROOM.

Since the tp_dealloc of a subclass typically calls the tp_dealloc of the base
class, we need to ensure that the trashcan is only triggered on the tp_dealloc
of the actual class being deallocated. Otherwise we might end up with a
partially-deallocated object. To check this, the tp_dealloc function must be
passed as second argument to Py_TRASHCAN_BEGIN().
*/

PyAPI_FUNC(void) _PyTrash_thread_deposit_object(PyThreadState *tstate, PyObject *op);
PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(PyThreadState *tstate);

/* Python 3.10 private API, invoked by Py_TRASHCAN_BEGIN(). */

/* To avoid raising recursion errors during dealloc, trigger the trashcan
 * before we reach the recursion limit. To avoid thrashing, we don't attempt
 * to empty the trashcan until we have headroom above the trigger limit. */
#define Py_TRASHCAN_HEADROOM 50

/* Helper function for Py_TRASHCAN_BEGIN */
PyAPI_FUNC(int) _Py_ReachedRecursionLimitWithMargin(PyThreadState *tstate, int margin_count);

#define Py_TRASHCAN_BEGIN(op, dealloc) \
    do { \
        PyThreadState *tstate = PyThreadState_Get(); \
        if (_Py_ReachedRecursionLimitWithMargin(tstate, 2) && Py_TYPE(op)->tp_dealloc == (destructor)dealloc) { \
            _PyTrash_thread_deposit_object(tstate, (PyObject *)op); \
            break; \
        }
/* The body of the deallocator is here. */
#define Py_TRASHCAN_END \
        if (tstate->delete_later && !_Py_ReachedRecursionLimitWithMargin(tstate, 4)) { \
            _PyTrash_thread_destroy_chain(tstate); \
        } \
    } while (0);

/* For backwards compatibility with the old trashcan mechanism */
#define Py_TRASHCAN_BEGIN(op, dealloc)
#define Py_TRASHCAN_END

PyAPI_FUNC(void *) PyObject_GetItemData(PyObject *obj);
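With this change the macro pair becomes a no-op, so a container type no longer needs any trashcan bracketing: Py_Dealloc itself defers the call when C stack headroom runs low. A minimal sketch of a post-change deallocator (mytype and mytype_dealloc are illustrative names, not from the patch):

typedef struct {
    PyObject_HEAD
    PyObject *next;    /* may head an arbitrarily long chain */
} mytype;

static void
mytype_dealloc(mytype *p)
{
    PyObject_GC_UnTrack(p);              /* still untrack before clearing */
    Py_XDECREF(p->next);                 /* may recurse; Py_Dealloc bounds the depth */
    Py_TYPE(p)->tp_free((PyObject *)p);  /* no Py_TRASHCAN_BEGIN/END needed */
}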
@@ -196,25 +196,6 @@ extern void _PyEval_DeactivateOpCache(void);

/* --- _Py_EnterRecursiveCall() ----------------------------------------- */

#if !_Py__has_builtin(__builtin_frame_address) && !defined(_MSC_VER)
static uintptr_t return_pointer_as_int(char* p) {
    return (uintptr_t)p;
}
#endif

static inline uintptr_t
_Py_get_machine_stack_pointer(void) {
#if _Py__has_builtin(__builtin_frame_address)
    return (uintptr_t)__builtin_frame_address(0);
#elif defined(_MSC_VER)
    return (uintptr_t)_AddressOfReturnAddress();
#else
    char here;
    /* Avoid compiler warning about returning stack address */
    return return_pointer_as_int(&here);
#endif
}

static inline int _Py_MakeRecCheck(PyThreadState *tstate) {
    uintptr_t here_addr = _Py_get_machine_stack_pointer();
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;

@@ -249,12 +230,7 @@ PyAPI_FUNC(void) _Py_InitializeRecursionLimits(PyThreadState *tstate);
static inline int _Py_ReachedRecursionLimit(PyThreadState *tstate) {
    uintptr_t here_addr = _Py_get_machine_stack_pointer();
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    if (here_addr > _tstate->c_stack_soft_limit) {
        return 0;
    }
    if (_tstate->c_stack_hard_limit == 0) {
        _Py_InitializeRecursionLimits(tstate);
    }
    assert(_tstate->c_stack_hard_limit != 0);
    return here_addr <= _tstate->c_stack_soft_limit;
}
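C stacks grow downward on the supported platforms, so the current stack pointer sitting above c_stack_soft_limit means there is still headroom. A hedged sketch of how a recursive C routine might consult this check (visit_deep is a hypothetical function, not part of the patch):

static int
visit_deep(PyThreadState *tstate, PyObject *node)
{
    if (_Py_ReachedRecursionLimit(tstate)) {
        PyErr_SetString(PyExc_RecursionError, "structure too deeply nested");
        return -1;
    }
    /* ... recurse into node's children ... */
    return 0;
}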
@@ -9,6 +9,7 @@ extern "C" {
#endif

#include "pycore_typedefs.h"      // _PyRuntimeState
#include "pycore_tstate.h"


// Values for PyThreadState.state. A thread must be in the "attached" state
@@ -299,6 +300,34 @@ _Py_AssertHoldsTstateFunc(const char *func)
#define _Py_AssertHoldsTstate()
#endif

#if !_Py__has_builtin(__builtin_frame_address) && !defined(_MSC_VER)
static uintptr_t return_pointer_as_int(char* p) {
    return (uintptr_t)p;
}
#endif

static inline uintptr_t
_Py_get_machine_stack_pointer(void) {
#if _Py__has_builtin(__builtin_frame_address)
    return (uintptr_t)__builtin_frame_address(0);
#elif defined(_MSC_VER)
    return (uintptr_t)_AddressOfReturnAddress();
#else
    char here;
    /* Avoid compiler warning about returning stack address */
    return return_pointer_as_int(&here);
#endif
}

static inline intptr_t
_Py_RecursionLimit_GetMargin(PyThreadState *tstate)
{
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    assert(_tstate->c_stack_hard_limit != 0);
    intptr_t here_addr = _Py_get_machine_stack_pointer();
    return Py_ARITHMETIC_RIGHT_SHIFT(intptr_t, here_addr - (intptr_t)_tstate->c_stack_soft_limit, PYOS_STACK_MARGIN_SHIFT);
}

#ifdef __cplusplus
}
#endif
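_Py_RecursionLimit_GetMargin therefore reports remaining headroom in units of PYOS_STACK_MARGIN_BYTES, using an arithmetic right shift so the result goes negative once the soft limit has been crossed. A worked example under assumed values (64-bit non-sanitizer build, so PYOS_LOG2_STACK_MARGIN is 11 and the shift is 14, i.e. one unit is 16384 bytes; the addresses are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    intptr_t here_addr  = 0x7fff0000c350;  /* hypothetical stack pointer */
    intptr_t soft_limit = 0x7fff00000000;  /* hypothetical soft limit */
    /* 0xc350 = 50000 bytes of headroom; 50000 >> 14 == 3 margin units.
     * CPython uses Py_ARITHMETIC_RIGHT_SHIFT for portability when the
     * difference is negative; plain >> suffices here since it is positive. */
    intptr_t margin = (here_addr - soft_limit) >> 14;
    printf("margin = %ld\n", (long)margin);  /* prints 3 */
    return 0;
}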
@@ -26,17 +26,25 @@ PyAPI_DATA(int) (*PyOS_InputHook)(void);
 * apart. In practice, that means it must be larger than the C
 * stack consumption of PyEval_EvalDefault */
#if defined(_Py_ADDRESS_SANITIZER) || defined(_Py_THREAD_SANITIZER)
#  define PYOS_STACK_MARGIN 4096
#  define PYOS_LOG2_STACK_MARGIN 12
#elif defined(Py_DEBUG) && defined(WIN32)
#  define PYOS_STACK_MARGIN 4096
#  define PYOS_LOG2_STACK_MARGIN 12
#elif defined(__wasi__)
   /* Web assembly has two stacks, so this isn't really a size */
#  define PYOS_STACK_MARGIN 500
#  define PYOS_LOG2_STACK_MARGIN 9
#else
#  define PYOS_STACK_MARGIN 2048
#  define PYOS_LOG2_STACK_MARGIN 11
#endif
#define PYOS_STACK_MARGIN (1 << PYOS_LOG2_STACK_MARGIN)
#define PYOS_STACK_MARGIN_BYTES (PYOS_STACK_MARGIN * sizeof(void *))

#if SIZEOF_VOID_P == 8
#define PYOS_STACK_MARGIN_SHIFT (PYOS_LOG2_STACK_MARGIN + 3)
#else
#define PYOS_STACK_MARGIN_SHIFT (PYOS_LOG2_STACK_MARGIN + 2)
#endif


#if defined(WIN32)
#define USE_STACKCHECK
#endif
@@ -0,0 +1,3 @@
Protects against stack overflows when calling :c:func:`Py_DECREF`. Third-party
extension objects no longer need to use the "trashcan" mechanism, as
protection is now built into the :c:func:`Py_DECREF` macro.
@@ -689,7 +689,6 @@ element_dealloc(PyObject *op)

    /* bpo-31095: UnTrack is needed before calling any callbacks */
    PyObject_GC_UnTrack(self);
    Py_TRASHCAN_BEGIN(self, element_dealloc)

    if (self->weakreflist != NULL)
        PyObject_ClearWeakRefs(op);

@@ -700,7 +699,6 @@ element_dealloc(PyObject *op)

    tp->tp_free(self);
    Py_DECREF(tp);
    Py_TRASHCAN_END
}

/* -------------------------------------------------------------------- */
@@ -1311,11 +1311,9 @@ wrapper_dealloc(PyObject *self)
{
    wrapperobject *wp = (wrapperobject *)self;
    PyObject_GC_UnTrack(wp);
    Py_TRASHCAN_BEGIN(wp, wrapper_dealloc)
    Py_XDECREF(wp->descr);
    Py_XDECREF(wp->self);
    PyObject_GC_Del(wp);
    Py_TRASHCAN_END
}

static PyObject *
@@ -3285,7 +3285,6 @@ dict_dealloc(PyObject *self)

    /* bpo-31095: UnTrack is needed before calling any callbacks */
    PyObject_GC_UnTrack(mp);
    Py_TRASHCAN_BEGIN(mp, dict_dealloc)
    if (values != NULL) {
        if (values->embedded == 0) {
            for (i = 0, n = values->capacity; i < n; i++) {

@@ -3305,7 +3304,6 @@ dict_dealloc(PyObject *self)
    else {
        Py_TYPE(mp)->tp_free((PyObject *)mp);
    }
    Py_TRASHCAN_END
}
@@ -150,10 +150,8 @@ BaseException_dealloc(PyObject *op)
    // bpo-44348: The trashcan mechanism prevents stack overflow when deleting
    // long chains of exceptions. For example, exceptions can be chained
    // through the __context__ attributes or the __traceback__ attribute.
    Py_TRASHCAN_BEGIN(self, BaseException_dealloc)
    (void)BaseException_clear(op);
    Py_TYPE(self)->tp_free(self);
    Py_TRASHCAN_END
}

static int
@@ -1917,7 +1917,6 @@ frame_dealloc(PyObject *op)
        _PyObject_GC_UNTRACK(f);
    }

    Py_TRASHCAN_BEGIN(f, frame_dealloc);
    /* GH-106092: If f->f_frame was on the stack and we reached the maximum
     * nesting depth for deallocations, the trashcan may have delayed this
     * deallocation until after f->f_frame is freed. Avoid dereferencing

@@ -1942,7 +1941,6 @@ frame_dealloc(PyObject *op)
    Py_CLEAR(f->f_locals_cache);
    Py_CLEAR(f->f_overwritten_fast_locals);
    PyObject_GC_Del(f);
    Py_TRASHCAN_END;
}

static int
@@ -550,7 +550,6 @@ list_dealloc(PyObject *self)
    PyListObject *op = (PyListObject *)self;
    Py_ssize_t i;
    PyObject_GC_UnTrack(op);
    Py_TRASHCAN_BEGIN(op, list_dealloc)
    if (op->ob_item != NULL) {
        /* Do it backwards, for Christian Tismer.
           There's a simple test case where somehow this reduces

@@ -569,7 +568,6 @@ list_dealloc(PyObject *self)
    else {
        PyObject_GC_Del(op);
    }
    Py_TRASHCAN_END
}

static PyObject *
@@ -166,10 +166,7 @@ static void
meth_dealloc(PyObject *self)
{
    PyCFunctionObject *m = _PyCFunctionObject_CAST(self);
    // The Py_TRASHCAN mechanism requires that we be able to
    // call PyObject_GC_UnTrack twice on an object.
    PyObject_GC_UnTrack(m);
    Py_TRASHCAN_BEGIN(m, meth_dealloc);
    if (m->m_weakreflist != NULL) {
        PyObject_ClearWeakRefs((PyObject*) m);
    }

@@ -190,7 +187,6 @@ meth_dealloc(PyObject *self)
        assert(Py_IS_TYPE(self, &PyCFunction_Type));
        _Py_FREELIST_FREE(pycfunctionobject, m, PyObject_GC_Del);
    }
    Py_TRASHCAN_END;
}

static PyObject *
@@ -2913,13 +2913,15 @@ finally:
void
_PyTrash_thread_deposit_object(PyThreadState *tstate, PyObject *op)
{
    _PyObject_ASSERT(op, _PyObject_IS_GC(op));
    _PyObject_ASSERT(op, !_PyObject_GC_IS_TRACKED(op));
    _PyObject_ASSERT(op, Py_REFCNT(op) == 0);
#ifdef Py_GIL_DISABLED
    op->ob_tid = (uintptr_t)tstate->delete_later;
#else
    _PyGCHead_SET_PREV(_Py_AS_GC(op), (PyGC_Head*)tstate->delete_later);
    /* Store the delete_later pointer in the refcnt field.
     * As this object may still be tracked by the GC,
     * it is important that we never store 0 (NULL). */
    uintptr_t refcnt = (uintptr_t)tstate->delete_later;
    *((uintptr_t*)op) = refcnt+1;
#endif
    tstate->delete_later = op;
}

@@ -2938,7 +2940,11 @@ _PyTrash_thread_destroy_chain(PyThreadState *tstate)
        op->ob_tid = 0;
        _Py_atomic_store_ssize_relaxed(&op->ob_ref_shared, _Py_REF_MERGED);
#else
        tstate->delete_later = (PyObject*) _PyGCHead_PREV(_Py_AS_GC(op));
        /* Get the delete_later pointer from the refcnt field.
         * See _PyTrash_thread_deposit_object(). */
        uintptr_t refcnt = *((uintptr_t*)op);
        tstate->delete_later = (PyObject *)(refcnt - 1);
        op->ob_refcnt = 0;
#endif

        /* Call the deallocator directly. This used to try to

@@ -3003,13 +3009,25 @@ _PyObject_AssertFailed(PyObject *obj, const char *expr, const char *msg,
}


/*
When deallocating a container object, it's possible to trigger an unbounded
chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
next" object in the chain to 0. This can easily lead to stack overflows.
To avoid that, if the C stack is nearing its limit, instead of calling
dealloc on the object, it is added to a queue to be freed later when the
stack is shallower. */
void
_Py_Dealloc(PyObject *op)
{
    PyTypeObject *type = Py_TYPE(op);
    destructor dealloc = type->tp_dealloc;
#ifdef Py_DEBUG
    PyThreadState *tstate = _PyThreadState_GET();
    intptr_t margin = _Py_RecursionLimit_GetMargin(tstate);
    if (margin < 2) {
        _PyTrash_thread_deposit_object(tstate, (PyObject *)op);
        return;
    }
#ifdef Py_DEBUG
#if !defined(Py_GIL_DISABLED) && !defined(Py_STACKREF_DEBUG)
    /* This assertion doesn't hold for the free-threading build, as
     * PyStackRef_CLOSE_SPECIALIZED is not implemented */

@@ -3051,6 +3069,9 @@ _Py_Dealloc(PyObject *op)
    Py_XDECREF(old_exc);
    Py_DECREF(type);
#endif
    if (tstate->delete_later && margin >= 4) {
        _PyTrash_thread_destroy_chain(tstate);
    }
}
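The deposit/destroy pair above threads a LIFO list through each dead object's refcount field, offsetting the stored pointer by one so that a GC-visible value of 0 is never written. A self-contained sketch of the same trick, with illustrative names (obj, push_pending, pop_pending are not the CPython identifiers):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct obj { uintptr_t refcnt_slot; } obj;  /* stands in for ob_refcnt */

static obj *delete_later = NULL;

static void push_pending(obj *op)
{
    op->refcnt_slot = (uintptr_t)delete_later + 1;  /* +1: never store 0 */
    delete_later = op;
}

static obj *pop_pending(void)
{
    obj *op = delete_later;
    assert(op != NULL);
    delete_later = (obj *)(op->refcnt_slot - 1);    /* undo the +1 */
    op->refcnt_slot = 0;                            /* restore a zero refcount */
    return op;
}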
@@ -1389,7 +1389,6 @@ odict_dealloc(PyObject *op)
{
    PyODictObject *self = _PyODictObject_CAST(op);
    PyObject_GC_UnTrack(self);
    Py_TRASHCAN_BEGIN(self, odict_dealloc)

    Py_XDECREF(self->od_inst_dict);
    if (self->od_weakreflist != NULL)

@@ -1397,8 +1396,6 @@ odict_dealloc(PyObject *op)

    _odict_clear_nodes(self);
    PyDict_Type.tp_dealloc((PyObject *)self);

    Py_TRASHCAN_END
}

/* tp_repr */
@@ -536,7 +536,6 @@ set_dealloc(PyObject *self)

    /* bpo-31095: UnTrack is needed before calling any callbacks */
    PyObject_GC_UnTrack(so);
    Py_TRASHCAN_BEGIN(so, set_dealloc)
    if (so->weakreflist != NULL)
        PyObject_ClearWeakRefs((PyObject *) so);

@@ -549,7 +548,6 @@ set_dealloc(PyObject *self)
    if (so->table != so->smalltable)
        PyMem_Free(so->table);
    Py_TYPE(so)->tp_free(so);
    Py_TRASHCAN_END
}

static PyObject *
@@ -207,7 +207,6 @@ tuple_dealloc(PyObject *self)
    }

    PyObject_GC_UnTrack(op);
    Py_TRASHCAN_BEGIN(op, tuple_dealloc)

    Py_ssize_t i = Py_SIZE(op);
    while (--i >= 0) {

@@ -217,8 +216,6 @@ tuple_dealloc(PyObject *self)
    if (!maybe_freelist_push(op)) {
        Py_TYPE(op)->tp_free((PyObject *)op);
    }

    Py_TRASHCAN_END
}

static PyObject *
@@ -2575,7 +2575,6 @@ subtype_dealloc(PyObject *self)
    /* UnTrack and re-Track around the trashcan macro, alas */
    /* See explanation at end of function for full disclosure */
    PyObject_GC_UnTrack(self);
    Py_TRASHCAN_BEGIN(self, subtype_dealloc);

    /* Find the nearest base with a different tp_dealloc */
    base = type;

@@ -2590,7 +2589,7 @@ subtype_dealloc(PyObject *self)
        _PyObject_GC_TRACK(self);
        if (PyObject_CallFinalizerFromDealloc(self) < 0) {
            /* Resurrected */
            goto endlabel;
            return;
        }
        _PyObject_GC_UNTRACK(self);
    }

@@ -2612,7 +2611,7 @@ subtype_dealloc(PyObject *self)
        type->tp_del(self);
        if (Py_REFCNT(self) > 0) {
            /* Resurrected */
            goto endlabel;
            return;
        }
        _PyObject_GC_UNTRACK(self);
    }

@@ -2675,46 +2674,6 @@ subtype_dealloc(PyObject *self)
    if (type_needs_decref) {
        _Py_DECREF_TYPE(type);
    }

  endlabel:
    Py_TRASHCAN_END

    /* Explanation of the weirdness around the trashcan macros:

       Q. What do the trashcan macros do?

       A. Read the comment titled "Trashcan mechanism" in object.h.
          For one, this explains why there must be a call to GC-untrack
          before the trashcan begin macro. Without understanding the
          trashcan code, the answers to the following questions don't make
          sense.

       Q. Why do we GC-untrack before the trashcan and then immediately
          GC-track again afterward?

       A. In the case that the base class is GC-aware, the base class
          probably GC-untracks the object. If it does that using the
          UNTRACK macro, this will crash when the object is already
          untracked. Because we don't know what the base class does, the
          only safe thing is to make sure the object is tracked when we
          call the base class dealloc. But... The trashcan begin macro
          requires that the object is *untracked* before it is called. So
          the dance becomes:

          GC untrack
          trashcan begin
          GC track

       Q. Why did the last question say "immediately GC-track again"?
          It's nowhere near immediately.

       A. Because the code *used* to re-track immediately. Bad Idea.
          self has a refcount of 0, and if gc ever gets its hands on it
          (which can happen if any weakref callback gets invoked), it
          looks like trash to gc too, and gc also tries to delete self
          then. But we're already deleting self. Double deallocation is
          a subtle disaster.
    */
}

static PyTypeObject *solid_base(PyTypeObject *type);
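The untrack/track dance the Q&A describes, reduced to its ordering. This is a schematic of the pre-change code shape under the old (non-empty) trashcan macros, not the full subtype_dealloc; note the goto-to-label exit, matching the CAUTION in the object.h comment:

static void
subtype_dealloc_shape(PyObject *self)
{
    PyObject_GC_UnTrack(self);        /* trashcan begin requires untracked */
    Py_TRASHCAN_BEGIN(self, subtype_dealloc_shape);
    if (Py_TYPE(self)->tp_finalize != NULL) {
        _PyObject_GC_TRACK(self);     /* look tracked only while arbitrary
                                         Python code can run */
        if (PyObject_CallFinalizerFromDealloc(self) < 0) {
            goto endlabel;            /* resurrected: skip the free */
        }
        _PyObject_GC_UNTRACK(self);
    }
    /* ... clear slots, dict, weakrefs; call base tp_dealloc ... */
    Py_TYPE(self)->tp_free(self);
  endlabel:
    Py_TRASHCAN_END
}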
@@ -566,11 +566,9 @@ filter_dealloc(PyObject *self)
{
    filterobject *lz = _filterobject_CAST(self);
    PyObject_GC_UnTrack(lz);
    Py_TRASHCAN_BEGIN(lz, filter_dealloc)
    Py_XDECREF(lz->func);
    Py_XDECREF(lz->it);
    Py_TYPE(lz)->tp_free(lz);
    Py_TRASHCAN_END
}

static int
@@ -482,12 +482,6 @@ _Py_CheckRecursiveCall(PyThreadState *tstate, const char *where)
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    uintptr_t here_addr = _Py_get_machine_stack_pointer();
    assert(_tstate->c_stack_soft_limit != 0);
    if (_tstate->c_stack_hard_limit == 0) {
        _Py_InitializeRecursionLimits(tstate);
    }
    if (here_addr >= _tstate->c_stack_soft_limit) {
        return 0;
    }
    assert(_tstate->c_stack_hard_limit != 0);
    if (here_addr < _tstate->c_stack_hard_limit) {
        /* Overflowing while handling an overflow. Give up. */
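For context, C code normally reaches _Py_CheckRecursiveCall through the public Py_EnterRecursiveCall/Py_LeaveRecursiveCall pair. A small usage sketch (repr_deep is a hypothetical helper, not from the patch):

static PyObject *
repr_deep(PyObject *obj)
{
    if (Py_EnterRecursiveCall(" while getting the repr of an object")) {
        return NULL;                  /* RecursionError already set */
    }
    PyObject *res = PyObject_Repr(obj);
    Py_LeaveRecursiveCall();
    return res;
}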
@@ -2207,9 +2207,8 @@ void
PyObject_GC_UnTrack(void *op_raw)
{
    PyObject *op = _PyObject_CAST(op_raw);
    /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    /* The code for some objects, such as tuples, is a bit
     * sloppy about when the object is tracked and untracked. */
    if (_PyObject_GC_IS_TRACKED(op)) {
        _PyObject_GC_UNTRACK(op);
    }
@@ -2511,9 +2511,8 @@ void
PyObject_GC_UnTrack(void *op_raw)
{
    PyObject *op = _PyObject_CAST(op_raw);
    /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    /* The code for some objects, such as tuples, is a bit
     * sloppy about when the object is tracked and untracked. */
    if (_PyObject_GC_IS_TRACKED(op)) {
        _PyObject_GC_UNTRACK(op);
    }
@@ -1118,7 +1118,6 @@ hamt_node_bitmap_dealloc(PyObject *self)
    }

    PyObject_GC_UnTrack(self);
    Py_TRASHCAN_BEGIN(self, hamt_node_bitmap_dealloc)

    if (len > 0) {
        i = len;

@@ -1128,7 +1127,6 @@ hamt_node_bitmap_dealloc(PyObject *self)
    }

    Py_TYPE(self)->tp_free(self);
    Py_TRASHCAN_END
}

#ifdef Py_DEBUG

@@ -1508,7 +1506,6 @@ hamt_node_collision_dealloc(PyObject *self)
    /* Collision's tp_dealloc */
    Py_ssize_t len = Py_SIZE(self);
    PyObject_GC_UnTrack(self);
    Py_TRASHCAN_BEGIN(self, hamt_node_collision_dealloc)
    if (len > 0) {
        PyHamtNode_Collision *node = _PyHamtNode_Collision_CAST(self);
        while (--len >= 0) {

@@ -1516,7 +1513,6 @@ hamt_node_collision_dealloc(PyObject *self)
        }
    }
    Py_TYPE(self)->tp_free(self);
    Py_TRASHCAN_END
}

#ifdef Py_DEBUG

@@ -1878,13 +1874,11 @@ hamt_node_array_dealloc(PyObject *self)
{
    /* Array's tp_dealloc */
    PyObject_GC_UnTrack(self);
    Py_TRASHCAN_BEGIN(self, hamt_node_array_dealloc)
    PyHamtNode_Array *obj = _PyHamtNode_Array_CAST(self);
    for (Py_ssize_t i = 0; i < HAMT_ARRAY_NODE_SIZE; i++) {
        Py_XDECREF(obj->a_array[i]);
    }
    Py_TYPE(self)->tp_free(self);
    Py_TRASHCAN_END
}

#ifdef Py_DEBUG
@@ -419,10 +419,8 @@ inst_seq_dealloc(PyObject *op)
{
    _PyInstructionSequence *seq = (_PyInstructionSequence *)op;
    PyObject_GC_UnTrack(seq);
    Py_TRASHCAN_BEGIN(seq, inst_seq_dealloc)
    PyInstructionSequence_Fini(seq);
    PyObject_GC_Del(seq);
    Py_TRASHCAN_END
}

static int
@@ -854,6 +854,10 @@ error:
static PyStatus
pycore_interp_init(PyThreadState *tstate)
{
    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    if (_tstate->c_stack_hard_limit == 0) {
        _Py_InitializeRecursionLimits(tstate);
    }
    PyInterpreterState *interp = tstate->interp;
    PyStatus status;
    PyObject *sysmod = NULL;
@@ -2168,7 +2168,10 @@ _PyThreadState_Attach(PyThreadState *tstate)
    if (current_fast_get() != NULL) {
        Py_FatalError("non-NULL old thread state");
    }

    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
    if (_tstate->c_stack_hard_limit == 0) {
        _Py_InitializeRecursionLimits(tstate);
    }

    while (1) {
        _PyEval_AcquireLock(tstate);
@@ -236,11 +236,9 @@ tb_dealloc(PyObject *op)
{
    PyTracebackObject *tb = _PyTracebackObject_CAST(op);
    PyObject_GC_UnTrack(tb);
    Py_TRASHCAN_BEGIN(tb, tb_dealloc)
    Py_XDECREF(tb->tb_next);
    Py_XDECREF(tb->tb_frame);
    PyObject_GC_Del(tb);
    Py_TRASHCAN_END
}

static int