bpo-30860: Consolidate stateful runtime globals. (#2594)

* group the (stateful) runtime globals into various topical structs
* consolidate the topical structs under a single top-level _PyRuntimeState struct
* add a check-c-globals.py script that helps identify runtime globals

Other globals are excluded (see globals.txt and check-c-globals.py).
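
As a rough illustration of the pattern (a minimal sketch only: the two field names below are the ones that appear in the hunks further down; everything else is an assumption about the layout, not a copy of the new headers):

#include "Python.h"

/* Topical group for GC-related runtime state (illustrative subset). */
struct _gc_runtime_state {
    /* Was the global _PyTrash_delete_later: list of objects still
     * needing cleanup, singly linked via their gc headers. */
    PyObject *trash_delete_later;
    /* Was the global _PyTrash_delete_nesting: current call-stack
     * depth of tp_dealloc calls. */
    int trash_delete_nesting;
    /* ... more GC state ... */
};

/* Single top-level struct consolidating the topical groups. */
typedef struct pyruntimestate {
    struct _gc_runtime_state gc;
    /* ... other topical structs ... */
} _PyRuntimeState;

/* One process-wide instance; call sites change from bare globals to
 * e.g. _PyRuntime.gc.trash_delete_later, as in the diffs below. */
extern _PyRuntimeState _PyRuntime;

The diffs below show representative call-site updates for the GC trashcan fields.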
Eric Snow 2017-09-05 18:26:16 -07:00 committed by GitHub
parent 501b324d3a
commit 76d5abc868
40 changed files with 2727 additions and 1327 deletions

Objects/object.c

@@ -2028,14 +2028,6 @@ finally:
 
 /* Trashcan support. */
 
-/* Current call-stack depth of tp_dealloc calls. */
-int _PyTrash_delete_nesting = 0;
-
-/* List of objects that still need to be cleaned up, singly linked via their
- * gc headers' gc_prev pointers.
- */
-PyObject *_PyTrash_delete_later = NULL;
-
 /* Add op to the _PyTrash_delete_later list. Called when the current
  * call-stack depth gets large. op must be a currently untracked gc'ed
  * object, with refcount 0. Py_DECREF must already have been called on it.
@@ -2046,8 +2038,8 @@ _PyTrash_deposit_object(PyObject *op)
     assert(PyObject_IS_GC(op));
     assert(_PyGC_REFS(op) == _PyGC_REFS_UNTRACKED);
     assert(op->ob_refcnt == 0);
-    _Py_AS_GC(op)->gc.gc_prev = (PyGC_Head *)_PyTrash_delete_later;
-    _PyTrash_delete_later = op;
+    _Py_AS_GC(op)->gc.gc_prev = (PyGC_Head *)_PyRuntime.gc.trash_delete_later;
+    _PyRuntime.gc.trash_delete_later = op;
 }
 
 /* The equivalent API, using per-thread state recursion info */
@@ -2068,11 +2060,11 @@ _PyTrash_thread_deposit_object(PyObject *op)
 void
 _PyTrash_destroy_chain(void)
 {
-    while (_PyTrash_delete_later) {
-        PyObject *op = _PyTrash_delete_later;
+    while (_PyRuntime.gc.trash_delete_later) {
+        PyObject *op = _PyRuntime.gc.trash_delete_later;
         destructor dealloc = Py_TYPE(op)->tp_dealloc;
 
-        _PyTrash_delete_later =
+        _PyRuntime.gc.trash_delete_later =
             (PyObject*) _Py_AS_GC(op)->gc.gc_prev;
 
         /* Call the deallocator directly. This used to try to
@@ -2082,9 +2074,9 @@ _PyTrash_destroy_chain(void)
          * up distorting allocation statistics.
          */
         assert(op->ob_refcnt == 0);
-        ++_PyTrash_delete_nesting;
+        ++_PyRuntime.gc.trash_delete_nesting;
         (*dealloc)(op);
-        --_PyTrash_delete_nesting;
+        --_PyRuntime.gc.trash_delete_nesting;
     }
 }
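
For context on what these hunks touch, here is a hedged, self-contained sketch of the trashcan pattern (hypothetical names; not the CPython sources): when deallocation nesting gets too deep, an object is parked on a singly linked list instead of being destroyed recursively, and the list is drained iteratively once the stack has unwound.

#include <stddef.h>

#define MAX_NESTING 50

typedef struct node {
    struct node *next;              /* CPython reuses the gc header's gc_prev */
    void (*dealloc)(struct node *);
} node;

/* Grouped state, mirroring _PyRuntime.gc.trash_delete_* above. */
static struct {
    node *delete_later;
    int delete_nesting;
} runtime_gc;

/* Park op for later destruction (cf. _PyTrash_deposit_object). */
static void
deposit(node *op)
{
    op->next = runtime_gc.delete_later;
    runtime_gc.delete_later = op;
}

/* Drain the deferred list iteratively (cf. _PyTrash_destroy_chain). */
static void
destroy_chain(void)
{
    while (runtime_gc.delete_later != NULL) {
        node *op = runtime_gc.delete_later;
        runtime_gc.delete_later = op->next;   /* unlink before destroying */
        ++runtime_gc.delete_nesting;
        op->dealloc(op);
        --runtime_gc.delete_nesting;
    }
}

/* Roughly what the Py_TRASHCAN begin/end macros arrange: destroy
 * directly while shallow, defer when deep, drain when back at depth 0. */
static void
safe_dealloc(node *op)
{
    if (runtime_gc.delete_nesting >= MAX_NESTING) {
        deposit(op);
        return;
    }
    ++runtime_gc.delete_nesting;
    op->dealloc(op);
    --runtime_gc.delete_nesting;
    if (runtime_gc.delete_nesting == 0 && runtime_gc.delete_later != NULL)
        destroy_chain();
}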

File diff suppressed because it is too large.

Objects/setobject.c

@@ -1115,6 +1115,7 @@ frozenset_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
     }
     /* The empty frozenset is a singleton */
     if (emptyfrozenset == NULL)
+        /* There is a possible (relatively harmless) race here. */
         emptyfrozenset = make_new_set(type, NULL);
     Py_XINCREF(emptyfrozenset);
     return emptyfrozenset;
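
The comment added here is terse; as a hedged aside, the race it refers to is the usual unlocked check-then-create on a cached singleton, where losing the race only costs a stray allocation. A generic sketch (not the CPython code):

#include <stdlib.h>

static int *cached = NULL;

/* Two threads can both observe cached == NULL and both allocate; the
 * later store wins and the earlier allocation is merely leaked, so the
 * caller always gets a valid (if not unique) object back. */
static int *
get_cached(void)
{
    if (cached == NULL)
        cached = calloc(1, sizeof *cached);
    return cached;
}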

Objects/typeobject.c

@@ -1157,10 +1157,10 @@ subtype_dealloc(PyObject *self)
     /* UnTrack and re-Track around the trashcan macro, alas */
     /* See explanation at end of function for full disclosure */
     PyObject_GC_UnTrack(self);
-    ++_PyTrash_delete_nesting;
+    ++_PyRuntime.gc.trash_delete_nesting;
     ++ tstate->trash_delete_nesting;
     Py_TRASHCAN_SAFE_BEGIN(self);
-    --_PyTrash_delete_nesting;
+    --_PyRuntime.gc.trash_delete_nesting;
     -- tstate->trash_delete_nesting;
 
     /* Find the nearest base with a different tp_dealloc */
@@ -1254,10 +1254,10 @@ subtype_dealloc(PyObject *self)
     Py_DECREF(type);
 
   endlabel:
-    ++_PyTrash_delete_nesting;
+    ++_PyRuntime.gc.trash_delete_nesting;
     ++ tstate->trash_delete_nesting;
     Py_TRASHCAN_SAFE_END(self);
-    --_PyTrash_delete_nesting;
+    --_PyRuntime.gc.trash_delete_nesting;
     -- tstate->trash_delete_nesting;
 
     /* Explanation of the weirdness around the trashcan macros:
@@ -1297,7 +1297,7 @@ subtype_dealloc(PyObject *self)
          a subtle disaster.
 
        Q. Why the bizarre (net-zero) manipulation of
-          _PyTrash_delete_nesting around the trashcan macros?
+          _PyRuntime.trash_delete_nesting around the trashcan macros?
 
        A. Some base classes (e.g. list) also use the trashcan mechanism.
           The following scenario used to be possible: