Mirror of https://github.com/python/cpython.git (synced 2025-07-19 09:15:34 +00:00)
gh-112529: Implement GC for free-threaded builds (#114262)
This implements a mark-and-sweep GC for the free-threaded builds of CPython. The implementation relies on mimalloc to find GC-tracked objects (i.e., "containers").
This commit is contained in:
parent 4850410b60
commit b52fc70d1a
18 changed files with 1952 additions and 22 deletions
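The commit message above says the collector relies on mimalloc to find GC-tracked objects instead of threading them onto the PyGC_Head linked list used by the default build. As a rough, hedged illustration (not code from this commit), mimalloc's public mi_heap_visit_blocks() API can enumerate every live allocation in a heap, which is the kind of traversal such a collector can build on:

// Sketch only: enumerate heap blocks with mimalloc's visitor API. A real
// collector would check each block for a GC-tracked ("container") object
// and add it to a work list instead of printing it.
#include <mimalloc.h>
#include <stdbool.h>
#include <stdio.h>

static bool
visit_block(const mi_heap_t *heap, const mi_heap_area_t *area,
            void *block, size_t block_size, void *arg)
{
    (void)heap; (void)area; (void)arg;
    if (block == NULL) {
        return true;                 // called once per heap area; keep going
    }
    printf("live block %p (%zu bytes)\n", block, block_size);
    return true;                     // continue the traversal
}

int
main(void)
{
    mi_heap_t *heap = mi_heap_new();
    void *a = mi_heap_malloc(heap, 64);
    void *b = mi_heap_malloc(heap, 128);
    mi_heap_visit_blocks(heap, /*visit_all_blocks=*/true, &visit_block, NULL);
    mi_free(a);
    mi_free(b);
    mi_heap_delete(heap);
    return 0;
}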
@@ -15,6 +15,8 @@
 #include "pycore_weakref.h"       // _PyWeakref_ClearRef()
 #include "pydtrace.h"

+#ifndef Py_GIL_DISABLED
+
 typedef struct _gc_runtime_state GCState;

 #ifdef Py_DEBUG
@@ -964,10 +966,10 @@ finalize_garbage(PyThreadState *tstate, PyGC_Head *collectable)
         PyGC_Head *gc = GC_NEXT(collectable);
         PyObject *op = FROM_GC(gc);
         gc_list_move(gc, &seen);
-        if (!_PyGCHead_FINALIZED(gc) &&
+        if (!_PyGC_FINALIZED(op) &&
             (finalize = Py_TYPE(op)->tp_finalize) != NULL)
         {
-            _PyGCHead_SET_FINALIZED(gc);
+            _PyGC_SET_FINALIZED(op);
             Py_INCREF(op);
             finalize(op);
             assert(!_PyErr_Occurred(tstate));
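The replaced lines above move the "already finalized" flag from the PyGC_Head macros (_PyGCHead_FINALIZED / _PyGCHead_SET_FINALIZED) to per-object macros (_PyGC_FINALIZED / _PyGC_SET_FINALIZED), so the same code can work when GC state is kept on the object itself rather than in a separate GC header. A toy sketch of that idea follows; the field and bit names are illustrative assumptions, not CPython's actual definitions:

/* Illustrative only: a per-object bit field holding the "finalized" flag,
 * instead of stealing bits from the PyGC_Head that precedes the object. */
#include <stdint.h>

enum { TOY_GC_FINALIZED = 1u << 3 };     /* made-up bit value */

typedef struct {
    uint8_t gc_bits;                     /* stand-in for per-object GC state */
} toy_gc_state;

static inline int
toy_gc_finalized(const toy_gc_state *st)
{
    return (st->gc_bits & TOY_GC_FINALIZED) != 0;
}

static inline void
toy_gc_set_finalized(toy_gc_state *st)
{
    st->gc_bits |= TOY_GC_FINALIZED;
}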
@@ -1942,3 +1944,5 @@ PyUnstable_GC_VisitObjects(gcvisitobjects_t callback, void *arg)
 done:
     gcstate->enabled = origenstate;
 }
+
+#endif  // Py_GIL_DISABLED
File diff suppressed because it is too large
Python/object_stack.c (new file, 87 lines)
@@ -0,0 +1,87 @@
// Stack of Python objects

#include "Python.h"
#include "pycore_freelist.h"
#include "pycore_pystate.h"
#include "pycore_object_stack.h"

extern _PyObjectStackChunk *_PyObjectStackChunk_New(void);
extern void _PyObjectStackChunk_Free(_PyObjectStackChunk *);

static struct _Py_object_stack_state *
get_state(void)
{
    _PyFreeListState *state = _PyFreeListState_GET();
    return &state->object_stack_state;
}

_PyObjectStackChunk *
_PyObjectStackChunk_New(void)
{
    _PyObjectStackChunk *buf;
    struct _Py_object_stack_state *state = get_state();
    if (state->numfree > 0) {
        buf = state->free_list;
        state->free_list = buf->prev;
        state->numfree--;
    }
    else {
        // NOTE: we use PyMem_RawMalloc() here because this is used by the GC
        // during mimalloc heap traversal. In that context, it is not safe to
        // allocate mimalloc memory, such as via PyMem_Malloc().
        buf = PyMem_RawMalloc(sizeof(_PyObjectStackChunk));
        if (buf == NULL) {
            return NULL;
        }
    }
    buf->prev = NULL;
    buf->n = 0;
    return buf;
}

void
_PyObjectStackChunk_Free(_PyObjectStackChunk *buf)
{
    assert(buf->n == 0);
    struct _Py_object_stack_state *state = get_state();
    if (state->numfree >= 0 &&
        state->numfree < _PyObjectStackChunk_MAXFREELIST)
    {
        buf->prev = state->free_list;
        state->free_list = buf;
        state->numfree++;
    }
    else {
        PyMem_RawFree(buf);
    }
}

void
_PyObjectStack_Clear(_PyObjectStack *queue)
{
    while (queue->head != NULL) {
        _PyObjectStackChunk *buf = queue->head;
        buf->n = 0;
        queue->head = buf->prev;
        _PyObjectStackChunk_Free(buf);
    }
}

void
_PyObjectStackChunk_ClearFreeList(_PyFreeListState *free_lists, int is_finalization)
{
    if (!is_finalization) {
        // Ignore requests to clear the free list during GC. We use object
        // stacks during GC, so emptying the free-list is counterproductive.
        return;
    }

    struct _Py_object_stack_state *state = &free_lists->object_stack_state;
    while (state->numfree > 0) {
        _PyObjectStackChunk *buf = state->free_list;
        state->free_list = buf->prev;
        state->numfree--;
        PyMem_RawFree(buf);
    }
    state->numfree = -1;
}
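Python/object_stack.c above implements a chunked stack of PyObject pointers with a small per-thread free list of chunks. A hedged usage sketch follows, assuming the push/pop helpers declared in pycore_object_stack.h behave as their names suggest (_PyObjectStack_Push() returning a negative value on allocation failure, _PyObjectStack_Pop() returning NULL when the stack is empty); mark_reachable() and the traversal comment are hypothetical:

// Sketch only: using _PyObjectStack as a depth-first work list.
#include "Python.h"
#include "pycore_object_stack.h"

static int
mark_reachable(PyObject *root)
{
    _PyObjectStack stack = { NULL };
    if (_PyObjectStack_Push(&stack, root) < 0) {
        return -1;                       // out of memory
    }
    PyObject *op;
    while ((op = _PyObjectStack_Pop(&stack)) != NULL) {
        // A real collector would mark `op` here and push every not-yet-seen
        // object it references, e.g. by walking it with tp_traverse.
    }
    _PyObjectStack_Clear(&stack);        // releases any chunks still held
    return 0;
}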
@@ -10,6 +10,7 @@
 #include "pycore_frame.h"
 #include "pycore_initconfig.h"    // _PyStatus_OK()
 #include "pycore_object.h"        // _PyType_InitCache()
+#include "pycore_object_stack.h"  // _PyObjectStackChunk_ClearFreeList()
 #include "pycore_parking_lot.h"   // _PyParkingLot_AfterFork()
 #include "pycore_pyerrors.h"      // _PyErr_Clear()
 #include "pycore_pylifecycle.h"   // _PyAST_Fini()
@@ -1468,6 +1469,7 @@ _Py_ClearFreeLists(_PyFreeListState *state, int is_finalization)
     _PyList_ClearFreeList(state, is_finalization);
     _PyContext_ClearFreeList(state, is_finalization);
     _PyAsyncGen_ClearFreeLists(state, is_finalization);
+    _PyObjectStackChunk_ClearFreeList(state, is_finalization);
 }

 void
@@ -2055,7 +2057,6 @@ start_the_world(struct _stoptheworld_state *stw)
     HEAD_LOCK(runtime);
     stw->requested = 0;
     stw->world_stopped = 0;
-    stw->requester = NULL;
     // Switch threads back to the detached state.
     PyInterpreterState *i;
     PyThreadState *t;
@@ -2066,6 +2067,7 @@ start_the_world(struct _stoptheworld_state *stw)
             _PyParkingLot_UnparkAll(&t->state);
         }
     }
+    stw->requester = NULL;
     HEAD_UNLOCK(runtime);
     if (stw->is_global) {
         _PyRWMutex_Unlock(&runtime->stoptheworld_mutex);