[3.13] GH-124567: Revert the Incremental GC in 3.13 (#124770)
Revert the incremental GC in 3.13, since it's not clear that, without further tuning, the benefits outweigh the costs.

Co-authored-by: Adam Turner <9087854+AA-Turner@users.noreply.github.com>
parent bc1fae89af
commit e0eb44ad49
11 changed files with 5525 additions and 5908 deletions
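For orientation: the hunks below (apparently from the free-threaded collector, judging by the _Py_atomic_* helpers and the worklist code) mostly undo a field rename in the GC state. The incremental collector tracked a young nursery plus two old spaces; the restored collector goes back to a flat array of generations. A simplified sketch of the two layouts — the field names are taken from the diff, everything else is an assumption, not the verbatim CPython definitions:

    /* Simplified sketch, not the verbatim CPython definitions. */
    struct gc_generation {
        int threshold;  /* collection considered once count exceeds this */
        int count;      /* net allocations since the last collection */
    };

    /* Incremental GC (being reverted): */
    struct GCState_incremental {
        struct gc_generation young;   /* nursery for newly created objects */
        struct gc_generation old[2];  /* old spaces scanned in increments */
    };

    /* Classic generational GC (being restored); NUM_GENERATIONS is 3: */
    struct GCState_classic {
        struct gc_generation generations[3];  /* generations[0] is the youngest */
    };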
@@ -744,7 +744,7 @@ void
 _PyGC_InitState(GCState *gcstate)
 {
     // TODO: move to pycore_runtime_init.h once the incremental GC lands.
-    gcstate->young.threshold = 2000;
+    gcstate->generations[0].threshold = 2000;
 }
@@ -1042,8 +1042,8 @@ cleanup_worklist(struct worklist *worklist)
 static bool
 gc_should_collect(GCState *gcstate)
 {
-    int count = _Py_atomic_load_int_relaxed(&gcstate->young.count);
-    int threshold = gcstate->young.threshold;
+    int count = _Py_atomic_load_int_relaxed(&gcstate->generations[0].count);
+    int threshold = gcstate->generations[0].threshold;
     if (count <= threshold || threshold == 0 || !gcstate->enabled) {
         return false;
     }
@@ -1051,7 +1051,7 @@ gc_should_collect(GCState *gcstate)
     // objects. A few tests rely on immediate scheduling of the GC so we ignore
     // the scaled threshold if generations[1].threshold is set to zero.
     return (count > gcstate->long_lived_total / 4 ||
-            gcstate->old[0].threshold == 0);
+            gcstate->generations[1].threshold == 0);
 }

 static void
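The two gc_should_collect hunks restore the pre-incremental trigger heuristic: a collection is considered once the youngest generation's count exceeds its threshold, but it is suppressed until the count also outgrows a quarter of the long-lived object total — unless generations[1].threshold is zero, which some tests set to force immediate scheduling. A minimal standalone model of that logic (the struct and the harness in main are illustrative, not CPython's):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative model of the restored trigger logic; field names follow the diff. */
    struct gc_state {
        bool enabled;
        int long_lived_total;  /* objects that survived a full collection */
        struct { int count; int threshold; } generations[3];
    };

    static bool
    should_collect(const struct gc_state *gc)
    {
        int count = gc->generations[0].count;
        int threshold = gc->generations[0].threshold;
        if (count <= threshold || threshold == 0 || !gc->enabled) {
            return false;
        }
        /* Scale with the number of long-lived objects so that large, stable
           heaps do not trigger a collection on every small allocation burst. */
        return (count > gc->long_lived_total / 4 ||
                gc->generations[1].threshold == 0);
    }

    int
    main(void)
    {
        struct gc_state gc = { .enabled = true, .long_lived_total = 100000 };
        gc.generations[0].threshold = 2000;
        gc.generations[1].threshold = 10;

        gc.generations[0].count = 2001;       /* over threshold but under 25000 */
        printf("%d\n", should_collect(&gc));  /* prints 0: suppressed by scaling */

        gc.generations[0].count = 30000;      /* over long_lived_total / 4 */
        printf("%d\n", should_collect(&gc));  /* prints 1: collection scheduled */
        return 0;
    }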
@@ -1065,7 +1065,7 @@ record_allocation(PyThreadState *tstate)
     if (gc->alloc_count >= LOCAL_ALLOC_COUNT_THRESHOLD) {
         // TODO: Use Py_ssize_t for the generation count.
         GCState *gcstate = &tstate->interp->gc;
-        _Py_atomic_add_int(&gcstate->young.count, (int)gc->alloc_count);
+        _Py_atomic_add_int(&gcstate->generations[0].count, (int)gc->alloc_count);
         gc->alloc_count = 0;

         if (gc_should_collect(gcstate) &&
@@ -1084,7 +1084,7 @@ record_deallocation(PyThreadState *tstate)
     gc->alloc_count--;
     if (gc->alloc_count <= -LOCAL_ALLOC_COUNT_THRESHOLD) {
         GCState *gcstate = &tstate->interp->gc;
-        _Py_atomic_add_int(&gcstate->young.count, (int)gc->alloc_count);
+        _Py_atomic_add_int(&gcstate->generations[0].count, (int)gc->alloc_count);
         gc->alloc_count = 0;
     }
 }
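record_allocation and record_deallocation keep the same shape on both sides of the revert: each thread accumulates a private alloc_count and folds it into the shared counter atomically only once it reaches ±LOCAL_ALLOC_COUNT_THRESHOLD; only the destination field changes. A hedged sketch of that batching pattern in C11 atomics (the threshold value and all names here are illustrative assumptions, not CPython's):

    #include <stdatomic.h>

    #define LOCAL_ALLOC_COUNT_THRESHOLD 512  /* illustrative value, not CPython's */

    static atomic_int shared_gen0_count;         /* stands in for generations[0].count */
    static _Thread_local int local_alloc_count;  /* per-thread buffer */

    /* Flush the thread-local delta into the shared counter only when the
       buffer fills, so threads do not contend on every single allocation. */
    static void
    note_allocation(void)
    {
        if (++local_alloc_count >= LOCAL_ALLOC_COUNT_THRESHOLD) {
            atomic_fetch_add_explicit(&shared_gen0_count, local_alloc_count,
                                      memory_order_relaxed);
            local_alloc_count = 0;
        }
    }

    static void
    note_deallocation(void)
    {
        if (--local_alloc_count <= -LOCAL_ALLOC_COUNT_THRESHOLD) {
            atomic_fetch_add_explicit(&shared_gen0_count, local_alloc_count,
                                      memory_order_relaxed);
            local_alloc_count = 0;
        }
    }

    int
    main(void)
    {
        for (int i = 0; i < 1000; i++) {
            note_allocation();  /* one flush of 512; 488 remain buffered locally */
        }
        (void)note_deallocation;  /* unused in this sketch */
        return 0;
    }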
@@ -1096,12 +1096,10 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state,

     // update collection and allocation counters
     if (generation+1 < NUM_GENERATIONS) {
-        state->gcstate->old[generation].count += 1;
+        state->gcstate->generations[generation+1].count += 1;
     }

-    state->gcstate->young.count = 0;
-    for (int i = 1; i <= generation; ++i) {
-        state->gcstate->old[i-1].count = 0;
+    for (int i = 0; i <= generation; i++) {
+        state->gcstate->generations[i].count = 0;
     }

     // merge refcounts for all queued objects
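The last hunk restores the classic post-collection bookkeeping: after collecting generations 0 through generation, the next-older generation's count is bumped by one (it just received the survivors) and every collected generation's count is reset. In miniature (names illustrative):

    #define NUM_GENERATIONS 3

    /* After collecting generations 0..generation: credit the generation the
       survivors were promoted into, then clear the counters that were collected. */
    static void
    update_counts_after_collect(int counts[NUM_GENERATIONS], int generation)
    {
        if (generation + 1 < NUM_GENERATIONS) {
            counts[generation + 1] += 1;
        }
        for (int i = 0; i <= generation; i++) {
            counts[i] = 0;
        }
    }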