Revert GC changes to "work_to_do" logic.

This reverts parts of the GH-140262 change.  The changes that affect the
tuple untracking are left unchanged.  Revert the changes to the
calculation of the increment size, based on the "work_to_do" variable.
This causes cyclic garbage to be collected more quickly.  Revert also
the change to test_gc.py, which was done because the expected GC
collection was taking longer to happen.

With the tuple untrack change, the performance regression as reported by
bug GH-139951 is still resolved (work_to_do changes are not required).
This commit is contained in:
Neil Schemenauer 2025-11-26 20:40:48 -08:00
parent bc9e63dd9d
commit fef3eed829
2 changed files with 10 additions and 13 deletions

View file

@@ -1493,11 +1493,10 @@ class GCTogglingTests(unittest.TestCase):
# The free-threaded build doesn't have multiple generations, so
# just trigger a GC manually.
gc.collect()
assert not detector.gc_happened
while not detector.gc_happened:
i += 1
if i > 100000:
self.fail("gc didn't happen after 100000 iterations")
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
@@ -1569,8 +1568,8 @@ class GCTogglingTests(unittest.TestCase):
gc.collect()
while not detector.gc_happened:
i += 1
if i > 50000:
self.fail("gc didn't happen after 50000 iterations")
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
@@ -1587,8 +1586,8 @@ class GCTogglingTests(unittest.TestCase):
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 100000:
self.fail("gc didn't happen after 100000 iterations")
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
junk.append([]) # this will eventually trigger gc
try:
@@ -1598,11 +1597,11 @@ class GCTogglingTests(unittest.TestCase):
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 100000:
if i > 10000:
break
junk.append([]) # this may eventually trigger gc (if it is enabled)
self.assertEqual(i, 100001)
self.assertEqual(i, 10001)
finally:
gc.enable()

View file

@@ -1644,7 +1644,7 @@ assess_work_to_do(GCState *gcstate)
scale_factor = 2;
}
intptr_t new_objects = gcstate->young.count;
intptr_t max_heap_fraction = new_objects*2;
intptr_t max_heap_fraction = new_objects*3/2;
intptr_t heap_fraction = gcstate->heap_size / SCAN_RATE_DIVISOR / scale_factor;
if (heap_fraction > max_heap_fraction) {
heap_fraction = max_heap_fraction;
@@ -1659,9 +1659,6 @@ gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
GC_STAT_ADD(1, collections, 1);
GCState *gcstate = &tstate->interp->gc;
gcstate->work_to_do += assess_work_to_do(gcstate);
if (gcstate->work_to_do < 0) {
return;
}
untrack_tuples(&gcstate->young.head);
if (gcstate->phase == GC_PHASE_MARK) {
Py_ssize_t objects_marked = mark_at_start(tstate);
@@ -1705,6 +1702,7 @@ gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
gc_collect_region(tstate, &increment, &survivors, stats);
gc_list_merge(&survivors, visited);
assert(gc_list_is_empty(&increment));
gcstate->work_to_do += gcstate->heap_size / SCAN_RATE_DIVISOR / scale_factor;
gcstate->work_to_do -= increment_size;
if (gc_list_is_empty(not_visited)) {