Mirror of https://github.com/python/cpython.git
gh-115103: Delay reuse of mimalloc pages that store PyObjects (#115435)
This implements the delayed reuse of mimalloc pages that contain Python objects in the free-threaded build.

Allocations of the same size class are grouped into data structures called pages (these are different from operating system pages). For thread safety, we want memory used to store PyObjects to remain valid as long as there may be concurrent lock-free readers, so we delay reusing it for other size classes or other heaps, and delay returning it to the operating system.

When a mimalloc page becomes empty, instead of freeing it immediately we tag it with a QSBR goal and insert it into a linked list of pages to be freed, kept in the per-thread state. When mimalloc needs a fresh page, we process this queue and free any still-empty pages whose goal has been reached and that are therefore safe to free.

Pages waiting to be freed remain available for allocations of the same size class, and allocating from a page prevents it from being freed. There is additional logic to handle pages that are abandoned when their owning thread exits.
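The new helpers this change calls from mimalloc (`_PyMem_mi_page_maybe_free`, `_PyMem_mi_page_clear_qsbr`, `_PyMem_mi_heap_collect_qsbr`, `_PyMem_mi_page_reclaimed`) are defined outside the excerpt shown below. As a rough illustration of the scheme described above, here is a minimal, self-contained C sketch; every type and function in it (delayed_page, thread_state, qsbr_advance, qsbr_poll, free_page_memory, and the page_* helpers) is a simplified stand-in invented for this sketch, not CPython's actual implementation.

/* Minimal sketch of "delay page reuse until a QSBR goal is reached".
 * All names here are illustrative stand-ins, not CPython definitions. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct delayed_page {
    struct delayed_page *next;   /* per-thread "to be freed" list linkage */
    uint64_t qsbr_goal;          /* 0 = no goal (never queued, or reused since) */
    bool     all_blocks_free;    /* true while no block in the page is allocated */
} delayed_page;

typedef struct thread_state {
    delayed_page *pages_to_free; /* pages waiting for their QSBR goal */
} thread_state;

/* Assumed QSBR primitives: advance the shared sequence number, and check
 * whether every thread has passed a given sequence number. */
extern uint64_t qsbr_advance(void);
extern bool     qsbr_poll(uint64_t goal);
extern void     free_page_memory(delayed_page *page);

/* A page just became empty: tag it with a goal and queue it instead of
 * freeing it right away, since lock-free readers may still touch it. */
static void page_maybe_free(thread_state *ts, delayed_page *page)
{
    page->qsbr_goal = qsbr_advance();
    page->next = ts->pages_to_free;
    ts->pages_to_free = page;
}

/* The page was reused for an allocation of the same size class: any
 * previously recorded goal is no longer meaningful. */
static void page_clear_qsbr(delayed_page *page)
{
    page->qsbr_goal = 0;
}

/* Before requesting a fresh page, walk the queue: free pages that are
 * still empty and whose goal has been reached; drop pages that were
 * reused in the meantime; keep the rest for a later pass. */
static void collect_delayed_pages(thread_state *ts)
{
    delayed_page **link = &ts->pages_to_free;
    while (*link != NULL) {
        delayed_page *page = *link;
        if (page->qsbr_goal == 0 || !page->all_blocks_free) {
            *link = page->next;       /* reused: unlink but keep it alive */
            page->next = NULL;
        }
        else if (qsbr_poll(page->qsbr_goal)) {
            *link = page->next;       /* no lock-free reader can see it now */
            free_page_memory(page);
        }
        else {
            link = &page->next;       /* not safe yet; revisit later */
        }
    }
}

In the hunks below, the same pattern shows up as `_PyMem_mi_page_maybe_free()` replacing direct `_mi_page_free()` calls, `_PyMem_mi_page_clear_qsbr()` on the reuse paths, and `_PyMem_mi_heap_collect_qsbr()` in the allocation slow path before a fresh page is requested.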
Parent: 02ee475ee3
Commit: c012c8ab7b
9 changed files with 199 additions and 17 deletions
@@ -225,6 +225,9 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
 
   // and the local free list
   if (page->local_free != NULL) {
+    // any previous QSBR goals are no longer valid because we reused the page
+    _PyMem_mi_page_clear_qsbr(page);
+
     if mi_likely(page->free == NULL) {
       // usual case
       page->free = page->local_free;
@@ -267,6 +270,7 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   // TODO: push on full queue immediately if it is full?
   mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
   mi_page_queue_push(heap, pq, page);
+  _PyMem_mi_page_reclaimed(page);
   mi_assert_expensive(_mi_page_is_valid(page));
 }
 
@@ -383,6 +387,13 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
 
   mi_heap_t* pheap = mi_page_heap(page);
 
+#ifdef Py_GIL_DISABLED
+  if (page->qsbr_node.next != NULL) {
+    // remove from QSBR queue, but keep the goal
+    llist_remove(&page->qsbr_node);
+  }
+#endif
+
   // remove from our page list
   mi_segments_tld_t* segments_tld = &pheap->tld->segments;
   mi_page_queue_remove(pq, page);
@@ -417,6 +428,11 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
 
   mi_heap_t* heap = mi_page_heap(page);
 
+#ifdef Py_GIL_DISABLED
+  mi_assert_internal(page->qsbr_goal == 0);
+  mi_assert_internal(page->qsbr_node.next == NULL);
+#endif
+
   // remove from the page list
   // (no need to do _mi_heap_delayed_free first as all blocks are already free)
   mi_segments_tld_t* segments_tld = &heap->tld->segments;
@@ -444,6 +460,9 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
 
   mi_page_set_has_aligned(page, false);
 
+  // any previous QSBR goals are no longer valid because we reused the page
+  _PyMem_mi_page_clear_qsbr(page);
+
   // don't retire too often..
   // (or we end up retiring and re-allocating most of the time)
   // NOTE: refine this more: we should not retire if this
@@ -465,7 +484,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
       return; // dont't free after all
     }
  }
-  _mi_page_free(page, pq, false);
+  _PyMem_mi_page_maybe_free(page, pq, false);
 }
 
 // free retired pages: we don't need to look at the entire queues
@@ -480,7 +499,10 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
       if (mi_page_all_free(page)) {
         page->retire_expire--;
         if (force || page->retire_expire == 0) {
-          _mi_page_free(pq->first, pq, force);
+#ifdef Py_GIL_DISABLED
+          mi_assert_internal(page->qsbr_goal == 0);
+#endif
+          _PyMem_mi_page_maybe_free(page, pq, force);
         }
         else {
           // keep retired, update min/max
@@ -661,6 +683,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   // set fields
   mi_page_set_heap(page, heap);
   page->tag = heap->tag;
+  page->use_qsbr = heap->page_use_qsbr;
   page->debug_offset = heap->debug_offset;
   page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); // initialize before _mi_segment_page_start
   size_t page_size;
@@ -691,6 +714,10 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page->xthread_free == 0);
   mi_assert_internal(page->next == NULL);
   mi_assert_internal(page->prev == NULL);
+#ifdef Py_GIL_DISABLED
+  mi_assert_internal(page->qsbr_goal == 0);
+  mi_assert_internal(page->qsbr_node.next == NULL);
+#endif
   mi_assert_internal(page->retire_expire == 0);
   mi_assert_internal(!mi_page_has_aligned(page));
   #if (MI_PADDING || MI_ENCODE_FREELIST)
@@ -750,6 +777,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   mi_heap_stat_counter_increase(heap, searches, count);
 
   if (page == NULL) {
+    _PyMem_mi_heap_collect_qsbr(heap); // some pages might be safe to free now
     _mi_heap_collect_retired(heap, false); // perhaps make a page available?
     page = mi_page_fresh(heap, pq);
     if (page == NULL && first_try) {
@@ -760,6 +788,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   else {
     mi_assert(pq->first == page);
     page->retire_expire = 0;
+    _PyMem_mi_page_clear_qsbr(page);
   }
   mi_assert_internal(page == NULL || mi_page_immediate_available(page));
   return page;
@@ -785,6 +814,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
 
     if (mi_page_immediate_available(page)) {
       page->retire_expire = 0;
+      _PyMem_mi_page_clear_qsbr(page);
       return page; // fast path
     }
   }
@@ -878,6 +908,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
     return NULL;
   }
   else {
+    _PyMem_mi_heap_collect_qsbr(heap);
    return mi_large_huge_page_alloc(heap,size,huge_alignment);
   }
 }