From 67600e0f020edf9ef9b86ad1cfcc1079d892d7ce Mon Sep 17 00:00:00 2001
From: Jeff Bezanson
Date: Tue, 21 Nov 2023 17:55:51 -0500
Subject: [PATCH] free even more state for exited threads

---
 src/gc-stacks.c |  5 +++++
 src/gc.c        | 41 +++++++++++++++++++++++++++++++----------
 src/threading.c |  5 ++++-
 3 files changed, 40 insertions(+), 11 deletions(-)

diff --git a/src/gc-stacks.c b/src/gc-stacks.c
index 7af1017cb55d9..eb627ec39d409 100644
--- a/src/gc-stacks.c
+++ b/src/gc-stacks.c
@@ -203,6 +203,8 @@ void sweep_stack_pools(void)
     assert(gc_n_threads);
     for (int i = 0; i < gc_n_threads; i++) {
         jl_ptls_t ptls2 = gc_all_tls_states[i];
+        if (ptls2 == NULL)
+            continue;
 
         // free half of stacks that remain unused since last sweep
         for (int p = 0; p < JL_N_STACK_POOLS; p++) {
@@ -227,6 +229,9 @@
                 small_arraylist_free(al);
             }
         }
+        if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) {
+            small_arraylist_free(ptls2->heap.free_stacks);
+        }
 
         small_arraylist_t *live_tasks = &ptls2->heap.live_tasks;
         size_t n = 0;
diff --git a/src/gc.c b/src/gc.c
index 69a65af2da71c..4ee44a9664783 100644
--- a/src/gc.c
+++ b/src/gc.c
@@ -1655,6 +1655,18 @@ void gc_free_pages(void)
     }
 }
 
+void gc_move_to_global_page_pool(jl_gc_page_stack_t *pgstack)
+{
+    while (1) {
+        jl_gc_pagemeta_t *pg = pop_lf_back(pgstack);
+        if (pg == NULL) {
+            break;
+        }
+        jl_gc_free_page(pg);
+        push_lf_back(&global_page_pool_freed, pg);
+    }
+}
+
 // setup the data-structures for a sweep over all memory pools
 static void gc_sweep_pool(void)
 {
@@ -1667,7 +1679,7 @@ static void gc_sweep_pool(void)
 
     // allocate enough space to hold the end of the free list chain
     // for every thread and pool size
-    jl_taggedvalue_t ***pfl = (jl_taggedvalue_t ***) alloca(n_threads * JL_GC_N_POOLS * sizeof(jl_taggedvalue_t**));
+    jl_taggedvalue_t ***pfl = (jl_taggedvalue_t ***) malloc(n_threads * JL_GC_N_POOLS * sizeof(jl_taggedvalue_t**));
 
     // update metadata of pages that were pointed to by freelist or newpages from a pool
     // i.e. pages being the current allocation target
@@ -1709,7 +1721,7 @@ static void gc_sweep_pool(void)
     }
 
     // the actual sweeping
-    jl_gc_page_stack_t *tmp = (jl_gc_page_stack_t *)alloca(n_threads * sizeof(jl_gc_page_stack_t));
+    jl_gc_page_stack_t *tmp = (jl_gc_page_stack_t *)malloc(n_threads * sizeof(jl_gc_page_stack_t));
     memset(tmp, 0, n_threads * sizeof(jl_gc_page_stack_t));
     jl_atomic_store(&gc_allocd_scratch, tmp);
     gc_sweep_wake_all();
@@ -1726,6 +1738,7 @@ static void gc_sweep_pool(void)
             }
         }
     }
+    free(tmp);
 
     // merge free lists
     for (int t_i = 0; t_i < n_threads; t_i++) {
@@ -1756,6 +1769,7 @@ static void gc_sweep_pool(void)
             }
         }
     }
+    free(pfl);
 
 #ifdef _P64 // only enable concurrent sweeping on 64bit
     // wake thread up to sweep concurrently
@@ -2980,11 +2994,13 @@ void gc_mark_clean_reclaim_sets(void)
     // Clean up `reclaim-sets`
     for (int i = 0; i < gc_n_threads; i++) {
         jl_ptls_t ptls2 = gc_all_tls_states[i];
-        arraylist_t *reclaim_set2 = &ptls2->mark_queue.reclaim_set;
-        ws_array_t *a = NULL;
-        while ((a = (ws_array_t *)arraylist_pop(reclaim_set2)) != NULL) {
-            free(a->buffer);
-            free(a);
+        if (ptls2 != NULL) {
+            arraylist_t *reclaim_set2 = &ptls2->mark_queue.reclaim_set;
+            ws_array_t *a = NULL;
+            while ((a = (ws_array_t *)arraylist_pop(reclaim_set2)) != NULL) {
+                free(a->buffer);
+                free(a);
+            }
         }
     }
 }
@@ -3500,7 +3516,7 @@ static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection)
             ptls2->heap.remset->len = 0;
         }
         // free empty GC state for threads that have exited
-        if (ptls2->current_task == NULL) {
+        if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) {
             jl_thread_heap_t *heap = &ptls2->heap;
             if (heap->weak_refs.len == 0)
                 small_arraylist_free(&heap->weak_refs);
@@ -3511,9 +3527,14 @@ static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection)
             if (heap->last_remset->len == 0)
                 arraylist_free(heap->last_remset);
             if (ptls2->finalizers.len == 0)
-                arraylist_free(&ptls->finalizers);
+                arraylist_free(&ptls2->finalizers);
             if (ptls2->sweep_objs.len == 0)
-                arraylist_free(&ptls->sweep_objs);
+                arraylist_free(&ptls2->sweep_objs);
+            gc_move_to_global_page_pool(&ptls2->page_metadata_buffered);
+            if (ptls2->page_metadata_allocd.bottom == NULL) {
+                free(ptls2);
+                gc_all_tls_states[t_i] = NULL;
+            }
         }
     }
 
diff --git a/src/threading.c b/src/threading.c
index 86575264f5080..71eb261bcfc70 100644
--- a/src/threading.c
+++ b/src/threading.c
@@ -435,6 +435,8 @@ JL_DLLEXPORT jl_gcframe_t **jl_adopt_thread(void)
 
 void jl_task_frame_noreturn(jl_task_t *ct) JL_NOTSAFEPOINT;
 
+void jl_free_thread_gc_state(jl_ptls_t ptls);
+
 static void jl_delete_thread(void *value) JL_NOTSAFEPOINT_ENTER
 {
 #ifndef _OS_WINDOWS_
@@ -511,9 +513,10 @@ static void jl_delete_thread(void *value) JL_NOTSAFEPOINT_ENTER
     pthread_mutex_unlock(&in_signal_lock);
 #endif
     free(ptls->bt_data);
+    small_arraylist_free(&ptls->locks);
+    ptls->previous_exception = NULL;
     // allow the page root_task is on to be freed
     ptls->root_task = NULL;
-    void jl_free_thread_gc_state(jl_ptls_t ptls);
     jl_free_thread_gc_state(ptls);
     // then park in safe-region
     (void)jl_gc_safe_enter(ptls);
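
The recurring pattern in the GC hunks above is that once an exited thread's ptls can be freed and its slot in gc_all_tls_states set to NULL, every later walk over that array has to tolerate NULL entries. A minimal sketch of that guard, reusing the globals the patch touches (the loop body is illustrative, not code from the patch):

    for (int i = 0; i < gc_n_threads; i++) {
        jl_ptls_t ptls2 = gc_all_tls_states[i];
        if (ptls2 == NULL)
            continue;   // thread exited and its GC state was already freed
        /* per-thread sweep or cleanup work goes here */
    }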