1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 // #include "PosixSource.h"
19 #include "OSThreads.h"
20 #include "LdvProfile.h"
25 #include "BlockAlloc.h"
31 #include "ParTicky.h" // ToDo: move into Rts.h
32 #include "RtsSignals.h"
36 #if defined(RTS_GTK_FRONTPANEL)
37 #include "FrontPanel.h"
40 #include "RetainerProfile.h"
41 #include "RaiseAsync.h"
54 #include <string.h> // for memset()
57 /* -----------------------------------------------------------------------------
59 -------------------------------------------------------------------------- */
61 /* STATIC OBJECT LIST.
64 * We maintain a linked list of static objects that are still live.
65 * The requirements for this list are:
67 * - we need to scan the list while adding to it, in order to
68 * scavenge all the static objects (in the same way that
69 * breadth-first scavenging works for dynamic objects).
71 * - we need to be able to tell whether an object is already on
72 * the list, to break loops.
74 * Each static object has a "static link field", which we use for
75 * linking objects on to the list. We use a stack-type list, consing
76 * objects on the front as they are added (this means that the
77  * scavenge phase is depth-first, not breadth-first, but that shouldn't matter).
80 * A separate list is kept for objects that have been scavenged
81 * already - this is so that we can zero all the marks afterwards.
83 * An object is on the list if its static link field is non-zero; this
84 * means that we have to mark the end of the list with '1', not NULL.
86 * Extra notes for generational GC:
88 * Each generation has a static object list associated with it. When
89 * collecting generations up to N, we treat the static object lists
90 * from generations > N as roots.
92 * We build up a static object list while collecting generations 0..N,
93 * which is then appended to the static object list of generation N+1.
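 *
 * A minimal illustration (hypothetical helper name; a simplified sketch,
 * not the actual RTS code) of consing an object onto the list through its
 * static link field, which doubles as the "already on a list?" test:
 *
 *   static void push_static_sketch (StgClosure *p, gc_thread *gct)
 *   {
 *       const StgInfoTable *info = get_itbl(p);
 *       if (*STATIC_LINK(info, p) == NULL) {             // not on any list yet
 *           *STATIC_LINK(info, p) = gct->static_objects; // cons onto the front
 *           gct->static_objects = p;
 *       }
 *   }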
96 /* N is the oldest generation being collected, where the generations
97 * are numbered starting at 0. A major GC (indicated by the major_gc
98 * flag) is when we're collecting all generations. We only attempt to
99 * deal with static objects and GC CAFs when doing a major GC.
104 /* Data used for allocation area sizing.
106 static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
116 /* Thread-local data for each GC thread
118 gc_thread **gc_threads = NULL;
120 #if !defined(THREADED_RTS)
121 StgWord8 the_gc_thread[sizeof(gc_thread) + 64 * sizeof(step_workspace)];
124 // Number of threads running in *this* GC. Affects how many
125 // step->todos[] lists we have to look in to find work.
129 long copied; // *words* copied & scavenged during this GC
131 rtsBool work_stealing;
135 /* -----------------------------------------------------------------------------
136 Static function declarations
137 -------------------------------------------------------------------------- */
139 static void mark_root (void *user, StgClosure **root);
140 static void zero_static_object_list (StgClosure* first_static);
141 static nat initialise_N (rtsBool force_major_gc);
142 static void init_collected_gen (nat g, nat threads);
143 static void init_uncollected_gen (nat g, nat threads);
144 static void init_gc_thread (gc_thread *t);
145 static void update_task_list (void);
146 static void resize_generations (void);
147 static void resize_nursery (void);
148 static void start_gc_threads (void);
149 static void scavenge_until_all_done (void);
150 static nat inc_running (void);
151 static nat dec_running (void);
152 static void wakeup_gc_threads (nat n_threads, nat me);
153 static void shutdown_gc_threads (nat n_threads, nat me);
155 #if 0 && defined(DEBUG)
156 static void gcCAFs (void);
159 /* -----------------------------------------------------------------------------
160 The mark bitmap & stack.
161 -------------------------------------------------------------------------- */
163 #define MARK_STACK_BLOCKS 4
165 bdescr *mark_stack_bdescr;
170 // Flag and pointers used for falling back to a linear scan when the
171 // mark stack overflows.
172 rtsBool mark_stack_overflowed;
173 bdescr *oldgen_scan_bd;
176 /* -----------------------------------------------------------------------------
177 GarbageCollect: the main entry point to the garbage collector.
179 Locks held: all capabilities are held throughout GarbageCollect().
180 -------------------------------------------------------------------------- */
183 GarbageCollect (rtsBool force_major_gc,
184 nat gc_type USED_IF_THREADS,
189 lnat live, allocated, max_copied, avg_copied, slop;
190 gc_thread *saved_gct;
193 // necessary if we stole a callee-saves register for gct:
197 CostCentreStack *prev_CCS;
202 #if defined(RTS_USER_SIGNALS)
203 if (RtsFlags.MiscFlags.install_signal_handlers) {
209 ASSERT(sizeof(step_workspace) == 16 * sizeof(StgWord));
210 // otherwise adjust the padding in step_workspace.
212 // tell the stats department that we've started a GC
215 // tell the STM to discard any cached closures it's hoping to re-use
224 // attribute any costs to CCS_GC
230 /* Approximate how much we allocated.
231 * Todo: only when generating stats?
233 allocated = calcAllocated();
235 /* Figure out which generation to collect
237 n = initialise_N(force_major_gc);
239 #if defined(THREADED_RTS)
240 work_stealing = RtsFlags.ParFlags.parGcLoadBalancing;
241 // It's not always a good idea to do load balancing in parallel
242 // GC. In particular, for a parallel program we don't want to
243 // lose locality by moving cached data into another CPU's cache
244 // (this effect can be quite significant).
246     // We could have a more complex way to determine whether to do
247 // work stealing or not, e.g. it might be a good idea to do it
248 // if the heap is big. For now, we just turn it on or off with
252 /* Start threads, so they can be spinning up while we finish initialisation.
256 #if defined(THREADED_RTS)
257 /* How many threads will be participating in this GC?
258 * We don't try to parallelise minor GCs (unless the user asks for
259 * it with +RTS -gn0), or mark/compact/sweep GC.
261 if (gc_type == PENDING_GC_PAR) {
262 n_gc_threads = RtsFlags.ParFlags.nNodes;
270 debugTrace(DEBUG_gc, "GC (gen %d): %d KB to collect, %ld MB in use, using %d thread(s)",
271 N, n * (BLOCK_SIZE / 1024), mblocks_allocated, n_gc_threads);
273 #ifdef RTS_GTK_FRONTPANEL
274 if (RtsFlags.GcFlags.frontpanel) {
275 updateFrontPanelBeforeGC(N);
280 // check for memory leaks if DEBUG is on
281 memInventory(traceClass(DEBUG_gc));
284 // check stack sanity *before* GC
285 IF_DEBUG(sanity, checkFreeListSanity());
286 IF_DEBUG(sanity, checkMutableLists(rtsTrue));
288 // Initialise all our gc_thread structures
289 for (t = 0; t < n_gc_threads; t++) {
290 init_gc_thread(gc_threads[t]);
293 // Initialise all the generations/steps that we're collecting.
294 for (g = 0; g <= N; g++) {
295 init_collected_gen(g,n_gc_threads);
298 // Initialise all the generations/steps that we're *not* collecting.
299 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
300 init_uncollected_gen(g,n_gc_threads);
303 /* Allocate a mark stack if we're doing a major collection.
305 if (major_gc && oldest_gen->steps[0].mark) {
306 nat mark_stack_blocks;
307 mark_stack_blocks = stg_max(MARK_STACK_BLOCKS,
308 oldest_gen->steps[0].n_old_blocks / 100);
309 mark_stack_bdescr = allocGroup(mark_stack_blocks);
310 mark_stack = (StgPtr *)mark_stack_bdescr->start;
311 mark_sp = mark_stack;
312 mark_splim = mark_stack + (mark_stack_blocks * BLOCK_SIZE_W);
314 mark_stack_bdescr = NULL;
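  /* A simplified sketch (helper name hypothetical; the real push/pop live
   * elsewhere in the GC) of how the bounded mark stack is used by the
   * compacting collector: a push that would run past mark_splim sets
   * mark_stack_overflowed, and the GC then falls back to the linear scan
   * driven by oldgen_scan_bd (see above):
   *
   *   static void push_mark_stack_sketch (StgPtr p)
   *   {
   *       if (mark_sp >= mark_splim) {
   *           mark_stack_overflowed = rtsTrue;  // revert to linear scanning
   *       } else {
   *           *mark_sp++ = p;
   *       }
   *   }
   */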
317 // this is the main thread
319 if (n_gc_threads == 1) {
320 SET_GCT(gc_threads[0]);
322 SET_GCT(gc_threads[cap->no]);
325 SET_GCT(gc_threads[0]);
328 /* -----------------------------------------------------------------------
329 * follow all the roots that we know about:
332 // the main thread is running: this prevents any other threads from
333 // exiting prematurely, so we can start them now.
334 // NB. do this after the mutable lists have been saved above, otherwise
335 // the other GC threads will be writing into the old mutable lists.
337 wakeup_gc_threads(n_gc_threads, gct->thread_index);
339 // Mutable lists from each generation > N
340 // we want to *scavenge* these roots, not evacuate them: they're not
341 // going to move in this GC.
342 // Also do them in reverse generation order, for the usual reason:
343 // namely to reduce the likelihood of spurious old->new pointers.
345 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
346 scavenge_mutable_list(generations[g].saved_mut_list, &generations[g]);
347 freeChain_sync(generations[g].saved_mut_list);
348 generations[g].saved_mut_list = NULL;
352 // scavenge the capability-private mutable lists. This isn't part
353 // of markSomeCapabilities() because markSomeCapabilities() can only
354   // call back into the GC via mark_root() (due to the gct register variable).
356 if (n_gc_threads == 1) {
357 for (n = 0; n < n_capabilities; n++) {
358 scavenge_capability_mut_lists(&capabilities[n]);
361 scavenge_capability_mut_lists(&capabilities[gct->thread_index]);
364 // follow roots from the CAF list (used by GHCi)
366 markCAFs(mark_root, gct);
368 // follow all the roots that the application knows about.
370 markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads,
371 rtsTrue/*prune sparks*/);
373 #if defined(RTS_USER_SIGNALS)
374 // mark the signal handlers (signals should be already blocked)
375 markSignalHandlers(mark_root, gct);
378 // Mark the weak pointer list, and prepare to detect dead weak pointers.
382 // Mark the stable pointer table.
383 markStablePtrTable(mark_root, gct);
385 /* -------------------------------------------------------------------------
386 * Repeatedly scavenge all the areas we know about until there's no
387 * more scavenging to be done.
391 scavenge_until_all_done();
392 // The other threads are now stopped. We might recurse back to
393 // here, but from now on this is the only thread.
395 // if any blackholes are alive, make the threads that wait on
397 if (traverseBlackholeQueue()) {
402 // must be last... invariant is that everything is fully
403 // scavenged at this point.
404 if (traverseWeakPtrList()) { // returns rtsTrue if evaced something
409 // If we get to here, there's really nothing left to do.
413 shutdown_gc_threads(n_gc_threads, gct->thread_index);
415 // Update pointers from the Task list
418 // Now see which stable names are still alive.
422 // We call processHeapClosureForDead() on every closure destroyed during
423 // the current garbage collection, so we invoke LdvCensusForDead().
424 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
425 || RtsFlags.ProfFlags.bioSelector != NULL)
429 // NO MORE EVACUATION AFTER THIS POINT!
431 // Two-space collector: free the old to-space.
432 // g0s0->old_blocks is the old nursery
433 // g0s0->blocks is to-space from the previous GC
434 if (RtsFlags.GcFlags.generations == 1) {
435 if (g0s0->blocks != NULL) {
436 freeChain(g0s0->blocks);
441 // For each workspace, in each thread, move the copied blocks to the step
447 for (t = 0; t < n_gc_threads; t++) {
451 if (RtsFlags.GcFlags.generations == 1) {
456 for (; s < total_steps; s++) {
459 // Push the final block
461 push_scanned_block(ws->todo_bd, ws);
464 ASSERT(gct->scan_bd == NULL);
465 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks);
468 for (bd = ws->scavd_list; bd != NULL; bd = bd->link) {
469 ws->step->n_words += bd->free - bd->start;
473 prev->link = ws->step->blocks;
474 ws->step->blocks = ws->scavd_list;
476 ws->step->n_blocks += ws->n_scavd_blocks;
480 // Add all the partial blocks *after* we've added all the full
481 // blocks. This is so that we can grab the partial blocks back
482 // again and try to fill them up in the next GC.
483 for (t = 0; t < n_gc_threads; t++) {
487 if (RtsFlags.GcFlags.generations == 1) {
492 for (; s < total_steps; s++) {
496 for (bd = ws->part_list; bd != NULL; bd = next) {
498 if (bd->free == bd->start) {
500 ws->part_list = next;
507 ws->step->n_words += bd->free - bd->start;
512 prev->link = ws->step->blocks;
513 ws->step->blocks = ws->part_list;
515 ws->step->n_blocks += ws->n_part_blocks;
517 ASSERT(countBlocks(ws->step->blocks) == ws->step->n_blocks);
518 ASSERT(countOccupied(ws->step->blocks) == ws->step->n_words);
523 // Finally: compact or sweep the oldest generation.
524 if (major_gc && oldest_gen->steps[0].mark) {
525 if (oldest_gen->steps[0].compact)
526 compact(gct->scavenged_static_objects);
528 sweep(&oldest_gen->steps[0]);
531 /* run through all the generations/steps and tidy up
538 for (i=0; i < n_gc_threads; i++) {
539 if (n_gc_threads > 1) {
540 debugTrace(DEBUG_gc,"thread %d:", i);
541 debugTrace(DEBUG_gc," copied %ld", gc_threads[i]->copied * sizeof(W_));
542 debugTrace(DEBUG_gc," scanned %ld", gc_threads[i]->scanned * sizeof(W_));
543 debugTrace(DEBUG_gc," any_work %ld", gc_threads[i]->any_work);
544 debugTrace(DEBUG_gc," no_work %ld", gc_threads[i]->no_work);
545 debugTrace(DEBUG_gc," scav_find_work %ld", gc_threads[i]->scav_find_work);
547 copied += gc_threads[i]->copied;
548 max_copied = stg_max(gc_threads[i]->copied, max_copied);
550 if (n_gc_threads == 1) {
558 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
561 generations[g].collections++; // for stats
562 if (n_gc_threads > 1) generations[g].par_collections++;
565 // Count the mutable list as bytes "copied" for the purposes of
566 // stats. Every mutable list is copied during every GC.
568 nat mut_list_size = 0;
569 for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
570 mut_list_size += bd->free - bd->start;
572 for (n = 0; n < n_capabilities; n++) {
573 for (bd = capabilities[n].mut_lists[g];
574 bd != NULL; bd = bd->link) {
575 mut_list_size += bd->free - bd->start;
578 copied += mut_list_size;
581 "mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d others)",
582 (unsigned long)(mut_list_size * sizeof(W_)),
583 mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS);
586 for (s = 0; s < generations[g].n_steps; s++) {
588 stp = &generations[g].steps[s];
590 // for generations we collected...
593 /* free old memory and shift to-space into from-space for all
594 * the collected steps (except the allocation area). These
595 	 * freed blocks will probably be quickly recycled.
597 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
600 // tack the new blocks on the end of the existing blocks
601 if (stp->old_blocks != NULL) {
604 for (bd = stp->old_blocks; bd != NULL; bd = next) {
608 if (!(bd->flags & BF_MARKED))
611 stp->old_blocks = next;
620 stp->n_words += bd->free - bd->start;
622 // NB. this step might not be compacted next
623 // time, so reset the BF_MARKED flags.
624 // They are set before GC if we're going to
625 // compact. (search for BF_MARKED above).
626 bd->flags &= ~BF_MARKED;
628 // between GCs, all blocks in the heap except
629 // for the nursery have the BF_EVACUATED flag set.
630 bd->flags |= BF_EVACUATED;
637 prev->link = stp->blocks;
638 stp->blocks = stp->old_blocks;
641 // add the new blocks to the block tally
642 stp->n_blocks += stp->n_old_blocks;
643 ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
644 ASSERT(countOccupied(stp->blocks) == stp->n_words);
648 freeChain(stp->old_blocks);
650 stp->old_blocks = NULL;
651 stp->n_old_blocks = 0;
654 /* LARGE OBJECTS. The current live large objects are chained on
655      * scavenged_large_objects, having been moved during garbage
656 * collection from large_objects. Any objects left on
657 * large_objects list are therefore dead, so we free them here.
659 for (bd = stp->large_objects; bd != NULL; bd = next) {
665 stp->large_objects = stp->scavenged_large_objects;
666 stp->n_large_blocks = stp->n_scavenged_large_blocks;
669 else // for older generations...
671 /* For older generations, we need to append the
672 * scavenged_large_object list (i.e. large objects that have been
673 * promoted during this GC) to the large_object list for that step.
675 for (bd = stp->scavenged_large_objects; bd; bd = next) {
677 dbl_link_onto(bd, &stp->large_objects);
680 // add the new blocks we promoted during this GC
681 stp->n_large_blocks += stp->n_scavenged_large_blocks;
686 // update the max size of older generations after a major GC
687 resize_generations();
689 // Calculate the amount of live data for stats.
690 live = calcLiveWords();
692 // Free the small objects allocated via allocate(), since this will
693 // all have been copied into G0S1 now.
694 if (RtsFlags.GcFlags.generations > 1) {
695 if (g0s0->blocks != NULL) {
696 freeChain(g0s0->blocks);
703 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
705 // Start a new pinned_object_block
706 pinned_object_block = NULL;
708 // Free the mark stack.
709 if (mark_stack_bdescr != NULL) {
710 freeGroup(mark_stack_bdescr);
714 for (g = 0; g <= N; g++) {
715 for (s = 0; s < generations[g].n_steps; s++) {
716 stp = &generations[g].steps[s];
717 if (stp->bitmap != NULL) {
718 freeGroup(stp->bitmap);
726 // mark the garbage collected CAFs as dead
727 #if 0 && defined(DEBUG) // doesn't work at the moment
728 if (major_gc) { gcCAFs(); }
732 // resetStaticObjectForRetainerProfiling() must be called before
734 if (n_gc_threads > 1) {
735 barf("profiling is currently broken with multi-threaded GC");
736 // ToDo: fix the gct->scavenged_static_objects below
738 resetStaticObjectForRetainerProfiling(gct->scavenged_static_objects);
741 // zero the scavenged static object list
744 for (i = 0; i < n_gc_threads; i++) {
745 zero_static_object_list(gc_threads[i]->scavenged_static_objects);
752 // start any pending finalizers
754 scheduleFinalizers(cap, old_weak_ptr_list);
757 // send exceptions to any threads which were about to die
759 resurrectThreads(resurrected_threads);
760 performPendingThrowTos(exception_threads);
763 // Update the stable pointer hash table.
764 updateStablePtrTable(major_gc);
766 // check sanity after GC
767 IF_DEBUG(sanity, checkSanity());
769 // extra GC trace info
770 IF_DEBUG(gc, statDescribeGens());
773 // symbol-table based profiling
774 /* heapCensus(to_blocks); */ /* ToDo */
777 // restore enclosing cost centre
783 // check for memory leaks if DEBUG is on
784 memInventory(traceClass(DEBUG_gc));
787 #ifdef RTS_GTK_FRONTPANEL
788 if (RtsFlags.GcFlags.frontpanel) {
789 updateFrontPanelAfterGC( N, live );
793 // ok, GC over: tell the stats department what happened.
794 slop = calcLiveBlocks() * BLOCK_SIZE_W - live;
795 stat_endGC(allocated, live, copied, N, max_copied, avg_copied, slop);
797 // Guess which generation we'll collect *next* time
798 initialise_N(force_major_gc);
800 #if defined(RTS_USER_SIGNALS)
801 if (RtsFlags.MiscFlags.install_signal_handlers) {
802 // unblock signals again
803 unblockUserSignals();
812 /* -----------------------------------------------------------------------------
813 Figure out which generation to collect, initialise N and major_gc.
815 Also returns the total number of blocks in generations that will be
817 -------------------------------------------------------------------------- */
820 initialise_N (rtsBool force_major_gc)
823 nat s, blocks, blocks_total;
828 if (force_major_gc) {
829 N = RtsFlags.GcFlags.generations - 1;
834 for (g = RtsFlags.GcFlags.generations - 1; g >= 0; g--) {
836 for (s = 0; s < generations[g].n_steps; s++) {
837 blocks += generations[g].steps[s].n_words / BLOCK_SIZE_W;
838 blocks += generations[g].steps[s].n_large_blocks;
840 if (blocks >= generations[g].max_blocks) {
844 blocks_total += blocks;
848 blocks_total += countNurseryBlocks();
850 major_gc = (N == RtsFlags.GcFlags.generations-1);
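  // Illustrative example (figures made up): with 2 generations, if only
  // generation 0 has grown past its max_blocks then N = 0 and this is a
  // minor GC; if generation 1 has also exceeded its limit, N = 1 and
  // major_gc is set, so static objects and CAFs are dealt with too.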
854 /* -----------------------------------------------------------------------------
855 Initialise the gc_thread structures.
856 -------------------------------------------------------------------------- */
858 #define GC_THREAD_INACTIVE 0
859 #define GC_THREAD_STANDING_BY 1
860 #define GC_THREAD_RUNNING 2
861 #define GC_THREAD_WAITING_TO_CONTINUE 3
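
// Rough summary of the wakeup protocol (see gcWorkerThread(),
// wakeup_gc_threads() and releaseGCThreads() below): each gc_thread has two
// spin locks.  A worker entering gcWorkerThread() releases its mut_spin,
// announces GC_THREAD_STANDING_BY, and blocks acquiring its gc_spin;
// wakeup_gc_threads() grabs mut_spin, sets wakeup to GC_THREAD_RUNNING and
// releases gc_spin to let the worker run.  At the end of the GC the worker
// hands gc_spin back and blocks on mut_spin; releaseGCThreads() releases
// mut_spin to send it back to the mutator.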
864 new_gc_thread (nat n, gc_thread *t)
871 initSpinLock(&t->gc_spin);
872 initSpinLock(&t->mut_spin);
873 ACQUIRE_SPIN_LOCK(&t->gc_spin);
874     t->wakeup = GC_THREAD_INACTIVE;  // starts inactive, so we can wait for the
875                                      // thread to start up; see wakeup_gc_threads
879 t->free_blocks = NULL;
888 for (s = 0; s < total_steps; s++)
891 ws->step = &all_steps[s];
892 ASSERT(s == ws->step->abs_no);
896 ws->todo_q = newWSDeque(128);
897 ws->todo_overflow = NULL;
898 ws->n_todo_overflow = 0;
900 ws->part_list = NULL;
901 ws->n_part_blocks = 0;
903 ws->scavd_list = NULL;
904 ws->n_scavd_blocks = 0;
912 if (gc_threads == NULL) {
913 #if defined(THREADED_RTS)
915 gc_threads = stgMallocBytes (RtsFlags.ParFlags.nNodes *
919 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
921 stgMallocBytes(sizeof(gc_thread) + total_steps * sizeof(step_workspace),
924 new_gc_thread(i, gc_threads[i]);
927 gc_threads = stgMallocBytes (sizeof(gc_thread*),"alloc_gc_threads");
929 new_gc_thread(0,gc_threads[0]);
934 /* ----------------------------------------------------------------------------
936 ------------------------------------------------------------------------- */
938 static nat gc_running_threads;
940 #if defined(THREADED_RTS)
941 static Mutex gc_running_mutex;
948 ACQUIRE_LOCK(&gc_running_mutex);
949 n_running = ++gc_running_threads;
950 RELEASE_LOCK(&gc_running_mutex);
951 ASSERT(n_running <= n_gc_threads);
959 ACQUIRE_LOCK(&gc_running_mutex);
960 ASSERT(n_gc_threads != 0);
961 n_running = --gc_running_threads;
962 RELEASE_LOCK(&gc_running_mutex);
976 // scavenge objects in compacted generation
977 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
978 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
982 // Check for global work in any step. We don't need to check for
983 // local work, because we have already exited scavenge_loop(),
984 // which means there is no local work for this thread.
985 for (s = total_steps-1; s >= 0; s--) {
986 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
990 if (ws->todo_large_objects) return rtsTrue;
991 if (!looksEmptyWSDeque(ws->todo_q)) return rtsTrue;
992 if (ws->todo_overflow) return rtsTrue;
995 #if defined(THREADED_RTS)
998 // look for work to steal
999 for (n = 0; n < n_gc_threads; n++) {
1000 if (n == gct->thread_index) continue;
1001 for (s = total_steps-1; s >= 0; s--) {
1002 ws = &gc_threads[n]->steps[s];
1003 if (!looksEmptyWSDeque(ws->todo_q)) return rtsTrue;
1015 scavenge_until_all_done (void)
1019 debugTrace(DEBUG_gc, "GC thread %d working", gct->thread_index);
1022 #if defined(THREADED_RTS)
1023 if (n_gc_threads > 1) {
1032 // scavenge_loop() only exits when there's no work to do
1035 debugTrace(DEBUG_gc, "GC thread %d idle (%d still running)",
1036 gct->thread_index, r);
1038 while (gc_running_threads != 0) {
1044 // any_work() does not remove the work from the queue, it
1045 // just checks for the presence of work. If we find any,
1046 // then we increment gc_running_threads and go back to
1047 // scavenge_loop() to perform any pending work.
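    //
    // A condensed sketch (not verbatim) of that idle loop:
    //
    //     r = dec_running();
    //     while (gc_running_threads != 0) {
    //         if (any_work()) {
    //             inc_running();
    //             goto loop;            // back into scavenge_loop()
    //         }
    //     }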
1050 // All threads are now stopped
1051 debugTrace(DEBUG_gc, "GC thread %d finished.", gct->thread_index);
1054 #if defined(THREADED_RTS)
1057 gcWorkerThread (Capability *cap)
1059 cap->in_gc = rtsTrue;
1061 gct = gc_threads[cap->no];
1062 gct->id = osThreadId();
1064 // Wait until we're told to wake up
1065 RELEASE_SPIN_LOCK(&gct->mut_spin);
1066 gct->wakeup = GC_THREAD_STANDING_BY;
1067 debugTrace(DEBUG_gc, "GC thread %d standing by...", gct->thread_index);
1068 ACQUIRE_SPIN_LOCK(&gct->gc_spin);
1071 // start performance counters in this thread...
1072 if (gct->papi_events == -1) {
1073 papi_init_eventset(&gct->papi_events);
1075 papi_thread_start_gc1_count(gct->papi_events);
1078 // Every thread evacuates some roots.
1080 markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads,
1081 rtsTrue/*prune sparks*/);
1082 scavenge_capability_mut_lists(&capabilities[gct->thread_index]);
1084 scavenge_until_all_done();
1087 // count events in this thread towards the GC totals
1088 papi_thread_stop_gc1_count(gct->papi_events);
1091 // Wait until we're told to continue
1092 RELEASE_SPIN_LOCK(&gct->gc_spin);
1093 gct->wakeup = GC_THREAD_WAITING_TO_CONTINUE;
1094 debugTrace(DEBUG_gc, "GC thread %d waiting to continue...",
1096 ACQUIRE_SPIN_LOCK(&gct->mut_spin);
1097 debugTrace(DEBUG_gc, "GC thread %d on my way...", gct->thread_index);
1103 waitForGcThreads (Capability *cap USED_IF_THREADS)
1105 #if defined(THREADED_RTS)
1106 nat n_threads = RtsFlags.ParFlags.nNodes;
1109 rtsBool retry = rtsTrue;
1112 for (i=0; i < n_threads; i++) {
1113 if (i == me) continue;
1114 if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY) {
1115 prodCapability(&capabilities[i], cap->running_task);
1118 for (j=0; j < 10000000; j++) {
1120 for (i=0; i < n_threads; i++) {
1121 if (i == me) continue;
1123 setContextSwitches();
1124 if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY) {
1135 start_gc_threads (void)
1137 #if defined(THREADED_RTS)
1138 gc_running_threads = 0;
1139 initMutex(&gc_running_mutex);
1144 wakeup_gc_threads (nat n_threads USED_IF_THREADS, nat me USED_IF_THREADS)
1146 #if defined(THREADED_RTS)
1148 for (i=0; i < n_threads; i++) {
1149 if (i == me) continue;
1151 debugTrace(DEBUG_gc, "waking up gc thread %d", i);
1152 if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY) barf("wakeup_gc_threads");
1154 gc_threads[i]->wakeup = GC_THREAD_RUNNING;
1155 ACQUIRE_SPIN_LOCK(&gc_threads[i]->mut_spin);
1156 RELEASE_SPIN_LOCK(&gc_threads[i]->gc_spin);
1161 // After GC is complete, we must wait for all GC threads to enter the
1162 // standby state, otherwise they may still be executing inside
1163 // any_work(), and may even remain awake until the next GC starts.
1165 shutdown_gc_threads (nat n_threads USED_IF_THREADS, nat me USED_IF_THREADS)
1167 #if defined(THREADED_RTS)
1169 for (i=0; i < n_threads; i++) {
1170 if (i == me) continue;
1171 while (gc_threads[i]->wakeup != GC_THREAD_WAITING_TO_CONTINUE) { write_barrier(); }
1177 releaseGCThreads (Capability *cap USED_IF_THREADS)
1179 #if defined(THREADED_RTS)
1180 nat n_threads = RtsFlags.ParFlags.nNodes;
1183 for (i=0; i < n_threads; i++) {
1184 if (i == me) continue;
1185 if (gc_threads[i]->wakeup != GC_THREAD_WAITING_TO_CONTINUE)
1186 barf("releaseGCThreads");
1188 gc_threads[i]->wakeup = GC_THREAD_INACTIVE;
1189 ACQUIRE_SPIN_LOCK(&gc_threads[i]->gc_spin);
1190 RELEASE_SPIN_LOCK(&gc_threads[i]->mut_spin);
1195 /* ----------------------------------------------------------------------------
1196 Initialise a generation that is to be collected
1197 ------------------------------------------------------------------------- */
1200 init_collected_gen (nat g, nat n_threads)
1207 // Throw away the current mutable list. Invariant: the mutable
1208 // list always has at least one block; this means we can avoid a
1209 // check for NULL in recordMutable().
1211 freeChain(generations[g].mut_list);
1212 generations[g].mut_list = allocBlock();
1213 for (i = 0; i < n_capabilities; i++) {
1214 freeChain(capabilities[i].mut_lists[g]);
1215 capabilities[i].mut_lists[g] = allocBlock();
1219 for (s = 0; s < generations[g].n_steps; s++) {
1221 stp = &generations[g].steps[s];
1222 ASSERT(stp->gen_no == g);
1224 // we'll construct a new list of threads in this step
1225 // during GC, throw away the current list.
1226 stp->old_threads = stp->threads;
1227 stp->threads = END_TSO_QUEUE;
1229 // generation 0, step 0 doesn't need to-space
1230 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
1234 // deprecate the existing blocks
1235 stp->old_blocks = stp->blocks;
1236 stp->n_old_blocks = stp->n_blocks;
1240 stp->live_estimate = 0;
1242 // initialise the large object queues.
1243 stp->scavenged_large_objects = NULL;
1244 stp->n_scavenged_large_blocks = 0;
1246 // mark the small objects as from-space
1247 for (bd = stp->old_blocks; bd; bd = bd->link) {
1248 bd->flags &= ~BF_EVACUATED;
1251 // mark the large objects as from-space
1252 for (bd = stp->large_objects; bd; bd = bd->link) {
1253 bd->flags &= ~BF_EVACUATED;
1256 // for a compacted step, we need to allocate the bitmap
1258 nat bitmap_size; // in bytes
1259 bdescr *bitmap_bdescr;
1262 bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
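	  // One mark bit is needed per word of old-generation data, so each
	  // block contributes BLOCK_SIZE_W bits.  For example (illustrative,
	  // assuming the usual 4Kbyte blocks on a 64-bit platform): 512 words
	  // per block => 512 bits => 64 bytes of bitmap per block.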
1264 if (bitmap_size > 0) {
1265 bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
1267 stp->bitmap = bitmap_bdescr;
1268 bitmap = bitmap_bdescr->start;
1270 debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
1271 bitmap_size, bitmap);
1273 // don't forget to fill it with zeros!
1274 memset(bitmap, 0, bitmap_size);
1276 // For each block in this step, point to its bitmap from the
1277 // block descriptor.
1278 for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
1279 bd->u.bitmap = bitmap;
1280 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
1282 // Also at this point we set the BF_MARKED flag
1283 // for this block. The invariant is that
1284 // BF_MARKED is always unset, except during GC
1285 		  // when it is set on those blocks which will be compacted.
1287 if (!(bd->flags & BF_FRAGMENTED)) {
1288 bd->flags |= BF_MARKED;
1295 // For each GC thread, for each step, allocate a "todo" block to
1296 // store evacuated objects to be scavenged, and a block to store
1297 // evacuated objects that do not need to be scavenged.
1298 for (t = 0; t < n_threads; t++) {
1299 for (s = 0; s < generations[g].n_steps; s++) {
1301 // we don't copy objects into g0s0, unless -G0
1302 if (g==0 && s==0 && RtsFlags.GcFlags.generations > 1) continue;
1304 ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
1306 ws->todo_large_objects = NULL;
1308 ws->part_list = NULL;
1309 ws->n_part_blocks = 0;
1311 // allocate the first to-space block; extra blocks will be
1312 // chained on as necessary.
1314 ASSERT(looksEmptyWSDeque(ws->todo_q));
1315 alloc_todo_block(ws,0);
1317 ws->todo_overflow = NULL;
1318 ws->n_todo_overflow = 0;
1320 ws->scavd_list = NULL;
1321 ws->n_scavd_blocks = 0;
1327 /* ----------------------------------------------------------------------------
1328 Initialise a generation that is *not* to be collected
1329 ------------------------------------------------------------------------- */
1332 init_uncollected_gen (nat g, nat threads)
1339 // save the current mutable lists for this generation, and
1340 // allocate a fresh block for each one. We'll traverse these
1341 // mutable lists as roots early on in the GC.
1342 generations[g].saved_mut_list = generations[g].mut_list;
1343 generations[g].mut_list = allocBlock();
1344 for (n = 0; n < n_capabilities; n++) {
1345 capabilities[n].saved_mut_lists[g] = capabilities[n].mut_lists[g];
1346 capabilities[n].mut_lists[g] = allocBlock();
1349 for (s = 0; s < generations[g].n_steps; s++) {
1350 stp = &generations[g].steps[s];
1351 stp->scavenged_large_objects = NULL;
1352 stp->n_scavenged_large_blocks = 0;
1355 for (s = 0; s < generations[g].n_steps; s++) {
1357 stp = &generations[g].steps[s];
1359 for (t = 0; t < threads; t++) {
1360 ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
1362 ASSERT(looksEmptyWSDeque(ws->todo_q));
1363 ws->todo_large_objects = NULL;
1365 ws->part_list = NULL;
1366 ws->n_part_blocks = 0;
1368 ws->scavd_list = NULL;
1369 ws->n_scavd_blocks = 0;
1371 // If the block at the head of the list in this generation
1372 // is less than 3/4 full, then use it as a todo block.
1373 if (stp->blocks && isPartiallyFull(stp->blocks))
1375 ws->todo_bd = stp->blocks;
1376 ws->todo_free = ws->todo_bd->free;
1377 ws->todo_lim = ws->todo_bd->start + BLOCK_SIZE_W;
1378 stp->blocks = stp->blocks->link;
1380 stp->n_words -= ws->todo_bd->free - ws->todo_bd->start;
1381 ws->todo_bd->link = NULL;
1382 // we must scan from the current end point.
1383 ws->todo_bd->u.scan = ws->todo_bd->free;
1388 alloc_todo_block(ws,0);
1392 // deal out any more partial blocks to the threads' part_lists
1394 while (stp->blocks && isPartiallyFull(stp->blocks))
1397 stp->blocks = bd->link;
1398 ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
1399 bd->link = ws->part_list;
1401 ws->n_part_blocks += 1;
1402 bd->u.scan = bd->free;
1404 stp->n_words -= bd->free - bd->start;
1406 if (t == n_gc_threads) t = 0;
1411 /* -----------------------------------------------------------------------------
1412 Initialise a gc_thread before GC
1413 -------------------------------------------------------------------------- */
1416 init_gc_thread (gc_thread *t)
1418 t->static_objects = END_OF_STATIC_LIST;
1419 t->scavenged_static_objects = END_OF_STATIC_LIST;
1421 t->mut_lists = capabilities[t->thread_index].mut_lists;
1423 t->failed_to_evac = rtsFalse;
1424 t->eager_promotion = rtsTrue;
1425 t->thunk_selector_depth = 0;
1430 t->scav_find_work = 0;
1433 /* -----------------------------------------------------------------------------
1434 Function we pass to evacuate roots.
1435 -------------------------------------------------------------------------- */
1438 mark_root(void *user USED_IF_THREADS, StgClosure **root)
1440 // we stole a register for gct, but this function is called from
1441 // *outside* the GC where the register variable is not in effect,
1442 // so we need to save and restore it here. NB. only call
1443 // mark_root() from the main GC thread, otherwise gct will be
1445 gc_thread *saved_gct;
1454 /* -----------------------------------------------------------------------------
1455 Initialising the static object & mutable lists
1456 -------------------------------------------------------------------------- */
1459 zero_static_object_list(StgClosure* first_static)
1463 const StgInfoTable *info;
1465 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
1467 link = *STATIC_LINK(info, p);
1468 *STATIC_LINK(info,p) = NULL;
1472 /* ----------------------------------------------------------------------------
1473 Update the pointers from the task list
1475 These are treated as weak pointers because we want to allow a main
1476 thread to get a BlockedOnDeadMVar exception in the same way as any
1477 other thread. Note that the threads should all have been retained
1478    by GC by virtue of being on the all_threads list; we're just
1479 updating pointers here.
1480 ------------------------------------------------------------------------- */
1483 update_task_list (void)
1487 for (task = all_tasks; task != NULL; task = task->all_link) {
1488 if (!task->stopped && task->tso) {
1489 ASSERT(task->tso->bound == task);
1490 tso = (StgTSO *) isAlive((StgClosure *)task->tso);
1492 barf("task %p: main thread %d has been GC'd",
1505 /* ----------------------------------------------------------------------------
1506 Reset the sizes of the older generations when we do a major
1509 CURRENT STRATEGY: make all generations except zero the same size.
1510 We have to stay within the maximum heap size, and leave a certain
1511 percentage of the maximum heap size available to allocate into.
1512 ------------------------------------------------------------------------- */
1515 resize_generations (void)
1519 if (major_gc && RtsFlags.GcFlags.generations > 1) {
1520 nat live, size, min_alloc, words;
1521 nat max = RtsFlags.GcFlags.maxHeapSize;
1522 nat gens = RtsFlags.GcFlags.generations;
1524 // live in the oldest generations
1525 if (oldest_gen->steps[0].live_estimate != 0) {
1526 words = oldest_gen->steps[0].live_estimate;
1528 words = oldest_gen->steps[0].n_words;
1530 live = (words + BLOCK_SIZE_W - 1) / BLOCK_SIZE_W +
1531 oldest_gen->steps[0].n_large_blocks;
1533 // default max size for all generations except zero
1534 size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
1535 RtsFlags.GcFlags.minOldGenSize);
1537 // minimum size for generation zero
1538 min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
1539 RtsFlags.GcFlags.minAllocAreaSize);
1541 // Auto-enable compaction when the residency reaches a
1542 // certain percentage of the maximum heap size (default: 30%).
1543 if (RtsFlags.GcFlags.generations > 1 &&
1544 (RtsFlags.GcFlags.compact ||
1546 oldest_gen->steps[0].n_blocks >
1547 (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
1548 oldest_gen->steps[0].mark = 1;
1549 oldest_gen->steps[0].compact = 1;
1550 // debugBelch("compaction: on\n", live);
1552 oldest_gen->steps[0].mark = 0;
1553 oldest_gen->steps[0].compact = 0;
1554 // debugBelch("compaction: off\n", live);
1557 if (RtsFlags.GcFlags.sweep) {
1558 oldest_gen->steps[0].mark = 1;
1561 // if we're going to go over the maximum heap size, reduce the
1562 // size of the generations accordingly. The calculation is
1563 // different if compaction is turned on, because we don't need
1564 // to double the space required to collect the old generation.
1567 // this test is necessary to ensure that the calculations
1568 // below don't have any negative results - we're working
1569 // with unsigned values here.
1570 if (max < min_alloc) {
1574 if (oldest_gen->steps[0].compact) {
1575 if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
1576 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
1579 if ( (size * (gens - 1) * 2) + min_alloc > max ) {
1580 size = (max - min_alloc) / ((gens - 1) * 2);
1590 debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
1591 min_alloc, size, max);
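	// Worked example (illustrative figures only): with gens = 2, compaction
	// off, max = 100000 blocks and min_alloc = 10000 blocks, a copying
	// collection of the old generation may need twice its size, so the per-
	// generation cap becomes size = (max - min_alloc) / ((gens-1)*2) = 45000
	// blocks.  With compaction on the doubling is not needed, and the same
	// figures give size = (max - min_alloc) / ((gens-1)*2 - 1) = 90000 blocks.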
1594 for (g = 0; g < gens; g++) {
1595 generations[g].max_blocks = size;
1600 /* -----------------------------------------------------------------------------
1601 Calculate the new size of the nursery, and resize it.
1602 -------------------------------------------------------------------------- */
1605 resize_nursery (void)
1607 if (RtsFlags.GcFlags.generations == 1)
1608 { // Two-space collector:
1611 /* set up a new nursery. Allocate a nursery size based on a
1612 * function of the amount of live data (by default a factor of 2)
1613 * Use the blocks from the old nursery if possible, freeing up any
1616 * If we get near the maximum heap size, then adjust our nursery
1617 * size accordingly. If the nursery is the same size as the live
1618 * data (L), then we need 3L bytes. We can reduce the size of the
1619 * nursery to bring the required memory down near 2L bytes.
1621 * A normal 2-space collector would need 4L bytes to give the same
1622 * performance we get from 3L bytes, reducing to the same
1623 * performance at 2L bytes.
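     * For example (illustrative figures): with L = 100MB of live data and a
     * 100MB nursery, the nursery plus from-space plus to-space comes to
     * about 3L = 300MB; shrinking the nursery as we approach the maximum
     * heap size brings the requirement down towards 2L.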
1625 blocks = g0s0->n_blocks;
1627 if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
1628 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
1629 RtsFlags.GcFlags.maxHeapSize )
1631 long adjusted_blocks; // signed on purpose
1634 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
1636 debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
1637 RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);
1639 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
1640 	    if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */
1644 blocks = adjusted_blocks;
1648 blocks *= RtsFlags.GcFlags.oldGenFactor;
1649 if (blocks < RtsFlags.GcFlags.minAllocAreaSize)
1651 blocks = RtsFlags.GcFlags.minAllocAreaSize;
1654 resizeNurseries(blocks);
1656 else // Generational collector
1659 * If the user has given us a suggested heap size, adjust our
1660 * allocation area to make best use of the memory available.
1662 if (RtsFlags.GcFlags.heapSizeSuggestion)
1665 nat needed = calcNeeded(); // approx blocks needed at next GC
1667 /* Guess how much will be live in generation 0 step 0 next time.
1668 * A good approximation is obtained by finding the
1669 * percentage of g0s0 that was live at the last minor GC.
1671 * We have an accurate figure for the amount of copied data in
1672 * 'copied', but we must convert this to a number of blocks, with
1673 * a small adjustment for estimated slop at the end of a block
1678 g0s0_pcnt_kept = ((copied / (BLOCK_SIZE_W - 10)) * 100)
1679 / countNurseryBlocks();
1682 /* Estimate a size for the allocation area based on the
1683 * information available. We might end up going slightly under
1684 * or over the suggested heap size, but we should be pretty
1687 * Formula: suggested - needed
1688 * ----------------------------
1689 * 1 + g0s0_pcnt_kept/100
1691 * where 'needed' is the amount of memory needed at the next
1692 * collection for collecting all steps except g0s0.
1695 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
1696 (100 + (long)g0s0_pcnt_kept);
1698 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
1699 blocks = RtsFlags.GcFlags.minAllocAreaSize;
1702 resizeNurseries((nat)blocks);
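	    // Worked example (illustrative figures only): with a suggested heap
	    // of 10000 blocks, needed = 2000 blocks and g0s0_pcnt_kept = 25, the
	    // new allocation area is (10000 - 2000) * 100 / (100 + 25) = 6400
	    // blocks: 6400 blocks of fresh allocation, plus the ~25% of it that
	    // survives the next minor GC, plus 'needed', stays close to the
	    // suggestion.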
1706 // we might have added extra large blocks to the nursery, so
1707 // resize back to minAllocAreaSize again.
1708 resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
1713 /* -----------------------------------------------------------------------------
1714 Sanity code for CAF garbage collection.
1716 With DEBUG turned on, we manage a CAF list in addition to the SRT
1717 mechanism. After GC, we run down the CAF list and blackhole any
1718 CAFs which have been garbage collected. This means we get an error
1719 whenever the program tries to enter a garbage collected CAF.
1721 Any garbage collected CAFs are taken off the CAF list at the same
1723 -------------------------------------------------------------------------- */
1725 #if 0 && defined(DEBUG)
1732 const StgInfoTable *info;
1743 ASSERT(info->type == IND_STATIC);
1745 if (STATIC_LINK(info,p) == NULL) {
1746 debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%04lx", (long)p);
1748 SET_INFO(p,&stg_BLACKHOLE_info);
1749 p = STATIC_LINK2(info,p);
1753 pp = &STATIC_LINK2(info,p);
1760 debugTrace(DEBUG_gccafs, "%d CAFs live", i);