/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "OSThreads.h"
#include "LdvProfile.h"
#include "BlockAlloc.h"
#include "ParTicky.h"           // ToDo: move into Rts.h
#include "RtsSignals.h"
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
#include "RetainerProfile.h"
#include "RaiseAsync.h"

#include <string.h>             // for memset()
/* -----------------------------------------------------------------------------
   Global variables
   -------------------------------------------------------------------------- */
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;             // live static objects
StgClosure* scavenged_static_objects;   // static objects scavenged so far
SpinLock static_objects_sync;
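
/* A minimal sketch (illustrative, not part of the collector) of the
 * consing discipline described above.  It assumes STATIC_LINK(info,p)
 * yields the address of p's static link field, as used elsewhere in
 * this file: a zero link field means "not on any list", which is why
 * the end of the list must be marked with '1' rather than NULL.
 */
#if 0
static void
push_static_object (StgClosure *p, const StgInfoTable *info)
{
    if (*STATIC_LINK(info,p) == NULL) {   // not yet on any list
        *STATIC_LINK(info,p) = static_objects;
        static_objects = p;   // cons onto the front: depth-first scavenging
    }
}
#endif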
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
nat N;
rtsBool major_gc;
/* Data used for allocation area sizing.
 */
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
/* Thread-local data for each GC thread
 */
gc_thread *gc_threads = NULL;
gc_thread *gct = NULL;          // this thread's gct TODO: make thread-local

long copied;        // *words* copied & scavenged during this GC
long scavd_copied;  // *words* copied only during this GC
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */
static void mark_root               (StgClosure **root);
static void zero_static_object_list (StgClosure* first_static);
static void initialise_N            (rtsBool force_major_gc);
static void alloc_gc_threads        (void);
static void init_collected_gen      (nat g, nat threads);
static void init_uncollected_gen    (nat g, nat threads);
static void init_gc_thread          (gc_thread *t);
static void update_task_list        (void);
static void resize_generations      (void);
static void resize_nursery          (void);

#if 0 && defined(DEBUG)
static void gcCAFs                  (void);
#endif
/* -----------------------------------------------------------------------------
   The mark bitmap & stack.
   -------------------------------------------------------------------------- */
#define MARK_STACK_BLOCKS 4

bdescr *mark_stack_bdescr;
StgPtr *mark_stack;
StgPtr *mark_sp;
StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
rtsBool mark_stack_overflowed;
bdescr *oldgen_scan_bd;
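
/* Sketch (illustrative, not the real RTS push) of how a bounded mark
 * stack interacts with the overflow flag above: once the stack fills,
 * we stop pushing and the collector falls back to a linear scan of
 * the old generation, tracked by oldgen_scan_bd.
 */
#if 0
static void
push_mark_stack_sketch (StgPtr p)
{
    if (mark_sp < mark_splim) {
        *mark_sp++ = p;                   // room left: push the pointer
    } else {
        mark_stack_overflowed = rtsTrue;  // overflow: trigger linear scan
    }
}
#endif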
/* -----------------------------------------------------------------------------
   GarbageCollect: the main entry point to the garbage collector.

   Locks held: all capabilities are held throughout GarbageCollect().
   -------------------------------------------------------------------------- */
void
GarbageCollect ( rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated;
  lnat oldgen_saved_blocks = 0;
  nat n_threads;     // number of threads participating in GC
  nat g, s, t;
#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif
  debugTrace(DEBUG_gc, "starting GC");

#if defined(RTS_USER_SIGNALS)
  if (RtsFlags.MiscFlags.install_signal_handlers) {
      // block signals
      blockUserSignals();
  }
#endif

  // tell the STM to discard any cached closures it's hoping to re-use
  stmPreGCHook();

  // tell the stats department that we've started a GC
  stat_startGC();

#ifdef DEBUG
  // check for memory leaks if DEBUG is on
  memInventory();
#endif

#ifdef PROFILING
  // attribute any costs to CCS_GC
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif
  /* Approximate how much we allocated.
   * ToDo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  initialise_N(force_major_gc);
  /* Allocate + initialise the gc_thread structures.
   */
  alloc_gc_threads();

  /* How many threads will be participating in this GC?
   * We don't try to parallelise minor GC.
   */
#if defined(THREADED_RTS)
  if (N == 0) {
      n_threads = 1;
  } else {
      n_threads = RtsFlags.ParFlags.gcThreads;
  }
#else
  n_threads = 1;
#endif
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  initSpinLock(&static_objects_sync);
  // Initialise all the generations/steps that we're collecting.
  for (g = 0; g <= N; g++) {
      init_collected_gen(g,n_threads);
  }

  // Initialise all the generations/steps that we're *not* collecting.
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
      init_uncollected_gen(g,n_threads);
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp    = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  // Initialise all our gc_thread structures
  for (t = 0; t < n_threads; t++) {
      init_gc_thread(&gc_threads[t]);
  }

  // start threads etc.
  // For now, we just have one thread, and set gct to gc_threads[0]
  gct = &gc_threads[0];
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also do them in reverse generation order, for the usual reason:
   * namely to reduce the likelihood of spurious old->new pointers.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = allocBlock();
      // mut_list always has at least one block.
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      scavenge_mutable_list(&generations[g]);
  }
  // follow roots from the CAF list (used by GHCi)
  markCAFs(mark_root);

  // follow all the roots that the application knows about.
  GetRoots(mark_root);

  // Mark the weak pointer list, and prepare to detect dead weak pointers.
  markWeakPtrList();
  initWeakForGC();

  // Mark the stable pointer table.
  markStablePtrTable(mark_root);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
      rtsBool flag;
  loop:
      flag = rtsFalse;

      scavenge_loop();

      // if any blackholes are alive, make the threads that wait on
      // them alive too.
      if (traverseBlackholeQueue())
          flag = rtsTrue;

      if (flag) { goto loop; }

      // must be last...  invariant is that everything is fully
      // scavenged at this point.
      if (traverseWeakPtrList()) { // returns rtsTrue if evaced something
          goto loop;
      }
  }
  // Update pointers from the Task list
  update_task_list();

  // Now see which stable names are still alive.
  gcStablePtrTable();

#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
      || RtsFlags.ProfFlags.bioSelector != NULL)
      LdvCensusForDead(N);
#endif
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
      compact();
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  // Two-space collector: free the old to-space.
  // g0s0->old_blocks is the old nursery
  // g0s0->blocks is to-space from the previous GC
  if (RtsFlags.GcFlags.generations == 1) {
      if (g0s0->blocks != NULL) {
          freeChain(g0s0->blocks);
          g0s0->blocks = NULL;
      }
  }
  // For each workspace, in each thread:
  //    * clear the BF_EVACUATED flag from each copied block
  //    * move the copied blocks to the step
  {
      gc_thread *thr;
      step_workspace *ws;
      bdescr *prev;

      for (t = 0; t < n_threads; t++) {
          thr = &gc_threads[t];

          for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
              for (s = 0; s < generations[g].n_steps; s++) {
                  ws = &thr->steps[g][s];
                  if (g==0 && s==0) continue;

                  ASSERT( ws->scan_bd == ws->todo_bd );
                  ASSERT( ws->scan_bd ? ws->scan == ws->scan_bd->free : 1 );

                  // Push the final block
                  if (ws->scan_bd) { push_scan_block(ws->scan_bd, ws); }

                  // update stats: we haven't counted the block at the
                  // front of the scavd_list yet.
                  scavd_copied += ws->scavd_list->free - ws->scavd_list->start;

                  ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks);

                  prev = ws->scavd_list;
                  for (bd = ws->scavd_list; bd != NULL; bd = bd->link) {
                      bd->flags &= ~BF_EVACUATED;  // now from-space
                      prev = bd;
                  }
                  prev->link = ws->stp->blocks;
                  ws->stp->blocks = ws->scavd_list;
                  ws->stp->n_blocks += ws->n_scavd_blocks;
                  ASSERT(countBlocks(ws->stp->blocks) == ws->stp->n_blocks);
              }
          }
      }
  }
  // Two-space collector: swap the semi-spaces around.
  // Currently: g0s0->old_blocks is the old nursery
  //            g0s0->blocks is to-space from this GC
  // We want these the other way around.
  if (RtsFlags.GcFlags.generations == 1) {
      bdescr *nursery_blocks = g0s0->old_blocks;
      nat n_nursery_blocks = g0s0->n_old_blocks;
      g0s0->old_blocks = g0s0->blocks;
      g0s0->n_old_blocks = g0s0->n_blocks;
      g0s0->blocks = nursery_blocks;
      g0s0->n_blocks = n_nursery_blocks;
  }
  /* run through all the generations/steps and tidy up
   */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
        generations[g].collections++; // for stats
    }

    // Count the mutable list as bytes "copied" for the purposes of
    // stats.  Every mutable list is copied during every GC.
    if (g > 0) {
        nat mut_list_size = 0;
        for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
            mut_list_size += bd->free - bd->start;
        }
        copied += mut_list_size;

        debugTrace(DEBUG_gc,
                   "mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d others)",
                   (unsigned long)(mut_list_size * sizeof(W_)),
                   mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS);
    }
    for (s = 0; s < generations[g].n_steps; s++) {
        bdescr *next;
        stp = &generations[g].steps[s];

        // for generations we collected...
        if (g <= N) {

            /* free old memory and shift to-space into from-space for all
             * the collected steps (except the allocation area).  These
             * freed blocks will probably be quickly recycled.
             */
            if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
                if (stp->is_compacted)
                {
                    // for a compacted step, just shift the new to-space
                    // onto the front of the now-compacted existing blocks.
                    for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                        bd->flags &= ~BF_EVACUATED;  // now from-space
                    }
                    // tack the new blocks on the end of the existing blocks
                    if (stp->old_blocks != NULL) {
                        for (bd = stp->old_blocks; bd != NULL; bd = next) {
                            // NB. this step might not be compacted next
                            // time, so reset the BF_COMPACTED flags.
                            // They are set before GC if we're going to
                            // compact.  (search for BF_COMPACTED above).
                            bd->flags &= ~BF_COMPACTED;
                            next = bd->link;
                            if (next == NULL) {
                                bd->link = stp->blocks;
                            }
                        }
                        stp->blocks = stp->old_blocks;
                    }
                    // add the new blocks to the block tally
                    stp->n_blocks += stp->n_old_blocks;
                    ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
                }
                else // not compacted
                {
                    freeChain(stp->old_blocks);
                }
                stp->old_blocks = NULL;
                stp->n_old_blocks = 0;
            }
            /* LARGE OBJECTS.  The current live large objects are chained on
             * scavenged_large, having been moved during garbage
             * collection from large_objects.  Any objects left on the
             * large_objects list are therefore dead, so we free them here.
             */
            for (bd = stp->large_objects; bd != NULL; bd = next) {
                next = bd->link;
                freeGroup(bd);
            }

            // update the count of blocks used by large objects
            for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
                bd->flags &= ~BF_EVACUATED;
            }
            stp->large_objects  = stp->scavenged_large_objects;
            stp->n_large_blocks = stp->n_scavenged_large_blocks;
        }
        else // for older generations...
        {
            /* For older generations, we need to append the
             * scavenged_large_object list (i.e. large objects that have been
             * promoted during this GC) to the large_object list for that step.
             */
            for (bd = stp->scavenged_large_objects; bd; bd = next) {
                next = bd->link;
                bd->flags &= ~BF_EVACUATED;
                dbl_link_onto(bd, &stp->large_objects);
            }

            // add the new blocks we promoted during this GC
            stp->n_large_blocks += stp->n_scavenged_large_blocks;
        }
    }
  }
  // update the max size of older generations after a major GC
  resize_generations();

  // Guess the amount of live data for stats.
  live = calcLive();

  // Free the small objects allocated via allocate(), since these will
  // all have been copied into G0S1 now.
  if (RtsFlags.GcFlags.generations > 1) {
      if (g0s0->blocks != NULL) {
          freeChain(g0s0->blocks);
          g0s0->blocks = NULL;
      }
      g0s0->n_blocks = 0;
  }
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;
  // Free the mark stack.
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  // Free any bitmaps.
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->bitmap != NULL) {
              freeGroup(stp->bitmap);
              stp->bitmap = NULL;
          }
      }
  }
  resize_nursery();

  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
      zero_static_object_list(scavenged_static_objects);
  }
  // start any pending finalizers
  RELEASE_SM_LOCK;
  scheduleFinalizers(last_free_capability, old_weak_ptr_list);
  ACQUIRE_SM_LOCK;

  // send exceptions to any threads which were about to die
  RELEASE_SM_LOCK;
  resurrectThreads(resurrected_threads);
  ACQUIRE_SM_LOCK;

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());
#ifdef DEBUG
  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */
#endif

#ifdef PROFILING
  // restore enclosing cost centre
  CCCS = prev_CCS;
#endif

#ifdef DEBUG
  // check for memory leaks if DEBUG is on
  memInventory();
#endif
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, live, copied, scavd_copied, N);

#if defined(RTS_USER_SIGNALS)
  if (RtsFlags.MiscFlags.install_signal_handlers) {
      // unblock signals again
      unblockUserSignals();
  }
#endif
}
/* ---------------------------------------------------------------------------
   Where are the roots that we know about?

        - all the threads on the runnable queue
        - all the threads on the blocked queue
        - all the threads on the sleeping queue
        - all the threads currently executing a _ccall_GC
        - all the "main threads"

   ------------------------------------------------------------------------ */
/* This has to be protected either by the scheduler monitor, or by the
   garbage collection monitor (probably the latter).
*/

void
GetRoots( evac_fn evac )
{
    nat i;
    Capability *cap;
    Task *task;

#if defined(GRAN)
    for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
        if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL)))
            evac((StgClosure **)&run_queue_hds[i]);
        if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL)))
            evac((StgClosure **)&run_queue_tls[i]);

        if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL)))
            evac((StgClosure **)&blocked_queue_hds[i]);
        if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL)))
            evac((StgClosure **)&blocked_queue_tls[i]);
        if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL)))
            evac((StgClosure **)&ccalling_threadss[i]);
    }
#endif
    for (i = 0; i < n_capabilities; i++) {
        cap = &capabilities[i];
        evac((StgClosure **)(void *)&cap->run_queue_hd);
        evac((StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
        evac((StgClosure **)(void *)&cap->wakeup_queue_hd);
        evac((StgClosure **)(void *)&cap->wakeup_queue_tl);
#endif
        for (task = cap->suspended_ccalling_tasks; task != NULL;
             task=task->next) {
            debugTrace(DEBUG_sched,
                       "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
            evac((StgClosure **)(void *)&task->suspended_tso);
        }
    }
#if !defined(THREADED_RTS)
    evac((StgClosure **)(void *)&blocked_queue_hd);
    evac((StgClosure **)(void *)&blocked_queue_tl);
    evac((StgClosure **)(void *)&sleeping_queue);
#endif

    // evac((StgClosure **)&blackhole_queue);

#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL) || defined(GRAN)
    markSparkQueue(evac);
#endif

#if defined(RTS_USER_SIGNALS)
    // mark the signal handlers (signals should be already blocked)
    markSignalHandlers(evac);
#endif
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
         It untags and (if needed) retags pointers to closures.
   -------------------------------------------------------------------------- */
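
/* Typical use (sketch): re-resolve a weak root after GC and drop it if
 * the referent died.  'my_root' is a hypothetical global, not part of
 * the RTS.
 */
#if 0
    StgClosure *q = isAlive(my_root);
    if (q == NULL) {
        my_root = NULL;   // referent was garbage collected
    } else {
        my_root = q;      // referent survived, possibly at a new address
    }
#endif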
StgClosure *
isAlive(StgClosure *p)
{
    const StgInfoTable *info;
    bdescr *bd;
    StgWord tag;
    StgClosure *q;

loop:
    /* The tag and the pointer are split, to be merged later when needed. */
    tag = GET_CLOSURE_TAG(p);
    q = UNTAG_CLOSURE(p);

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    // ignore static closures
    //
    // ToDo: for static closures, check the static link field.
    // Problem here is that we sometimes don't set the link field, eg.
    // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
    //
    if (!HEAP_ALLOCED(q)) {
        return p;
    }

    // ignore closures in generations that we're not collecting.
    bd = Bdescr((P_)q);
    if (bd->gen_no > N) {
        return p;
    }

    // if it's a pointer into to-space, then we're done
    if (bd->flags & BF_EVACUATED) {
        return p;
    }

    // large objects use the evacuated flag
    if (bd->flags & BF_LARGE) {
        return NULL;
    }

    // check the mark bit for compacted steps
    if ((bd->flags & BF_COMPACTED) && is_marked((P_)q,bd)) {
        return p;
    }

    info = get_itbl(q);

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:            // rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
        // follow indirections
        p = ((StgInd *)q)->indirectee;
        goto loop;

    case EVACUATED:
        return ((StgEvacuated *)q)->evacuee;

    case TSO:
        if (((StgTSO *)q)->what_next == ThreadRelocated) {
            p = (StgClosure *)((StgTSO *)q)->link;
            goto loop;
        }
        return p;

    default:
        return NULL;
    }
}
/* -----------------------------------------------------------------------------
   Figure out which generation to collect, initialise N and major_gc.
   -------------------------------------------------------------------------- */
static void
initialise_N (rtsBool force_major_gc)
{
    nat g;

    if (force_major_gc) {
        N = RtsFlags.GcFlags.generations - 1;
        major_gc = rtsTrue;
    } else {
        N = 0;
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            if (generations[g].steps[0].n_blocks +
                generations[g].steps[0].n_large_blocks
                >= generations[g].max_blocks) {
                N = g;
            }
        }
        major_gc = (N == RtsFlags.GcFlags.generations-1);
    }
}
/* -----------------------------------------------------------------------------
   Initialise the gc_thread structures.
   -------------------------------------------------------------------------- */
static void
alloc_gc_thread (gc_thread *t, int n)
{
    nat g, s;
    step_workspace *ws;

    t->free_blocks = NULL;

    t->steps = stgMallocBytes(RtsFlags.GcFlags.generations *
                              sizeof(step_workspace *),
                              "initialise_gc_thread");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++)
    {
        t->steps[g] = stgMallocBytes(generations[g].n_steps *
                                     sizeof(step_workspace),
                                     "initialise_gc_thread/2");

        for (s = 0; s < generations[g].n_steps; s++)
        {
            ws = &t->steps[g][s];
            ws->stp = &generations[g].steps[s];

            ws->todo_bd = NULL;
            ws->buffer_todo_bd = NULL;

            ws->scavd_list = NULL;
            ws->n_scavd_blocks = 0;
        }
    }
}
static void
alloc_gc_threads (void)
{
    if (gc_threads == NULL) {
#if defined(THREADED_RTS)
        nat i;
        gc_threads = stgMallocBytes (RtsFlags.ParFlags.gcThreads *
                                     sizeof(gc_thread),
                                     "alloc_gc_threads");

        for (i = 0; i < RtsFlags.ParFlags.gcThreads; i++) {
            alloc_gc_thread(&gc_threads[i], i);
        }
#else
        gc_threads = stgMallocBytes (sizeof(gc_thread),
                                     "alloc_gc_threads");
        alloc_gc_thread(gc_threads, 0);
#endif
    }
}
/* ----------------------------------------------------------------------------
   Initialise a generation that is to be collected
   ------------------------------------------------------------------------- */
static void
init_collected_gen (nat g, nat n_threads)
{
    nat i, s, t;
    step_workspace *ws;
    step *stp;
    bdescr *bd;

    // Throw away the current mutable list.  Invariant: the mutable
    // list always has at least one block; this means we can avoid a
    // check for NULL in recordMutable().
    if (g != 0) {
        freeChain(generations[g].mut_list);
        generations[g].mut_list = allocBlock();
        for (i = 0; i < n_capabilities; i++) {
            freeChain(capabilities[i].mut_lists[g]);
            capabilities[i].mut_lists[g] = allocBlock();
        }
    }

    for (s = 0; s < generations[g].n_steps; s++) {

        // generation 0, step 0 doesn't need to-space
        if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }

        stp = &generations[g].steps[s];
        ASSERT(stp->gen_no == g);

        // deprecate the existing blocks
        stp->old_blocks   = stp->blocks;
        stp->n_old_blocks = stp->n_blocks;
        stp->blocks       = NULL;
        stp->n_blocks     = 0;

        // we don't have any to-be-scavenged blocks yet

        // initialise the large object queues.
        stp->scavenged_large_objects = NULL;
        stp->n_scavenged_large_blocks = 0;

        // mark the large objects as not evacuated yet
        for (bd = stp->large_objects; bd; bd = bd->link) {
            bd->flags &= ~BF_EVACUATED;
        }

        // for a compacted step, we need to allocate the bitmap
        if (stp->is_compacted) {
            nat bitmap_size; // in bytes
            bdescr *bitmap_bdescr;
            StgWord *bitmap;

            bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
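            // Illustrative numbers (not from the source): the bitmap
            // holds one bit per word, so on a 32-bit machine a
            // 4096-byte block (1024 words) needs 4096/(4*8) = 128
            // bytes of bitmap.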
            if (bitmap_size > 0) {
                bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
                                           / BLOCK_SIZE);
                stp->bitmap = bitmap_bdescr;
                bitmap = bitmap_bdescr->start;

                debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
                           bitmap_size, bitmap);

                // don't forget to fill it with zeros!
                memset(bitmap, 0, bitmap_size);

                // For each block in this step, point to its bitmap from the
                // block descriptor.
                for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
                    bd->u.bitmap = bitmap;
                    bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);

                    // Also at this point we set the BF_COMPACTED flag
                    // for this block.  The invariant is that
                    // BF_COMPACTED is always unset, except during GC
                    // when it is set on those blocks which will be
                    // compacted.
                    bd->flags |= BF_COMPACTED;
                }
            }
        }
    }
    // For each GC thread, for each step, allocate a "todo" block to
    // store evacuated objects to be scavenged, and a block to store
    // evacuated objects that do not need to be scavenged.
    for (t = 0; t < n_threads; t++) {
        for (s = 0; s < generations[g].n_steps; s++) {

            // we don't copy objects into g0s0, unless -G0
            if (g==0 && s==0 && RtsFlags.GcFlags.generations > 1) continue;

            ws = &gc_threads[t].steps[g][s];

            ws->todo_large_objects = NULL;

            // allocate the first to-space block; extra blocks will be
            // chained on as necessary.
            ws->todo_bd = NULL;
            ws->buffer_todo_bd = NULL;
            gc_alloc_todo_block(ws);

            // allocate a block for "already scavenged" objects.  This goes
            // on the front of the stp->blocks list, so it won't be
            // traversed by the scavenging sweep.
            ws->scavd_list = NULL;
            ws->n_scavd_blocks = 0;
            gc_alloc_scavd_block(ws);
        }
    }
}
/* ----------------------------------------------------------------------------
   Initialise a generation that is *not* to be collected
   ------------------------------------------------------------------------- */
static void
init_uncollected_gen (nat g, nat threads)
{
    nat s, t, i;
    step_workspace *ws;
    step *stp;
    bdescr *bd;

    for (s = 0; s < generations[g].n_steps; s++) {
        stp = &generations[g].steps[s];
        stp->scavenged_large_objects = NULL;
        stp->n_scavenged_large_blocks = 0;
    }

    for (t = 0; t < threads; t++) {
        for (s = 0; s < generations[g].n_steps; s++) {

            ws = &gc_threads[t].steps[g][s];
            stp = ws->stp;

            ws->buffer_todo_bd = NULL;
            ws->todo_large_objects = NULL;

            // If the block at the head of the list in this generation
            // is less than 3/4 full, then use it as a todo block.
            if (isPartiallyFull(stp->blocks))
            {
                ws->todo_bd = stp->blocks;
                stp->blocks = stp->blocks->link;
                ws->todo_bd->link = NULL;

                // this block is also the scan block; we must scan
                // from the current end point.
                ws->scan_bd = ws->todo_bd;
                ws->scan = ws->scan_bd->free;

                // subtract the contents of this block from the stats,
                // because we'll count the whole block later.
                copied -= ws->scan_bd->free - ws->scan_bd->start;
            }
            else
            {
                ws->scan_bd = NULL;
                ws->scan = NULL;
                ws->todo_bd = NULL;
                gc_alloc_todo_block(ws);
            }

            // Do the same trick for the scavd block
            if (isPartiallyFull(stp->blocks))
            {
                ws->scavd_list = stp->blocks;
                stp->blocks = stp->blocks->link;
                ws->scavd_list->link = NULL;
                ws->n_scavd_blocks = 1;
                // subtract the contents of this block from the stats,
                // because we'll count the whole block later.
                scavd_copied -= ws->scavd_list->free - ws->scavd_list->start;
            }
            else
            {
                ws->scavd_list = NULL;
                ws->n_scavd_blocks = 0;
                gc_alloc_scavd_block(ws);
            }
        }
    }
    // Move the private mutable lists from each capability onto the
    // main mutable list for the generation.
    for (i = 0; i < n_capabilities; i++) {
        for (bd = capabilities[i].mut_lists[g];
             bd->link != NULL; bd = bd->link) {
            /* nothing to do: we're just finding the tail */
        }
        bd->link = generations[g].mut_list;
        generations[g].mut_list = capabilities[i].mut_lists[g];
        capabilities[i].mut_lists[g] = allocBlock();
    }
}
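
/* The "3/4 full" test used above is only sketched here; the real
 * definition lives elsewhere in the RTS and may differ.
 */
#if 0
#define isPartiallyFull(bd) \
    ((bd)->free + BLOCK_SIZE_W/4 <= (bd)->start + BLOCK_SIZE_W)
#endif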
/* -----------------------------------------------------------------------------
   Initialise a gc_thread before GC
   -------------------------------------------------------------------------- */

static void
init_gc_thread (gc_thread *t)
{
    t->failed_to_evac = rtsFalse;
    t->eager_promotion = rtsTrue;
    t->thunk_selector_depth = 0;
}
/* -----------------------------------------------------------------------------
   Function we pass to GetRoots to evacuate roots.
   -------------------------------------------------------------------------- */

static void
mark_root(StgClosure **root)
{
    *root = evacuate(*root);
}
/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = *STATIC_LINK(info, p);
    *STATIC_LINK(info,p) = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        SET_INFO(c, c->saved_info);
        c->saved_info = NULL;
        // could, but not necessary: c->static_link = NULL;
    }
    revertible_caf_list = NULL;
}
void
markCAFs( evac_fn evac )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        evac(&c->indirectee);
    }
    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        evac(&c->indirectee);
    }
}
/* ----------------------------------------------------------------------------
   Update the pointers from the task list

   These are treated as weak pointers because we want to allow a main
   thread to get a BlockedOnDeadMVar exception in the same way as any
   other thread.  Note that the threads should all have been retained
   by GC by virtue of being on the all_threads list; we're just
   updating pointers here.
   ------------------------------------------------------------------------- */
static void
update_task_list (void)
{
    Task *task;
    StgTSO *tso;

    for (task = all_tasks; task != NULL; task = task->all_link) {
        if (!task->stopped && task->tso) {
            ASSERT(task->tso->bound == task);
            tso = (StgTSO *) isAlive((StgClosure *)task->tso);
            if (tso == NULL) {
                barf("task %p: main thread %d has been GC'd",
                     (void *)task, task->tso->id);
            }
            task->tso = tso;
        }
    }
}
/* ----------------------------------------------------------------------------
   Reset the sizes of the older generations when we do a major
   collection.

   CURRENT STRATEGY: make all generations except zero the same size.
   We have to stay within the maximum heap size, and leave a certain
   percentage of the maximum heap size available to allocate into.
   ------------------------------------------------------------------------- */
static void
resize_generations (void)
{
    nat g;

    if (major_gc && RtsFlags.GcFlags.generations > 1) {
        nat live, size, min_alloc;
        nat max  = RtsFlags.GcFlags.maxHeapSize;
        nat gens = RtsFlags.GcFlags.generations;

        // live in the oldest generations
        live = oldest_gen->steps[0].n_blocks +
               oldest_gen->steps[0].n_large_blocks;

        // default max size for all generations except zero
        size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
                       RtsFlags.GcFlags.minOldGenSize);

        // minimum size for generation zero
        min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
                            RtsFlags.GcFlags.minAllocAreaSize);

        // Auto-enable compaction when the residency reaches a
        // certain percentage of the maximum heap size (default: 30%).
        if (RtsFlags.GcFlags.generations > 1 &&
            (RtsFlags.GcFlags.compact ||
             (max > 0 &&
              oldest_gen->steps[0].n_blocks >
              (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
            oldest_gen->steps[0].is_compacted = 1;
            // debugBelch("compaction: on\n");
        } else {
            oldest_gen->steps[0].is_compacted = 0;
            // debugBelch("compaction: off\n");
        }

        // if we're going to go over the maximum heap size, reduce the
        // size of the generations accordingly.  The calculation is
        // different if compaction is turned on, because we don't need
        // to double the space required to collect the old generation.
        if (max != 0) {

            // this test is necessary to ensure that the calculations
            // below don't have any negative results - we're working
            // with unsigned values here.
            if (max < min_alloc) {
                max = min_alloc;
            }

            if (oldest_gen->steps[0].is_compacted) {
                if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
                    size = (max - min_alloc) / ((gens - 1) * 2 - 1);
                }
            } else {
                if ( (size * (gens - 1) * 2) + min_alloc > max ) {
                    size = (max - min_alloc) / ((gens - 1) * 2);
                }
            }
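
            /* Worked example (illustrative): with gens = 3, max = 1000
             * blocks and min_alloc = 100, the non-compacting check is
             * size*2*2 + 100 > 1000, so size is clamped to
             * (1000-100)/4 = 225 blocks per old generation; each old
             * generation effectively reserves as much again for its
             * to-space during collection.
             */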
        }

#if 0
        debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
                   min_alloc, size, max);
#endif

        for (g = 0; g < gens; g++) {
            generations[g].max_blocks = size;
        }
    }
}
/* -----------------------------------------------------------------------------
   Calculate the new size of the nursery, and resize it.
   -------------------------------------------------------------------------- */
static void
resize_nursery (void)
{
    if (RtsFlags.GcFlags.generations == 1)
    {   // Two-space collector:
        nat blocks;

        /* set up a new nursery.  Allocate a nursery size based on a
         * function of the amount of live data (by default a factor of 2)
         * Use the blocks from the old nursery if possible, freeing up any
         * left over blocks.
         *
         * If we get near the maximum heap size, then adjust our nursery
         * size accordingly.  If the nursery is the same size as the live
         * data (L), then we need 3L bytes.  We can reduce the size of the
         * nursery to bring the required memory down near 2L bytes.
         *
         * A normal 2-space collector would need 4L bytes to give the same
         * performance we get from 3L bytes, reducing to the same
         * performance at 2L bytes.
         */
        blocks = g0s0->n_old_blocks;

        if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
             blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
             RtsFlags.GcFlags.maxHeapSize )
        {
            long adjusted_blocks;  // signed on purpose
            int pc_free;

            adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);

            debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
                       RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);

            pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
            if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */
            {
                heapOverflow();
            }
            blocks = adjusted_blocks;
        }
        else
        {
            blocks *= RtsFlags.GcFlags.oldGenFactor;
            if (blocks < RtsFlags.GcFlags.minAllocAreaSize)
            {
                blocks = RtsFlags.GcFlags.minAllocAreaSize;
            }
        }
        resizeNurseries(blocks);
    }
    else  // Generational collector
    {
        /*
         * If the user has given us a suggested heap size, adjust our
         * allocation area to make best use of the memory available.
         */
        if (RtsFlags.GcFlags.heapSizeSuggestion)
        {
            long blocks;
            nat needed = calcNeeded();  // approx blocks needed at next GC

            /* Guess how much will be live in generation 0 step 0 next time.
             * A good approximation is obtained by finding the
             * percentage of g0s0 that was live at the last minor GC.
             *
             * We have an accurate figure for the amount of copied data in
             * 'copied', but we must convert this to a number of blocks, with
             * a small adjustment for estimated slop at the end of a block
             * (- 10 words).
             */
            if (N == 0)
            {
                g0s0_pcnt_kept = ((copied / (BLOCK_SIZE_W - 10)) * 100)
                    / countNurseryBlocks();
            }
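
            /* Illustrative numbers (not from the source): copied =
             * 20,280 words with BLOCK_SIZE_W = 1024 gives 20280/1014
             * = 20 blocks kept; over a 64-block nursery that is
             * 20*100/64 = 31 percent.
             */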
            /* Estimate a size for the allocation area based on the
             * information available.  We might end up going slightly under
             * or over the suggested heap size, but we should be pretty
             * close on average.
             *
             * Formula:            suggested - needed
             *                ----------------------------
             *                    1 + g0s0_pcnt_kept/100
             *
             * where 'needed' is the amount of memory needed at the next
             * collection for collecting all steps except g0s0.
             */
            blocks =
                (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
                (100 + (long)g0s0_pcnt_kept);
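
            /* Illustrative numbers (not from the source): a suggestion
             * of 4096 blocks with needed = 1024 and g0s0_pcnt_kept = 30
             * gives (4096-1024)*100/130 = 2363 blocks of allocation
             * area; the denominator accounts for the ~30% expected to
             * survive the next minor GC.
             */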
            if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
                blocks = RtsFlags.GcFlags.minAllocAreaSize;
            }

            resizeNurseries((nat)blocks);
        }
        else
        {
            // we might have added extra large blocks to the nursery, so
            // resize back to minAllocAreaSize again.
            resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
        }
    }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#if 0 && defined(DEBUG)

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {
      info = get_itbl(p);

      ASSERT(info->type == IND_STATIC);

      if (STATIC_LINK(info,p) == NULL) {
          debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%04lx", (long)p);
          SET_INFO(p,&stg_BLACKHOLE_info);
          p = STATIC_LINK2(info,p);
          *pp = p;
      }
      else {
          pp = &STATIC_LINK2(info,p);
          p = *pp;
          i++;
      }
  }

  debugTrace(DEBUG_gccafs, "%d CAFs live", i);
}
#endif