/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "OSThreads.h"
#include "LdvProfile.h"
#include "BlockAlloc.h"
#include "ParTicky.h"   // ToDo: move into Rts.h
#include "RtsSignals.h"
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
#include "RetainerProfile.h"
#include "RaiseAsync.h"

#include <string.h>     // for memset()
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects onto the list.  We use a stack-like list, consing
 * objects onto the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * doesn't affect correctness).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
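
/* Illustrative sketch (not part of the collector): consing a static
 * object onto the front of the list via its static link field.  The
 * STATIC_LINK accessor is the one used later in this file; the helper
 * name push_static_object is hypothetical.
 *
 *     static void push_static_object (StgClosure *p, const StgInfoTable *info)
 *     {
 *         if (*STATIC_LINK(info,p) == NULL) {      // not yet on any list
 *             *STATIC_LINK(info,p) = static_objects;
 *             static_objects = p;                  // cons onto the front
 *         }
 *     }
 *
 * Because a zero link field means "not on a list", the end of the list
 * is the sentinel END_OF_STATIC_LIST (1), not NULL.
 */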
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
nat N;
rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable.)
 */
nat evac_gen;

/* Whether to do eager promotion or not.
 */
rtsBool eager_promotion;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
rtsBool failed_to_evac;

/* Data used for allocation area sizing.
 */
lnat new_blocks;        // blocks allocated during this GC
lnat new_scavd_blocks;  // ditto, but depth-first blocks
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void mark_root               ( StgClosure **root );
static void zero_static_object_list ( StgClosure* first_static );

#if 0 && defined(DEBUG)
static void gcCAFs                  ( void );
#endif

/* -----------------------------------------------------------------------------
   Inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

bdescr *mark_stack_bdescr;
StgPtr *mark_stack;
StgPtr *mark_sp;
StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
rtsBool mark_stack_overflowed;
bdescr *oldgen_scan_bd;
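
/* Illustrative sketch of how the overflow flag is meant to be used
 * (the helper below is hypothetical, not the RTS's actual push
 * routine): a push that would run past mark_splim records an overflow
 * instead of growing the stack, and the collector later falls back to
 * a linear scan (via oldgen_scan_bd) of the compacted generation.
 *
 *     static void push_mark_stack_checked (StgPtr p)
 *     {
 *         if (mark_sp >= mark_splim) {
 *             mark_stack_overflowed = rtsTrue;  // scan linearly later
 *             return;
 *         }
 *         *mark_sp++ = p;
 *     }
 */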
/* -----------------------------------------------------------------------------
   GarbageCollect

   Rough outline of the algorithm: for garbage collecting generation N
   (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all generations (mutable_list).

     - for each pointer, evacuate the object it points to into either

       + to-space of the step given by step->to, which is the next
         highest step in this generation or the first step in the next
         generation if this is the last step.

       + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
         When we evacuate an object we attempt to evacuate
         everything it points to into the same generation - this is
         achieved by setting evac_gen to the desired generation.  If
         we can't do this, then an entry in the mut list has to
         be made for the cross-generation pointer.

       + if the object is already in a generation > N, then leave
         it alone.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   Locks held: all capabilities are held throughout GarbageCollect().
   -------------------------------------------------------------------------- */
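
/* A minimal sketch of the contract this file assumes of evacuate()
 * (defined elsewhere; this is illustrative, not the real declaration
 * site):
 *
 *     // Copy the object pointed to by p into to-space - either the
 *     // step given by its current step's ->to field, or
 *     // generations[evac_gen]->steps[0] if that is older - and
 *     // return the new address.  Sets failed_to_evac if the object
 *     // could not be moved into the generation requested by evac_gen.
 *     StgClosure *evacuate (StgClosure *p);
 */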
void
GarbageCollect ( rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, copied = 0, scavd_copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s, i;
  int gen, st;
  rtsBool flag;
  CostCentreStack *prev_CCS;

  debugTrace(DEBUG_gc, "starting GC");

#if defined(RTS_USER_SIGNALS)
  if (RtsFlags.MiscFlags.install_signal_handlers) {
      // block signals
      blockUserSignals();
  }
#endif

  // tell the STM to discard any cached closures it's hoping to re-use

  // tell the stats department that we've started a GC

  // check for memory leaks if DEBUG is on

  // attribute any costs to CCS_GC

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
      N = RtsFlags.GcFlags.generations - 1;
  } else {
      N = 0;
      for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
          if (generations[g].steps[0].n_blocks +
              generations[g].steps[0].n_large_blocks
              >= generations[g].max_blocks) {
              N = g;
          }
      }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  new_scavd_blocks = 0;

  // Initialise to-space in all the generations/steps that we're
  // collecting.
  for (g = 0; g <= N; g++) {

      // throw away the mutable list.  Invariant: the mutable list
      // always has at least one block; this means we can avoid a check for
      // NULL in recordMutable().
      freeChain(generations[g].mut_list);
      generations[g].mut_list = allocBlock();
      for (i = 0; i < n_capabilities; i++) {
          freeChain(capabilities[i].mut_lists[g]);
          capabilities[i].mut_lists[g] = allocBlock();
      }
      for (s = 0; s < generations[g].n_steps; s++) {

          // generation 0, step 0 doesn't need to-space
          if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
              continue;
          }

          stp = &generations[g].steps[s];
          ASSERT(stp->gen_no == g);

          // start a new to-space for this step.
          stp->old_blocks   = stp->blocks;
          stp->n_old_blocks = stp->n_blocks;

          // allocate the first to-space block; extra blocks will be
          // chained on as necessary.
          bd = gc_alloc_block(stp);
          stp->scan_bd = bd;
          stp->scan = bd->start;

          // allocate a block for "already scavenged" objects.  This goes
          // on the front of the stp->blocks list, so it won't be
          // traversed by the scavenging sweep.
          gc_alloc_scavd_block(stp);

          // initialise the large object queues.
          stp->new_large_objects = NULL;
          stp->scavenged_large_objects = NULL;
          stp->n_scavenged_large_blocks = 0;

          // mark the large objects as not evacuated yet
          for (bd = stp->large_objects; bd; bd = bd->link) {
              bd->flags &= ~BF_EVACUATED;
          }

          // for a compacted step, we need to allocate the bitmap
          if (stp->is_compacted) {
              nat bitmap_size; // in bytes
              bdescr *bitmap_bdescr;
              StgPtr bitmap;

              bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
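              // One mark bit per word of from-space.  Illustrative
              // arithmetic: with 4096-byte blocks and 4-byte words, a
              // block holds 1024 words, so its slice of the bitmap is
              // 1024 bits = 4096/(4*8) = 128 bytes.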
              if (bitmap_size > 0) {
                  bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
                                             / BLOCK_SIZE);
                  stp->bitmap = bitmap_bdescr;
                  bitmap = bitmap_bdescr->start;

                  debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
                             bitmap_size, bitmap);

                  // don't forget to fill it with zeros!
                  memset(bitmap, 0, bitmap_size);

                  // For each block in this step, point to its bitmap from the
                  // block descriptor.
                  for (bd = stp->old_blocks; bd != NULL; bd = bd->link) {
                      bd->u.bitmap = bitmap;
                      bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);

                      // Also at this point we set the BF_COMPACTED flag
                      // for this block.  The invariant is that
                      // BF_COMPACTED is always unset, except during GC
                      // when it is set on those blocks which will be
                      // compacted.
                      bd->flags |= BF_COMPACTED;
                  }
              }
          }
      }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->hp_bd == NULL) {
              ASSERT(stp->blocks == NULL);
              bd = gc_alloc_block(stp);
          }
          if (stp->scavd_hp == NULL) {
              gc_alloc_scavd_block(stp);
          }

          /* Set the scan pointer for older generations: remember we
           * still have to scavenge objects that have been promoted. */
          stp->scan = stp->hp;
          stp->scan_bd = stp->hp_bd;
          stp->new_large_objects = NULL;
          stp->scavenged_large_objects = NULL;
          stp->n_scavenged_large_blocks = 0;
      }

      /* Move the private mutable lists from each capability onto the
       * main mutable list for the generation.
       */
      for (i = 0; i < n_capabilities; i++) {
          for (bd = capabilities[i].mut_lists[g];
               bd->link != NULL; bd = bd->link) {
              /* nothing: just walk to the last block */
          }
          bd->link = generations[g].mut_list;
          generations[g].mut_list = capabilities[i].mut_lists[g];
          capabilities[i].mut_lists[g] = allocBlock();
      }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }

  eager_promotion = rtsTrue; // for now
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   *     we want to *scavenge* these roots, not evacuate them: they're not
   *     going to move in this GC.
   *     Also: do them in reverse generation order.  This is because we
   *     often want to promote objects that are pointed to by older
   *     generations early, so we don't have to repeatedly copy them.
   *     Doing the generations in reverse order ensures that we don't end
   *     up in the situation where we want to evac an object to gen 3 and
   *     it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = allocBlock();
      // mut_list always has at least one block.
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      scavenge_mutable_list(&generations[g]);
      for (st = generations[g].n_steps-1; st >= 0; st--) {
          scavenge(&generations[g].steps[st]);
      }
  }

  /* follow roots from the CAF list (used by GHCi)
   */
  markCAFs(mark_root);

  /* follow all the roots that the application knows about.
   */

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;

  // scavenge static objects
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
      IF_DEBUG(sanity, checkStaticObjects(static_objects));
      scavenge_static();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  // scavenge each step in generations 0..maxgen
 loop2:
  // scavenge objects in compacted generation
  if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
      (mark_stack_bdescr != NULL && !mark_stack_empty())) {
      scavenge_mark_stack();
  }

  for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
      for (st = generations[gen].n_steps; --st >= 0; ) {
          if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
              continue;
          }
          stp = &generations[gen].steps[st];
          if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
              scavenge(stp);
              flag = rtsTrue;
              goto loop2;
          }
          if (stp->new_large_objects != NULL) {
              scavenge_large(stp);
              flag = rtsTrue;
              goto loop2;
          }
      }
  }

  // if any blackholes are alive, make the threads that wait on
  // them alive too.
  if (traverseBlackholeQueue())
      flag = rtsTrue;

  if (flag) { goto loop; }

  // must be last...  invariant is that everything is fully
  // scavenged at this point.
  if (traverseWeakPtrList()) { // returns rtsTrue if evaced something
      goto loop;
  }
  /* Update the pointers from the task list - these are
   * treated as weak pointers because we want to allow a main thread
   * to get a BlockedOnDeadMVar exception in the same way as any other
   * thread.  Note that the threads should all have been retained by
   * GC by virtue of being on the all_threads list; we're just
   * updating pointers here.
   */
  {
      Task *task;
      StgTSO *tso;

      for (task = all_tasks; task != NULL; task = task->all_link) {
          if (!task->stopped && task->tso) {
              ASSERT(task->tso->bound == task);
              tso = (StgTSO *) isAlive((StgClosure *)task->tso);
              if (tso == NULL) {
                  barf("task %p: main thread %d has been GC'd",
                       task, task->tso->id);
              }
              task->tso = tso;
          }
      }
  }
  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
              ASSERT(Bdescr(stp->hp) == stp->hp_bd);
              stp->hp_bd->free = stp->hp;
              Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
          }
      }
  }

#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
      || RtsFlags.ProfFlags.bioSelector != NULL)
      LdvCensusForDead(N);
#endif
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
      compact();
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
        generations[g].collections++; // for stats
    }

    // Count the mutable list as bytes "copied" for the purposes of
    // stats.  Every mutable list is copied during every GC.
    {
        nat mut_list_size = 0;
        for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
            mut_list_size += bd->free - bd->start;
        }
        copied += mut_list_size;

        debugTrace(DEBUG_gc,
                   "mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d others)",
                   (unsigned long)(mut_list_size * sizeof(W_)),
                   mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS);
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
          // stats information: how much we copied
          if (g <= N) {
              copied -= stp->hp_bd->start + BLOCK_SIZE_W -
                  stp->hp_bd->free;
              scavd_copied -= stp->scavd_hpLim - stp->scavd_hp;
          }
      }
      // for generations we collected...
      if (g <= N) {

          /* free old memory and shift to-space into from-space for all
           * the collected steps (except the allocation area).  These
           * freed blocks will probably be quickly recycled.
           */
          if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
              if (stp->is_compacted) {
                  // for a compacted step, just shift the new to-space
                  // onto the front of the now-compacted existing blocks.
                  for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                      bd->flags &= ~BF_EVACUATED;  // now from-space
                  }
                  // tack the new blocks on the end of the existing blocks
                  if (stp->old_blocks != NULL) {
                      for (bd = stp->old_blocks; bd != NULL; bd = next) {
                          // NB. this step might not be compacted next
                          // time, so reset the BF_COMPACTED flags.
                          // They are set before GC if we're going to
                          // compact.  (search for BF_COMPACTED above)
                          bd->flags &= ~BF_COMPACTED;
                          next = bd->link;
                          if (next == NULL) {
                              bd->link = stp->blocks;
                          }
                      }
                      stp->blocks = stp->old_blocks;
                  }
                  // add the new blocks to the block tally
                  stp->n_blocks += stp->n_old_blocks;
                  ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
              } else { // not compacted
                  freeChain(stp->old_blocks);
                  for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                      bd->flags &= ~BF_EVACUATED;  // now from-space
                  }
              }
              stp->old_blocks = NULL;
              stp->n_old_blocks = 0;
          }
          /* LARGE OBJECTS.  The current live large objects are chained on
           * scavenged_large_objects, having been moved during garbage
           * collection from large_objects.  Any objects left on the
           * large_objects list are therefore dead, so we free them here.
           */
          for (bd = stp->large_objects; bd != NULL; bd = next) {
              next = bd->link;
              freeGroup(bd);
          }

          // update the count of blocks used by large objects
          for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
              bd->flags &= ~BF_EVACUATED;
          }
          stp->large_objects = stp->scavenged_large_objects;
          stp->n_large_blocks = stp->n_scavenged_large_blocks;

      } else { // for older generations...

          /* For older generations, we need to append the
           * scavenged_large_object list (i.e. large objects that have been
           * promoted during this GC) to the large_object list for that step.
           */
          for (bd = stp->scavenged_large_objects; bd; bd = next) {
              next = bd->link;
              bd->flags &= ~BF_EVACUATED;
              dbl_link_onto(bd, &stp->large_objects);
          }

          // add the new blocks we promoted during this GC
          stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
             oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
                     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
                          RtsFlags.GcFlags.minAllocAreaSize);

      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.generations > 1 &&
          (RtsFlags.GcFlags.compact ||
           (max > 0 &&
            oldest_gen->steps[0].n_blocks >
            (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
          oldest_gen->steps[0].is_compacted = 1;
          // debugBelch("compaction: on\n", live);
      } else {
          oldest_gen->steps[0].is_compacted = 0;
          // debugBelch("compaction: off\n", live);
      }

      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {

          // this test is necessary to ensure that the calculations
          // below don't have any negative results - we're working
          // with unsigned values here.
          if (max < min_alloc) {
              heapOverflow();
          }

          if (oldest_gen->steps[0].is_compacted) {
              if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
              }
          } else {
              if ( (size * (gens - 1) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2);
              }
          }
      }

      debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
                 min_alloc, size, max);
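
      /* Worked example (illustrative numbers): with gens = 2, max = 1000
       * blocks and min_alloc = 100 blocks, a copying collection needs
       * to-space for the oldest generation, so the constraint is
       * size*(gens-1)*2 + min_alloc <= max, giving size <= (1000-100)/2
       * = 450 blocks.  With compaction there is no to-space copy of the
       * oldest generation, so size + (size-1)*(gens-2)*2 + min_alloc <=
       * max allows size up to 900 blocks.
       */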
      for (g = 0; g < gens; g++) {
          generations[g].max_blocks = size;
      }
  }

  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (RtsFlags.GcFlags.generations > 1) {
      if (g0s0->blocks != NULL) {
          freeChain(g0s0->blocks);
          g0s0->blocks = NULL;
      }
      g0s0->n_blocks = 0;
  }
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  // Free any bitmaps.
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->bitmap != NULL) {
              freeGroup(stp->bitmap);
              stp->bitmap = NULL;
          }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      nat blocks;

      /* For a two-space collector, we need to resize the nursery. */

      /* set up a new nursery.  Allocate a nursery size based on a
       * function of the amount of live data (by default a factor of 2).
       * Use the blocks from the old nursery if possible, freeing up any
       * left over blocks.
       *
       * If we get near the maximum heap size, then adjust our nursery
       * size accordingly.  If the nursery is the same size as the live
       * data (L), then we need 3L bytes.  We can reduce the size of the
       * nursery to bring the required memory down near 2L bytes.
       *
       * A normal 2-space collector would need 4L bytes to give the same
       * performance we get from 3L bytes, reducing to the same
       * performance at 2L bytes.
       */
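
      /* Illustrative arithmetic: with L bytes of live data we need L
       * (the data itself) + L (the to-space it is copied into during
       * GC) + L (a nursery of the same size to allocate into) = 3L.
       * Shrinking the nursery towards its minimum brings the
       * requirement down towards 2L.
       */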
      blocks = g0s0->n_blocks;

      if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
           blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
           RtsFlags.GcFlags.maxHeapSize ) {
          long adjusted_blocks;  // signed on purpose
          int pc_free;

          adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
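          // That is: reserve 2*blocks for the current data plus the
          // to-space it will be copied into, and give the nursery
          // whatever remains of the heap budget.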
          debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
                     RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);

          pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
          if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
              heapOverflow();
          }
          blocks = adjusted_blocks;
      } else {
          blocks *= RtsFlags.GcFlags.oldGenFactor;
          if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }
      }
      resizeNurseries(blocks);
  } else {
      /* Generational collector:
       * If the user has given us a suggested heap size, adjust our
       * allocation area to make best use of the memory available.
       */
      if (RtsFlags.GcFlags.heapSizeSuggestion) {
          long blocks;
          nat needed = calcNeeded(); // approx blocks needed at next GC

          /* Guess how much will be live in generation 0 step 0 next time.
           * A good approximation is obtained by finding the
           * percentage of g0s0 that was live at the last minor GC.
           */
          if (N == 0) {
              g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
          }

          /* Estimate a size for the allocation area based on the
           * information available.  We might end up going slightly under
           * or over the suggested heap size, but we should be pretty
           * close on average.
           *
           * Formula:            suggested - needed
           *                ----------------------------
           *                    1 + g0s0_pcnt_kept/100
           *
           * where 'needed' is the amount of memory needed at the next
           * collection for collecting all steps except g0s0.  The
           * denominator accounts for the fraction of the new allocation
           * area that we expect to survive the next minor GC and hence
           * still be occupying space.
           */
          blocks =
              (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
              (100 + (long)g0s0_pcnt_kept);

          if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }

          resizeNurseries((nat)blocks);
      } else {
          // we might have added extra large blocks to the nursery, so
          // resize back to minAllocAreaSize again.
          resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
      }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing the scavenged static object list below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
      zero_static_object_list(scavenged_static_objects);
  }

  // start any pending finalizers
  scheduleFinalizers(last_free_capability, old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */

  // restore enclosing cost centre

  // check for memory leaks if DEBUG is on

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, live, copied, scavd_copied, N);

#if defined(RTS_USER_SIGNALS)
  if (RtsFlags.MiscFlags.install_signal_handlers) {
      // unblock signals again
      unblockUserSignals();
  }
#endif
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
         It untags and (if needed) retags pointers to closures.
   -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;
  StgWord tag;
  StgClosure *q;

 loop:
  /* The tag and the pointer are split, to be merged later when needed. */
  tag = GET_CLOSURE_TAG(p);
  q = UNTAG_CLOSURE(p);

  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
  info = get_itbl(q);

  // ignore static closures
  //
  // ToDo: for static closures, check the static link field.
  // Problem here is that we sometimes don't set the link field, eg.
  // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
  //
  if (!HEAP_ALLOCED(q)) {
      return p;
  }

  // ignore closures in generations that we're not collecting.
  bd = Bdescr((P_)q);
  if (bd->gen_no > N) {
      return p;
  }

  // if it's a pointer into to-space, then we're done
  if (bd->flags & BF_EVACUATED) {
      return p;
  }

  // large objects use the evacuated flag
  if (bd->flags & BF_LARGE) {
      return NULL;
  }

  // check the mark bit for compacted steps
  if ((bd->flags & BF_COMPACTED) && is_marked((P_)q,bd)) {
      return p;
  }

  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:      // rely on compatible layout with StgInd
  case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)q)->indirectee;
      goto loop;

  case EVACUATED:
      // alive!  Retag the forwarded address before returning it.
      return TAG_CLOSURE(tag, ((StgEvacuated *)q)->evacuee);

  case TSO:
      if (((StgTSO *)q)->what_next == ThreadRelocated) {
          p = (StgClosure *)((StgTSO *)q)->link;
          goto loop;
      }
      return NULL;

  default:
      // dead
      return NULL;
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
      info = get_itbl(p);
      link = *STATIC_LINK(info, p);
      *STATIC_LINK(info,p) = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        SET_INFO(c, c->saved_info);
        c->saved_info = NULL;
        // could, but not necessary: c->static_link = NULL;
    }
    revertible_caf_list = NULL;
}
void
markCAFs( evac_fn evac )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        evac(&c->indirectee);
    }
    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        evac(&c->indirectee);
    }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#if 0 && defined(DEBUG)

static void
gcCAFs( void )
{
  StgClosure  *p;
  StgClosure **pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {
      info = get_itbl(p);

      ASSERT(info->type == IND_STATIC);

      if (STATIC_LINK(info,p) == NULL) {
          debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%04lx", (long)p);
          // black-hole it and unlink it from the CAF list
          SET_INFO(p,&stg_BLACKHOLE_info);
          p = STATIC_LINK2(info,p);
          *pp = p;
      } else {
          pp = &STATIC_LINK2(info,p);
          p = *pp;
          i++;
      }
  }

  debugTrace(DEBUG_gccafs, "%d CAFs live", i);
}
#endif
/* -----------------------------------------------------------------------------
   Debugging
   -------------------------------------------------------------------------- */

#ifdef DEBUG
void
printMutableList(generation *gen)
{
    bdescr *bd;
    StgPtr p;

    debugBelch("mutable list %p: ", gen->mut_list);

    for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
        for (p = bd->start; p < bd->free; p++) {
            debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));
        }
    }
    debugBelch("\n");
}
#endif /* DEBUG */