1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2006
5 * Generational garbage collector
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 #include "PosixSource.h"
19 #include "OSThreads.h"
20 #include "LdvProfile.h"
25 #include "BlockAlloc.h"
31 #include "ParTicky.h" // ToDo: move into Rts.h
32 #include "RtsSignals.h"
36 #if defined(RTS_GTK_FRONTPANEL)
37 #include "FrontPanel.h"
40 #include "RetainerProfile.h"
41 #include "RaiseAsync.h"
50 #include <string.h> // for memset()
52 /* STATIC OBJECT LIST.
55 * We maintain a linked list of static objects that are still live.
56 * The requirements for this list are:
58 * - we need to scan the list while adding to it, in order to
59 * scavenge all the static objects (in the same way that
60 * breadth-first scavenging works for dynamic objects).
62 * - we need to be able to tell whether an object is already on
63 * the list, to break loops.
65 * Each static object has a "static link field", which we use for
66 * linking objects on to the list. We use a stack-type list, consing
67 * objects on the front as they are added (this means that the
68 * scavenge phase is depth-first, not breadth-first, but that shouldn't matter).
71 * A separate list is kept for objects that have been scavenged
72 * already - this is so that we can zero all the marks afterwards.
74 * An object is on the list if its static link field is non-zero; this
75 * means that we have to mark the end of the list with '1', not NULL.
77 * Extra notes for generational GC:
79 * Each generation has a static object list associated with it. When
80 * collecting generations up to N, we treat the static object lists
81 * from generations > N as roots.
83 * We build up a static object list while collecting generations 0..N,
84 * which is then appended to the static object list of generation N+1.
86 StgClosure* static_objects; // live static objects
87 StgClosure* scavenged_static_objects; // static objects scavenged so far
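/* As a minimal sketch (not part of the collector itself): adding a
 * static object to the pending list uses its static link field, and
 * the NULL check is what breaks loops, as described above.
 */
#if 0   /* illustration only */
static void add_static_object (StgClosure *p)
{
    const StgInfoTable *info = get_itbl(p);
    if (*STATIC_LINK(info, p) == NULL) {   // not already on a list
        *STATIC_LINK(info, p) = static_objects;
        static_objects = p;
    }
}
#endif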
89 /* N is the oldest generation being collected, where the generations
90 * are numbered starting at 0. A major GC (indicated by the major_gc
91 * flag) is when we're collecting all generations. We only attempt to
92 * deal with static objects and GC CAFs when doing a major GC.
97 /* Youngest generation that objects should be evacuated to in
98 * evacuate(). (Logically an argument to evacuate, but it's static
99 * a lot of the time so we optimise it into a global variable).
103 /* Whether to do eager promotion or not.
105 rtsBool eager_promotion;
107 /* Flag indicating failure to evacuate an object to the desired generation.
110 rtsBool failed_to_evac;
112 /* Saved nursery (used for 2-space collector only)
114 static bdescr *saved_nursery;
115 static nat saved_n_blocks;
117 /* Data used for allocation area sizing.
119 lnat new_blocks; // blocks allocated during this GC
120 lnat new_scavd_blocks; // ditto, but depth-first blocks
121 static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
130 /* -----------------------------------------------------------------------------
131 Static function declarations
132 -------------------------------------------------------------------------- */
134 static void mark_root ( StgClosure **root );
136 static void zero_static_object_list ( StgClosure* first_static );
138 #if 0 && defined(DEBUG)
139 static void gcCAFs ( void );
142 /* -----------------------------------------------------------------------------
143 inline functions etc. for dealing with the mark bitmap & stack.
144 -------------------------------------------------------------------------- */
146 #define MARK_STACK_BLOCKS 4
148 bdescr *mark_stack_bdescr;
153 // Flag and pointers used for falling back to a linear scan when the
154 // mark stack overflows.
155 rtsBool mark_stack_overflowed;
156 bdescr *oldgen_scan_bd;
159 /* -----------------------------------------------------------------------------
162 Rough outline of the algorithm: for garbage collecting generation N
163 (and all younger generations):
165 - follow all pointers in the root set. the root set includes all
166 mutable objects in all generations (mutable_list).
168 - for each pointer, evacuate the object it points to into either
170 + to-space of the step given by step->to, which is the next
171 highest step in this generation or the first step in the next
172 generation if this is the last step.
174 + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
175 When we evacuate an object we attempt to evacuate
176 everything it points to into the same generation - this is
177 achieved by setting evac_gen to the desired generation. If
178 we can't do this, then an entry in the mut list has to
179 be made for the cross-generation pointer.
181 + if the object is already in a generation > N, then leave
184 - repeatedly scavenge to-space from each step in each generation
185 being collected until no more objects can be evacuated.
187 - free from-space in each step, and set from-space = to-space.
189 Locks held: all capabilities are held throughout GarbageCollect().
191 -------------------------------------------------------------------------- */
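/* A compressed sketch of the phases implemented below (weak pointers,
 * CAFs and compaction are omitted here; see the code for the details):
 *
 *     initialise to-space for every step being collected;
 *     scavenge the mutable lists of generations > N;        // roots
 *     evacuate the remaining roots (stacks, stable ptrs, CAFs);
 *     do {
 *         scavenge static objects, the mark stack, and the
 *         to-space of every step;
 *     } while (something was evacuated);
 *     free from-space in each collected step; to-space becomes
 *     the new contents of the step.
 */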
194 GarbageCollect ( rtsBool force_major_gc )
198 lnat live, allocated, copied = 0, scavd_copied = 0;
199 lnat oldgen_saved_blocks = 0;
205 CostCentreStack *prev_CCS;
208 debugTrace(DEBUG_gc, "starting GC");
210 #if defined(RTS_USER_SIGNALS)
215 // tell the STM to discard any cached closures it's hoping to re-use
218 // tell the stats department that we've started a GC
222 // check for memory leaks if DEBUG is on
232 // attribute any costs to CCS_GC
238 /* Approximate how much we allocated.
239 * Todo: only when generating stats?
241 allocated = calcAllocated();
243 /* Figure out which generation to collect
245 if (force_major_gc) {
246 N = RtsFlags.GcFlags.generations - 1;
250 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
251 if (generations[g].steps[0].n_blocks +
252 generations[g].steps[0].n_large_blocks
253 >= generations[g].max_blocks) {
257 major_gc = (N == RtsFlags.GcFlags.generations-1);
260 #ifdef RTS_GTK_FRONTPANEL
261 if (RtsFlags.GcFlags.frontpanel) {
262 updateFrontPanelBeforeGC(N);
266 // check stack sanity *before* GC (ToDo: check all threads)
267 IF_DEBUG(sanity, checkFreeListSanity());
269 /* Initialise the static object lists
271 static_objects = END_OF_STATIC_LIST;
272 scavenged_static_objects = END_OF_STATIC_LIST;
274 /* Save the nursery if we're doing a two-space collection.
275 * g0s0->blocks will be used for to-space, so we need to get the
276 * nursery out of the way.
278 if (RtsFlags.GcFlags.generations == 1) {
279 saved_nursery = g0s0->blocks;
280 saved_n_blocks = g0s0->n_blocks;
285 /* Keep a count of how many new blocks we allocated during this GC
286 * (used for resizing the allocation area, later).
289 new_scavd_blocks = 0;
291 // Initialise to-space in all the generations/steps that we're collecting.
294 for (g = 0; g <= N; g++) {
296 // throw away the mutable list. Invariant: the mutable list
297 // always has at least one block; this means we can avoid a check for
298 // NULL in recordMutable().
300 freeChain(generations[g].mut_list);
301 generations[g].mut_list = allocBlock();
302 for (i = 0; i < n_capabilities; i++) {
303 freeChain(capabilities[i].mut_lists[g]);
304 capabilities[i].mut_lists[g] = allocBlock();
308 for (s = 0; s < generations[g].n_steps; s++) {
310 // generation 0, step 0 doesn't need to-space
311 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
315 stp = &generations[g].steps[s];
316 ASSERT(stp->gen_no == g);
318 // start a new to-space for this step.
319 stp->old_blocks = stp->blocks;
320 stp->n_old_blocks = stp->n_blocks;
322 // allocate the first to-space block; extra blocks will be
323 // chained on as necessary.
325 bd = gc_alloc_block(stp);
328 stp->scan = bd->start;
331 // allocate a block for "already scavenged" objects. This goes
332 // on the front of the stp->blocks list, so it won't be
333 // traversed by the scavenging sweep.
334 gc_alloc_scavd_block(stp);
336 // initialise the large object queues.
337 stp->new_large_objects = NULL;
338 stp->scavenged_large_objects = NULL;
339 stp->n_scavenged_large_blocks = 0;
341 // mark the large objects as not evacuated yet
342 for (bd = stp->large_objects; bd; bd = bd->link) {
343 bd->flags &= ~BF_EVACUATED;
346 // for a compacted step, we need to allocate the bitmap
347 if (stp->is_compacted) {
348 nat bitmap_size; // in bytes
349 bdescr *bitmap_bdescr;
352 bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
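// Worked example (assuming the usual 4096-byte blocks and 8-byte words,
// i.e. one bitmap bit per word of old space): each old block holds 512
// words, so it needs 512 bits = 64 bytes of bitmap; a step with 1000
// old blocks therefore needs about 64000 bytes of bitmap, which rounds
// up to 16 blocks.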
354 if (bitmap_size > 0) {
355 bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
357 stp->bitmap = bitmap_bdescr;
358 bitmap = bitmap_bdescr->start;
360 debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
361 bitmap_size, bitmap);
363 // don't forget to fill it with zeros!
364 memset(bitmap, 0, bitmap_size);
366 // For each block in this step, point to its bitmap from the block descriptor.
368 for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
369 bd->u.bitmap = bitmap;
370 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
372 // Also at this point we set the BF_COMPACTED flag
373 // for this block. The invariant is that
374 // BF_COMPACTED is always unset, except during GC
375 // when it is set on those blocks which will be compacted.
377 bd->flags |= BF_COMPACTED;
384 /* make sure the older generations have at least one block to
385 * allocate into (this makes things easier for copy(), see below).
387 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
388 for (s = 0; s < generations[g].n_steps; s++) {
389 stp = &generations[g].steps[s];
390 if (stp->hp_bd == NULL) {
391 ASSERT(stp->blocks == NULL);
392 bd = gc_alloc_block(stp);
396 if (stp->scavd_hp == NULL) {
397 gc_alloc_scavd_block(stp);
400 /* Set the scan pointer for older generations: remember we
401 * still have to scavenge objects that have been promoted. */
403 stp->scan_bd = stp->hp_bd;
404 stp->new_large_objects = NULL;
405 stp->scavenged_large_objects = NULL;
406 stp->n_scavenged_large_blocks = 0;
409 /* Move the private mutable lists from each capability onto the
410 * main mutable list for the generation.
412 for (i = 0; i < n_capabilities; i++) {
413 for (bd = capabilities[i].mut_lists[g];
414 bd->link != NULL; bd = bd->link) {
417 bd->link = generations[g].mut_list;
418 generations[g].mut_list = capabilities[i].mut_lists[g];
419 capabilities[i].mut_lists[g] = allocBlock();
423 /* Allocate a mark stack if we're doing a major collection.
426 mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
427 mark_stack = (StgPtr *)mark_stack_bdescr->start;
428 mark_sp = mark_stack;
429 mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
431 mark_stack_bdescr = NULL;
434 eager_promotion = rtsTrue; // for now
436 /* -----------------------------------------------------------------------
437 * follow all the roots that we know about:
438 * - mutable lists from each generation > N
439 * we want to *scavenge* these roots, not evacuate them: they're not
440 * going to move in this GC.
441 * Also: do them in reverse generation order. This is because we
442 * often want to promote objects that are pointed to by older
443 * generations early, so we don't have to repeatedly copy them.
444 * Doing the generations in reverse order ensures that we don't end
445 * up in the situation where we want to evac an object to gen 3 and
446 * it has already been evaced to gen 2.
450 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
451 generations[g].saved_mut_list = generations[g].mut_list;
452 generations[g].mut_list = allocBlock();
453 // mut_list always has at least one block.
456 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
457 scavenge_mutable_list(&generations[g]);
459 for (st = generations[g].n_steps-1; st >= 0; st--) {
460 scavenge(&generations[g].steps[st]);
465 /* follow roots from the CAF list (used by GHCi)
470 /* follow all the roots that the application knows about.
475 /* Mark the weak pointer list, and prepare to detect dead weak pointers.
481 /* Mark the stable pointer table.
483 markStablePtrTable(mark_root);
485 /* -------------------------------------------------------------------------
486 * Repeatedly scavenge all the areas we know about until there's no
487 * more scavenging to be done.
494 // scavenge static objects
495 if (major_gc && static_objects != END_OF_STATIC_LIST) {
496 IF_DEBUG(sanity, checkStaticObjects(static_objects));
500 /* When scavenging the older generations: Objects may have been
501 * evacuated from generations <= N into older generations, and we
502 * need to scavenge these objects. We're going to try to ensure that
503 * any evacuations that occur move the objects into at least the
504 * same generation as the object being scavenged, otherwise we
505 * have to create new entries on the mutable list for the older generation.
509 // scavenge each step in generations 0..maxgen
515 // scavenge objects in compacted generation
516 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
517 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
518 scavenge_mark_stack();
522 for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
523 for (st = generations[gen].n_steps; --st >= 0; ) {
524 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
527 stp = &generations[gen].steps[st];
529 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
534 if (stp->new_large_objects != NULL) {
543 // if any blackholes are alive, make the threads that wait on them alive too.
545 if (traverseBlackholeQueue())
548 if (flag) { goto loop; }
550 // must be last... invariant is that everything is fully
551 // scavenged at this point.
552 if (traverseWeakPtrList()) { // returns rtsTrue if evaced something
557 /* Update the pointers from the task list - these are
558 * treated as weak pointers because we want to allow a main thread
559 * to get a BlockedOnDeadMVar exception in the same way as any other
560 * thread. Note that the threads should all have been retained by
561 * GC by virtue of being on the all_threads list; we're just
562 * updating pointers here.
567 for (task = all_tasks; task != NULL; task = task->all_link) {
568 if (!task->stopped && task->tso) {
569 ASSERT(task->tso->bound == task);
570 tso = (StgTSO *) isAlive((StgClosure *)task->tso);
572 barf("task %p: main thread %d has been GC'd",
585 // Now see which stable names are still alive.
588 // Tidy the end of the to-space chains
589 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
590 for (s = 0; s < generations[g].n_steps; s++) {
591 stp = &generations[g].steps[s];
592 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
593 ASSERT(Bdescr(stp->hp) == stp->hp_bd);
594 stp->hp_bd->free = stp->hp;
595 Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
601 // We call processHeapClosureForDead() on every closure destroyed during
602 // the current garbage collection, so we invoke LdvCensusForDead().
603 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
604 || RtsFlags.ProfFlags.bioSelector != NULL)
608 // NO MORE EVACUATION AFTER THIS POINT!
609 // Finally: compaction of the oldest generation.
610 if (major_gc && oldest_gen->steps[0].is_compacted) {
611 // save number of blocks for stats
612 oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
616 IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
618 /* run through all the generations/steps and tidy up
620 copied = new_blocks * BLOCK_SIZE_W;
621 scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
622 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
625 generations[g].collections++; // for stats
628 // Count the mutable list as bytes "copied" for the purposes of
629 // stats. Every mutable list is copied during every GC.
631 nat mut_list_size = 0;
632 for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
633 mut_list_size += bd->free - bd->start;
635 copied += mut_list_size;
638 "mut_list_size: %lu (%d vars, %d arrays, %d others)",
639 (unsigned long)(mut_list_size * sizeof(W_)),
640 mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
643 for (s = 0; s < generations[g].n_steps; s++) {
645 stp = &generations[g].steps[s];
647 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
648 // stats information: how much we copied
650 copied -= stp->hp_bd->start + BLOCK_SIZE_W -
652 scavd_copied -= stp->scavd_hpLim - stp->scavd_hp;
656 // for generations we collected...
659 /* free old memory and shift to-space into from-space for all
660 * the collected steps (except the allocation area). These
661 * freed blocks will probably be quickly recycled.
663 if (!(g == 0 && s == 0)) {
664 if (stp->is_compacted) {
665 // for a compacted step, just shift the new to-space
666 // onto the front of the now-compacted existing blocks.
667 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
668 bd->flags &= ~BF_EVACUATED; // now from-space
670 // tack the new blocks on the end of the existing blocks
671 if (stp->old_blocks != NULL) {
672 for (bd = stp->old_blocks; bd != NULL; bd = next) {
673 // NB. this step might not be compacted next
674 // time, so reset the BF_COMPACTED flags.
675 // They are set before GC if we're going to
676 // compact. (search for BF_COMPACTED above).
677 bd->flags &= ~BF_COMPACTED;
680 bd->link = stp->blocks;
683 stp->blocks = stp->old_blocks;
685 // add the new blocks to the block tally
686 stp->n_blocks += stp->n_old_blocks;
687 ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
689 freeChain(stp->old_blocks);
690 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
691 bd->flags &= ~BF_EVACUATED; // now from-space
694 stp->old_blocks = NULL;
695 stp->n_old_blocks = 0;
698 /* LARGE OBJECTS. The current live large objects are chained on
699 * scavenged_large, having been moved during garbage
700 * collection from large_objects. Any objects left on
701 * the large_objects list are therefore dead, so we free them here.
703 for (bd = stp->large_objects; bd != NULL; bd = next) {
709 // update the count of blocks used by large objects
710 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
711 bd->flags &= ~BF_EVACUATED;
713 stp->large_objects = stp->scavenged_large_objects;
714 stp->n_large_blocks = stp->n_scavenged_large_blocks;
717 // for older generations...
719 /* For older generations, we need to append the
720 * scavenged_large_object list (i.e. large objects that have been
721 * promoted during this GC) to the large_object list for that step.
723 for (bd = stp->scavenged_large_objects; bd; bd = next) {
725 bd->flags &= ~BF_EVACUATED;
726 dbl_link_onto(bd, &stp->large_objects);
729 // add the new blocks we promoted during this GC
730 stp->n_large_blocks += stp->n_scavenged_large_blocks;
735 /* Reset the sizes of the older generations when we do a major collection.
738 * CURRENT STRATEGY: make all generations except zero the same size.
739 * We have to stay within the maximum heap size, and leave a certain
740 * percentage of the maximum heap size available to allocate into.
742 if (major_gc && RtsFlags.GcFlags.generations > 1) {
743 nat live, size, min_alloc;
744 nat max = RtsFlags.GcFlags.maxHeapSize;
745 nat gens = RtsFlags.GcFlags.generations;
747 // live in the oldest generations
748 live = oldest_gen->steps[0].n_blocks +
749 oldest_gen->steps[0].n_large_blocks;
751 // default max size for all generations except zero
752 size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
753 RtsFlags.GcFlags.minOldGenSize);
755 // minimum size for generation zero
756 min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
757 RtsFlags.GcFlags.minAllocAreaSize);
759 // Auto-enable compaction when the residency reaches a
760 // certain percentage of the maximum heap size (default: 30%).
761 if (RtsFlags.GcFlags.generations > 1 &&
762 (RtsFlags.GcFlags.compact ||
764 oldest_gen->steps[0].n_blocks >
765 (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
766 oldest_gen->steps[0].is_compacted = 1;
767 // debugBelch("compaction: on\n", live);
769 oldest_gen->steps[0].is_compacted = 0;
770 // debugBelch("compaction: off\n", live);
773 // if we're going to go over the maximum heap size, reduce the
774 // size of the generations accordingly. The calculation is
775 // different if compaction is turned on, because we don't need
776 // to double the space required to collect the old generation.
779 // this test is necessary to ensure that the calculations
780 // below don't have any negative results - we're working
781 // with unsigned values here.
782 if (max < min_alloc) {
786 if (oldest_gen->steps[0].is_compacted) {
787 if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
788 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
791 if ( (size * (gens - 1) * 2) + min_alloc > max ) {
792 size = (max - min_alloc) / ((gens - 1) * 2);
802 debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
803 min_alloc, size, max);
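// Worked example (illustrative numbers only): with 3 generations, no
// compaction, max = 1000 blocks and min_alloc = 100 blocks, the
// constraint is size * (gens-1) * 2 + min_alloc <= max, i.e.
// size * 4 + 100 <= 1000, giving size = 225 blocks for each of the
// older generations.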
806 for (g = 0; g < gens; g++) {
807 generations[g].max_blocks = size;
811 // Guess the amount of live data for stats.
814 /* Free the small objects allocated via allocate(), since this will
815 * all have been copied into G0S1 now.
817 if (small_alloc_list != NULL) {
818 freeChain(small_alloc_list);
820 small_alloc_list = NULL;
824 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
826 // Start a new pinned_object_block
827 pinned_object_block = NULL;
829 /* Free the mark stack.
831 if (mark_stack_bdescr != NULL) {
832 freeGroup(mark_stack_bdescr);
837 for (g = 0; g <= N; g++) {
838 for (s = 0; s < generations[g].n_steps; s++) {
839 stp = &generations[g].steps[s];
840 if (stp->bitmap != NULL) {
841 freeGroup(stp->bitmap);
847 /* Two-space collector:
848 * Free the old to-space, and estimate the amount of live data.
850 if (RtsFlags.GcFlags.generations == 1) {
853 if (g0s0->old_blocks != NULL) {
854 freeChain(g0s0->old_blocks);
856 for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
857 bd->flags = 0; // now from-space
859 g0s0->old_blocks = g0s0->blocks;
860 g0s0->n_old_blocks = g0s0->n_blocks;
861 g0s0->blocks = saved_nursery;
862 g0s0->n_blocks = saved_n_blocks;
864 /* For a two-space collector, we need to resize the nursery. */
866 /* set up a new nursery. Allocate a nursery size based on a
867 * function of the amount of live data (by default a factor of 2).
868 * Use the blocks from the old nursery if possible, freeing up any left over blocks.
871 * If we get near the maximum heap size, then adjust our nursery
872 * size accordingly. If the nursery is the same size as the live
873 * data (L), then we need 3L bytes. We can reduce the size of the
874 * nursery to bring the required memory down near 2L bytes.
876 * A normal 2-space collector would need 4L bytes to give the same
877 * performance we get from 3L bytes, reducing to the same
878 * performance at 2L bytes.
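/* Illustrative numbers: if the live data L is 100 blocks and the
 * nursery is also 100 blocks, a collection needs the nursery plus
 * from-space plus to-space, roughly 3L = 300 blocks; shrinking the
 * nursery (as done below when we approach maxHeapSize) brings the
 * requirement down towards 2L.
 */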
880 blocks = g0s0->n_old_blocks;
882 if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
883 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
884 RtsFlags.GcFlags.maxHeapSize ) {
885 long adjusted_blocks; // signed on purpose
888 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
890 debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
891 RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);
893 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
894 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
897 blocks = adjusted_blocks;
900 blocks *= RtsFlags.GcFlags.oldGenFactor;
901 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
902 blocks = RtsFlags.GcFlags.minAllocAreaSize;
905 resizeNurseries(blocks);
908 /* Generational collector:
909 * If the user has given us a suggested heap size, adjust our
910 * allocation area to make best use of the memory available.
913 if (RtsFlags.GcFlags.heapSizeSuggestion) {
915 nat needed = calcNeeded(); // approx blocks needed at next GC
917 /* Guess how much will be live in generation 0 step 0 next time.
918 * A good approximation is obtained by finding the
919 * percentage of g0s0 that was live at the last minor GC.
922 g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
925 /* Estimate a size for the allocation area based on the
926 * information available. We might end up going slightly under
927 * or over the suggested heap size, but we should be pretty close on average.
930 * Formula: suggested - needed
931 * ----------------------------
932 * 1 + g0s0_pcnt_kept/100
934 * where 'needed' is the amount of memory needed at the next
935 * collection for collecting all steps except g0s0.
938 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
939 (100 + (long)g0s0_pcnt_kept);
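// For example (illustrative numbers): with heapSizeSuggestion = 4096
// blocks, needed = 1096 blocks and g0s0_pcnt_kept = 50, this gives
// blocks = (4096 - 1096) * 100 / 150 = 2000; the nursery (2000) plus
// the data it keeps live (~50% = 1000) plus 'needed' (1096) then comes
// to roughly the suggested size.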
941 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
942 blocks = RtsFlags.GcFlags.minAllocAreaSize;
945 resizeNurseries((nat)blocks);
948 // we might have added extra large blocks to the nursery, so
949 // resize back to minAllocAreaSize again.
950 resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
954 // mark the garbage collected CAFs as dead
955 #if 0 && defined(DEBUG) // doesn't work at the moment
956 if (major_gc) { gcCAFs(); }
960 // resetStaticObjectForRetainerProfiling() must be called before the
961 // static object list is zeroed below.
962 resetStaticObjectForRetainerProfiling();
965 // zero the scavenged static object list
967 zero_static_object_list(scavenged_static_objects);
973 // start any pending finalizers
975 scheduleFinalizers(last_free_capability, old_weak_ptr_list);
978 // send exceptions to any threads which were about to die
980 resurrectThreads(resurrected_threads);
983 // Update the stable pointer hash table.
984 updateStablePtrTable(major_gc);
986 // check sanity after GC
987 IF_DEBUG(sanity, checkSanity());
989 // extra GC trace info
990 IF_DEBUG(gc, statDescribeGens());
993 // symbol-table based profiling
994 /* heapCensus(to_blocks); */ /* ToDo */
997 // restore enclosing cost centre
1003 // check for memory leaks if DEBUG is on
1007 #ifdef RTS_GTK_FRONTPANEL
1008 if (RtsFlags.GcFlags.frontpanel) {
1009 updateFrontPanelAfterGC( N, live );
1013 // ok, GC over: tell the stats department what happened.
1014 stat_endGC(allocated, live, copied, scavd_copied, N);
1016 #if defined(RTS_USER_SIGNALS)
1017 // unblock signals again
1018 unblockUserSignals();
1024 /* -----------------------------------------------------------------------------
1025 isAlive determines whether the given closure is still alive (after
1026 a garbage collection) or not. It returns the new address of the
1027 closure if it is alive, or NULL otherwise.
1029 NOTE: Use it before compaction only!
1030 -------------------------------------------------------------------------- */
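/* Typical use (compare the task-list walk in GarbageCollect() above):
 *
 *     q = isAlive(p);
 *     if (q == NULL) {
 *         // p did not survive this collection
 *     } else {
 *         p = q;   // p may have been moved; q is its new address
 *     }
 */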
1034 isAlive(StgClosure *p)
1036 const StgInfoTable *info;
1041 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1044 // ignore static closures
1046 // ToDo: for static closures, check the static link field.
1047 // Problem here is that we sometimes don't set the link field, eg.
1048 // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1050 if (!HEAP_ALLOCED(p)) {
1054 // ignore closures in generations that we're not collecting.
1056 if (bd->gen_no > N) {
1060 // if it's a pointer into to-space, then we're done
1061 if (bd->flags & BF_EVACUATED) {
1065 // large objects use the evacuated flag
1066 if (bd->flags & BF_LARGE) {
1070 // check the mark bit for compacted steps
1071 if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
1075 switch (info->type) {
1080 case IND_OLDGEN: // rely on compatible layout with StgInd
1081 case IND_OLDGEN_PERM:
1082 // follow indirections
1083 p = ((StgInd *)p)->indirectee;
1088 return ((StgEvacuated *)p)->evacuee;
1091 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1092 p = (StgClosure *)((StgTSO *)p)->link;
1105 mark_root(StgClosure **root)
1107 *root = evacuate(*root);
1110 /* -----------------------------------------------------------------------------
1111 Initialising the static object & mutable lists
1112 -------------------------------------------------------------------------- */
1115 zero_static_object_list(StgClosure* first_static)
1119 const StgInfoTable *info;
1121 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
1123 link = *STATIC_LINK(info, p);
1124 *STATIC_LINK(info,p) = NULL;
1128 /* -----------------------------------------------------------------------------
1130 -------------------------------------------------------------------------- */
1137 for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
1138 c = (StgIndStatic *)c->static_link)
1140 SET_INFO(c, c->saved_info);
1141 c->saved_info = NULL;
1142 // could, but not necessary: c->static_link = NULL;
1144 revertible_caf_list = NULL;
1148 markCAFs( evac_fn evac )
1152 for (c = (StgIndStatic *)caf_list; c != NULL;
1153 c = (StgIndStatic *)c->static_link)
1155 evac(&c->indirectee);
1157 for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
1158 c = (StgIndStatic *)c->static_link)
1160 evac(&c->indirectee);
1164 /* -----------------------------------------------------------------------------
1165 Sanity code for CAF garbage collection.
1167 With DEBUG turned on, we manage a CAF list in addition to the SRT
1168 mechanism. After GC, we run down the CAF list and blackhole any
1169 CAFs which have been garbage collected. This means we get an error
1170 whenever the program tries to enter a garbage collected CAF.
1172 Any garbage collected CAFs are taken off the CAF list at the same time.
1174 -------------------------------------------------------------------------- */
1176 #if 0 && defined(DEBUG)
1183 const StgInfoTable *info;
1194 ASSERT(info->type == IND_STATIC);
1196 if (STATIC_LINK(info,p) == NULL) {
1197 debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%04lx", (long)p);
1199 SET_INFO(p,&stg_BLACKHOLE_info);
1200 p = STATIC_LINK2(info,p);
1204 pp = &STATIC_LINK2(info,p);
1211 debugTrace(DEBUG_gccafs, "%d CAFs live", i);
1215 /* -----------------------------------------------------------------------------
1217 * -------------------------------------------------------------------------- */
1221 printMutableList(generation *gen)
1226 debugBelch("mutable list %p: ", gen->mut_list);
1228 for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
1229 for (p = bd->start; p < bd->free; p++) {
1230 debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));