/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "OSThreads.h"
#include "LdvProfile.h"
#include "BlockAlloc.h"
#include "ParTicky.h"           // ToDo: move into Rts.h
#include "RtsSignals.h"

#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif

#include "RetainerProfile.h"
#include "RaiseAsync.h"

#include <string.h> // for memset()
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;             // live static objects
StgClosure* scavenged_static_objects;   // static objects scavenged so far
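
/* The following is an illustrative sketch, not part of the original
 * file: how an object gets onto the static object list via its static
 * link field.  STATIC_LINK and END_OF_STATIC_LIST are the real RTS
 * macros/sentinels; the helper itself is hypothetical (evacuate()
 * does this work inline).
 */
#if 0
static void push_static_object_sketch (StgClosure *p)
{
    const StgInfoTable *info = get_itbl(p);

    // membership test: anything already on a list has a non-zero
    // static link field, which is what breaks cycles.
    if (*STATIC_LINK(info, p) == NULL) {
        *STATIC_LINK(info, p) = static_objects; // cons on the front...
        static_objects = p;     // ...so the scavenge phase is depth-first
    }
    // the end of the list is marked with END_OF_STATIC_LIST ('1'),
    // never NULL, so the membership test stays a non-zero check.
}
#endif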
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
nat N;
rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
/* Whether to do eager promotion or not.
 */
rtsBool eager_promotion;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
rtsBool failed_to_evac;
/* Saved nursery (used for 2-space collector only)
 */
static bdescr *saved_nursery;
static nat saved_n_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;        // blocks allocated during this GC
lnat new_scavd_blocks;  // ditto, but depth-first blocks
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void mark_root               ( StgClosure **root );
static void zero_static_object_list ( StgClosure* first_static );

#if 0 && defined(DEBUG)
static void gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

bdescr *mark_stack_bdescr;
StgPtr *mark_stack;
StgPtr *mark_sp;
StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
rtsBool mark_stack_overflowed;
bdescr *oldgen_scan_bd;
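
/* Illustrative sketch, not part of the original file: the mark stack
 * is a bump-pointer stack living in the block group above.  The real
 * push/pop inlines live elsewhere in the RTS, so treat these names
 * and this overflow handling as assumptions.
 */
#if 0
STATIC_INLINE void push_mark_stack_sketch (StgPtr p)
{
    if (mark_sp >= mark_splim) {
        // stack full: note the overflow so the collector can fall back
        // to the linear scan (mark_stack_overflowed/oldgen_scan_bd).
        mark_stack_overflowed = rtsTrue;
        return;
    }
    *mark_sp++ = p;
}

STATIC_INLINE StgPtr pop_mark_stack_sketch (void)
{
    return *--mark_sp;
}
#endif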
/* -----------------------------------------------------------------------------
   GarbageCollect

   Rough outline of the algorithm: for garbage collecting generation N
   (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all generations (mutable_list).

     - for each pointer, evacuate the object it points to into either

       + to-space of the step given by step->to, which is the next
         highest step in this generation or the first step in the next
         generation if this is the last step.

       + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
         When we evacuate an object we attempt to evacuate
         everything it points to into the same generation - this is
         achieved by setting evac_gen to the desired generation.  If
         we can't do this, then an entry in the mut list has to
         be made for the cross-generation pointer.

       + if the object is already in a generation > N, then leave
         it alone.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   Locks held: all capabilities are held throughout GarbageCollect().

   -------------------------------------------------------------------------- */
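
/* A minimal sketch of the destination choice described above
 * (simplified pseudocode, not the RTS's actual evacuate()):
 *
 *     stp = <the object's current step>->to;
 *     if (evac_gen != 0 && stp->gen_no < evac_gen) {
 *         // the caller asked for at least evac_gen
 *         if (eager_promotion) {
 *             stp = &generations[evac_gen].steps[0];  // promote now
 *         } else {
 *             failed_to_evac = rtsTrue;  // caller records the
 *                                        // cross-generation pointer
 *                                        // on the mutable list
 *         }
 *     }
 *     <copy the closure into stp's to-space>;
 */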
void
GarbageCollect ( rtsBool force_major_gc )
{
  bdescr *bd, *next;
  step *stp;
  lnat live, allocated, copied = 0, scavd_copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s, i;
  int st, gen;
  rtsBool flag;
  Task *task;
  StgTSO *tso;
#if defined(PROFILING)
  CostCentreStack *prev_CCS;
#endif

  debugTrace(DEBUG_gc, "starting GC");
#if defined(RTS_USER_SIGNALS)
  // block signals
  blockUserSignals();
#endif

  // tell the STM to discard any cached closures it's hoping to re-use
  stmPreGCHook();

  // tell the stats department that we've started a GC
  stat_startGC();

  // check for memory leaks if DEBUG is on
#ifdef DEBUG
  memInventory();
#endif

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif
  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
          generations[g].steps[0].n_large_blocks
          >= generations[g].max_blocks) {
        N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;
  /* Save the nursery if we're doing a two-space collection.
   * g0s0->blocks will be used for to-space, so we need to get the
   * nursery out of the way.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      saved_nursery = g0s0->blocks;
      saved_n_blocks = g0s0->n_blocks;
      g0s0->blocks = NULL;
      g0s0->n_blocks = 0;
  }
  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  new_scavd_blocks = 0;

  // Initialise to-space in all the generations/steps that we're
  // collecting.
  //
  for (g = 0; g <= N; g++) {
    // throw away the mutable list.  Invariant: the mutable list
    // always has at least one block; this means we can avoid a check for
    // NULL in recordMutable().
    if (g > 0) {
        freeChain(generations[g].mut_list);
        generations[g].mut_list = allocBlock();
        for (i = 0; i < n_capabilities; i++) {
            freeChain(capabilities[i].mut_lists[g]);
            capabilities[i].mut_lists[g] = allocBlock();
        }
    }
    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);

      // start a new to-space for this step.
      stp->old_blocks   = stp->blocks;
      stp->n_old_blocks = stp->n_blocks;

      // allocate the first to-space block; extra blocks will be
      // chained on as necessary.
      bd = gc_alloc_block(stp);
      stp->scan    = bd->start;
      stp->scan_bd = bd;

      // allocate a block for "already scavenged" objects.  This goes
      // on the front of the stp->blocks list, so it won't be
      // traversed by the scavenging sweep.
      gc_alloc_scavd_block(stp);
      // initialise the large object queues.
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
        bd->flags &= ~BF_EVACUATED;
      }
      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
          nat bitmap_size; // in bytes
          bdescr *bitmap_bdescr;
          StgWord *bitmap;

          bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

          if (bitmap_size > 0) {
              bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
                                         / BLOCK_SIZE);
              stp->bitmap = bitmap_bdescr;
              bitmap = bitmap_bdescr->start;

              debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
                         bitmap_size, bitmap);

              // don't forget to fill it with zeros!
              memset(bitmap, 0, bitmap_size);

              // For each block in this step, point to its bitmap from the
              // block descriptor.
              for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
                  bd->u.bitmap = bitmap;
                  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);

                  // Also at this point we set the BF_COMPACTED flag
                  // for this block.  The invariant is that
                  // BF_COMPACTED is always unset, except during GC
                  // when it is set on those blocks which will be
                  // compacted.
                  bd->flags |= BF_COMPACTED;
              }
          }
      }
    }
  }
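
  /* Illustrative sketch (assumption: mirrors the compacting
   * collector's is_marked(), simplified): with one mark bit per word,
   * each block's slice of the bitmap is
   * BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE) words, matching the
   * stride above.  Testing a mark bit then looks like:
   *
   *   nat offset   = p - bd->start;   // word offset within the block
   *   StgPtr word  = (StgPtr)bd->u.bitmap
   *                    + (offset / (sizeof(W_)*BITS_PER_BYTE));
   *   StgWord mask = (StgWord)1
   *                    << (offset & (sizeof(W_)*BITS_PER_BYTE - 1));
   *   marked       = (*word & mask) != 0;
   */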
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
          ASSERT(stp->blocks == NULL);
          bd = gc_alloc_block(stp);
      }
      if (stp->scavd_hp == NULL) {
          gc_alloc_scavd_block(stp);
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
    /* Move the private mutable lists from each capability onto the
     * main mutable list for the generation.
     */
    for (i = 0; i < n_capabilities; i++) {
        for (bd = capabilities[i].mut_lists[g];
             bd->link != NULL; bd = bd->link) {
            /* nothing */
        }
        bd->link = generations[g].mut_list;
        generations[g].mut_list = capabilities[i].mut_lists[g];
        capabilities[i].mut_lists[g] = allocBlock();
    }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp    = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }

  eager_promotion = rtsTrue; // for now
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   *     we want to *scavenge* these roots, not evacuate them: they're not
   *     going to move in this GC.
   *     Also: do them in reverse generation order.  This is because we
   *     often want to promote objects that are pointed to by older
   *     generations early, so we don't have to repeatedly copy them.
   *     Doing the generations in reverse order ensures that we don't end
   *     up in the situation where we want to evac an object to gen 3 and
   *     it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = allocBlock();
      // mut_list always has at least one block.
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
          scavenge(&generations[g].steps[st]);
      }
  }
  /* follow roots from the CAF list (used by GHCi)
   */
  evac_gen = 0;
  markCAFs(mark_root);

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  GetRoots(mark_root);

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

  /* Mark the root pointer table.
   */
  markRootPtrTable(mark_root);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;

  // scavenge static objects
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
      IF_DEBUG(sanity, checkStaticObjects(static_objects));
      scavenge_static();
  }
  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  // scavenge each step in generations 0..maxgen

 loop2:
  // scavenge objects in compacted generation
  if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
      (mark_stack_bdescr != NULL && !mark_stack_empty())) {
      scavenge_mark_stack();
      flag = rtsTrue;
  }
  for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
    for (st = generations[gen].n_steps; --st >= 0; ) {
      if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }
      stp = &generations[gen].steps[st];
      evac_gen = gen;
      if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
        scavenge(stp);
        flag = rtsTrue;
        goto loop2;
      }
      if (stp->new_large_objects != NULL) {
        scavenge_large(stp);
        flag = rtsTrue;
        goto loop2;
      }
    }
  }
  // if any blackholes are alive, make the threads that wait on
  // them alive too.
  if (traverseBlackholeQueue())
      flag = rtsTrue;

  if (flag) { goto loop; }

  // must be last...  invariant is that everything is fully
  // scavenged at this point.
  if (traverseWeakPtrList()) { // returns rtsTrue if evaced something
      goto loop;
  }
  /* Update the pointers from the task list - these are
   * treated as weak pointers because we want to allow a main thread
   * to get a BlockedOnDeadMVar exception in the same way as any other
   * thread.  Note that the threads should all have been retained by
   * GC by virtue of being on the all_threads list, we're just
   * updating pointers here.
   */
  for (task = all_tasks; task != NULL; task = task->all_link) {
      if (!task->stopped && task->tso) {
          ASSERT(task->tso->bound == task);
          tso = (StgTSO *) isAlive((StgClosure *)task->tso);
          if (tso == NULL) {
              barf("task %p: main thread %d has been GC'd",
                   task, task->tso->id);
          }
          task->tso = tso;
      }
  }
  // Now see which stable names are still alive.
  gcStablePtrTable();
  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
              ASSERT(Bdescr(stp->hp) == stp->hp_bd);
              stp->hp_bd->free = stp->hp;
              Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
          }
      }
  }
#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
      || RtsFlags.ProfFlags.bioSelector != NULL)
      LdvCensusForDead(N);
#endif
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
      compact();
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
        generations[g].collections++; // for stats
    }

    // Count the mutable list as bytes "copied" for the purposes of
    // stats.  Every mutable list is copied during every GC.
    if (g > 0) {
        nat mut_list_size = 0;
        for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
            mut_list_size += bd->free - bd->start;
        }
        copied += mut_list_size;

        debugTrace(DEBUG_gc,
                   "mut_list_size: %lu (%d vars, %d arrays, %d others)",
                   (unsigned long)(mut_list_size * sizeof(W_)),
                   mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
    }
    for (s = 0; s < generations[g].n_steps; s++) {

      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        // stats information: how much we copied
        if (g <= N) {
          copied -= stp->hp_bd->start + BLOCK_SIZE_W -
              stp->hp;
          scavd_copied -= (P_)(BLOCK_ROUND_UP(stp->scavd_hp)) - stp->scavd_hp;
        }
      }
      // for generations we collected...
      if (g <= N) {

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
            if (stp->is_compacted) {
                // for a compacted step, just shift the new to-space
                // onto the front of the now-compacted existing blocks.
                for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                    bd->flags &= ~BF_EVACUATED;  // now from-space
                }
                // tack the new blocks on the end of the existing blocks
                if (stp->old_blocks != NULL) {
                    for (bd = stp->old_blocks; bd != NULL; bd = next) {
                        // NB. this step might not be compacted next
                        // time, so reset the BF_COMPACTED flags.
                        // They are set before GC if we're going to
                        // compact.  (search for BF_COMPACTED above).
                        bd->flags &= ~BF_COMPACTED;
                        next = bd->link;
                        if (next == NULL) {
                            bd->link = stp->blocks;
                        }
                    }
                    stp->blocks = stp->old_blocks;
                }
                // add the new blocks to the block tally
                stp->n_blocks += stp->n_old_blocks;
                ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
            } else { // not compacted
                freeChain(stp->old_blocks);
                for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                    bd->flags &= ~BF_EVACUATED;  // now from-space
                }
            }
            stp->old_blocks = NULL;
            stp->n_old_blocks = 0;
        }
        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on the
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = stp->large_objects; bd != NULL; bd = next) {
            next = bd->link;
            freeGroup(bd);
        }

        // update the count of blocks used by large objects
        for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
            bd->flags &= ~BF_EVACUATED;
        }
        stp->large_objects  = stp->scavenged_large_objects;
        stp->n_large_blocks = stp->n_scavenged_large_blocks;
      } else { // for older generations...

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = stp->scavenged_large_objects; bd; bd = next) {
            next = bd->link;
            bd->flags &= ~BF_EVACUATED;
            dbl_link_onto(bd, &stp->large_objects);
        }

        // add the new blocks we promoted during this GC
        stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
             oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
                     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
                          RtsFlags.GcFlags.minAllocAreaSize);
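
      // Worked example (illustrative numbers, not from the source):
      // with maxHeapSize = 100000 blocks and pcFreeHeap = 3 (i.e. 3%),
      // (3 * 100000) / 200 = 1500 blocks, i.e. half of the 3%
      // free-space target, bounded below by minAllocAreaSize.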
      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.generations > 1 &&
          (RtsFlags.GcFlags.compact ||
           (max > 0 &&
            oldest_gen->steps[0].n_blocks >
            (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
          oldest_gen->steps[0].is_compacted = 1;
//        debugBelch("compaction: on\n", live);
      } else {
          oldest_gen->steps[0].is_compacted = 0;
//        debugBelch("compaction: off\n", live);
      }
      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {

          // this test is necessary to ensure that the calculations
          // below don't have any negative results - we're working
          // with unsigned values here.
          if (max < min_alloc) {
              heapOverflow();
          }
          if (oldest_gen->steps[0].is_compacted) {
              if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
              }
          } else {
              if ( (size * (gens - 1) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2);
              }
          }
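
          // Worked example (illustrative numbers): gens = 2, max =
          // 10000, min_alloc = 1000, no compaction.  Copying collection
          // of the old generation can briefly need twice its size, so
          // we require size * (gens-1) * 2 + min_alloc <= max, capping
          // size at (10000 - 1000) / 2 = 4500 blocks.  With compaction
          // the oldest generation doesn't need the doubled space, which
          // is why the divisor above drops to ((gens - 1) * 2 - 1).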
          if (size < live) {
              heapOverflow();
          }
      }

#if 0
      debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
                 min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
          generations[g].max_blocks = size;
      }
  }
  // Guess the amount of live data for stats.
  live = calcLive();
  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
      freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->bitmap != NULL) {
              freeGroup(stp->bitmap);
              stp->bitmap = NULL;
          }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      nat blocks;

      if (g0s0->old_blocks != NULL) {
          freeChain(g0s0->old_blocks);
      }
      for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
          bd->flags = 0;        // now from-space
      }
      g0s0->old_blocks = g0s0->blocks;
      g0s0->n_old_blocks = g0s0->n_blocks;
      g0s0->blocks = saved_nursery;
      g0s0->n_blocks = saved_n_blocks;
      /* For a two-space collector, we need to resize the nursery. */

      /* set up a new nursery.  Allocate a nursery size based on a
       * function of the amount of live data (by default a factor of 2)
       * Use the blocks from the old nursery if possible, freeing up any
       * left over blocks.
       *
       * If we get near the maximum heap size, then adjust our nursery
       * size accordingly.  If the nursery is the same size as the live
       * data (L), then we need 3L bytes.  We can reduce the size of the
       * nursery to bring the required memory down near 2L bytes.
       *
       * A normal 2-space collector would need 4L bytes to give the same
       * performance we get from 3L bytes, reducing to the same
       * performance at 2L bytes.
       */
      blocks = g0s0->n_old_blocks;

      if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
           blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
           RtsFlags.GcFlags.maxHeapSize ) {
          long adjusted_blocks;  // signed on purpose
          int pc_free;

          adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);

          debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
                     RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);

          pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
          if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
              heapOverflow();
          }
          blocks = adjusted_blocks;
      } else {
          blocks *= RtsFlags.GcFlags.oldGenFactor;
          if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }
      }
      resizeNurseries(blocks);
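
      /* Worked example (illustrative numbers): maxHeapSize = 1000
       * blocks, oldGenFactor = 2, live blocks = 300.  Then
       * 300 * 2 * 2 = 1200 > 1000, so we take the first branch:
       * adjusted_blocks = 1000 - 2*300 = 400 and pc_free = 40, which
       * passes the pcFreeHeap check, and the nursery shrinks to 400
       * blocks instead of the unadjusted 600.
       */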
  } else {
      /* Generational collector:
       * If the user has given us a suggested heap size, adjust our
       * allocation area to make best use of the memory available.
       */
      if (RtsFlags.GcFlags.heapSizeSuggestion) {
          long blocks;
          nat needed = calcNeeded();    // approx blocks needed at next GC

          /* Guess how much will be live in generation 0 step 0 next time.
           * A good approximation is obtained by finding the
           * percentage of g0s0 that was live at the last minor GC.
           */
          if (N == 0) {
              g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
          }
          /* Estimate a size for the allocation area based on the
           * information available.  We might end up going slightly under
           * or over the suggested heap size, but we should be pretty
           * close on average.
           *
           * Formula:            suggested - needed
           *                ----------------------------
           *                    1 + g0s0_pcnt_kept/100
           *
           * where 'needed' is the amount of memory needed at the next
           * collection for collecting all steps except g0s0.
           */
          blocks =
              (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
              (100 + (long)g0s0_pcnt_kept);

          if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }

          resizeNurseries((nat)blocks);
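
          /* Worked example (illustrative numbers): heapSizeSuggestion =
           * 1000 blocks, needed = 200, g0s0_pcnt_kept = 30.  Then
           * blocks = (1000 - 200) * 100 / 130 = 615; a 615-block
           * nursery plus the ~30% of it expected to survive the next
           * minor GC plus 'needed' comes out close to the 1000-block
           * suggestion.
           */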
      } else {
          // we might have added extra large blocks to the nursery, so
          // resize back to minAllocAreaSize again.
          resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
      }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
      zero_static_object_list(scavenged_static_objects);
  }
  // start any pending finalizers
  RELEASE_SM_LOCK;
  scheduleFinalizers(last_free_capability, old_weak_ptr_list);
  ACQUIRE_SM_LOCK;

  // send exceptions to any threads which were about to die
  RELEASE_SM_LOCK;
  resurrectThreads(resurrected_threads);
  ACQUIRE_SM_LOCK;
  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

#ifdef DEBUG
  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */
#endif

  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif
  // check for memory leaks if DEBUG is on
#ifdef DEBUG
  memInventory();
#endif

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, live, copied, scavd_copied, N);

#if defined(RTS_USER_SIGNALS)
  // unblock signals again
  unblockUserSignals();
#endif
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl(p);

    // ignore static closures
    //
    // ToDo: for static closures, check the static link field.
    // Problem here is that we sometimes don't set the link field, eg.
    // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
    //
    if (!HEAP_ALLOCED(p)) {
        return p;
    }

    // ignore closures in generations that we're not collecting.
    bd = Bdescr((P_)p);
    if (bd->gen_no > N) {
        return p;
    }

    // if it's a pointer into to-space, then we're done
    if (bd->flags & BF_EVACUATED) {
        return p;
    }

    // large objects use the evacuated flag
    if (bd->flags & BF_LARGE) {
        return NULL;
    }

    // check the mark bit for compacted steps
    if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
        return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:            // rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive!
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
        p = (StgClosure *)((StgTSO *)p)->link;
        continue;
      }
      return NULL;

    default:
      // dead.
      return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}
/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = *STATIC_LINK(info, p);
    *STATIC_LINK(info,p) = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        SET_INFO(c, c->saved_info);
        c->saved_info = NULL;
        // could, but not necessary: c->static_link = NULL;
    }
    revertible_caf_list = NULL;
}
void
markCAFs( evac_fn evac )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        evac(&c->indirectee);
    }
    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        evac(&c->indirectee);
    }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#if 0 && defined(DEBUG)

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%04lx", (long)p);
      // black hole it
      SET_INFO(p,&stg_BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  debugTrace(DEBUG_gccafs, "%d CAFs live", i);
}
#endif
/* -----------------------------------------------------------------------------
   Debugging
   -------------------------------------------------------------------------- */

#ifdef DEBUG
void
printMutableList(generation *gen)
{
    bdescr *bd;
    StgPtr p;

    debugBelch("mutable list %p: ", gen->mut_list);

    for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
        for (p = bd->start; p < bd->free; p++) {
            debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));
        }
    }
    debugBelch("\n");
}
#endif