/* -----------------------------------------------------------------------------
 * (c) The GHC Team 1998-2003
 * Generational garbage collector
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "OSThreads.h"
#include "LdvProfile.h"
#include "BlockAlloc.h"
#include "ParTicky.h" // ToDo: move into Rts.h
#include "RtsSignals.h"
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
#include "RetainerProfile.h"
#include "RaiseAsync.h"

#include <string.h> // for memset()
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
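
/* Illustration (a sketch only, not code the collector runs as-is):
 * with the conventions above, pushing a static closure p onto the live
 * list and testing membership both go through its static link field:
 *
 *     if (*STATIC_LINK(info,p) == NULL) {         // not on any list yet
 *         *STATIC_LINK(info,p) = static_objects;  // cons onto the front
 *         static_objects = p;
 *     }
 *
 * Because END_OF_STATIC_LIST is the non-NULL value '1', a NULL link
 * field always means "not on a list", which is what breaks the loops.
 */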
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Whether to do eager promotion or not.
 */
rtsBool eager_promotion;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
rtsBool failed_to_evac;

/* Saved nursery (used for 2-space collector only)
 */
static bdescr *saved_nursery;
static nat saved_n_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;       // blocks allocated during this GC
lnat new_scavd_blocks; // ditto, but depth-first blocks
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void mark_root               ( StgClosure **root );
static void zero_static_object_list ( StgClosure* first_static );

#if 0 && defined(DEBUG)
static void gcCAFs                  ( void );
#endif

/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

bdescr *mark_stack_bdescr;
StgPtr *mark_stack;
StgPtr *mark_sp;
StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
rtsBool mark_stack_overflowed;
bdescr *oldgen_scan_bd;
/* -----------------------------------------------------------------------------
   GarbageCollect

   Rough outline of the algorithm: for garbage collecting generation N
   (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all generations (mutable_list).

     - for each pointer, evacuate the object it points to into either

       + to-space of the step given by step->to, which is the next
         highest step in this generation or the first step in the next
         generation if this is the last step.

       + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
         When we evacuate an object we attempt to evacuate
         everything it points to into the same generation - this is
         achieved by setting evac_gen to the desired generation.  If
         we can't do this, then an entry in the mut list has to
         be made for the cross-generation pointer.

       + if the object is already in a generation > N, then leave
         it alone.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   Locks held: all capabilities are held throughout GarbageCollect().

   -------------------------------------------------------------------------- */
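
/* The core of the algorithm, as an illustrative sketch only (the real
 * control flow below also interleaves the mark stack, the blackhole
 * queue, and the weak pointer list):
 *
 *     for each root r:  *r = evacuate(*r);   // copy into to-space
 *     do {
 *         flag = rtsFalse;
 *         for each step being collected:
 *             if (to-space has unscanned objects) {
 *                 scavenge them;             // may evacuate more objects
 *                 flag = rtsTrue;
 *             }
 *     } while (flag);
 *
 * i.e. a fixpoint: scavenging stops only when a whole pass evacuates
 * nothing new.
 */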
void
GarbageCollect ( rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, copied = 0, scavd_copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s, i;
  long gen;
  int st;
  rtsBool flag;
#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

  debugTrace(DEBUG_gc, "starting GC");
#if defined(RTS_USER_SIGNALS)
  // block signals
  blockUserSignals();
#endif

  // tell the STM to discard any cached closures it's hoping to re-use
  stmPreGCHook();

  // tell the stats department that we've started a GC
  stat_startGC();

#ifdef DEBUG
  // check for memory leaks if DEBUG is on
  memInventory();
#endif

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * ToDo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
      N = RtsFlags.GcFlags.generations - 1;
      major_gc = rtsTrue;
  } else {
      N = 0;
      for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
          if (generations[g].steps[0].n_blocks +
              generations[g].steps[0].n_large_blocks
              >= generations[g].max_blocks) {
              N = g;
          }
      }
      major_gc = (N == RtsFlags.GcFlags.generations-1);
  }
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;
  /* Save the nursery if we're doing a two-space collection.
   * g0s0->blocks will be used for to-space, so we need to get the
   * nursery out of the way.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      saved_nursery = g0s0->blocks;
      saved_n_blocks = g0s0->n_blocks;
      g0s0->blocks = NULL;
      g0s0->n_blocks = 0;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  new_scavd_blocks = 0;
  // Initialise to-space in all the generations/steps that we're
  // collecting.
  //
  for (g = 0; g <= N; g++) {

      // throw away the mutable list.  Invariant: the mutable list
      // always has at least one block; this means we can avoid a check for
      // NULL in recordMutable().
      if (g > 0) {
          freeChain(generations[g].mut_list);
          generations[g].mut_list = allocBlock();
          for (i = 0; i < n_capabilities; i++) {
              freeChain(capabilities[i].mut_lists[g]);
              capabilities[i].mut_lists[g] = allocBlock();
          }
      }
      for (s = 0; s < generations[g].n_steps; s++) {

          // generation 0, step 0 doesn't need to-space
          if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
              continue;
          }

          stp = &generations[g].steps[s];
          ASSERT(stp->gen_no == g);

          // start a new to-space for this step.
          stp->old_blocks   = stp->blocks;
          stp->n_old_blocks = stp->n_blocks;

          // allocate the first to-space block; extra blocks will be
          // chained on as necessary.
          stp->blocks = NULL;
          bd = gc_alloc_block(stp);
          stp->scan = bd->start;
          stp->scan_bd = bd;

          // allocate a block for "already scavenged" objects.  This goes
          // on the front of the stp->blocks list, so it won't be
          // traversed by the scavenging sweep.
          gc_alloc_scavd_block(stp);
          // initialise the large object queues.
          stp->new_large_objects = NULL;
          stp->scavenged_large_objects = NULL;
          stp->n_scavenged_large_blocks = 0;

          // mark the large objects as not evacuated yet
          for (bd = stp->large_objects; bd; bd = bd->link) {
              bd->flags &= ~BF_EVACUATED;
          }

          // for a compacted step, we need to allocate the bitmap
          if (stp->is_compacted) {
              nat bitmap_size; // in bytes
              bdescr *bitmap_bdescr;
              StgPtr bitmap;

              bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

              if (bitmap_size > 0) {
                  bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
                                             / BLOCK_SIZE);
                  stp->bitmap = bitmap_bdescr;
                  bitmap = bitmap_bdescr->start;

                  debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
                             bitmap_size, bitmap);

                  // don't forget to fill it with zeros!
                  memset(bitmap, 0, bitmap_size);

                  // For each block in this step, point to its bitmap from the
                  // block descriptor.
                  for (bd = stp->old_blocks; bd != NULL; bd = bd->link) {
                      bd->u.bitmap = bitmap;
                      bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);

                      // Also at this point we set the BF_COMPACTED flag
                      // for this block.  The invariant is that
                      // BF_COMPACTED is always unset, except during GC
                      // when it is set on those blocks which will be
                      // compacted.
                      bd->flags |= BF_COMPACTED;
                  }
              }
          }
      }
  }
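
  /* Worked example for the bitmap sizing above (illustrative numbers
   * only): one mark bit is kept per word of from-space, so with 4-byte
   * words and 4K blocks each block needs
   *
   *     BLOCK_SIZE / (sizeof(W_) * BITS_PER_BYTE) = 4096 / 32 = 128
   *
   * bytes of bitmap, and the per-block stride added to 'bitmap' is
   * BLOCK_SIZE_W / 32 = 1024 / 32 = 32 words (the same 128 bytes).
   */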
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->hp_bd == NULL) {
              ASSERT(stp->blocks == NULL);
              bd = gc_alloc_block(stp);
          }
          if (stp->scavd_hp == NULL) {
              gc_alloc_scavd_block(stp);
          }
          /* Set the scan pointer for older generations: remember we
           * still have to scavenge objects that have been promoted. */
          stp->scan = stp->hp;
          stp->scan_bd = stp->hp_bd;
          stp->new_large_objects = NULL;
          stp->scavenged_large_objects = NULL;
          stp->n_scavenged_large_blocks = 0;
      }
      /* Move the private mutable lists from each capability onto the
       * main mutable list for the generation.
       */
      for (i = 0; i < n_capabilities; i++) {
          for (bd = capabilities[i].mut_lists[g];
               bd->link != NULL; bd = bd->link) {
              /* nothing to do here */
          }
          bd->link = generations[g].mut_list;
          generations[g].mut_list = capabilities[i].mut_lists[g];
          capabilities[i].mut_lists[g] = allocBlock();
      }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
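
  /* Sizing note (illustrative arithmetic, assuming 4K blocks and 4-byte
   * words): the mark stack holds MARK_STACK_BLOCKS * BLOCK_SIZE_W
   * = 4 * 1024 = 4096 entries; mark_splim is one past the last usable
   * slot, and pushing beyond it sets mark_stack_overflowed, triggering
   * the linear-scan fallback declared above.
   */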
  eager_promotion = rtsTrue; // for now
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = allocBlock();
      // mut_list always has at least one block.
  }
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
          scavenge(&generations[g].steps[st]);
      }
  }
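
  /* Why reverse order matters, concretely (an illustrative scenario):
   * suppose mutable objects in both gen 3 and gen 2 point to the same
   * closure x.  Scavenging gen 3 first evacuates x into gen 3, and the
   * gen 2 reference is then a harmless young-to-old pointer.  Scavenged
   * the other way round, x would land in gen 2, and the gen 3 mutable
   * list would need a permanent entry for the cross-generation pointer.
   */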
  /* follow roots from the CAF list (used by GHCi)
   */
  evac_gen = 0;
  markCAFs(mark_root);

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  GetRoots(mark_root);

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  markWeakPtrList();
  initWeakForGC();

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

  /* Mark the root pointer table.
   */
  markRootPtrTable(mark_root);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;

  // scavenge static objects
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
      IF_DEBUG(sanity, checkStaticObjects(static_objects));
      scavenge_static();
  }
  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  // scavenge each step in generations 0..maxgen
 loop2:
  // scavenge objects in compacted generation
  if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
      (mark_stack_bdescr != NULL && !mark_stack_empty())) {
      scavenge_mark_stack();
      flag = rtsTrue;
  }

  for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
      for (st = generations[gen].n_steps; --st >= 0; ) {
          if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
              continue;
          }
          stp = &generations[gen].steps[st];
          evac_gen = gen;
          if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
              scavenge(stp);
              flag = rtsTrue;
              goto loop2;
          }
          if (stp->new_large_objects != NULL) {
              scavenge_large(stp);
              flag = rtsTrue;
              goto loop2;
          }
      }
  }
  // if any blackholes are alive, make the threads that wait on
  // them alive too.
  if (traverseBlackholeQueue())
      flag = rtsTrue;

  if (flag) { goto loop; }

  // must be last...  invariant is that everything is fully
  // scavenged at this point.
  if (traverseWeakPtrList()) { // returns rtsTrue if evaced something
      goto loop;
  }
  /* Update the pointers from the task list - these are
   * treated as weak pointers because we want to allow a main thread
   * to get a BlockedOnDeadMVar exception in the same way as any other
   * thread.  Note that the threads should all have been retained by
   * GC by virtue of being on the all_threads list, we're just
   * updating pointers here.
   */
  {
      Task *task;
      StgTSO *tso;
      for (task = all_tasks; task != NULL; task = task->all_link) {
          if (!task->stopped && task->tso) {
              ASSERT(task->tso->bound == task);
              tso = (StgTSO *) isAlive((StgClosure *)task->tso);
              if (tso == NULL) {
                  barf("task %p: main thread %d has been GC'd",
                       task, task->tso->id);
              }
              task->tso = tso;
          }
      }
  }
  // Now see which stable names are still alive.
  gcStablePtrTable();
  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
              ASSERT(Bdescr(stp->hp) == stp->hp_bd);
              stp->hp_bd->free = stp->hp;
              Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
          }
      }
  }
#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
      || RtsFlags.ProfFlags.bioSelector != NULL)
      LdvCensusForDead(N);
#endif
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
      compact();
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

      if (g <= N) {
          generations[g].collections++; // for stats
      }

      // Count the mutable list as bytes "copied" for the purposes of
      // stats.  Every mutable list is copied during every GC.
      if (g > 0) {
          nat mut_list_size = 0;
          for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
              mut_list_size += bd->free - bd->start;
          }
          copied += mut_list_size;

          debugTrace(DEBUG_gc,
                     "mut_list_size: %lu (%d vars, %d arrays, %d others)",
                     (unsigned long)(mut_list_size * sizeof(W_)),
                     mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
      }
      for (s = 0; s < generations[g].n_steps; s++) {
          bdescr *next;
          stp = &generations[g].steps[s];

          if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
              // stats information: how much we copied
              if (g <= N) {
                  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
                      stp->hp_bd->free;
                  scavd_copied -= (P_)(BLOCK_ROUND_UP(stp->scavd_hp)) - stp->scavd_hp;
              }
          }
          // for generations we collected...
          if (g <= N) {

              /* free old memory and shift to-space into from-space for all
               * the collected steps (except the allocation area).  These
               * freed blocks will probably be quickly recycled.
               */
              if (!(g == 0 && s == 0)) {
                  if (stp->is_compacted) {
                      // for a compacted step, just shift the new to-space
                      // onto the front of the now-compacted existing blocks.
                      for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                          bd->flags &= ~BF_EVACUATED; // now from-space
                      }
                      // tack the new blocks on the end of the existing blocks
                      if (stp->old_blocks != NULL) {
                          for (bd = stp->old_blocks; bd != NULL; bd = next) {
                              // NB. this step might not be compacted next
                              // time, so reset the BF_COMPACTED flags.
                              // They are set before GC if we're going to
                              // compact.  (search for BF_COMPACTED above).
                              bd->flags &= ~BF_COMPACTED;
                              next = bd->link;
                              if (next == NULL) {
                                  bd->link = stp->blocks;
                              }
                          }
                          stp->blocks = stp->old_blocks;
                      }
                      // add the new blocks to the block tally
                      stp->n_blocks += stp->n_old_blocks;
                      ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
                  } else { // not compacted
                      freeChain(stp->old_blocks);
                      for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                          bd->flags &= ~BF_EVACUATED; // now from-space
                      }
                  }
                  stp->old_blocks = NULL;
                  stp->n_old_blocks = 0;
              }
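
              /* Net effect of the two branches above (an illustrative
               * summary): for a compacted step the block chain becomes
               * the compacted old blocks followed by the new to-space
               * blocks, all now flagged as from-space; for a copying
               * step the old chain is freed outright and the to-space
               * chain simply becomes the new from-space.
               */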
              /* LARGE OBJECTS.  The current live large objects are chained on
               * scavenged_large, having been moved during garbage
               * collection from large_objects.  Any objects left on the
               * large_objects list are therefore dead, so we free them here.
               */
              for (bd = stp->large_objects; bd != NULL; bd = next) {
                  next = bd->link;
                  freeGroup(bd);
              }

              // update the count of blocks used by large objects
              for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
                  bd->flags &= ~BF_EVACUATED;
              }
              stp->large_objects  = stp->scavenged_large_objects;
              stp->n_large_blocks = stp->n_scavenged_large_blocks;
          } else { // for older generations...

              /* For older generations, we need to append the
               * scavenged_large_object list (i.e. large objects that have been
               * promoted during this GC) to the large_object list for that step.
               */
              for (bd = stp->scavenged_large_objects; bd; bd = next) {
                  next = bd->link;
                  bd->flags &= ~BF_EVACUATED;
                  dbl_link_onto(bd, &stp->large_objects);
              }

              // add the new blocks we promoted during this GC
              stp->n_large_blocks += stp->n_scavenged_large_blocks;
          }
      }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;
      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
             oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
                     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
                          RtsFlags.GcFlags.minAllocAreaSize);
      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.generations > 1 &&
          (RtsFlags.GcFlags.compact ||
           (max > 0 &&
            oldest_gen->steps[0].n_blocks >
            (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
          oldest_gen->steps[0].is_compacted = 1;
//        debugBelch("compaction: on\n", live);
      } else {
          oldest_gen->steps[0].is_compacted = 0;
//        debugBelch("compaction: off\n", live);
      }
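
      /* For example (illustrative numbers): with a maximum heap of
       * 10000 blocks and the default compactThreshold of 30, compaction
       * switches on once the oldest generation's residency exceeds
       * 3000 blocks, and off again if a later major GC brings it back
       * under the threshold.
       */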
      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {

          // this test is necessary to ensure that the calculations
          // below don't have any negative results - we're working
          // with unsigned values here.
          if (max < min_alloc) {
              max = min_alloc;
          }

          if (oldest_gen->steps[0].is_compacted) {
              if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
              }
          } else {
              if ( (size * (gens - 1) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2);
              }
          }
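
          /* The reasoning behind these formulas, sketched with the
           * approximation that every copied generation needs from-space
           * plus to-space: gens-1 old generations of 'size' blocks cost
           * about size * (gens-1) * 2 blocks, plus min_alloc for the
           * allocation area; solving size * (gens-1) * 2 + min_alloc
           * <= max for size gives the non-compacted assignment above.
           * A compacted oldest generation is collected in place, saving
           * roughly one 'size' of to-space, hence the divisor
           * (gens-1)*2 - 1 in the compacted case.
           */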
          if (size < live) {
              size = live;
          }
      }

#if 0
      debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
                 min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
          generations[g].max_blocks = size;
      }
  }
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
      freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->bitmap != NULL) {
              freeGroup(stp->bitmap);
              stp->bitmap = NULL;
          }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      nat blocks;

      if (g0s0->old_blocks != NULL) {
          freeChain(g0s0->old_blocks);
      }
      for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
          bd->flags = 0; // now from-space
      }
      g0s0->old_blocks = g0s0->blocks;
      g0s0->n_old_blocks = g0s0->n_blocks;
      g0s0->blocks = saved_nursery;
      g0s0->n_blocks = saved_n_blocks;
      /* For a two-space collector, we need to resize the nursery. */

      /* set up a new nursery.  Allocate a nursery size based on a
       * function of the amount of live data (by default a factor of 2)
       * Use the blocks from the old nursery if possible, freeing up any
       * left over blocks.
       *
       * If we get near the maximum heap size, then adjust our nursery
       * size accordingly.  If the nursery is the same size as the live
       * data (L), then we need 3L bytes.  We can reduce the size of the
       * nursery to bring the required memory down near 2L bytes.
       *
       * A normal 2-space collector would need 4L bytes to give the same
       * performance we get from 3L bytes, reducing to the same
       * performance at 2L bytes.
       */
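
      /* Reading the 2L/3L figures concretely (illustrative): with live
       * data L and a nursery also of size L, a collection can need
       * L (live) + L (to-space copy) + L (nursery) = 3L.  As we
       * approach the maximum heap size, shrinking the nursery squeezes
       * the total back towards 2L; that is what the adjusted_blocks
       * calculation below does.
       */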
      blocks = g0s0->n_old_blocks;

      if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
           blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
           RtsFlags.GcFlags.maxHeapSize ) {
          long adjusted_blocks; // signed on purpose
          int pc_free;

          adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);

          debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
                     RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);

          pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
          if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
              heapOverflow();
          }
          blocks = adjusted_blocks;
      } else {
          blocks *= RtsFlags.GcFlags.oldGenFactor;
          if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }
      }
      resizeNurseries(blocks);
  } else { /* Generational collector */

      /* Generational collector:
       * If the user has given us a suggested heap size, adjust our
       * allocation area to make best use of the memory available.
       */
      if (RtsFlags.GcFlags.heapSizeSuggestion) {
          long blocks;
          nat needed = calcNeeded(); // approx blocks needed at next GC

          /* Guess how much will be live in generation 0 step 0 next time.
           * A good approximation is obtained by finding the
           * percentage of g0s0 that was live at the last minor GC.
           */
          if (N == 0) {
              g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
          }
          /* Estimate a size for the allocation area based on the
           * information available.  We might end up going slightly under
           * or over the suggested heap size, but we should be pretty
           * close on average.
           *
           * Formula:            suggested - needed
           *                ----------------------------
           *                    1 + g0s0_pcnt_kept/100
           *
           * where 'needed' is the amount of memory needed at the next
           * collection for collecting all steps except g0s0.
           */
          blocks =
              (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
              (100 + (long)g0s0_pcnt_kept);
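
          /* Worked example (illustrative numbers): with a suggestion of
           * 10000 blocks, needed = 2000 and g0s0_pcnt_kept = 25, we get
           * blocks = (10000 - 2000) * 100 / 125 = 6400.  The ~25% of
           * those kept at the next minor GC (~1600 blocks) plus
           * 'needed' then brings the total back up to the suggested
           * 10000 blocks.
           */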
          if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }

          resizeNurseries((nat)blocks);

      } else {
          // we might have added extra large blocks to the nursery, so
          // resize back to minAllocAreaSize again.
          resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
      }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
      zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();
  // start any pending finalizers
  RELEASE_SM_LOCK;
  scheduleFinalizers(last_free_capability, old_weak_ptr_list);
  ACQUIRE_SM_LOCK;

  // send exceptions to any threads which were about to die
  RELEASE_SM_LOCK;
  resurrectThreads(resurrected_threads);
  ACQUIRE_SM_LOCK;

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);
  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */
  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

#ifdef DEBUG
  // check for memory leaks if DEBUG is on
  memInventory();
#endif

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, live, copied, scavd_copied, N);

#if defined(RTS_USER_SIGNALS)
  // unblock signals again
  unblockUserSignals();
#endif
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
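
/* Typical use, as a sketch (the 'w' weak-pointer record here is
 * hypothetical; the real callers are the weak-pointer and task-list
 * traversals above):
 *
 *     StgClosure *q = isAlive(w->key);
 *     if (q == NULL) {
 *         // key is dead: queue the finalizer, drop the weak pointer
 *     } else {
 *         w->key = q;  // key survived, possibly at a new address
 *     }
 */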
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl(p);

    // ignore static closures
    //
    // ToDo: for static closures, check the static link field.
    // Problem here is that we sometimes don't set the link field, eg.
    // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
    //
    if (!HEAP_ALLOCED(p)) {
        return p;
    }

    // ignore closures in generations that we're not collecting.
    bd = Bdescr((P_)p);
    if (bd->gen_no > N) {
        return p;
    }

    // if it's a pointer into to-space, then we're done
    if (bd->flags & BF_EVACUATED) {
        return p;
    }

    // large objects use the evacuated flag
    if (bd->flags & BF_LARGE) {
        return NULL;
    }

    // check the mark bit for compacted steps
    if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
        return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:      // rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive!
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
        p = (StgClosure *)((StgTSO *)p)->link;
        continue;
      }
      return NULL;

    default:
      // dead.
      return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}
/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = *STATIC_LINK(info, p);
    *STATIC_LINK(info,p) = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        SET_INFO(c, c->saved_info);
        c->saved_info = NULL;
        // could, but not necessary: c->static_link = NULL;
    }
    revertible_caf_list = NULL;
}
void
markCAFs( evac_fn evac )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        evac(&c->indirectee);
    }
    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        evac(&c->indirectee);
    }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#if 0 && defined(DEBUG)

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != END_OF_STATIC_LIST) {
    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%04lx", (long)p);
      // black hole it
      SET_INFO(p,&stg_BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  debugTrace(DEBUG_gccafs, "%d CAFs live", i);
}
#endif
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#ifdef DEBUG
void
printMutableList(generation *gen)
{
    bdescr *bd;
    StgPtr p;

    debugBelch("mutable list %p: ", gen->mut_list);

    for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
        for (p = bd->start; p < bd->free; p++) {
            debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));
        }
    }
    debugBelch("\n");
}
#endif