/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "OSThreads.h"
#include "LdvProfile.h"
#include "BlockAlloc.h"
#include "ParTicky.h"   // ToDo: move into Rts.h
#include "RtsSignals.h"
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
#include "RetainerProfile.h"
#include "RaiseAsync.h"

#include <string.h> // for memset()
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
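
/* A minimal illustrative sketch (not part of the collector) of the
 * consing discipline described above: an object goes onto the front of
 * static_objects the first time it is seen, and its non-NULL link field
 * is what marks it as already listed.  STATIC_LINK and static_objects
 * are the real names; the function itself is hypothetical. */
#if 0
static void example_add_static(StgClosure *p)
{
    const StgInfoTable *info = get_itbl(p);
    if (*STATIC_LINK(info, p) == NULL) {   // not yet on the list
        *STATIC_LINK(info, p) = static_objects;
        static_objects = p;                // cons onto the front
    }
}
#endif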
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Whether to do eager promotion or not.
 */
rtsBool eager_promotion;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
rtsBool failed_to_evac;
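
/* An illustrative sketch (hypothetical function, not part of this file)
 * of the pattern the scavenging code uses with these two flags: evacuate
 * a field, and if it could not be moved to the desired generation,
 * record a cross-generation entry on the mutable list.  evacuate() and
 * recordMutableGen() are the real RTS entry points. */
#if 0
static void example_scavenge_field(StgClosure **field, generation *gen)
{
    failed_to_evac = rtsFalse;
    *field = evacuate(*field);
    if (failed_to_evac) {
        recordMutableGen(*field, gen);  // keep a cross-generation entry
    }
}
#endif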
/* Saved nursery (used for 2-space collector only)
 */
static bdescr *saved_nursery;
static nat saved_n_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;        // blocks allocated during this GC
lnat new_scavd_blocks;  // ditto, but depth-first blocks
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void mark_root               ( StgClosure **root );
static void zero_static_object_list ( StgClosure* first_static );

#if 0 && defined(DEBUG)
static void gcCAFs                  ( void );
#endif

/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

bdescr *mark_stack_bdescr;
StgPtr *mark_stack;
StgPtr *mark_sp;
StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
rtsBool mark_stack_overflowed;
bdescr *oldgen_scan_bd;
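
/* A minimal sketch (hypothetical helpers, assuming the declarations
 * above) of how a block-based mark stack is typically operated: push
 * until mark_splim is hit, then set the overflow flag and fall back to
 * the linear scan mentioned above. */
#if 0
static void example_push_mark_stack(StgPtr p)
{
    if (mark_sp >= mark_splim) {
        mark_stack_overflowed = rtsTrue;  // revert to linear scan
        return;
    }
    *mark_sp++ = p;
}

static StgPtr example_pop_mark_stack(void)
{
    return *--mark_sp;  // caller must check for emptiness first
}
#endif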
/* -----------------------------------------------------------------------------
   GarbageCollect

   Rough outline of the algorithm: for garbage collecting generation N
   (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all generations (mutable_list).

     - for each pointer, evacuate the object it points to into either

       + to-space of the step given by step->to, which is the next
         highest step in this generation or the first step in the next
         generation if this is the last step.

       + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
         When we evacuate an object we attempt to evacuate
         everything it points to into the same generation - this is
         achieved by setting evac_gen to the desired generation.  If
         we can't do this, then an entry in the mut list has to
         be made for the cross-generation pointer.

       + if the object is already in a generation > N, then leave
         it alone.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   Locks held: all capabilities are held throughout GarbageCollect().

   -------------------------------------------------------------------------- */
void
GarbageCollect ( rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, copied = 0, scavd_copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s, i;
  int gen, st;
  rtsBool flag;
#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

  debugTrace(DEBUG_gc, "starting GC");

#if defined(RTS_USER_SIGNALS)
  if (RtsFlags.MiscFlags.install_signal_handlers) {
    // block signals
    blockUserSignals();
  }
#endif

  // tell the STM to discard any cached closures it's hoping to re-use

  // tell the stats department that we've started a GC
  stat_startGC();

  // check for memory leaks if DEBUG is on
#ifdef DEBUG
  memInventory();
#endif

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
          generations[g].steps[0].n_large_blocks
          >= generations[g].max_blocks) {
        N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }
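
  /* Worked example (illustrative): with 3 generations, if generation 1
   * is at or over its max_blocks but generation 2 is not, the loop
   * above leaves N = 1: generations 0 and 1 get collected and major_gc
   * is rtsFalse; only N == 2 would make this a major collection. */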
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* Save the nursery if we're doing a two-space collection.
   * g0s0->blocks will be used for to-space, so we need to get the
   * nursery out of the way.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      saved_nursery = g0s0->blocks;
      saved_n_blocks = g0s0->n_blocks;
      g0s0->blocks = NULL;
      g0s0->n_blocks = 0;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  new_scavd_blocks = 0;
  // Initialise to-space in all the generations/steps that we're
  // collecting.
  //
  for (g = 0; g <= N; g++) {

    // throw away the mutable list.  Invariant: the mutable list
    // always has at least one block; this means we can avoid a check for
    // NULL in recordMutable().
    if (g != 0) {
	freeChain(generations[g].mut_list);
	generations[g].mut_list = allocBlock();
	for (i = 0; i < n_capabilities; i++) {
	    freeChain(capabilities[i].mut_lists[g]);
	    capabilities[i].mut_lists[g] = allocBlock();
	}
    }

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);

      // start a new to-space for this step.
      stp->old_blocks   = stp->blocks;
      stp->n_old_blocks = stp->n_blocks;

      // allocate the first to-space block; extra blocks will be
      // chained on as necessary.
      bd = gc_alloc_block(stp);
      stp->scan = bd->start;

      // allocate a block for "already scavenged" objects.  This goes
      // on the front of the stp->blocks list, so it won't be
      // traversed by the scavenging sweep.
      gc_alloc_scavd_block(stp);

      // initialise the large object queues.
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags &= ~BF_EVACUATED;
      }
      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	nat bitmap_size; // in bytes
	bdescr *bitmap_bdescr;
	StgWord *bitmap;

	bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	if (bitmap_size > 0) {
	  bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
				     / BLOCK_SIZE);
	  stp->bitmap = bitmap_bdescr;
	  bitmap = bitmap_bdescr->start;

	  debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
		     bitmap_size, bitmap);

	  // don't forget to fill it with zeros!
	  memset(bitmap, 0, bitmap_size);

	  // For each block in this step, point to its bitmap from the
	  // block descriptor.
	  for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
	    bd->u.bitmap = bitmap;
	    bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);

	    // Also at this point we set the BF_COMPACTED flag
	    // for this block.  The invariant is that
	    // BF_COMPACTED is always unset, except during GC
	    // when it is set on those blocks which will be
	    // compacted.
	    bd->flags |= BF_COMPACTED;
	  }
	}
      }
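
      /* Worked example (illustrative figures): the bitmap keeps one
       * mark bit per word of from-space, so assuming 4096-byte blocks
       * and 4-byte words, each block has 1024 words and needs
       * 1024 bits = 128 bytes of bitmap; that is exactly
       * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE) = 4096/32 = 128. */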
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	  ASSERT(stp->blocks == NULL);
	  bd = gc_alloc_block(stp);
      }
      if (stp->scavd_hp == NULL) {
	  gc_alloc_scavd_block(stp);
      }

      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }

    /* Move the private mutable lists from each capability onto the
     * main mutable list for the generation.
     */
    for (i = 0; i < n_capabilities; i++) {
	for (bd = capabilities[i].mut_lists[g];
	     bd->link != NULL; bd = bd->link) {
	    /* nothing */
	}
	bd->link = generations[g].mut_list;
	generations[g].mut_list = capabilities[i].mut_lists[g];
	capabilities[i].mut_lists[g] = allocBlock();
    }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp    = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
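
  /* Illustrative capacity note (assuming the usual 4096-byte blocks and
   * a 32-bit build): BLOCK_SIZE_W = 1024 words, so the mark stack above
   * holds MARK_STACK_BLOCKS * 1024 = 4096 pointers before
   * mark_stack_overflowed forces the fallback to a linear scan. */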
  eager_promotion = rtsTrue; // for now

  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = allocBlock();
    // mut_list always has at least one block.
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    scavenge_mutable_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  /* follow roots from the CAF list (used by GHCi)
   */
  evac_gen = 0;
  markCAFs(mark_root);

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  GetRoots(mark_root);

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;

  // scavenge static objects
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity, checkStaticObjects(static_objects));
    scavenge_static();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  // scavenge each step in generations 0..maxgen

  // scavenge objects in compacted generation
  if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
      (mark_stack_bdescr != NULL && !mark_stack_empty())) {
    scavenge_mark_stack();
  }

  for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
    for (st = generations[gen].n_steps; --st >= 0; ) {
      if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }
      stp = &generations[gen].steps[st];
      if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	scavenge(stp);
	flag = rtsTrue;
      }
      if (stp->new_large_objects != NULL) {
	scavenge_large(stp);
	flag = rtsTrue;
      }
    }
  }

  // if any blackholes are alive, make the threads that wait on
  // them alive too.
  if (traverseBlackholeQueue())
      flag = rtsTrue;

  if (flag) { goto loop; }

  // must be last...  invariant is that everything is fully
  // scavenged at this point.
  if (traverseWeakPtrList()) { // returns rtsTrue if evaced something
      goto loop;
  }
  /* Update the pointers from the task list - these are
   * treated as weak pointers because we want to allow a main thread
   * to get a BlockedOnDeadMVar exception in the same way as any other
   * thread.  Note that the threads should all have been retained by
   * GC by virtue of being on the all_threads list, we're just
   * updating pointers here.
   */
  {
      Task *task;
      StgTSO *tso;
      for (task = all_tasks; task != NULL; task = task->all_link) {
	  if (!task->stopped && task->tso) {
	      ASSERT(task->tso->bound == task);
	      tso = (StgTSO *) isAlive((StgClosure *)task->tso);
	      if (tso == NULL) {
		  barf("task %p: main thread %d has been GC'd",
		       task, task->tso->id);
	      }
	      task->tso = tso;
	  }
      }
  }
  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      ASSERT(Bdescr(stp->hp) == stp->hp_bd);
	      stp->hp_bd->free = stp->hp;
	      Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
	  }
      }
  }

#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
      || RtsFlags.ProfFlags.bioSelector != NULL) {
    LdvCensusForDead(N);
  }
#endif

  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
      compact();
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    // Count the mutable list as bytes "copied" for the purposes of
    // stats.  Every mutable list is copied during every GC.
    if (g > 0) {
	nat mut_list_size = 0;
	for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
	    mut_list_size += bd->free - bd->start;
	}
	copied += mut_list_size;

	debugTrace(DEBUG_gc,
		   "mut_list_size: %lu (%d vars, %d arrays, %d others)",
		   (unsigned long)(mut_list_size * sizeof(W_)),
		   mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	  scavd_copied -= stp->scavd_hpLim - stp->scavd_hp;
	}
      }
      // for generations we collected...
      if (g <= N) {

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	  if (stp->is_compacted) {
	    // for a compacted step, just shift the new to-space
	    // onto the front of the now-compacted existing blocks.
	    for (bd = stp->blocks; bd != NULL; bd = bd->link) {
	      bd->flags &= ~BF_EVACUATED; // now from-space
	    }
	    // tack the new blocks on the end of the existing blocks
	    if (stp->old_blocks != NULL) {
	      for (bd = stp->old_blocks; bd != NULL; bd = next) {
		// NB. this step might not be compacted next
		// time, so reset the BF_COMPACTED flags.
		// They are set before GC if we're going to
		// compact.  (search for BF_COMPACTED above).
		bd->flags &= ~BF_COMPACTED;
		next = bd->link;
		if (next == NULL) {
		  bd->link = stp->blocks;
		}
	      }
	      stp->blocks = stp->old_blocks;
	    }
	    // add the new blocks to the block tally
	    stp->n_blocks += stp->n_old_blocks;
	    ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
	  } else {
	    freeChain(stp->old_blocks);
	    for (bd = stp->blocks; bd != NULL; bd = bd->link) {
	      bd->flags &= ~BF_EVACUATED; // now from-space
	    }
	  }
	  stp->old_blocks = NULL;
	  stp->n_old_blocks = 0;
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on the
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}

	// update the count of blocks used by large objects
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects  = stp->scavenged_large_objects;
	stp->n_large_blocks = stp->n_scavenged_large_blocks;

      } else {
	// for older generations...

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
    nat live, size, min_alloc;
    nat max  = RtsFlags.GcFlags.maxHeapSize;
    nat gens = RtsFlags.GcFlags.generations;

    // live in the oldest generations
    live = oldest_gen->steps[0].n_blocks +
	   oldest_gen->steps[0].n_large_blocks;

    // default max size for all generations except zero
    size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
		   RtsFlags.GcFlags.minOldGenSize);

    // minimum size for generation zero
    min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
			RtsFlags.GcFlags.minAllocAreaSize);

    // Auto-enable compaction when the residency reaches a
    // certain percentage of the maximum heap size (default: 30%).
    if (RtsFlags.GcFlags.generations > 1 &&
	(RtsFlags.GcFlags.compact ||
	 (max > 0 &&
	  oldest_gen->steps[0].n_blocks >
	  (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
	oldest_gen->steps[0].is_compacted = 1;
//	debugBelch("compaction: on\n", live);
    } else {
	oldest_gen->steps[0].is_compacted = 0;
//	debugBelch("compaction: off\n", live);
    }

    // if we're going to go over the maximum heap size, reduce the
    // size of the generations accordingly.  The calculation is
    // different if compaction is turned on, because we don't need
    // to double the space required to collect the old generation.
    if (max != 0) {

      // this test is necessary to ensure that the calculations
      // below don't have any negative results - we're working
      // with unsigned values here.
      if (max < min_alloc) {
	  max = min_alloc;
      }

      if (oldest_gen->steps[0].is_compacted) {
	  if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
	      size = (max - min_alloc) / ((gens - 1) * 2 - 1);
	  }
      } else {
	  if ( (size * (gens - 1) * 2) + min_alloc > max ) {
	      size = (max - min_alloc) / ((gens - 1) * 2);
	  }
      }
    }
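
    /* Worked example (illustrative figures): with gens = 2, no
     * compaction, max = 1000 blocks and min_alloc = 200 blocks, the
     * test is size*2 + 200 > 1000, so size is capped at
     * (1000-200)/2 = 400 blocks: collecting one such generation by
     * copying needs 2*size blocks, leaving min_alloc for the
     * allocation area. */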
#if 0
    debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
	       min_alloc, size, max);
#endif

    for (g = 0; g < gens; g++) {
	generations[g].max_blocks = size;
    }
  }
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  // Free any bitmaps.
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (stp->bitmap != NULL) {
	      freeGroup(stp->bitmap);
	      stp->bitmap = NULL;
	  }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (g0s0->old_blocks != NULL) {
      freeChain(g0s0->old_blocks);
    }
    for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0; // now from-space
    }
    g0s0->old_blocks = g0s0->blocks;
    g0s0->n_old_blocks = g0s0->n_blocks;
    g0s0->blocks = saved_nursery;
    g0s0->n_blocks = saved_n_blocks;

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (by default a factor of 2).
     * Use the blocks from the old nursery if possible, freeing up any
     * left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
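
    /* Worked example (illustrative figures): with maxHeapSize = 1000
     * blocks, oldGenFactor = 2 and 300 live blocks, 300*2*2 = 1200 >
     * 1000, so instead of the usual live*oldGenFactor nursery the code
     * below takes adjusted_blocks = 1000 - 2*300 = 400 blocks,
     * squeezing the nursery so that nursery + 2*live still fits in the
     * heap. */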
    blocks = g0s0->n_old_blocks;

    if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
	 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks; // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);

      debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
		 RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);

      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNurseries(blocks);
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded(); // approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	(100 + (long)g0s0_pcnt_kept);
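
      /* Worked example (illustrative figures): suggested = 1000 blocks,
       * needed = 220 blocks and g0s0_pcnt_kept = 30 give
       * (1000 - 220) * 100 / 130 = 600 blocks of nursery: 600 blocks
       * kept at 30% contribute ~180 blocks of survivors, which plus the
       * 220 needed elsewhere keeps the total near the 1000-block
       * suggestion. */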
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNurseries((nat)blocks);
    } else {
      // we might have added extra large blocks to the nursery, so
      // resize back to minAllocAreaSize again.
      resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
    }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // start any pending finalizers
  scheduleFinalizers(last_free_capability, old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

  // symbol-table based profiling
  /*  heapCensus(to_blocks); */ /* ToDo */

  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if DEBUG is on
#ifdef DEBUG
  memInventory();
#endif

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, live, copied, scavd_copied, N);

#if defined(RTS_USER_SIGNALS)
  if (RtsFlags.MiscFlags.install_signal_handlers) {
    // unblock signals again
    unblockUserSignals();
  }
#endif
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
         It untags and (if needed) retags pointers to closures.
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;
  StgWord tag;

 loop:
  /* The tag and the pointer are split, to be merged later when needed. */
  tag = GET_CLOSURE_TAG(p);
  p = UNTAG_CLOSURE(p);

  ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
  info = get_itbl(p);

  // ignore static closures
  //
  // ToDo: for static closures, check the static link field.
  // Problem here is that we sometimes don't set the link field, eg.
  // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
  //
  if (!HEAP_ALLOCED(p)) {
    return TAG_CLOSURE(tag,p);
  }

  // ignore closures in generations that we're not collecting.
  bd = Bdescr((P_)p);
  if (bd->gen_no > N) {
    return TAG_CLOSURE(tag,p);
  }

  // if it's a pointer into to-space, then we're done
  if (bd->flags & BF_EVACUATED) {
    return TAG_CLOSURE(tag,p);
  }

  // large objects use the evacuated flag
  if (bd->flags & BF_LARGE) {
    return NULL;
  }

  // check the mark bit for compacted steps
  if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
    return TAG_CLOSURE(tag,p);
  }

  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:      // rely on compatible layout with StgInd
  case IND_OLDGEN_PERM:
    // follow indirections
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    // alive!
    return ((StgEvacuated *)p)->evacuee;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      goto loop;
    }
    return NULL;

  default:
    // dead.
    return NULL;
  }
}
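
/* An illustrative sketch (hypothetical function) of the usual way
 * isAlive() is consumed, as in the task-list walk above: probe a weak
 * reference after evacuation has finished, and either update it to the
 * new address or treat the referent as dead. */
#if 0
static rtsBool example_refresh_weak_ref(StgClosure **ref)
{
    StgClosure *q = isAlive(*ref);
    if (q == NULL) {
	return rtsFalse;   // referent died in this GC
    }
    *ref = q;              // referent survived; store its new address
    return rtsTrue;
}
#endif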
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = *STATIC_LINK(info, p);
    *STATIC_LINK(info,p) = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	SET_INFO(c, c->saved_info);
	c->saved_info = NULL;
	// could, but not necessary: c->static_link = NULL;
    }
    revertible_caf_list = NULL;
}

void
markCAFs( evac_fn evac )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	evac(&c->indirectee);
    }
    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	evac(&c->indirectee);
    }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#if 0 && defined(DEBUG)

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);
    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%04lx", (long)p);
      // black hole it
      SET_INFO(p,&stg_BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  debugTrace(DEBUG_gccafs, "%d CAFs live", i);
}
#endif
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#ifdef DEBUG
void
printMutableList(generation *gen)
{
    bdescr *bd;
    StgPtr p;

    debugBelch("mutable list %p: ", gen->mut_list);

    for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
	for (p = bd->start; p < bd->free; p++) {
	    debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));
	}
    }
    debugBelch("\n");
}
#endif