1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2003
5 * Generational garbage collector
7 * ---------------------------------------------------------------------------*/
9 #include "PosixSource.h"
14 #include "OSThreads.h"
17 #include "LdvProfile.h"
22 #include "BlockAlloc.h"
28 #include "ParTicky.h" // ToDo: move into Rts.h
29 #include "RtsSignals.h"
31 #if defined(GRAN) || defined(PAR)
32 # include "GranSimRts.h"
33 # include "ParallelRts.h"
37 # include "ParallelDebug.h"
42 #if defined(RTS_GTK_FRONTPANEL)
43 #include "FrontPanel.h"
46 #include "RetainerProfile.h"
47 #include "RaiseAsync.h"
56 #include <string.h> // for memset()
58 /* STATIC OBJECT LIST.
61 * We maintain a linked list of static objects that are still live.
62 * The requirements for this list are:
64 * - we need to scan the list while adding to it, in order to
65 * scavenge all the static objects (in the same way that
66 * breadth-first scavenging works for dynamic objects).
68 * - we need to be able to tell whether an object is already on
69 * the list, to break loops.
71 * Each static object has a "static link field", which we use for
72 * linking objects on to the list. We use a stack-type list, consing
73 * objects on the front as they are added (this means that the
74  * scavenge phase is depth-first, not breadth-first, but that shouldn't matter).
77 * A separate list is kept for objects that have been scavenged
78 * already - this is so that we can zero all the marks afterwards.
80 * An object is on the list if its static link field is non-zero; this
81 * means that we have to mark the end of the list with '1', not NULL.
83 * Extra notes for generational GC:
85 * Each generation has a static object list associated with it. When
86 * collecting generations up to N, we treat the static object lists
87 * from generations > N as roots.
89 * We build up a static object list while collecting generations 0..N,
90 * which is then appended to the static object list of generation N+1.
92 StgClosure* static_objects; // live static objects
93 StgClosure* scavenged_static_objects; // static objects scavenged so far
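/* A minimal sketch (hypothetical helper, not part of the collector) of
 * the consing discipline described above: an object whose static link
 * field is NULL is not on any list yet, so it is safe to push it onto
 * the front of static_objects.  Uses the real STATIC_LINK macro.
 */
#if 0
static void
push_static_object (StgClosure *p, const StgInfoTable *info)
{
    if (*STATIC_LINK(info,p) == NULL) {  // non-zero link == already listed
        *STATIC_LINK(info,p) = static_objects;
        static_objects = p;
    }
}
#endif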
95 /* N is the oldest generation being collected, where the generations
96 * are numbered starting at 0. A major GC (indicated by the major_gc
97 * flag) is when we're collecting all generations. We only attempt to
98 * deal with static objects and GC CAFs when doing a major GC.
103 /* Youngest generation that objects should be evacuated to in
104  * evacuate().  (Logically an argument to evacuate, but its value is
105  * constant over long stretches, so we optimise it into a global variable).
109 /* Whether to do eager promotion or not.
111 rtsBool eager_promotion;
113 /* Flag indicating failure to evacuate an object to the desired generation. */
116 rtsBool failed_to_evac;
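/* A minimal sketch (hypothetical helper) of how evac_gen and
 * failed_to_evac cooperate when scavenging a field of an object p that
 * lives in generation gen_no; the same pattern, using the real
 * evacuate() and the storage manager's recordMutableGen(), recurs
 * throughout the scavenging code below.
 */
#if 0
static void
scavenge_field (StgClosure *p, StgClosure **field, nat gen_no)
{
    evac_gen = gen_no;           // ask evacuate() for this generation
    failed_to_evac = rtsFalse;
    *field = evacuate(*field);
    if (failed_to_evac) {
        // the target stayed in a younger generation: record p on the
        // mutable list so the cross-generation pointer is rescanned
        recordMutableGen(p, &generations[gen_no]);
    }
}
#endif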
118 /* Saved nursery (used for 2-space collector only)
120 static bdescr *saved_nursery;
121 static nat saved_n_blocks;
123 /* Data used for allocation area sizing.
125 lnat new_blocks; // blocks allocated during this GC
126 lnat new_scavd_blocks; // ditto, but depth-first blocks
127 static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
136 /* -----------------------------------------------------------------------------
137 Static function declarations
138 -------------------------------------------------------------------------- */
140 static void mark_root ( StgClosure **root );
142 static void zero_static_object_list ( StgClosure* first_static );
144 #if 0 && defined(DEBUG)
145 static void gcCAFs ( void );
148 /* -----------------------------------------------------------------------------
149 inline functions etc. for dealing with the mark bitmap & stack.
150 -------------------------------------------------------------------------- */
152 #define MARK_STACK_BLOCKS 4
154 bdescr *mark_stack_bdescr;
159 // Flag and pointers used for falling back to a linear scan when the
160 // mark stack overflows.
161 rtsBool mark_stack_overflowed;
162 bdescr *oldgen_scan_bd;
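/* A minimal sketch (hypothetical helper; the real push/pop are plain
 * macros) of the overflow discipline: mark_sp and mark_splim are the
 * stack pointer and limit of the block group above.  Once the stack
 * fills up we just set the flag; a linear scan driven by
 * oldgen_scan_bd then finds the marked-but-unscanned objects.
 */
#if 0
static void
push_mark_stack_checked (StgPtr p)
{
    if (mark_sp >= mark_splim) {
        mark_stack_overflowed = rtsTrue;  // fall back to linear scan
        return;
    }
    *mark_sp++ = p;
}
#endif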
165 /* -----------------------------------------------------------------------------
168 Rough outline of the algorithm: for garbage collecting generation N
169 (and all younger generations):
171 - follow all pointers in the root set. the root set includes all
172 mutable objects in all generations (mutable_list).
174 - for each pointer, evacuate the object it points to into either
176 + to-space of the step given by step->to, which is the next
177 highest step in this generation or the first step in the next
178 generation if this is the last step.
180 + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
181 When we evacuate an object we attempt to evacuate
182 everything it points to into the same generation - this is
183 achieved by setting evac_gen to the desired generation. If
184 we can't do this, then an entry in the mut list has to
185 be made for the cross-generation pointer.
187       + if the object is already in a generation > N, then leave it alone.
190 - repeatedly scavenge to-space from each step in each generation
191 being collected until no more objects can be evacuated.
193 - free from-space in each step, and set from-space = to-space.
195 Locks held: all capabilities are held throughout GarbageCollect().
197 -------------------------------------------------------------------------- */
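/* A sketch of how step->to follows the rule above (assumed wiring; the
 * real initialisation lives in the storage manager, not here):
 *
 *   for (g = 0; g < gens; g++) {
 *       for (s = 0; s < generations[g].n_steps; s++) {
 *           stp = &generations[g].steps[s];
 *           if (s < generations[g].n_steps - 1) {
 *               stp->to = &generations[g].steps[s+1];   // next step
 *           } else if (g < gens - 1) {
 *               stp->to = &generations[g+1].steps[0];   // next generation
 *           } else {
 *               stp->to = stp;   // oldest generation collects into itself
 *           }
 *       }
 *   }
 */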
200 GarbageCollect ( rtsBool force_major_gc )
204 lnat live, allocated, copied = 0, scavd_copied = 0;
205 lnat oldgen_saved_blocks = 0;
211 CostCentreStack *prev_CCS;
214 debugTrace(DEBUG_gc, "starting GC");
216 #if defined(RTS_USER_SIGNALS)
221   // tell the STM to discard any cached closures it's hoping to re-use
224 // tell the stats department that we've started a GC
228 // check for memory leaks if DEBUG is on
238 // Init stats and print par specific (timing) info
239 PAR_TICKY_PAR_START();
241 // attribute any costs to CCS_GC
247 /* Approximate how much we allocated.
248 * Todo: only when generating stats?
250 allocated = calcAllocated();
252 /* Figure out which generation to collect
254 if (force_major_gc) {
255 N = RtsFlags.GcFlags.generations - 1;
259 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
260 if (generations[g].steps[0].n_blocks +
261 generations[g].steps[0].n_large_blocks
262 >= generations[g].max_blocks) {
263 N = g;   // remember the oldest generation that has overflowed
264 }
265 }
266 major_gc = (N == RtsFlags.GcFlags.generations-1);
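// Worked example: with 2 generations, if gen 0 has overflowed its
// max_blocks but gen 1 has not, N = 0 and this is a minor GC; if
// gen 1 has overflowed too, N = 1 and major_gc is set.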
269 #ifdef RTS_GTK_FRONTPANEL
270 if (RtsFlags.GcFlags.frontpanel) {
271 updateFrontPanelBeforeGC(N);
275 // check stack sanity *before* GC (ToDo: check all threads)
277 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
279 IF_DEBUG(sanity, checkFreeListSanity());
281 /* Initialise the static object lists
283 static_objects = END_OF_STATIC_LIST;
284 scavenged_static_objects = END_OF_STATIC_LIST;
286 /* Save the nursery if we're doing a two-space collection.
287 * g0s0->blocks will be used for to-space, so we need to get the
288 * nursery out of the way.
290 if (RtsFlags.GcFlags.generations == 1) {
291 saved_nursery = g0s0->blocks;
292 saved_n_blocks = g0s0->n_blocks;
297 /* Keep a count of how many new blocks we allocated during this GC
298 * (used for resizing the allocation area, later).
301 new_scavd_blocks = 0;
303 // Initialise to-space in all the generations/steps that we're collecting.
306 for (g = 0; g <= N; g++) {
308 // throw away the mutable list. Invariant: the mutable list
309 // always has at least one block; this means we can avoid a check for
310 // NULL in recordMutable().
312 freeChain(generations[g].mut_list);
313 generations[g].mut_list = allocBlock();
314 for (i = 0; i < n_capabilities; i++) {
315 freeChain(capabilities[i].mut_lists[g]);
316 capabilities[i].mut_lists[g] = allocBlock();
320 for (s = 0; s < generations[g].n_steps; s++) {
322 // generation 0, step 0 doesn't need to-space
323 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
327 stp = &generations[g].steps[s];
328 ASSERT(stp->gen_no == g);
330 // start a new to-space for this step.
331 stp->old_blocks = stp->blocks;
332 stp->n_old_blocks = stp->n_blocks;
334 // allocate the first to-space block; extra blocks will be
335 // chained on as necessary.
337 bd = gc_alloc_block(stp);
340 stp->scan = bd->start;
343 // allocate a block for "already scavenged" objects. This goes
344 // on the front of the stp->blocks list, so it won't be
345 // traversed by the scavenging sweep.
346 gc_alloc_scavd_block(stp);
348 // initialise the large object queues.
349 stp->new_large_objects = NULL;
350 stp->scavenged_large_objects = NULL;
351 stp->n_scavenged_large_blocks = 0;
353 // mark the large objects as not evacuated yet
354 for (bd = stp->large_objects; bd; bd = bd->link) {
355 bd->flags &= ~BF_EVACUATED;
358 // for a compacted step, we need to allocate the bitmap
359 if (stp->is_compacted) {
360 nat bitmap_size; // in bytes
361 bdescr *bitmap_bdescr;
364 bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
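// One mark bit per word of old space: e.g. on a 32-bit machine with
// 4k blocks, a block holds 1024 words, so it needs 1024 bits =
// 128 bytes of bitmap.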
366 if (bitmap_size > 0) {
367 bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size) / BLOCK_SIZE);
369 stp->bitmap = bitmap_bdescr;
370 bitmap = bitmap_bdescr->start;
372 debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
373 bitmap_size, bitmap);
375 // don't forget to fill it with zeros!
376 memset(bitmap, 0, bitmap_size);
378 // For each block in this step, point to its bitmap from the block descriptor.
380 for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
381 bd->u.bitmap = bitmap;
382 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
384 // Also at this point we set the BF_COMPACTED flag
385 // for this block. The invariant is that
386 // BF_COMPACTED is always unset, except during GC
387 // when it is set on those blocks which will be compacted.
389 bd->flags |= BF_COMPACTED;
396 /* make sure the older generations have at least one block to
397 * allocate into (this makes things easier for copy(), see below).
399 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
400 for (s = 0; s < generations[g].n_steps; s++) {
401 stp = &generations[g].steps[s];
402 if (stp->hp_bd == NULL) {
403 ASSERT(stp->blocks == NULL);
404 bd = gc_alloc_block(stp);
408 if (stp->scavd_hp == NULL) {
409 gc_alloc_scavd_block(stp);
412 /* Set the scan pointer for older generations: remember we
413 * still have to scavenge objects that have been promoted. */
415 stp->scan_bd = stp->hp_bd;
416 stp->new_large_objects = NULL;
417 stp->scavenged_large_objects = NULL;
418 stp->n_scavenged_large_blocks = 0;
421 /* Move the private mutable lists from each capability onto the
422 * main mutable list for the generation.
424 for (i = 0; i < n_capabilities; i++) {
425 for (bd = capabilities[i].mut_lists[g];
426 bd->link != NULL; bd = bd->link) {
429 bd->link = generations[g].mut_list;
430 generations[g].mut_list = capabilities[i].mut_lists[g];
431 capabilities[i].mut_lists[g] = allocBlock();
435 /* Allocate a mark stack if we're doing a major collection.
438 mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
439 mark_stack = (StgPtr *)mark_stack_bdescr->start;
440 mark_sp = mark_stack;
441 mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
443 mark_stack_bdescr = NULL;
446 eager_promotion = rtsTrue; // for now
448 /* -----------------------------------------------------------------------
449 * follow all the roots that we know about:
450 * - mutable lists from each generation > N
451 * we want to *scavenge* these roots, not evacuate them: they're not
452 * going to move in this GC.
453 * Also: do them in reverse generation order. This is because we
454 * often want to promote objects that are pointed to by older
455 * generations early, so we don't have to repeatedly copy them.
456 * Doing the generations in reverse order ensures that we don't end
457 * up in the situation where we want to evac an object to gen 3 and
458 * it has already been evaced to gen 2.
462 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
463 generations[g].saved_mut_list = generations[g].mut_list;
464 generations[g].mut_list = allocBlock();
465 // mut_list always has at least one block.
468 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
469 IF_PAR_DEBUG(verbose, printMutableList(&generations[g]));
470 scavenge_mutable_list(&generations[g]);
472 for (st = generations[g].n_steps-1; st >= 0; st--) {
473 scavenge(&generations[g].steps[st]);
478 /* follow roots from the CAF list (used by GHCi)
483 /* follow all the roots that the application knows about.
489 /* And don't forget to mark the TSO if we got here direct from Haskell! */
491 /* Not needed in a seq version?
493 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
497 // Mark the entries in the GALA table of the parallel system
498 markLocalGAs(major_gc);
499 // Mark all entries on the list of pending fetches
500 markPendingFetches(major_gc);
503 /* Mark the weak pointer list, and prepare to detect dead weak pointers. */
509 /* Mark the stable pointer table.
511 markStablePtrTable(mark_root);
513 /* Mark the root pointer table.
515 markRootPtrTable(mark_root);
517 /* -------------------------------------------------------------------------
518 * Repeatedly scavenge all the areas we know about until there's no
519 * more scavenging to be done.
526 // scavenge static objects
527 if (major_gc && static_objects != END_OF_STATIC_LIST) {
528 IF_DEBUG(sanity, checkStaticObjects(static_objects));
532 /* When scavenging the older generations: Objects may have been
533 * evacuated from generations <= N into older generations, and we
534 * need to scavenge these objects. We're going to try to ensure that
535 * any evacuations that occur move the objects into at least the
536 * same generation as the object being scavenged, otherwise we
537 * have to create new entries on the mutable list for the older generation.
541 // scavenge each step in generations 0..maxgen
547 // scavenge objects in compacted generation
548 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
549 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
550 scavenge_mark_stack();
554 for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
555 for (st = generations[gen].n_steps; --st >= 0; ) {
556 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
559 stp = &generations[gen].steps[st];
561 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
566 if (stp->new_large_objects != NULL) {
575 // if any blackholes are alive, make the threads that wait on them alive too.
577 if (traverseBlackholeQueue())
580 if (flag) { goto loop; }
582 // must be last... invariant is that everything is fully
583 // scavenged at this point.
584 if (traverseWeakPtrList()) { // returns rtsTrue if evaced something
589 /* Update the pointers from the task list - these are
590 * treated as weak pointers because we want to allow a main thread
591 * to get a BlockedOnDeadMVar exception in the same way as any other
592 * thread. Note that the threads should all have been retained by
593 * GC by virtue of being on the all_threads list; we're just
594 * updating pointers here.
599 for (task = all_tasks; task != NULL; task = task->all_link) {
600 if (!task->stopped && task->tso) {
601 ASSERT(task->tso->bound == task);
602 tso = (StgTSO *) isAlive((StgClosure *)task->tso);
604 barf("task %p: main thread %d has been GC'd",
618 // Reconstruct the Global Address tables used in GUM
619 rebuildGAtables(major_gc);
620 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
623 // Now see which stable names are still alive.
626 // Tidy the end of the to-space chains
627 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
628 for (s = 0; s < generations[g].n_steps; s++) {
629 stp = &generations[g].steps[s];
630 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
631 ASSERT(Bdescr(stp->hp) == stp->hp_bd);
632 stp->hp_bd->free = stp->hp;
633 Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
639 // We call processHeapClosureForDead() on every closure destroyed during
640 // the current garbage collection, so we invoke LdvCensusForDead().
641 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
642 || RtsFlags.ProfFlags.bioSelector != NULL)
646 // NO MORE EVACUATION AFTER THIS POINT!
647 // Finally: compaction of the oldest generation.
648 if (major_gc && oldest_gen->steps[0].is_compacted) {
649 // save number of blocks for stats
650 oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
654 IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
656 /* run through all the generations/steps and tidy up
658 copied = new_blocks * BLOCK_SIZE_W;
659 scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
660 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
663 generations[g].collections++; // for stats
666 // Count the mutable list as bytes "copied" for the purposes of
667 // stats. Every mutable list is copied during every GC.
669 nat mut_list_size = 0;
670 for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
671 mut_list_size += bd->free - bd->start;
673 copied += mut_list_size;
676 "mut_list_size: %lu (%d vars, %d arrays, %d others)",
677 (unsigned long)(mut_list_size * sizeof(W_)),
678 mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
681 for (s = 0; s < generations[g].n_steps; s++) {
683 stp = &generations[g].steps[s];
685 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
686 // stats information: how much we copied
688 copied -= stp->hp_bd->start + BLOCK_SIZE_W - stp->hp;
690 scavd_copied -= (P_)(BLOCK_ROUND_UP(stp->scavd_hp)) - stp->scavd_hp;
694 // for generations we collected...
697 /* free old memory and shift to-space into from-space for all
698 * the collected steps (except the allocation area). These
699 * freed blocks will probably be quickly recycled.
701 if (!(g == 0 && s == 0)) {
702 if (stp->is_compacted) {
703 // for a compacted step, just shift the new to-space
704 // onto the front of the now-compacted existing blocks.
705 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
706 bd->flags &= ~BF_EVACUATED; // now from-space
708 // tack the new blocks on the end of the existing blocks
709 if (stp->old_blocks != NULL) {
710 for (bd = stp->old_blocks; bd != NULL; bd = next) {
711 // NB. this step might not be compacted next
712 // time, so reset the BF_COMPACTED flags.
713 // They are set before GC if we're going to
714 // compact. (search for BF_COMPACTED above).
715 bd->flags &= ~BF_COMPACTED;
718 bd->link = stp->blocks;
721 stp->blocks = stp->old_blocks;
723 // add the new blocks to the block tally
724 stp->n_blocks += stp->n_old_blocks;
725 ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
727 freeChain(stp->old_blocks);
728 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
729 bd->flags &= ~BF_EVACUATED; // now from-space
732 stp->old_blocks = NULL;
733 stp->n_old_blocks = 0;
736 /* LARGE OBJECTS. The current live large objects are chained on
737 * the scavenged_large_objects list, having been moved during garbage
738 * collection from large_objects. Any objects left on
739 * large_objects list are therefore dead, so we free them here.
741 for (bd = stp->large_objects; bd != NULL; bd = next) {
747 // update the count of blocks used by large objects
748 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
749 bd->flags &= ~BF_EVACUATED;
751 stp->large_objects = stp->scavenged_large_objects;
752 stp->n_large_blocks = stp->n_scavenged_large_blocks;
755 // for older generations...
757 /* For older generations, we need to append the
758 * scavenged_large_object list (i.e. large objects that have been
759 * promoted during this GC) to the large_object list for that step.
761 for (bd = stp->scavenged_large_objects; bd; bd = next) {
763 bd->flags &= ~BF_EVACUATED;
764 dbl_link_onto(bd, &stp->large_objects);
767 // add the new blocks we promoted during this GC
768 stp->n_large_blocks += stp->n_scavenged_large_blocks;
773 /* Reset the sizes of the older generations when we do a major collection.
776 * CURRENT STRATEGY: make all generations except zero the same size.
777 * We have to stay within the maximum heap size, and leave a certain
778 * percentage of the maximum heap size available to allocate into.
780 if (major_gc && RtsFlags.GcFlags.generations > 1) {
781 nat live, size, min_alloc;
782 nat max = RtsFlags.GcFlags.maxHeapSize;
783 nat gens = RtsFlags.GcFlags.generations;
785 // live in the oldest generations
786 live = oldest_gen->steps[0].n_blocks +
787 oldest_gen->steps[0].n_large_blocks;
789 // default max size for all generations except zero
790 size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
791 RtsFlags.GcFlags.minOldGenSize);
793 // minimum size for generation zero
794 min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
795 RtsFlags.GcFlags.minAllocAreaSize);
797 // Auto-enable compaction when the residency reaches a
798 // certain percentage of the maximum heap size (default: 30%).
799 if (RtsFlags.GcFlags.generations > 1 &&
800 (RtsFlags.GcFlags.compact ||
802 oldest_gen->steps[0].n_blocks >
803 (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
804 oldest_gen->steps[0].is_compacted = 1;
805 // debugBelch("compaction: on\n", live);
807 oldest_gen->steps[0].is_compacted = 0;
808 // debugBelch("compaction: off\n", live);
811 // if we're going to go over the maximum heap size, reduce the
812 // size of the generations accordingly. The calculation is
813 // different if compaction is turned on, because we don't need
814 // to double the space required to collect the old generation.
817 // this test is necessary to ensure that the calculations
818 // below don't have any negative results - we're working
819 // with unsigned values here.
820 if (max < min_alloc) {
824 if (oldest_gen->steps[0].is_compacted) {
825 if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
826 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
829 if ( (size * (gens - 1) * 2) + min_alloc > max ) {
830 size = (max - min_alloc) / ((gens - 1) * 2);
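// Worked example (illustrative numbers): gens = 3, max = 1000 blocks,
// min_alloc = 100.  Without compaction each of the gens-1 older
// generations needs size blocks plus as much again for to-space, so
// size*(gens-1)*2 + min_alloc <= max gives
// size = (1000 - 100) / 4 = 225 blocks.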
840 debugBelch("live: %d, min_alloc: %d, size: %d, max = %d\n", live,
841 min_alloc, size, max);
844 for (g = 0; g < gens; g++) {
845 generations[g].max_blocks = size;
849 // Guess the amount of live data for stats.
852 /* Free the small objects allocated via allocate(), since this will
853 * all have been copied into G0S1 now.
855 if (small_alloc_list != NULL) {
856 freeChain(small_alloc_list);
858 small_alloc_list = NULL;
862 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
864 // Start a new pinned_object_block
865 pinned_object_block = NULL;
867 /* Free the mark stack.
869 if (mark_stack_bdescr != NULL) {
870 freeGroup(mark_stack_bdescr);
875 for (g = 0; g <= N; g++) {
876 for (s = 0; s < generations[g].n_steps; s++) {
877 stp = &generations[g].steps[s];
878 if (stp->bitmap != NULL) {
879 freeGroup(stp->bitmap);
885 /* Two-space collector:
886 * Free the old to-space, and estimate the amount of live data.
888 if (RtsFlags.GcFlags.generations == 1) {
891 if (g0s0->old_blocks != NULL) {
892 freeChain(g0s0->old_blocks);
894 for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
895 bd->flags = 0; // now from-space
897 g0s0->old_blocks = g0s0->blocks;
898 g0s0->n_old_blocks = g0s0->n_blocks;
899 g0s0->blocks = saved_nursery;
900 g0s0->n_blocks = saved_n_blocks;
902 /* For a two-space collector, we need to resize the nursery. */
904 /* set up a new nursery. Allocate a nursery size based on a
905 * function of the amount of live data (by default a factor of 2)
906 * Use the blocks from the old nursery if possible, freeing up any
909 * If we get near the maximum heap size, then adjust our nursery
910 * size accordingly. If the nursery is the same size as the live
911 * data (L), then we need 3L bytes. We can reduce the size of the
912 * nursery to bring the required memory down near 2L bytes.
914 * A normal 2-space collector would need 4L bytes to give the same
915 * performance we get from 3L bytes, reducing to the same
916 * performance at 2L bytes.
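 *
 * Worked example: with L = 100 blocks of live data and a nursery of
 * the same size, a minor GC needs L (live data) + L (nursery) + L
 * (to-space) = 300 blocks; shrinking the nursery moves the total
 * required towards 2L = 200 blocks.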
918 blocks = g0s0->n_old_blocks;
920 if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
921 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
922 RtsFlags.GcFlags.maxHeapSize ) {
923 long adjusted_blocks; // signed on purpose
926 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
928 debugTrace(DEBUG_gc, "near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld",
929 RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks);
931 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
932 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
935 blocks = adjusted_blocks;
938 blocks *= RtsFlags.GcFlags.oldGenFactor;
939 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
940 blocks = RtsFlags.GcFlags.minAllocAreaSize;
943 resizeNurseries(blocks);
946 /* Generational collector:
947 * If the user has given us a suggested heap size, adjust our
948 * allocation area to make best use of the memory available.
951 if (RtsFlags.GcFlags.heapSizeSuggestion) {
953 nat needed = calcNeeded(); // approx blocks needed at next GC
955 /* Guess how much will be live in generation 0 step 0 next time.
956 * A good approximation is obtained by finding the
957 * percentage of g0s0 that was live at the last minor GC.
960 g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
963 /* Estimate a size for the allocation area based on the
964 * information available. We might end up going slightly under
965 * or over the suggested heap size, but we should be pretty close on average.
968 * Formula: suggested - needed
969 * ----------------------------
970 * 1 + g0s0_pcnt_kept/100
972 * where 'needed' is the amount of memory needed at the next
973 * collection for collecting all steps except g0s0.
976 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
977 (100 + (long)g0s0_pcnt_kept);
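// Worked example: suggestion = 1000 blocks, needed = 220,
// g0s0_pcnt_kept = 30: blocks = (1000-220)*100/130 = 600.  At the
// next GC that gives 220 (needed) + 600 (nursery) + 180 (the 30% of
// the nursery that survives) = the 1000 blocks suggested.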
979 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
980 blocks = RtsFlags.GcFlags.minAllocAreaSize;
983 resizeNurseries((nat)blocks);
986 // we might have added extra large blocks to the nursery, so
987 // resize back to minAllocAreaSize again.
988 resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
992 // mark the garbage collected CAFs as dead
993 #if 0 && defined(DEBUG) // doesn't work at the moment
994 if (major_gc) { gcCAFs(); }
998 // resetStaticObjectForRetainerProfiling() must be called before zeroing below.
1000 resetStaticObjectForRetainerProfiling();
1003 // zero the scavenged static object list
1005 zero_static_object_list(scavenged_static_objects);
1008 // Reset the nursery
1011 // start any pending finalizers
1013 scheduleFinalizers(last_free_capability, old_weak_ptr_list);
1016 // send exceptions to any threads which were about to die
1018 resurrectThreads(resurrected_threads);
1021 // Update the stable pointer hash table.
1022 updateStablePtrTable(major_gc);
1024 // check sanity after GC
1025 IF_DEBUG(sanity, checkSanity());
1027 // extra GC trace info
1028 IF_DEBUG(gc, statDescribeGens());
1031 // symbol-table based profiling
1032 /* heapCensus(to_blocks); */ /* ToDo */
1035 // restore enclosing cost centre
1041 // check for memory leaks if DEBUG is on
1045 #ifdef RTS_GTK_FRONTPANEL
1046 if (RtsFlags.GcFlags.frontpanel) {
1047 updateFrontPanelAfterGC( N, live );
1051 // ok, GC over: tell the stats department what happened.
1052 stat_endGC(allocated, live, copied, scavd_copied, N);
1054 #if defined(RTS_USER_SIGNALS)
1055 // unblock signals again
1056 unblockUserSignals();
1064 /* -----------------------------------------------------------------------------
1065 isAlive determines whether the given closure is still alive (after
1066 a garbage collection) or not. It returns the new address of the
1067 closure if it is alive, or NULL otherwise.
1069 NOTE: Use it before compaction only!
1070 -------------------------------------------------------------------------- */
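/* Typical use, as in the task-list walk in GarbageCollect() above:
 *
 *     tso = (StgTSO *) isAlive((StgClosure *)task->tso);
 *     if (tso == NULL) { ... the bound thread has been GC'd ... }
 */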
1074 isAlive(StgClosure *p)
1076 const StgInfoTable *info;
1081 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1084 // ignore static closures
1086 // ToDo: for static closures, check the static link field.
1087 // Problem here is that we sometimes don't set the link field, eg.
1088 // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1090 if (!HEAP_ALLOCED(p)) {
1094 // ignore closures in generations that we're not collecting.
1096 if (bd->gen_no > N) {
1100 // if it's a pointer into to-space, then we're done
1101 if (bd->flags & BF_EVACUATED) {
1105 // large objects use the evacuated flag
1106 if (bd->flags & BF_LARGE) {
1110 // check the mark bit for compacted steps
1111 if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
1115 switch (info->type) {
1120 case IND_OLDGEN: // rely on compatible layout with StgInd
1121 case IND_OLDGEN_PERM:
1122 // follow indirections
1123 p = ((StgInd *)p)->indirectee;
1128 return ((StgEvacuated *)p)->evacuee;
1131 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1132 p = (StgClosure *)((StgTSO *)p)->link;
1145 mark_root(StgClosure **root)
1147 *root = evacuate(*root);
1150 /* -----------------------------------------------------------------------------
1151 Initialising the static object & mutable lists
1152 -------------------------------------------------------------------------- */
1155 zero_static_object_list(StgClosure* first_static)
1157 StgClosure* p;
1158 StgClosure* link;
1159 const StgInfoTable *info;
1161 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
1162 info = get_itbl(p);  // needed to locate the static link field
1163 link = *STATIC_LINK(info, p);
1164 *STATIC_LINK(info,p) = NULL;
1165 }
1168 /* -----------------------------------------------------------------------------
1169    Reverting CAFs
1170    -------------------------------------------------------------------------- */
1177 for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
1178 c = (StgIndStatic *)c->static_link)
1180 SET_INFO(c, c->saved_info);
1181 c->saved_info = NULL;
1182 // could, but not necessary: c->static_link = NULL;
1184 revertible_caf_list = NULL;
1188 markCAFs( evac_fn evac )
1192 for (c = (StgIndStatic *)caf_list; c != NULL;
1193 c = (StgIndStatic *)c->static_link)
1195 evac(&c->indirectee);
1197 for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
1198 c = (StgIndStatic *)c->static_link)
1200 evac(&c->indirectee);
1204 /* -----------------------------------------------------------------------------
1205 Sanity code for CAF garbage collection.
1207 With DEBUG turned on, we manage a CAF list in addition to the SRT
1208 mechanism. After GC, we run down the CAF list and blackhole any
1209 CAFs which have been garbage collected. This means we get an error
1210 whenever the program tries to enter a garbage collected CAF.
1212   Any garbage collected CAFs are taken off the CAF list at the same time.
1214 -------------------------------------------------------------------------- */
1216 #if 0 && defined(DEBUG)
1223 const StgInfoTable *info;
1234 ASSERT(info->type == IND_STATIC);
1236 if (STATIC_LINK(info,p) == NULL) {
1237 debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%04lx", (long)p);
1239 SET_INFO(p,&stg_BLACKHOLE_info);
1240 p = STATIC_LINK2(info,p);
1244 pp = &STATIC_LINK2(info,p);
1251 debugTrace(DEBUG_gccafs, "%d CAFs live", i);
1255 /* -----------------------------------------------------------------------------
1257 * -------------------------------------------------------------------------- */
1261 printMutableList(generation *gen)
1266 debugBelch("mutable list %p: ", gen->mut_list);
1268 for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
1269 for (p = bd->start; p < bd->free; p++) {
1270 debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));