X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=d978fced12faa73d7dbc425eeb6b839c3efce11e;hb=0e5aaaaa0183894e411c3e3b7506eeb762bed31e;hp=6cf7e2a17693577ac9dc066998bf6836c98dd868;hpb=030787e51b95d3320d2b9032c119c32f7549a31a;p=ghc-hetmet.git

diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c
index 6cf7e2a..d978fce 100644
--- a/ghc/rts/GC.c
+++ b/ghc/rts/GC.c
@@ -1,5 +1,5 @@
 /* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.105 2001/07/24 05:04:58 ken Exp $
+ * $Id: GC.c,v 1.122 2001/08/30 10:22:52 simonmar Exp $
  *
  * (c) The GHC Team 1998-1999
  *
@@ -7,6 +7,7 @@
  *
  * ---------------------------------------------------------------------------*/
 
+#include "PosixSource.h"
 #include "Rts.h"
 #include "RtsFlags.h"
 #include "RtsUtils.h"
@@ -131,12 +132,12 @@ static void zero_static_object_list ( StgClosure* first_static );
 static void zero_mutable_list ( StgMutClosure *first );
 
 static rtsBool traverse_weak_ptr_list  ( void );
-static void    cleanup_weak_ptr_list   ( StgWeak **list );
+static void    mark_weak_ptr_list      ( StgWeak **list );
 
 static void    scavenge                ( step * );
 static void    scavenge_mark_stack     ( void );
 static void    scavenge_stack          ( StgPtr p, StgPtr stack_end );
-static rtsBool scavenge_one            ( StgClosure *p );
+static rtsBool scavenge_one            ( StgPtr p );
 static void    scavenge_large          ( step * );
 static void    scavenge_static         ( void );
 static void    scavenge_mutable_list   ( generation *g );
@@ -158,6 +159,12 @@ static StgPtr *mark_stack;
 static StgPtr *mark_sp;
 static StgPtr *mark_splim;
 
+// Flag and pointers used for falling back to a linear scan when the
+// mark stack overflows.
+static rtsBool mark_stack_overflowed;
+static bdescr *oldgen_scan_bd;
+static StgPtr  oldgen_scan;
+
 static inline rtsBool
 mark_stack_empty(void)
 {
@@ -171,6 +178,12 @@ mark_stack_full(void)
 }
 
 static inline void
+reset_mark_stack(void)
+{
+    mark_sp = mark_stack;
+}
+
+static inline void
 push_mark_stack(StgPtr p)
 {
     *mark_sp++ = p;
@@ -211,6 +224,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
   bdescr *bd;
   step *stp;
   lnat live, allocated, collected = 0, copied = 0;
+  lnat oldgen_saved_blocks = 0;
   nat g, s;
 
 #ifdef PROFILING
@@ -247,7 +261,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
   } else {
     N = 0;
     for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
-      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
+      if (generations[g].steps[0].n_blocks +
+          generations[g].steps[0].n_large_blocks
+          >= generations[g].max_blocks) {
         N = g;
       }
     }
@@ -324,6 +340,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
       stp->scan_bd = bd;
       stp->new_large_objects = NULL;
       stp->scavenged_large_objects = NULL;
+      stp->n_scavenged_large_blocks = 0;
       new_blocks++;
       // mark the large objects as not evacuated yet
       for (bd = stp->large_objects; bd; bd = bd->link) {
@@ -344,7 +361,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
        stp->bitmap = bitmap_bdescr;
        bitmap = bitmap_bdescr->start;
 
-       IF_DEBUG(gc, fprintf(stderr, "bitmap_size: %d, bitmap: %p\n",
+       IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
                             bitmap_size, bitmap););
 
        // don't forget to fill it with zeros!
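Note on the hunks above: instead of barf()ing when the mark stack fills up, the
patch now records the overflow, recycles the stack, and later falls back to a
linear sweep of the old generation (see the scavenge_mark_stack hunk further
down). The following is a minimal standalone sketch of that push-side
discipline, not RTS code; the stack size and all names here are invented:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for the RTS types and globals in this patch. */
    typedef size_t *StgPtr;

    #define MARK_STACK_WORDS 4096          /* assumption: one block's worth */
    static StgPtr  mark_stack[MARK_STACK_WORDS];
    static StgPtr *mark_sp    = mark_stack;
    static StgPtr *mark_splim = mark_stack + MARK_STACK_WORDS;
    static bool    mark_stack_overflowed = false;

    /* Mirrors the patch: on overflow, remember the fact and reuse the
     * stack from the bottom rather than aborting; a later linear scan
     * re-discovers the objects whose children were dropped here. */
    static void push_mark_stack(StgPtr p)
    {
        if (mark_sp == mark_splim) {       /* mark_stack_full()  */
            mark_stack_overflowed = true;
            mark_sp = mark_stack;          /* reset_mark_stack() */
        }
        *mark_sp++ = p;
    }

In the patch itself the full-check happens at the push site in evacuate();
folding it into the push function is purely for compactness here.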
@@ -389,6 +406,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
       stp->n_to_blocks = 0;
       stp->new_large_objects = NULL;
       stp->scavenged_large_objects = NULL;
+      stp->n_scavenged_large_blocks = 0;
     }
   }
 
@@ -469,6 +487,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
   /* Mark the weak pointer list, and prepare to detect dead weak
    * pointers.
    */
+  mark_weak_ptr_list(&weak_ptr_list);
   old_weak_ptr_list = weak_ptr_list;
   weak_ptr_list = NULL;
   weak_done = rtsFalse;
@@ -510,12 +529,6 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
       scavenge_static();
     }
 
-    // scavenge objects in compacted generation
-    if (mark_stack_bdescr != NULL && !mark_stack_empty()) {
-       scavenge_mark_stack();
-       flag = rtsTrue;
-    }
-
     /* When scavenging the older generations:  Objects may have been
      * evacuated from generations <= N into older generations, and we
      * need to scavenge these objects.  We're going to try to ensure that
@@ -527,10 +540,19 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
 
   // scavenge each step in generations 0..maxgen
   {
-    int gen, st;
+    long gen;
+    int st;
+
   loop2:
-    for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
-      for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
+    // scavenge objects in compacted generation
+    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
+       (mark_stack_bdescr != NULL && !mark_stack_empty())) {
+       scavenge_mark_stack();
+       flag = rtsTrue;
+    }
+
+    for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
+      for (st = generations[gen].n_steps; --st >= 0; ) {
        if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
          continue;
        }
@@ -558,11 +580,6 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
     }
   }
 
-  /* Final traversal of the weak pointer list (see comment by
-   * cleanUpWeakPtrList below).
-   */
-  cleanup_weak_ptr_list(&weak_ptr_list);
-
 #if defined(PAR)
   // Reconstruct the Global Address tables used in GUM
   rebuildGAtables(major_gc);
@@ -585,7 +602,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
   // NO MORE EVACUATION AFTER THIS POINT!
 
   // Finally: compaction of the oldest generation.
-  if (major_gc && RtsFlags.GcFlags.compact) {
+  if (major_gc && oldest_gen->steps[0].is_compacted) {
+      // save number of blocks for stats
+      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
       compact(get_roots);
   }
 
@@ -615,7 +634,12 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
     // for generations we collected...
     if (g <= N) {
 
-      collected += stp->n_blocks * BLOCK_SIZE_W; // for stats
+      // rough calculation of garbage collected, for stats output
+      if (stp->is_compacted) {
+         collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
+      } else {
+         collected += stp->n_blocks * BLOCK_SIZE_W;
+      }
 
       /* free old memory and shift to-space into from-space for all
        * the collected steps (except the allocation area).  These
@@ -663,29 +687,16 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
           freeGroup(bd);
           bd = next;
         }
+
+       // update the count of blocks used by large objects
        for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->flags &= ~BF_EVACUATED;
        }
-       stp->large_objects = stp->scavenged_large_objects;
-
-       /* Set the maximum blocks for this generation, interpolating
-        * between the maximum size of the oldest and youngest
-        * generations.
-        *
-        *      max_blocks =    oldgen_max_blocks * G
-        *                      ----------------------
-        *                           oldest_gen
-        */
-       if (g != 0) {
-#if 0
-         generations[g].max_blocks = (oldest_gen->max_blocks * g)
-            / (RtsFlags.GcFlags.generations-1);
-#endif
-         generations[g].max_blocks = oldest_gen->max_blocks;
-       }
+       stp->large_objects = stp->scavenged_large_objects;
+       stp->n_large_blocks = stp->n_scavenged_large_blocks;
 
-      // for older generations...
      } else {
+       // for older generations...
 
       /* For older generations, we need to append the
        * scavenged_large_object list (i.e. large objects that have been
        * scavenged) to the large_object list for that step.
@@ -699,34 +710,80 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
 
       // add the new blocks we promoted during this GC
       stp->n_blocks += stp->n_to_blocks;
+      stp->n_large_blocks += stp->n_scavenged_large_blocks;
     }
   }
 
-  /* Set the maximum blocks for the oldest generation, based on twice
-   * the amount of live data now, adjusted to fit the maximum heap
-   * size if necessary.
+  /* Reset the sizes of the older generations when we do a major
+   * collection.
    *
-   * This is an approximation, since in the worst case we'll need
-   * twice the amount of live data plus whatever space the other
-   * generations need.
+   * CURRENT STRATEGY: make all generations except zero the same size.
+   * We have to stay within the maximum heap size, and leave a certain
+   * percentage of the maximum heap size available to allocate into.
    */
   if (major_gc && RtsFlags.GcFlags.generations > 1) {
-      oldest_gen->max_blocks =
-       stg_max(oldest_gen->steps[0].n_blocks * RtsFlags.GcFlags.oldGenFactor,
-               RtsFlags.GcFlags.minOldGenSize);
-      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
-         oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
-         if (((int)oldest_gen->max_blocks -
-              (int)oldest_gen->steps[0].n_blocks) <
-             (RtsFlags.GcFlags.pcFreeHeap *
-              RtsFlags.GcFlags.maxHeapSize / 200)) {
-           heapOverflow();
-         }
+      nat live, size, min_alloc;
+      nat max  = RtsFlags.GcFlags.maxHeapSize;
+      nat gens = RtsFlags.GcFlags.generations;
+
+      // live in the oldest generations
+      live = oldest_gen->steps[0].n_blocks +
+             oldest_gen->steps[0].n_large_blocks;
+
+      // default max size for all generations except zero
+      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
+                     RtsFlags.GcFlags.minOldGenSize);
+
+      // minimum size for generation zero
+      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
+                          RtsFlags.GcFlags.minAllocAreaSize);
+
+      // Auto-enable compaction when the residency reaches a
+      // certain percentage of the maximum heap size (default: 30%).
+      if (RtsFlags.GcFlags.generations > 1 &&
+         (RtsFlags.GcFlags.compact ||
+          (max > 0 &&
+           oldest_gen->steps[0].n_blocks >
+           (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
+         oldest_gen->steps[0].is_compacted = 1;
+//       fprintf(stderr,"compaction: on\n", live);
+      } else {
+         oldest_gen->steps[0].is_compacted = 0;
+//       fprintf(stderr,"compaction: off\n", live);
+      }
+
+      // if we're going to go over the maximum heap size, reduce the
+      // size of the generations accordingly.  The calculation is
+      // different if compaction is turned on, because we don't need
+      // to double the space required to collect the old generation.
+      if (max != 0) {
+         if (oldest_gen->steps[0].is_compacted) {
+             if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
+                 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
+             }
+         } else {
+             if ( (size * (gens - 1) * 2) + min_alloc > max ) {
+                 size = (max - min_alloc) / ((gens - 1) * 2);
+             }
+         }
+
+         if (size < live) {
+             heapOverflow();
+         }
+      }
+
+#if 0
+      fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
+             min_alloc, size, max);
+#endif
+
+      for (g = 0; g < gens; g++) {
+         generations[g].max_blocks = size;
      }
  }
 
-
   // Guess the amount of live data for stats.
   live = calcLive();
 
   /* Free the small objects allocated via allocate(), since this will
@@ -741,6 +798,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
   alloc_HpLim = NULL;
   alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
 
+  // Start a new pinned_object_block
+  pinned_object_block = NULL;
+
   /* Free the mark stack.
    */
   if (mark_stack_bdescr != NULL) {
@@ -774,9 +834,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
   /* For a two-space collector, we need to resize the nursery. */
 
   /* set up a new nursery.  Allocate a nursery size based on a
-   * function of the amount of live data (currently a factor of 2,
-   * should be configurable (ToDo)).  Use the blocks from the old
-   * nursery if possible, freeing up any left over blocks.
+   * function of the amount of live data (by default a factor of 2)
+   * Use the blocks from the old nursery if possible, freeing up any
+   * left over blocks.
    *
    * If we get near the maximum heap size, then adjust our nursery
   * size accordingly.  If the nursery is the same size as the live
@@ -785,17 +845,18 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
    *
    * A normal 2-space collector would need 4L bytes to give the same
    * performance we get from 3L bytes, reducing to the same
-   * performance at 2L bytes.  
+   * performance at 2L bytes.
    */
   blocks = g0s0->n_to_blocks;
 
-  if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
-       RtsFlags.GcFlags.maxHeapSize ) {
-    int adjusted_blocks;  // signed on purpose
+  if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
+       blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
+       RtsFlags.GcFlags.maxHeapSize ) {
+    long adjusted_blocks;  // signed on purpose
     int pc_free;
 
     adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
-    IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
+    IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
     pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
     if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
       heapOverflow();
@@ -817,11 +878,11 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
    */
   if (RtsFlags.GcFlags.heapSizeSuggestion) {
-    int blocks;
+    long blocks;
     nat needed = calcNeeded();         // approx blocks needed at next GC
 
     /* Guess how much will be live in generation 0 step 0 next time.
-     * A good approximation is the obtained by finding the
+     * A good approximation is obtained by finding the
      * percentage of g0s0 that was live at the last minor GC.
      */
     if (N == 0) {
@@ -841,10 +902,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
      * collection for collecting all steps except g0s0.
      */
     blocks =
-       (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
-       (100 + (int)g0s0_pcnt_kept);
+       (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
+       (100 + (long)g0s0_pcnt_kept);
 
-    if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
+    if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
       blocks = RtsFlags.GcFlags.minAllocAreaSize;
     }
 
@@ -862,8 +923,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
     zero_static_object_list(scavenged_static_objects);
   }
 
-  /* Reset the nursery
-   */
+  // Reset the nursery
   resetNurseries();
 
   // start any pending finalizers
@@ -945,14 +1005,6 @@ traverse_weak_ptr_list(void)
 
   last_w = &old_weak_ptr_list;
   for (w = old_weak_ptr_list; w != NULL; w = next_w) {
 
-    /* First, this weak pointer might have been evacuated.  If so,
-     * remove the forwarding pointer from the weak_ptr_list.
-     */
-    if (get_itbl(w)->type == EVACUATED) {
-      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
-      *last_w = w;
-    }
-
     /* There might be a DEAD_WEAK on the list if finalizeWeak# was
      * called on a live weak pointer object.  Just remove it.
      */
@@ -966,7 +1018,8 @@ traverse_weak_ptr_list(void)
 
       /* Now, check whether the key is reachable.
        */
-      if ((new = isAlive(w->key))) {
+      new = isAlive(w->key);
+      if (new != NULL) {
        w->key = new;
        // evacuate the value and finalizer
        w->value = evacuate(w->value);
@@ -978,7 +1031,7 @@ traverse_weak_ptr_list(void)
        w->link = weak_ptr_list;
        weak_ptr_list = w;
        flag = rtsTrue;
-       IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
+       IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
        continue;
       }
       else {
@@ -1027,7 +1080,6 @@ traverse_weak_ptr_list(void)
          // not alive (yet): leave this thread on the old_all_threads list.
          prev = &(t->global_link);
          next = t->global_link;
-         continue;
       }
       else {
@@ -1035,7 +1087,6 @@ traverse_weak_ptr_list(void)
          // alive: move this thread onto the all_threads list.
          t->global_link = all_threads;
          all_threads = t;
          *prev = next;
-         break;
       }
     }
   }
@@ -1045,7 +1096,6 @@ traverse_weak_ptr_list(void)
    * of pending finalizers later on.
    */
   if (flag == rtsFalse) {
-    cleanup_weak_ptr_list(&old_weak_ptr_list);
     for (w = old_weak_ptr_list; w; w = w->link) {
       w->finalizer = evacuate(w->finalizer);
     }
@@ -1082,23 +1132,15 @@ traverse_weak_ptr_list(void)
 
 static void
-cleanup_weak_ptr_list ( StgWeak **list )
+mark_weak_ptr_list ( StgWeak **list )
 {
   StgWeak *w, **last_w;
 
   last_w = list;
   for (w = *list; w; w = w->link) {
-
-    if (get_itbl(w)->type == EVACUATED) {
-      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
-      *last_w = w;
-    }
-
-    if ((Bdescr((P_)w)->flags & BF_EVACUATED) == 0) {
       (StgClosure *)w = evacuate((StgClosure *)w);
       *last_w = w;
-    }
-    last_w = &(w->link);
+      last_w = &(w->link);
   }
 }
@@ -1133,8 +1175,12 @@ isAlive(StgClosure *p)
     return p;
   }
   // large objects have an evacuated flag
-  if ((bd->flags & BF_LARGE) && (bd->flags & BF_EVACUATED)) {
-    return p;
+  if (bd->flags & BF_LARGE) {
+    if (bd->flags & BF_EVACUATED) {
+       return p;
+    } else {
+       return NULL;
+    }
   }
   // check the mark bit for compacted steps
   if (bd->step->is_compacted && is_marked((P_)p,bd)) {
@@ -1295,9 +1341,10 @@ evacuate_large(StgPtr p)
   bdescr *bd = Bdescr(p);
   step *stp;
 
-  // should point to the beginning of the block
-  ASSERT(((W_)p & BLOCK_MASK) == 0);
-
+  // object must be at the beginning of the block (or be a ByteArray)
+  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
+        (((W_)p & BLOCK_MASK) == 0));
+
   // already evacuated?
   if (bd->flags & BF_EVACUATED) {
     /* Don't forget to set the failed_to_evac flag if we didn't get
@@ -1444,7 +1491,8 @@ loop:
       if (!is_marked((P_)q,bd)) {
          mark((P_)q,bd);
          if (mark_stack_full()) {
-             barf("ToDo: mark stack full");
+             mark_stack_overflowed = rtsTrue;
+             reset_mark_stack();
          }
          push_mark_stack((P_)q);
       }
@@ -1548,7 +1596,7 @@ loop:
   case CONSTR_0_2:
   case CONSTR_STATIC:
     {
-      StgWord32 offset = info->layout.selector_offset;
+      StgWord offset = info->layout.selector_offset;
 
       // check that the size is in range
       ASSERT(offset <
@@ -1749,7 +1797,6 @@ loop:
     if (evac_gen > 0) {                // optimisation
       StgClosure *p = ((StgEvacuated*)q)->evacuee;
       if (Bdescr((P_)p)->gen_no < evac_gen) {
-       IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
       }
@@ -1842,7 +1889,7 @@ loop:
 void
 move_TSO(StgTSO *src, StgTSO *dest)
 {
-    int diff;
+    ptrdiff_t diff;
 
     // relocate the stack pointers...
     diff = (StgPtr)dest - (StgPtr)src; // In *words*
@@ -1859,7 +1906,7 @@ move_TSO(StgTSO *src, StgTSO *dest)
    -------------------------------------------------------------------------- */
 
 StgTSO *
-relocate_stack(StgTSO *dest, int diff)
+relocate_stack(StgTSO *dest, ptrdiff_t diff)
 {
   StgUpdateFrame *su;
   StgCatchFrame *cf;
@@ -2310,19 +2357,21 @@ scavenge(step *stp)
 static void
 scavenge_mark_stack(void)
 {
-    StgPtr p;
+    StgPtr p, q;
     StgInfoTable *info;
     nat saved_evac_gen;
 
     evac_gen = oldest_gen->no;
     saved_evac_gen = evac_gen;
 
+linear_scan:
     while (!mark_stack_empty()) {
        p = pop_mark_stack();
 
        info = get_itbl((StgClosure *)p);
        ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
 
+       q = p;
        switch (info->type) {
 
        case MVAR:
@@ -2547,7 +2596,7 @@ scavenge_mark_stack(void)
                  p, info_type((StgClosure *)p)));
        break;
     }
-#endif
+#endif // PAR
 
     default:
        barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
@@ -2556,11 +2605,52 @@ scavenge_mark_stack(void)
 
        if (failed_to_evac) {
           failed_to_evac = rtsFalse;
-          mkMutCons((StgClosure *)p, &generations[evac_gen]);
+          mkMutCons((StgClosure *)q, &generations[evac_gen]);
        }
+
+       // mark the next bit to indicate "scavenged"
+       mark(q+1, Bdescr(q));
 
     } // while (!mark_stack_empty())
-}
+
+    // start a new linear scan if the mark stack overflowed at some point
+    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
+       IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
+       mark_stack_overflowed = rtsFalse;
+       oldgen_scan_bd = oldest_gen->steps[0].blocks;
+       oldgen_scan = oldgen_scan_bd->start;
+    }
+
+    if (oldgen_scan_bd) {
+       // push a new thing on the mark stack
+    loop:
+       // find a closure that is marked but not scavenged, and start
+       // from there.
+       while (oldgen_scan < oldgen_scan_bd->free
+              && !is_marked(oldgen_scan,oldgen_scan_bd)) {
+           oldgen_scan++;
+       }
+
+       if (oldgen_scan < oldgen_scan_bd->free) {
+
+           // already scavenged?
+           if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
+               oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
+               goto loop;
+           }
+
+           push_mark_stack(oldgen_scan);
+           // ToDo: bump the linear scan by the actual size of the object
+           oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
+           goto linear_scan;
+       }
+
+       oldgen_scan_bd = oldgen_scan_bd->link;
+       if (oldgen_scan_bd != NULL) {
+           oldgen_scan = oldgen_scan_bd->start;
+           goto loop;
+       }
+    }
+}
 
 /* -----------------------------------------------------------------------------
    Scavenge one object.
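Note on the hunk above: it is the consumer side of the overflow machinery.
Every closure scavenged off the mark stack gets a second mark bit set at q+1,
so the linear sweep can tell "marked but not yet scavenged" objects apart from
finished ones and re-push only the former. A stripped-down model of that test,
with a hypothetical flat bitmap standing in for the per-block bitmaps (nothing
here is RTS code), is:

    #include <stdbool.h>
    #include <stddef.h>

    #define HEAP_WORDS    1024
    #define MIN_OBJ_WORDS 2    /* assumption: StgHeader + MIN_NONUPD_SIZE */

    /* Hypothetical bitmap: one entry per heap word. Convention modelled
     * on the patch: mark_bit[p] set = object at p is live, and
     * mark_bit[p+1] set = object at p has already been scavenged. */
    static bool mark_bit[HEAP_WORDS + 1];

    /* Return the first live-but-unscavenged object at or after scan,
     * or -1 if this block holds none. */
    static ptrdiff_t find_unscavenged(size_t scan, size_t free)
    {
        while (scan < free) {
            if (!mark_bit[scan]) { scan++; continue; }   /* not live     */
            if (mark_bit[scan + 1]) {                    /* already done */
                scan += MIN_OBJ_WORDS;
                continue;
            }
            return (ptrdiff_t)scan;    /* re-push this one and rescan */
        }
        return -1;
    }

The real code advances by a conservative minimum object size (hence the ToDo
comment in the hunk); stepping by the actual object size would skip dead words
faster but needs the info table of each object.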
@@ -2571,104 +2661,131 @@ scavenge_mark_stack(void)
    -------------------------------------------------------------------------- */
 
 static rtsBool
-scavenge_one(StgClosure *p)
+scavenge_one(StgPtr p)
 {
-  const StgInfoTable *info;
-  rtsBool no_luck;
-
-  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
-              || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
-
-  info = get_itbl(p);
-
-  switch (info -> type) {
-
-  case FUN:
-  case FUN_1_0:                        // hardly worth specialising these guys
-  case FUN_0_1:
-  case FUN_1_1:
-  case FUN_0_2:
-  case FUN_2_0:
-  case THUNK:
-  case THUNK_1_0:
-  case THUNK_0_1:
-  case THUNK_1_1:
-  case THUNK_0_2:
-  case THUNK_2_0:
-  case CONSTR:
-  case CONSTR_1_0:
-  case CONSTR_0_1:
-  case CONSTR_1_1:
-  case CONSTR_0_2:
-  case CONSTR_2_0:
-  case WEAK:
-  case FOREIGN:
-  case IND_PERM:
-  case IND_OLDGEN_PERM:
+    const StgInfoTable *info;
+    nat saved_evac_gen = evac_gen;
+    rtsBool no_luck;
+
+    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
+                || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
+
+    info = get_itbl((StgClosure *)p);
+
+    switch (info->type) {
+
+    case FUN:
+    case FUN_1_0:                      // hardly worth specialising these guys
+    case FUN_0_1:
+    case FUN_1_1:
+    case FUN_0_2:
+    case FUN_2_0:
+    case THUNK:
+    case THUNK_1_0:
+    case THUNK_0_1:
+    case THUNK_1_1:
+    case THUNK_0_2:
+    case THUNK_2_0:
+    case CONSTR:
+    case CONSTR_1_0:
+    case CONSTR_0_1:
+    case CONSTR_1_1:
+    case CONSTR_0_2:
+    case CONSTR_2_0:
+    case WEAK:
+    case FOREIGN:
+    case IND_PERM:
+    case IND_OLDGEN_PERM:
     {
-    StgPtr q, end;
-
-    end = (P_)p->payload + info->layout.payload.ptrs;
-    for (q = (P_)p->payload; q < end; q++) {
-      (StgClosure *)*q = evacuate((StgClosure *)*q);
-    }
-    break;
+       StgPtr q, end;
+
+       end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+       for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
+           (StgClosure *)*q = evacuate((StgClosure *)*q);
+       }
+       break;
     }
-
-  case CAF_BLACKHOLE:
-  case SE_CAF_BLACKHOLE:
-  case SE_BLACKHOLE:
-  case BLACKHOLE:
-      break;
-
-  case THUNK_SELECTOR:
+
+    case CAF_BLACKHOLE:
+    case SE_CAF_BLACKHOLE:
+    case SE_BLACKHOLE:
+    case BLACKHOLE:
+       break;
+
+    case THUNK_SELECTOR:
     {
-    StgSelector *s = (StgSelector *)p;
-    s->selectee = evacuate(s->selectee);
-    break;
+       StgSelector *s = (StgSelector *)p;
+       s->selectee = evacuate(s->selectee);
+       break;
     }
 
-  case AP_UPD: /* same as PAPs */
-  case PAP:
-    /* Treat a PAP just like a section of stack, not forgetting to
-     * evacuate the function pointer too...
-     */
-    {
-      StgPAP* pap = (StgPAP *)p;
+    case ARR_WORDS:
+       // nothing to follow
+       break;
 
-      pap->fun = evacuate(pap->fun);
-      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
-      break;
+    case MUT_ARR_PTRS:
+    {
+       // follow everything
+       StgPtr next;
+
+       evac_gen = 0;           // repeatedly mutable
+       recordMutable((StgMutClosure *)p);
+       next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+       for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+           (StgClosure *)*p = evacuate((StgClosure *)*p);
+       }
+       evac_gen = saved_evac_gen;
+       failed_to_evac = rtsFalse;
+       break;
     }
 
-  case IND_OLDGEN:
-    /* This might happen if for instance a MUT_CONS was pointing to a
-     * THUNK which has since been updated.  The IND_OLDGEN will
-     * be on the mutable list anyway, so we don't need to do anything
-     * here.
-     */
-    break;
+    case MUT_ARR_PTRS_FROZEN:
+    {
+       // follow everything
+       StgPtr next;
+
+       next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+       for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+           (StgClosure *)*p = evacuate((StgClosure *)*p);
+       }
+       break;
+    }
 
-  case MUT_ARR_PTRS_FROZEN:
-    {
-      // follow everything
-      StgPtr q, next;
+    case TSO:
+    {
+       StgTSO *tso = (StgTSO *)p;
+
+       evac_gen = 0;           // repeatedly mutable
+       scavengeTSO(tso);
+       recordMutable((StgMutClosure *)tso);
+       evac_gen = saved_evac_gen;
+       failed_to_evac = rtsFalse;
+       break;
+    }
+
+    case AP_UPD:
+    case PAP:
+    {
+       StgPAP* pap = (StgPAP *)p;
+       pap->fun = evacuate(pap->fun);
+       scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
+       break;
+    }
 
-      q = (StgPtr)p;
-      next = q + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
-      for (q = (P_)((StgMutArrPtrs *)p)->payload; q < next; q++) {
-       (StgClosure *)*q = evacuate((StgClosure *)*q);
-      }
-      break;
-    }
+    case IND_OLDGEN:
+       // This might happen if for instance a MUT_CONS was pointing to a
+       // THUNK which has since been updated.  The IND_OLDGEN will
+       // be on the mutable list anyway, so we don't need to do anything
+       // here.
+       break;
 
-  default:
-    barf("scavenge_one: strange object %d", (int)(info->type));
-  }
+    default:
+       barf("scavenge_one: strange object %d", (int)(info->type));
+    }
 
-  no_luck = failed_to_evac;
-  failed_to_evac = rtsFalse;
-  return (no_luck);
+    no_luck = failed_to_evac;
+    failed_to_evac = rtsFalse;
+    return (no_luck);
 }
 
 /* -----------------------------------------------------------------------------
@@ -2736,7 +2853,7 @@ scavenge_mut_once_list(generation *gen)
       } else {
        size = gen->steps[0].scan - start;
       }
-      fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
+      belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
     }
 #endif
 
@@ -2760,23 +2877,21 @@ scavenge_mut_once_list(generation *gen)
        p->mut_link = NULL;
       }
       continue;
-
+
     case MUT_CONS:
        /* MUT_CONS is a kind of MUT_VAR, except it that we try to remove
        * it from the mutable list if possible by promoting whatever it
        * points to.
        */
-      scavenge_one((StgClosure *)((StgMutVar *)p)->var);
-      if (failed_to_evac == rtsTrue) {
+      if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
        /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
        */
-       failed_to_evac = rtsFalse;
       p->mut_link = new_list;
       new_list = p;
      }
      continue;
-
+
    default:
      // shouldn't have anything else on the mutables list
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
@@ -2826,12 +2941,31 @@ scavenge_mutable_list(generation *gen)
       continue;
     }
 
+    // Happens if a MUT_ARR_PTRS in the old generation is frozen
+    case MUT_ARR_PTRS_FROZEN:
+      {
+       StgPtr end, q;
+
+       evac_gen = gen->no;
+       end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+       for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
+         (StgClosure *)*q = evacuate((StgClosure *)*q);
+       }
+       evac_gen = 0;
+       p->mut_link = NULL;
+       if (failed_to_evac) {
+           failed_to_evac = rtsFalse;
+           mkMutCons((StgClosure *)p, gen);
+       }
+       continue;
+      }
+
     case MUT_VAR:
       ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
       p->mut_link = gen->mut_list;
       gen->mut_list = p;
       continue;
-
+
     case MVAR:
       {
        StgMVar *mvar = (StgMVar *)p;
@@ -3000,7 +3134,7 @@ scavenge_static(void)
        */
       if (failed_to_evac) {
        failed_to_evac = rtsFalse;
-       scavenged_static_objects = STATIC_LINK(info,p);
+       scavenged_static_objects = IND_STATIC_LINK(p);
        ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
        oldest_gen->mut_once_list = (StgMutClosure *)ind;
       }
@@ -3248,9 +3382,7 @@ static void
 scavenge_large(step *stp)
 {
   bdescr *bd;
-  StgPtr p, q;
-  const StgInfoTable* info;
-  nat saved_evac_gen = evac_gen; // used for temporarily changing evac_gen
+  StgPtr p;
 
   bd = stp->new_large_objects;
 
@@ -3264,73 +3396,12 @@ scavenge_large(step *stp)
     stp->new_large_objects = bd->link;
     dbl_link_onto(bd, &stp->scavenged_large_objects);
 
-    p = bd->start;
-    info = get_itbl((StgClosure *)p);
-
-    // only certain objects can be "large"...
-    q = p;
-    switch (info->type) {
-
-    case ARR_WORDS:
-      // nothing to follow
-      break;
-
-    case MUT_ARR_PTRS:
-      {
-       // follow everything
-       StgPtr next;
-
-       evac_gen = 0;           // repeatedly mutable
-       recordMutable((StgMutClosure *)p);
-       next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
-       for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-         (StgClosure *)*p = evacuate((StgClosure *)*p);
-       }
-       evac_gen = saved_evac_gen;
-       failed_to_evac = rtsFalse;
-       break;
-      }
-
-    case MUT_ARR_PTRS_FROZEN:
-      {
-       // follow everything
-       StgPtr next;
-
-       next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
-       for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-         (StgClosure *)*p = evacuate((StgClosure *)*p);
-       }
-       break;
-      }
-
-    case TSO:
-      {
-       StgTSO *tso = (StgTSO *)p;
+    // update the block count in this step.
+    stp->n_scavenged_large_blocks += bd->blocks;
 
-       evac_gen = 0;           // repeatedly mutable
-       scavengeTSO(tso);
-       recordMutable((StgMutClosure *)tso);
-       evac_gen = saved_evac_gen;
-       failed_to_evac = rtsFalse;
-       break;
-      }
-
-    case AP_UPD:
-    case PAP:
-      {
-       StgPAP* pap = (StgPAP *)p;
-       pap->fun = evacuate(pap->fun);
-       scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
-       break;
-      }
-
-    default:
-      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
-    }
-
-    if (failed_to_evac) {
-      failed_to_evac = rtsFalse;
-      mkMutCons((StgClosure *)q, &generations[evac_gen]);
+    p = bd->start;
+    if (scavenge_one(p)) {
+       mkMutCons((StgClosure *)p, stp->gen);
+    }
   }
 }
@@ -3438,7 +3509,7 @@ gcCAFs(void)
       ASSERT(info->type == IND_STATIC);
 
       if (STATIC_LINK(info,p) == NULL) {
-       IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
+       IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
        // black hole it
        SET_INFO(p,&stg_BLACKHOLE_info);
        p = STATIC_LINK2(info,p);
@@ -3452,7 +3523,7 @@ gcCAFs(void)
   }
 
-  //  fprintf(stderr, "%d CAFs live\n", i);
+  //  belch("%d CAFs live", i);
 }
 #endif
@@ -3499,7 +3570,7 @@ threadLazyBlackHole(StgTSO *tso)
       if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
          bh->header.info != &stg_CAF_BLACKHOLE_info) {
 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
-       fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
+       belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
 #endif
        SET_INFO(bh,&stg_BLACKHOLE_info);
       }
@@ -3648,7 +3719,7 @@ threadSqueezeStack(StgTSO *tso)
        StgClosure *updatee_bypass = frame->updatee;
 
 #if DEBUG
-       IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
+       IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
        squeezes++;
 #endif
@@ -3723,7 +3794,7 @@ threadSqueezeStack(StgTSO *tso)
          bh->header.info != &stg_BLACKHOLE_BQ_info &&
          bh->header.info != &stg_CAF_BLACKHOLE_info) {
 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
-       fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
+       belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
 #endif
 #ifdef DEBUG
        /* zero out the slop so that the sanity checker can tell
@@ -3732,7 +3803,7 @@ threadSqueezeStack(StgTSO *tso)
        {
          StgInfoTable *info = get_itbl(bh);
          nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
-         /* don't zero out slop for a THUNK_SELECTOR, because it's layout
+         /* don't zero out slop for a THUNK_SELECTOR, because its layout
          * info is used for a different purpose, and it's exactly the
          * same size as a BLACKHOLE in any case.
          */
@@ -3762,10 +3833,10 @@ threadSqueezeStack(StgTSO *tso)
     else
       next_frame_bottom = tso->sp - 1;
 
-#if DEBUG
+#if 0
     IF_DEBUG(gc,
-            fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
-                    displacement))
+            belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
+                  displacement))
 #endif
 
     while (sp >= next_frame_bottom) {
@@ -3779,9 +3850,9 @@ threadSqueezeStack(StgTSO *tso)
   tso->sp += displacement;
   tso->su = prev_frame;
 
-#if DEBUG
+#if 0
   IF_DEBUG(gc,
-          fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
+          belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
                 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
 #endif
 }
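Note on the generation-resizing hunk earlier in this patch: every generation
except zero gets the same max size, clamped so that the worst case stays under
maxHeapSize. A copying collection needs twice the old-generation space while
compaction does not, which is where the "* 2" and "* 2 - 1" divisors come
from. The following standalone model of that arithmetic uses made-up flag
values in place of RtsFlags.GcFlags.*; it is an illustration, not RTS code:

    #include <stdio.h>

    /* Hypothetical flag values, standing in for RtsFlags.GcFlags.* */
    #define MAX_HEAP       4096  /* maxHeapSize, in blocks; 0 = unbounded */
    #define GENS           2     /* generations                           */
    #define OLD_GEN_FACTOR 2     /* oldGenFactor                          */
    #define MIN_OLD_GEN    4     /* minOldGenSize                         */
    #define PC_FREE        3     /* pcFreeHeap (percent)                  */
    #define MIN_ALLOC_AREA 128   /* minAllocAreaSize                      */

    #define stg_max(a,b) ((a) > (b) ? (a) : (b))

    /* Mirrors the hunk: compute the per-generation max size for all
     * generations except zero, clamped to fit within MAX_HEAP. */
    static unsigned size_generations(unsigned live, int compacted)
    {
        unsigned size = stg_max(live * OLD_GEN_FACTOR, MIN_OLD_GEN);
        unsigned min_alloc = stg_max((PC_FREE * MAX_HEAP) / 200,
                                     MIN_ALLOC_AREA);

        if (MAX_HEAP != 0) {
            if (compacted) {
                if ((size + (size - 1) * (GENS - 2) * 2) + min_alloc > MAX_HEAP)
                    size = (MAX_HEAP - min_alloc) / ((GENS - 1) * 2 - 1);
            } else {
                if ((size * (GENS - 1) * 2) + min_alloc > MAX_HEAP)
                    size = (MAX_HEAP - min_alloc) / ((GENS - 1) * 2);
            }
        }
        return size;
    }

    int main(void)
    {
        /* Worked example: 1500 live blocks, copying collection.
         * size = 3000; 3000 * (2-1) * 2 + 128 = 6128 > 4096, so the
         * size is clamped to (4096 - 128) / 2 = 1984 blocks. */
        printf("size = %u\n", size_generations(1500, 0));
        return 0;
    }

The size < live check in the real hunk (omitted from the worked example) is
what turns an unsatisfiable clamp into a heapOverflow() instead of thrashing.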