X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=cfa23c11a8c3ac732e6504a9d3ee55b03a1e3129;hb=f8e722a44f00e5d388aad1a613f62361058dda0d;hp=034646daf8e37d6dc3ac1e2d079f55d5750bf7c2;hpb=e16e99739c41bab70c317745e481630ccd8349d5;p=ghc-hetmet.git diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c index 034646d..cfa23c1 100644 --- a/ghc/rts/GC.c +++ b/ghc/rts/GC.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: GC.c,v 1.108 2001/07/25 09:14:21 simonmar Exp $ + * $Id: GC.c,v 1.140 2002/09/06 09:56:12 simonmar Exp $ * * (c) The GHC Team 1998-1999 * @@ -7,6 +7,7 @@ * * ---------------------------------------------------------------------------*/ +#include "PosixSource.h" #include "Rts.h" #include "RtsFlags.h" #include "RtsUtils.h" @@ -40,7 +41,11 @@ #if defined(RTS_GTK_FRONTPANEL) #include "FrontPanel.h" #endif -#include + +#include "RetainerProfile.h" +#include "LdvProfile.h" + +#include /* STATIC OBJECT LIST. * @@ -76,8 +81,8 @@ * We build up a static object list while collecting generations 0..N, * which is then appended to the static object list of generation N+1. */ -StgClosure* static_objects; // live static objects -StgClosure* scavenged_static_objects; // static objects scavenged so far +static StgClosure* static_objects; // live static objects +StgClosure* scavenged_static_objects; // static objects scavenged so far /* N is the oldest generation being collected, where the generations * are numbered starting at 0. A major GC (indicated by the major_gc @@ -96,12 +101,17 @@ static nat evac_gen; /* Weak pointers */ StgWeak *old_weak_ptr_list; // also pending finaliser list -static rtsBool weak_done; // all done for this pass + +/* Which stage of processing various kinds of weak pointer are we at? + * (see traverse_weak_ptr_list() below for discussion). + */ +typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage; +static WeakStage weak_stage; /* List of all threads during GC */ static StgTSO *old_all_threads; -static StgTSO *resurrected_threads; +StgTSO *resurrected_threads; /* Flag indicating failure to evacuate an object to the desired * generation. @@ -110,16 +120,16 @@ static rtsBool failed_to_evac; /* Old to-space (used for two-space collector only) */ -bdescr *old_to_blocks; +static bdescr *old_to_blocks; /* Data used for allocation area sizing. 
*/ -lnat new_blocks; // blocks allocated during this GC -lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC +static lnat new_blocks; // blocks allocated during this GC +static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC /* Used to avoid long recursion due to selector thunks */ -lnat thunk_selector_depth = 0; +static lnat thunk_selector_depth = 0; #define MAX_THUNK_SELECTOR_DEPTH 256 /* ----------------------------------------------------------------------------- @@ -132,17 +142,18 @@ static void zero_static_object_list ( StgClosure* first_static ); static void zero_mutable_list ( StgMutClosure *first ); static rtsBool traverse_weak_ptr_list ( void ); -static void cleanup_weak_ptr_list ( StgWeak **list ); +static void mark_weak_ptr_list ( StgWeak **list ); + +static StgClosure * eval_thunk_selector ( nat field, StgSelector * p ); static void scavenge ( step * ); static void scavenge_mark_stack ( void ); static void scavenge_stack ( StgPtr p, StgPtr stack_end ); -static rtsBool scavenge_one ( StgClosure *p ); +static rtsBool scavenge_one ( StgPtr p ); static void scavenge_large ( step * ); static void scavenge_static ( void ); static void scavenge_mutable_list ( generation *g ); static void scavenge_mut_once_list ( generation *g ); -static void scavengeCAFs ( void ); #if 0 && defined(DEBUG) static void gcCAFs ( void ); @@ -159,6 +170,12 @@ static StgPtr *mark_stack; static StgPtr *mark_sp; static StgPtr *mark_splim; +// Flag and pointers used for falling back to a linear scan when the +// mark stack overflows. +static rtsBool mark_stack_overflowed; +static bdescr *oldgen_scan_bd; +static StgPtr oldgen_scan; + static inline rtsBool mark_stack_empty(void) { @@ -172,6 +189,12 @@ mark_stack_full(void) } static inline void +reset_mark_stack(void) +{ + mark_sp = mark_stack; +} + +static inline void push_mark_stack(StgPtr p) { *mark_sp++ = p; @@ -204,6 +227,8 @@ pop_mark_stack(void) - free from-space in each step, and set from-space = to-space. + Locks held: sched_mutex + -------------------------------------------------------------------------- */ void @@ -349,7 +374,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) stp->bitmap = bitmap_bdescr; bitmap = bitmap_bdescr->start; - IF_DEBUG(gc, fprintf(stderr, "bitmap_size: %d, bitmap: %p\n", + IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p", bitmap_size, bitmap);); // don't forget to fill it with zeros! @@ -450,7 +475,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) } } - scavengeCAFs(); + /* follow roots from the CAF list (used by GHCi) + */ + evac_gen = 0; + markCAFs(mark_root); /* follow all the roots that the application knows about. */ @@ -475,9 +503,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) /* Mark the weak pointer list, and prepare to detect dead weak * pointers. */ + mark_weak_ptr_list(&weak_ptr_list); old_weak_ptr_list = weak_ptr_list; weak_ptr_list = NULL; - weak_done = rtsFalse; + weak_stage = WeakPtrs; /* The all_threads list is like the weak_ptr_list. * See traverse_weak_ptr_list() for the details. 
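
The trickiest addition above is the mark-stack overflow machinery (mark_stack_overflowed, oldgen_scan_bd, oldgen_scan and reset_mark_stack): instead of aborting when the stack fills, the collector now notes the overflow, recycles the stack in place, and recovers the dropped work later with a linear scan. Below is a minimal standalone sketch of that protocol. It is illustrative C only, not GHC code; push_mark, drain_mark_stack, scavenge_obj and the marked/scavenged arrays are invented stand-ins for GHC's mark bitmap and scavenger.

    #include <stdio.h>
    #include <stdbool.h>

    #define STACK_SIZE 4            /* deliberately tiny, to force overflow */
    #define HEAP_SIZE  16

    static int  mark_stack[STACK_SIZE];
    static int  mark_sp;                   /* next free slot */
    static bool overflowed;
    static bool marked[HEAP_SIZE];         /* stand-in for the mark bitmap */
    static bool scavenged[HEAP_SIZE];

    /* Like push_mark_stack(): on overflow, note the fact and recycle the
     * stack rather than aborting; the dropped entries stay marked. */
    static void push_mark(int obj)
    {
        if (mark_sp == STACK_SIZE) {
            overflowed = true;
            mark_sp = 0;                   /* cf. reset_mark_stack() */
        }
        mark_stack[mark_sp++] = obj;
    }

    static void scavenge_obj(int obj)
    {
        scavenged[obj] = true;
        /* a real scavenger would evacuate obj's fields here, possibly
         * calling push_mark() on objects copied into the old generation */
    }

    /* Drain the stack; if it overflowed at some point, fall back to a
     * linear scan for objects marked but not yet scavenged, and repeat. */
    static void drain_mark_stack(void)
    {
        for (;;) {
            while (mark_sp > 0)
                scavenge_obj(mark_stack[--mark_sp]);
            if (!overflowed)
                return;
            overflowed = false;
            for (int i = 0; i < HEAP_SIZE; i++)
                if (marked[i] && !scavenged[i])
                    push_mark(i);
            if (mark_sp == 0)
                return;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 10; i++) {     /* mark more objects than fit */
            marked[i] = true;
            push_mark(i);
        }
        drain_mark_stack();
        for (int i = 0; i < HEAP_SIZE; i++)
            if (marked[i] && !scavenged[i]) {
                puts("missed an object");
                return 1;
            }
        puts("all marked objects scavenged");
        return 0;
    }

The loop terminates because the set of marked-but-unscavenged objects shrinks on every pass; scavenge_mark_stack() below enforces the same invariant one block at a time, using the mark bit at q+1 to record "scavenged".
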
@@ -516,12 +545,6 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) scavenge_static(); } - // scavenge objects in compacted generation - if (mark_stack_bdescr != NULL && !mark_stack_empty()) { - scavenge_mark_stack(); - flag = rtsTrue; - } - /* When scavenging the older generations: Objects may have been * evacuated from generations <= N into older generations, and we * need to scavenge these objects. We're going to try to ensure that @@ -535,7 +558,15 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) { long gen; int st; + loop2: + // scavenge objects in compacted generation + if (mark_stack_overflowed || oldgen_scan_bd != NULL || + (mark_stack_bdescr != NULL && !mark_stack_empty())) { + scavenge_mark_stack(); + flag = rtsTrue; + } + for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) { for (st = generations[gen].n_steps; --st >= 0; ) { if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) { @@ -559,16 +590,31 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) if (flag) { goto loop; } - // must be last... + // must be last... invariant is that everything is fully + // scavenged at this point. if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something goto loop; } } - /* Final traversal of the weak pointer list (see comment by - * cleanUpWeakPtrList below). + /* Update the pointers from the "main thread" list - these are + * treated as weak pointers because we want to allow a main thread + * to get a BlockedOnDeadMVar exception in the same way as any other + * thread. Note that the threads should all have been retained by + * GC by virtue of being on the all_threads list, we're just + * updating pointers here. */ - cleanup_weak_ptr_list(&weak_ptr_list); + { + StgMainThread *m; + StgTSO *tso; + for (m = main_threads; m != NULL; m = m->link) { + tso = (StgTSO *) isAlive((StgClosure *)m->tso); + if (tso == NULL) { + barf("main thread has been GC'd"); + } + m->tso = tso; + } + } #if defined(PAR) // Reconstruct the Global Address tables used in GUM @@ -590,9 +636,17 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) } } +#ifdef PROFILING + // We call processHeapClosureForDead() on every closure destroyed during + // the current garbage collection, so we invoke LdvCensusForDead(). + if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV + || RtsFlags.ProfFlags.bioSelector != NULL) + LdvCensusForDead(N); +#endif + // NO MORE EVACUATION AFTER THIS POINT! // Finally: compaction of the oldest generation. - if (major_gc && RtsFlags.GcFlags.compact) { + if (major_gc && oldest_gen->steps[0].is_compacted) { // save number of blocks for stats oldgen_saved_blocks = oldest_gen->steps[0].n_blocks; compact(get_roots); @@ -685,24 +739,8 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) stp->large_objects = stp->scavenged_large_objects; stp->n_large_blocks = stp->n_scavenged_large_blocks; - /* Set the maximum blocks for this generation, interpolating - * between the maximum size of the oldest and youngest - * generations. - * - * max_blocks = oldgen_max_blocks * G - * ---------------------- - * oldest_gen - */ - if (g != 0) { -#if 0 - generations[g].max_blocks = (oldest_gen->max_blocks * g) - / (RtsFlags.GcFlags.generations-1); -#endif - generations[g].max_blocks = oldest_gen->max_blocks; - } - - // for older generations... } else { + // for older generations... /* For older generations, we need to append the * scavenged_large_object list (i.e. 
large objects that have been @@ -721,30 +759,83 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) } } - /* Set the maximum blocks for the oldest generation, based on twice - * the amount of live data now, adjusted to fit the maximum heap - * size if necessary. + /* Reset the sizes of the older generations when we do a major + * collection. * - * This is an approximation, since in the worst case we'll need - * twice the amount of live data plus whatever space the other - * generations need. + * CURRENT STRATEGY: make all generations except zero the same size. + * We have to stay within the maximum heap size, and leave a certain + * percentage of the maximum heap size available to allocate into. */ if (major_gc && RtsFlags.GcFlags.generations > 1) { - oldest_gen->max_blocks = - stg_max(oldest_gen->steps[0].n_blocks * RtsFlags.GcFlags.oldGenFactor, - RtsFlags.GcFlags.minOldGenSize); - if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) { - oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2; - if (((int)oldest_gen->max_blocks - - (int)oldest_gen->steps[0].n_blocks) < - (RtsFlags.GcFlags.pcFreeHeap * - RtsFlags.GcFlags.maxHeapSize / 200)) { - heapOverflow(); - } + nat live, size, min_alloc; + nat max = RtsFlags.GcFlags.maxHeapSize; + nat gens = RtsFlags.GcFlags.generations; + + // live in the oldest generations + live = oldest_gen->steps[0].n_blocks + + oldest_gen->steps[0].n_large_blocks; + + // default max size for all generations except zero + size = stg_max(live * RtsFlags.GcFlags.oldGenFactor, + RtsFlags.GcFlags.minOldGenSize); + + // minimum size for generation zero + min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200, + RtsFlags.GcFlags.minAllocAreaSize); + + // Auto-enable compaction when the residency reaches a + // certain percentage of the maximum heap size (default: 30%). + if (RtsFlags.GcFlags.generations > 1 && + (RtsFlags.GcFlags.compact || + (max > 0 && + oldest_gen->steps[0].n_blocks > + (RtsFlags.GcFlags.compactThreshold * max) / 100))) { + oldest_gen->steps[0].is_compacted = 1; +// fprintf(stderr,"compaction: on\n", live); + } else { + oldest_gen->steps[0].is_compacted = 0; +// fprintf(stderr,"compaction: off\n", live); + } + + // if we're going to go over the maximum heap size, reduce the + // size of the generations accordingly. The calculation is + // different if compaction is turned on, because we don't need + // to double the space required to collect the old generation. + if (max != 0) { + + // this test is necessary to ensure that the calculations + // below don't have any negative results - we're working + // with unsigned values here. + if (max < min_alloc) { + heapOverflow(); + } + + if (oldest_gen->steps[0].is_compacted) { + if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) { + size = (max - min_alloc) / ((gens - 1) * 2 - 1); + } + } else { + if ( (size * (gens - 1) * 2) + min_alloc > max ) { + size = (max - min_alloc) / ((gens - 1) * 2); + } + } + + if (size < live) { + heapOverflow(); + } + } + +#if 0 + fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live, + min_alloc, size, max); +#endif + + for (g = 0; g < gens; g++) { + generations[g].max_blocks = size; } } - // Guess the amount of live data for stats. + // Guess the amount of live data for stats. 
live = calcLive(); /* Free the small objects allocated via allocate(), since this will @@ -759,6 +850,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) alloc_HpLim = NULL; alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize; + // Start a new pinned_object_block + pinned_object_block = NULL; + /* Free the mark stack. */ if (mark_stack_bdescr != NULL) { @@ -792,9 +886,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) /* For a two-space collector, we need to resize the nursery. */ /* set up a new nursery. Allocate a nursery size based on a - * function of the amount of live data (currently a factor of 2, - * should be configurable (ToDo)). Use the blocks from the old - * nursery if possible, freeing up any left over blocks. + * function of the amount of live data (by default a factor of 2) + * Use the blocks from the old nursery if possible, freeing up any + * left over blocks. * * If we get near the maximum heap size, then adjust our nursery * size accordingly. If the nursery is the same size as the live @@ -803,17 +897,18 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) * * A normal 2-space collector would need 4L bytes to give the same * performance we get from 3L bytes, reducing to the same - * performance at 2L bytes. + * performance at 2L bytes. */ blocks = g0s0->n_to_blocks; - if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 > - RtsFlags.GcFlags.maxHeapSize ) { + if ( RtsFlags.GcFlags.maxHeapSize != 0 && + blocks * RtsFlags.GcFlags.oldGenFactor * 2 > + RtsFlags.GcFlags.maxHeapSize ) { long adjusted_blocks; // signed on purpose int pc_free; adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks); - IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks)); + IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks)); pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize; if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ { heapOverflow(); @@ -867,6 +962,11 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) } resizeNursery((nat)blocks); + + } else { + // we might have added extra large blocks to the nursery, so + // resize back to minAllocAreaSize again. + resizeNursery(RtsFlags.GcFlags.minAllocAreaSize); } } @@ -875,20 +975,29 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) if (major_gc) { gcCAFs(); } #endif +#ifdef PROFILING + // resetStaticObjectForRetainerProfiling() must be called before + // zeroing below. + resetStaticObjectForRetainerProfiling(); +#endif + // zero the scavenged static object list if (major_gc) { zero_static_object_list(scavenged_static_objects); } - /* Reset the nursery - */ + // Reset the nursery resetNurseries(); + RELEASE_LOCK(&sched_mutex); + // start any pending finalizers scheduleFinalizers(old_weak_ptr_list); // send exceptions to any threads which were about to die resurrectThreads(resurrected_threads); + + ACQUIRE_LOCK(&sched_mutex); // Update the stable pointer hash table. 
updateStablePtrTable(major_gc);

@@ -906,7 +1015,6 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )

 // restore enclosing cost centre
 #ifdef PROFILING
- heapCensus();
 CCCS = prev_CCS;
 #endif

@@ -944,6 +1052,30 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
 older generations than the one we're collecting. This could
 probably be optimised by keeping per-generation lists of weak
 pointers, but for a few weak pointers this scheme will work.
+
+ There are three distinct stages to processing weak pointers:
+
+ - weak_stage == WeakPtrs
+
+ We process all the weak pointers whose keys are alive (evacuate
+ their values and finalizers), and repeat until we can find no new
+ live keys. If no live keys are found in this pass, then we
+ evacuate the finalizers of all the dead weak pointers in order to
+ run them.
+
+ - weak_stage == WeakThreads
+
+ Now, we discover which *threads* are still alive. Pointers to
+ threads from the all_threads and main thread lists are the
+ weakest of all: a pointer from the finalizer of a dead weak
+ pointer can keep a thread alive. Any threads found to be unreachable
+ are evacuated and placed on the resurrected_threads list so we
+ can send them a signal later.
+
+ - weak_stage == WeakDone
+
+ No more evacuation is done.
+
 -------------------------------------------------------------------------- */

 static rtsBool
@@ -953,137 +1085,154 @@ traverse_weak_ptr_list(void)
 StgClosure *new;
 rtsBool flag = rtsFalse;

- if (weak_done) { return rtsFalse; }
-
- /* doesn't matter where we evacuate values/finalizers to, since
- * these pointers are treated as roots (iff the keys are alive).
- */
- evac_gen = 0;
-
- last_w = &old_weak_ptr_list;
- for (w = old_weak_ptr_list; w != NULL; w = next_w) {
-
- /* First, this weak pointer might have been evacuated. If so,
- * remove the forwarding pointer from the weak_ptr_list.
- */
- if (get_itbl(w)->type == EVACUATED) {
- w = (StgWeak *)((StgEvacuated *)w)->evacuee;
- *last_w = w;
- }
-
- /* There might be a DEAD_WEAK on the list if finalizeWeak# was
- * called on a live weak pointer object. Just remove it.
- */
- if (w->header.info == &stg_DEAD_WEAK_info) {
- next_w = ((StgDeadWeak *)w)->link;
- *last_w = next_w;
- continue;
- }
-
- ASSERT(get_itbl(w)->type == WEAK);
+ switch (weak_stage) {

- /* Now, check whether the key is reachable.
- */
- if ((new = isAlive(w->key))) {
- w->key = new;
- // evacuate the value and finalizer
- w->value = evacuate(w->value);
- w->finalizer = evacuate(w->finalizer);
- // remove this weak ptr from the old_weak_ptr list
- *last_w = w->link;
- // and put it on the new weak ptr list
- next_w = w->link;
- w->link = weak_ptr_list;
- weak_ptr_list = w;
- flag = rtsTrue;
- IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
- continue;
- }
- else {
- last_w = &(w->link);
- next_w = w->link;
- continue;
- }
- }
+ case WeakDone:
+ return rtsFalse;

- /* Now deal with the all_threads list, which behaves somewhat like
- * the weak ptr list. If we discover any threads that are about to
- * become garbage, we wake them up and administer an exception.
- */
- {
- StgTSO *t, *tmp, *next, **prev;
+ case WeakPtrs:
+ /* doesn't matter where we evacuate values/finalizers to, since
+ * these pointers are treated as roots (iff the keys are alive).
+ */ + evac_gen = 0; + + last_w = &old_weak_ptr_list; + for (w = old_weak_ptr_list; w != NULL; w = next_w) { + + /* There might be a DEAD_WEAK on the list if finalizeWeak# was + * called on a live weak pointer object. Just remove it. + */ + if (w->header.info == &stg_DEAD_WEAK_info) { + next_w = ((StgDeadWeak *)w)->link; + *last_w = next_w; + continue; + } + + switch (get_itbl(w)->type) { - prev = &old_all_threads; - for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { + case EVACUATED: + next_w = (StgWeak *)((StgEvacuated *)w)->evacuee; + *last_w = next_w; + continue; - (StgClosure *)tmp = isAlive((StgClosure *)t); - - if (tmp != NULL) { - t = tmp; - } + case WEAK: + /* Now, check whether the key is reachable. + */ + new = isAlive(w->key); + if (new != NULL) { + w->key = new; + // evacuate the value and finalizer + w->value = evacuate(w->value); + w->finalizer = evacuate(w->finalizer); + // remove this weak ptr from the old_weak_ptr list + *last_w = w->link; + // and put it on the new weak ptr list + next_w = w->link; + w->link = weak_ptr_list; + weak_ptr_list = w; + flag = rtsTrue; + IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", + w, w->key)); + continue; + } + else { + last_w = &(w->link); + next_w = w->link; + continue; + } - ASSERT(get_itbl(t)->type == TSO); - switch (t->what_next) { - case ThreadRelocated: - next = t->link; - *prev = next; - continue; - case ThreadKilled: - case ThreadComplete: - // finshed or died. The thread might still be alive, but we - // don't keep it on the all_threads list. Don't forget to - // stub out its global_link field. - next = t->global_link; - t->global_link = END_TSO_QUEUE; - *prev = next; - continue; - default: - ; + default: + barf("traverse_weak_ptr_list: not WEAK"); + } } + + /* If we didn't make any changes, then we can go round and kill all + * the dead weak pointers. The old_weak_ptr list is used as a list + * of pending finalizers later on. + */ + if (flag == rtsFalse) { + for (w = old_weak_ptr_list; w; w = w->link) { + w->finalizer = evacuate(w->finalizer); + } - if (tmp == NULL) { - // not alive (yet): leave this thread on the old_all_threads list. - prev = &(t->global_link); - next = t->global_link; - continue; - } - else { - // alive: move this thread onto the all_threads list. - next = t->global_link; - t->global_link = all_threads; - all_threads = t; - *prev = next; - break; + // Next, move to the WeakThreads stage after fully + // scavenging the finalizers we've just evacuated. + weak_stage = WeakThreads; } - } - } - /* If we didn't make any changes, then we can go round and kill all - * the dead weak pointers. The old_weak_ptr list is used as a list - * of pending finalizers later on. - */ - if (flag == rtsFalse) { - cleanup_weak_ptr_list(&old_weak_ptr_list); - for (w = old_weak_ptr_list; w; w = w->link) { - w->finalizer = evacuate(w->finalizer); - } + return rtsTrue; - /* And resurrect any threads which were about to become garbage. - */ - { - StgTSO *t, *tmp, *next; - for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { - next = t->global_link; - (StgClosure *)tmp = evacuate((StgClosure *)t); - tmp->global_link = resurrected_threads; - resurrected_threads = tmp; + case WeakThreads: + /* Now deal with the all_threads list, which behaves somewhat like + * the weak ptr list. If we discover any threads that are about to + * become garbage, we wake them up and administer an exception. 
+ */
+ {
+ StgTSO *t, *tmp, *next, **prev;
+
+ prev = &old_all_threads;
+ for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+
+ (StgClosure *)tmp = isAlive((StgClosure *)t);
+
+ if (tmp != NULL) {
+ t = tmp;
+ }
+
+ ASSERT(get_itbl(t)->type == TSO);
+ switch (t->what_next) {
+ case ThreadRelocated:
+ next = t->link;
+ *prev = next;
+ continue;
+ case ThreadKilled:
+ case ThreadComplete:
+ // finished or died. The thread might still be alive, but we
+ // don't keep it on the all_threads list. Don't forget to
+ // stub out its global_link field.
+ next = t->global_link;
+ t->global_link = END_TSO_QUEUE;
+ *prev = next;
+ continue;
+ default:
+ ;
+ }
+
+ if (tmp == NULL) {
+ // not alive (yet): leave this thread on the
+ // old_all_threads list.
+ prev = &(t->global_link);
+ next = t->global_link;
+ }
+ else {
+ // alive: move this thread onto the all_threads list.
+ next = t->global_link;
+ t->global_link = all_threads;
+ all_threads = t;
+ *prev = next;
+ }
+ }
+ }
+
+ /* And resurrect any threads which were about to become garbage.
+ */
+ {
+ StgTSO *t, *tmp, *next;
+ for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->global_link;
+ (StgClosure *)tmp = evacuate((StgClosure *)t);
+ tmp->global_link = resurrected_threads;
+ resurrected_threads = tmp;
+ }
+ }
+
+ weak_stage = WeakDone; // *now* we're done,
+ return rtsTrue; // but one more round of scavenging, please

- weak_done = rtsTrue;
+ default:
+ barf("traverse_weak_ptr_list");
 }
- return rtsTrue;
 }

 /* -----------------------------------------------------------------------------
@@ -1100,23 +1249,18 @@ traverse_weak_ptr_list(void)

 static void
-cleanup_weak_ptr_list ( StgWeak **list )
+mark_weak_ptr_list ( StgWeak **list )
 {
 StgWeak *w, **last_w;

 last_w = list;
 for (w = *list; w; w = w->link) {
-
- if (get_itbl(w)->type == EVACUATED) {
- w = (StgWeak *)((StgEvacuated *)w)->evacuee;
- *last_w = w;
- }
-
- if ((Bdescr((P_)w)->flags & BF_EVACUATED) == 0) {
+ // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
+ ASSERT(w->header.info == &stg_DEAD_WEAK_info
+ || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
 (StgClosure *)w = evacuate((StgClosure *)w);
 *last_w = w;
- }
- last_w = &(w->link);
+ last_w = &(w->link);
 }
 }

@@ -1146,6 +1290,7 @@ isAlive(StgClosure *p)

 loop:
 bd = Bdescr((P_)p);
+
 // ignore closures in generations that we're not collecting.
 if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
 return p;
@@ -1232,6 +1377,10 @@ static __inline__ StgClosure *
 copy(StgClosure *src, nat size, step *stp)
 {
 P_ to, from, dest;
+#ifdef PROFILING
+ // @LDV profiling
+ nat size_org = size;
+#endif

 TICK_GC_WORDS_COPIED(size);
 /* Find out where we're going, using the handy "to" pointer in
@@ -1261,6 +1410,11 @@ copy(StgClosure *src, nat size, step *stp)
 dest = stp->hp;
 stp->hp = to;
 upd_evacuee(src,(StgClosure *)dest);
+#ifdef PROFILING
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ SET_EVACUAEE_FOR_LDV(src, size_org); +#endif return (StgClosure *)dest; } @@ -1270,10 +1424,14 @@ copy(StgClosure *src, nat size, step *stp) */ -static __inline__ StgClosure * +static StgClosure * copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) { P_ dest, to, from; +#ifdef PROFILING + // @LDV profiling + nat size_to_copy_org = size_to_copy; +#endif TICK_GC_WORDS_COPIED(size_to_copy); if (stp->gen_no < evac_gen) { @@ -1295,6 +1453,16 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) dest = stp->hp; stp->hp += size_to_reserve; upd_evacuee(src,(StgClosure *)dest); +#ifdef PROFILING + // We store the size of the just evacuated object in the LDV word so that + // the profiler can guess the position of the next object later. + // size_to_copy_org is wrong because the closure already occupies size_to_reserve + // words. + SET_EVACUAEE_FOR_LDV(src, size_to_reserve); + // fill the slop + if (size_to_reserve - size_to_copy_org > 0) + FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org)); +#endif return (StgClosure *)dest; } @@ -1303,8 +1471,8 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) Evacuate a large object This just consists of removing the object from the (doubly-linked) - large_alloc_list, and linking it on to the (singly-linked) - new_large_objects list, from where it will be scavenged later. + step->large_objects list, and linking it on to the (singly-linked) + step->new_large_objects list, from where it will be scavenged later. Convention: bd->flags has BF_EVACUATED set for a large object that has been evacuated, or unset otherwise. @@ -1317,9 +1485,10 @@ evacuate_large(StgPtr p) bdescr *bd = Bdescr(p); step *stp; - // should point to the beginning of the block - ASSERT(((W_)p & BLOCK_MASK) == 0); - + // object must be at the beginning of the block (or be a ByteArray) + ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS || + (((W_)p & BLOCK_MASK) == 0)); + // already evacuated? if (bd->flags & BF_EVACUATED) { /* Don't forget to set the failed_to_evac flag if we didn't get @@ -1466,7 +1635,8 @@ loop: if (!is_marked((P_)q,bd)) { mark((P_)q,bd); if (mark_stack_full()) { - barf("ToDo: mark stack full"); + mark_stack_overflowed = rtsTrue; + reset_mark_stack(); } push_mark_stack((P_)q); } @@ -1556,139 +1726,26 @@ loop: case THUNK_SELECTOR: { - const StgInfoTable* selectee_info; - StgClosure* selectee = ((StgSelector*)q)->selectee; + StgClosure *p; - selector_loop: - selectee_info = get_itbl(selectee); - switch (selectee_info->type) { - case CONSTR: - case CONSTR_1_0: - case CONSTR_0_1: - case CONSTR_2_0: - case CONSTR_1_1: - case CONSTR_0_2: - case CONSTR_STATIC: - { - StgWord offset = info->layout.selector_offset; - - // check that the size is in range - ASSERT(offset < - (StgWord32)(selectee_info->layout.payload.ptrs + - selectee_info->layout.payload.nptrs)); - - // perform the selection! - q = selectee->payload[offset]; - - /* if we're already in to-space, there's no need to continue - * with the evacuation, just update the source address with - * a pointer to the (evacuated) constructor field. 
- */ - if (HEAP_ALLOCED(q)) { - bdescr *bd = Bdescr((P_)q); - if (bd->flags & BF_EVACUATED) { - if (bd->gen_no < evac_gen) { - failed_to_evac = rtsTrue; - TICK_GC_FAILED_PROMOTION(); - } - return q; - } - } - - /* otherwise, carry on and evacuate this constructor field, - * (but not the constructor itself) - */ - goto loop; + if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) { + return copy(q,THUNK_SELECTOR_sizeW(),stp); } - case IND: - case IND_STATIC: - case IND_PERM: - case IND_OLDGEN: - case IND_OLDGEN_PERM: - selectee = ((StgInd *)selectee)->indirectee; - goto selector_loop; - - case EVACUATED: - selectee = ((StgEvacuated *)selectee)->evacuee; - goto selector_loop; - - case THUNK_SELECTOR: -# if 0 - /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or - something) to go into an infinite loop when the nightly - stage2 compiles PrelTup.lhs. */ - - /* we can't recurse indefinitely in evacuate(), so set a - * limit on the number of times we can go around this - * loop. - */ - if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) { - bdescr *bd; - bd = Bdescr((P_)selectee); - if (!bd->flags & BF_EVACUATED) { - thunk_selector_depth++; - selectee = evacuate(selectee); - thunk_selector_depth--; - goto selector_loop; - } - } - // otherwise, fall through... -# endif - - case AP_UPD: - case THUNK: - case THUNK_1_0: - case THUNK_0_1: - case THUNK_2_0: - case THUNK_1_1: - case THUNK_0_2: - case THUNK_STATIC: - case CAF_BLACKHOLE: - case SE_CAF_BLACKHOLE: - case SE_BLACKHOLE: - case BLACKHOLE: - case BLACKHOLE_BQ: - // not evaluated yet - break; - -#if defined(PAR) - // a copy of the top-level cases below - case RBH: // cf. BLACKHOLE_BQ - { - //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str); - to = copy(q,BLACKHOLE_sizeW(),stp); - //ToDo: derive size etc from reverted IP - //to = copy(q,size,stp); - // recordMutable((StgMutClosure *)to); - return to; - } - - case BLOCKED_FETCH: - ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE); - to = copy(q,sizeofW(StgBlockedFetch),stp); - return to; - -# ifdef DIST - case REMOTE_REF: -# endif - case FETCH_ME: - ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); - to = copy(q,sizeofW(StgFetchMe),stp); - return to; - - case FETCH_ME_BQ: - ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); - to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp); - return to; -#endif + p = eval_thunk_selector(info->layout.selector_offset, + (StgSelector *)q); - default: - barf("evacuate: THUNK_SELECTOR: strange selectee %d", - (int)(selectee_info->type)); - } + if (p == NULL) { + return copy(q,THUNK_SELECTOR_sizeW(),stp); + } else { + // q is still BLACKHOLE'd. + thunk_selector_depth++; + p = evacuate(p); + thunk_selector_depth--; + upd_evacuee(q,p); + return p; + } } - return copy(q,THUNK_SELECTOR_sizeW(),stp); case IND: case IND_OLDGEN: @@ -1770,7 +1827,7 @@ loop: */ if (evac_gen > 0) { // optimisation StgClosure *p = ((StgEvacuated*)q)->evacuee; - if (Bdescr((P_)p)->gen_no < evac_gen) { + if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) { failed_to_evac = rtsTrue; TICK_GC_FAILED_PROMOTION(); } @@ -1848,11 +1905,133 @@ loop: return to; #endif - default: - barf("evacuate: strange closure type %d", (int)(info->type)); - } - - barf("evacuate"); + default: + barf("evacuate: strange closure type %d", (int)(info->type)); + } + + barf("evacuate"); +} + +/* ----------------------------------------------------------------------------- + Evaluate a THUNK_SELECTOR if possible. 
+ + returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or + a closure pointer if we evaluated it and this is the result + + If the return value is non-NULL, the original selector thunk has + been BLACKHOLE'd, and should be updated with an indirection or a + forwarding pointer. If the return value is NULL, then the selector + thunk is unchanged. + -------------------------------------------------------------------------- */ + +static StgClosure * +eval_thunk_selector( nat field, StgSelector * p ) +{ + StgInfoTable *info; + const StgInfoTable *info_ptr; + StgClosure *selectee; + + selectee = p->selectee; + + // Save the real info pointer (NOTE: not the same as get_itbl()). + info_ptr = p->header.info; + + // BLACKHOLE the selector thunk, since it is now under evaluation. + // This is important to stop us going into an infinite loop if + // this selector thunk eventually refers to itself. + SET_INFO(p,&stg_BLACKHOLE_info); + +selector_loop: + info = get_itbl(selectee); + switch (info->type) { + case CONSTR: + case CONSTR_1_0: + case CONSTR_0_1: + case CONSTR_2_0: + case CONSTR_1_1: + case CONSTR_0_2: + case CONSTR_STATIC: + case CONSTR_NOCAF_STATIC: + // check that the size is in range + ASSERT(field < (StgWord32)(info->layout.payload.ptrs + + info->layout.payload.nptrs)); + + return selectee->payload[field]; + + case IND: + case IND_STATIC: + case IND_PERM: + case IND_OLDGEN: + case IND_OLDGEN_PERM: + selectee = ((StgInd *)selectee)->indirectee; + goto selector_loop; + + case EVACUATED: + // We don't follow pointers into to-space; the constructor + // has already been evacuated, so we won't save any space + // leaks by evaluating this selector thunk anyhow. + break; + + case THUNK_SELECTOR: + { + StgClosure *val; + + // check that we don't recurse too much, re-using the + // depth bound also used in evacuate(). + thunk_selector_depth++; + if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) { + break; + } + + val = eval_thunk_selector(info->layout.selector_offset, + (StgSelector *)selectee); + + thunk_selector_depth--; + + if (val == NULL) { + break; + } else { + // we evaluated this selector thunk, so update it with + // an indirection. + UPD_IND_NOLOCK(selectee, val); + selectee = val; + goto selector_loop; + } + } + + case AP_UPD: + case THUNK: + case THUNK_1_0: + case THUNK_0_1: + case THUNK_2_0: + case THUNK_1_1: + case THUNK_0_2: + case THUNK_STATIC: + case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: + case BLACKHOLE: + case BLACKHOLE_BQ: +#if defined(PAR) + case RBH: + case BLOCKED_FETCH: +# ifdef DIST + case REMOTE_REF: +# endif + case FETCH_ME: + case FETCH_ME_BQ: +#endif + // not evaluated yet + break; + + default: + barf("eval_thunk_selector: strange selectee %d", + (int)(info->type)); + } + + // We didn't manage to evaluate this thunk; restore the old info pointer + SET_INFO(p, info_ptr); + return NULL; } /* ----------------------------------------------------------------------------- @@ -2027,6 +2206,8 @@ scavenge(step *stp) info = get_itbl((StgClosure *)p); ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info))); + ASSERT(thunk_selector_depth == 0); + q = p; switch (info->type) { @@ -2117,9 +2298,23 @@ scavenge(step *stp) } case IND_PERM: - if (stp->gen_no != 0) { - SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); - } + if (stp->gen->no != 0) { +#ifdef PROFILING + // @LDV profiling + // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an + // IND_OLDGEN_PERM closure is larger than an IND_PERM closure. 
+ LDV_recordDead((StgClosure *)p, sizeofW(StgInd)); +#endif + // + // Todo: maybe use SET_HDR() and remove LDV_recordCreate()? + // + SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); +#ifdef PROFILING + // @LDV profiling + // We pretend that p has just been created. + LDV_recordCreate((StgClosure *)p); +#endif + } // fall through case IND_OLDGEN_PERM: ((StgIndOldGen *)p)->indirectee = @@ -2331,19 +2526,21 @@ scavenge(step *stp) static void scavenge_mark_stack(void) { - StgPtr p; + StgPtr p, q; StgInfoTable *info; nat saved_evac_gen; evac_gen = oldest_gen->no; saved_evac_gen = evac_gen; +linear_scan: while (!mark_stack_empty()) { p = pop_mark_stack(); info = get_itbl((StgClosure *)p); ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info))); + q = p; switch (info->type) { case MVAR: @@ -2367,7 +2564,6 @@ scavenge_mark_stack(void) case CONSTR_2_0: ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]); ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); - mark(p+1,Bdescr(p)); break; case FUN_1_0: @@ -2378,7 +2574,6 @@ scavenge_mark_stack(void) case CONSTR_1_0: case CONSTR_1_1: ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); - mark(p+1,Bdescr(p)); break; case FUN_0_1: @@ -2388,7 +2583,6 @@ scavenge_mark_stack(void) scavenge_srt(info); case CONSTR_0_1: case CONSTR_0_2: - mark(p+1,Bdescr(p)); break; case FUN: @@ -2425,7 +2619,6 @@ scavenge_mark_stack(void) recordOldToNewPtrs((StgMutClosure *)p); } failed_to_evac = rtsFalse; - mark(p+1,Bdescr(p)); break; case MUT_VAR: @@ -2433,7 +2626,6 @@ scavenge_mark_stack(void) ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); evac_gen = saved_evac_gen; failed_to_evac = rtsFalse; - mark(p+1,Bdescr(p)); break; case MUT_CONS: @@ -2573,7 +2765,7 @@ scavenge_mark_stack(void) p, info_type((StgClosure *)p))); break; } -#endif +#endif // PAR default: barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p", @@ -2582,11 +2774,52 @@ scavenge_mark_stack(void) if (failed_to_evac) { failed_to_evac = rtsFalse; - mkMutCons((StgClosure *)p, &generations[evac_gen]); + mkMutCons((StgClosure *)q, &generations[evac_gen]); } + + // mark the next bit to indicate "scavenged" + mark(q+1, Bdescr(q)); } // while (!mark_stack_empty()) -} + + // start a new linear scan if the mark stack overflowed at some point + if (mark_stack_overflowed && oldgen_scan_bd == NULL) { + IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan")); + mark_stack_overflowed = rtsFalse; + oldgen_scan_bd = oldest_gen->steps[0].blocks; + oldgen_scan = oldgen_scan_bd->start; + } + + if (oldgen_scan_bd) { + // push a new thing on the mark stack + loop: + // find a closure that is marked but not scavenged, and start + // from there. + while (oldgen_scan < oldgen_scan_bd->free + && !is_marked(oldgen_scan,oldgen_scan_bd)) { + oldgen_scan++; + } + + if (oldgen_scan < oldgen_scan_bd->free) { + + // already scavenged? + if (is_marked(oldgen_scan+1,oldgen_scan_bd)) { + oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE; + goto loop; + } + push_mark_stack(oldgen_scan); + // ToDo: bump the linear scan by the actual size of the object + oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE; + goto linear_scan; + } + + oldgen_scan_bd = oldgen_scan_bd->link; + if (oldgen_scan_bd != NULL) { + oldgen_scan = oldgen_scan_bd->start; + goto loop; + } + } +} /* ----------------------------------------------------------------------------- Scavenge one object. 
@@ -2597,104 +2830,131 @@ scavenge_mark_stack(void) -------------------------------------------------------------------------- */ static rtsBool -scavenge_one(StgClosure *p) +scavenge_one(StgPtr p) { - const StgInfoTable *info; - rtsBool no_luck; - - ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p)) - || IS_HUGS_CONSTR_INFO(GET_INFO(p)))); - - info = get_itbl(p); - - switch (info -> type) { - - case FUN: - case FUN_1_0: // hardly worth specialising these guys - case FUN_0_1: - case FUN_1_1: - case FUN_0_2: - case FUN_2_0: - case THUNK: - case THUNK_1_0: - case THUNK_0_1: - case THUNK_1_1: - case THUNK_0_2: - case THUNK_2_0: - case CONSTR: - case CONSTR_1_0: - case CONSTR_0_1: - case CONSTR_1_1: - case CONSTR_0_2: - case CONSTR_2_0: - case WEAK: - case FOREIGN: - case IND_PERM: - case IND_OLDGEN_PERM: + const StgInfoTable *info; + nat saved_evac_gen = evac_gen; + rtsBool no_luck; + + ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p)) + || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p)))); + + info = get_itbl((StgClosure *)p); + + switch (info->type) { + + case FUN: + case FUN_1_0: // hardly worth specialising these guys + case FUN_0_1: + case FUN_1_1: + case FUN_0_2: + case FUN_2_0: + case THUNK: + case THUNK_1_0: + case THUNK_0_1: + case THUNK_1_1: + case THUNK_0_2: + case THUNK_2_0: + case CONSTR: + case CONSTR_1_0: + case CONSTR_0_1: + case CONSTR_1_1: + case CONSTR_0_2: + case CONSTR_2_0: + case WEAK: + case FOREIGN: + case IND_PERM: + case IND_OLDGEN_PERM: { - StgPtr q, end; - - end = (P_)p->payload + info->layout.payload.ptrs; - for (q = (P_)p->payload; q < end; q++) { - (StgClosure *)*q = evacuate((StgClosure *)*q); - } - break; + StgPtr q, end; + + end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs; + for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) { + (StgClosure *)*q = evacuate((StgClosure *)*q); + } + break; } - - case CAF_BLACKHOLE: - case SE_CAF_BLACKHOLE: - case SE_BLACKHOLE: - case BLACKHOLE: - break; - - case THUNK_SELECTOR: + + case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: + case BLACKHOLE: + break; + + case THUNK_SELECTOR: { - StgSelector *s = (StgSelector *)p; - s->selectee = evacuate(s->selectee); - break; + StgSelector *s = (StgSelector *)p; + s->selectee = evacuate(s->selectee); + break; } - case AP_UPD: /* same as PAPs */ - case PAP: - /* Treat a PAP just like a section of stack, not forgetting to - * evacuate the function pointer too... - */ - { - StgPAP* pap = (StgPAP *)p; + case ARR_WORDS: + // nothing to follow + break; - pap->fun = evacuate(pap->fun); - scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); - break; + case MUT_ARR_PTRS: + { + // follow everything + StgPtr next; + + evac_gen = 0; // repeatedly mutable + recordMutable((StgMutClosure *)p); + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + (StgClosure *)*p = evacuate((StgClosure *)*p); + } + evac_gen = saved_evac_gen; + failed_to_evac = rtsFalse; + break; } - case IND_OLDGEN: - /* This might happen if for instance a MUT_CONS was pointing to a - * THUNK which has since been updated. The IND_OLDGEN will - * be on the mutable list anyway, so we don't need to do anything - * here. 
- */ - break; + case MUT_ARR_PTRS_FROZEN: + { + // follow everything + StgPtr next; + + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + (StgClosure *)*p = evacuate((StgClosure *)*p); + } + break; + } - case MUT_ARR_PTRS_FROZEN: - { - // follow everything - StgPtr q, next; + case TSO: + { + StgTSO *tso = (StgTSO *)p; + + evac_gen = 0; // repeatedly mutable + scavengeTSO(tso); + recordMutable((StgMutClosure *)tso); + evac_gen = saved_evac_gen; + failed_to_evac = rtsFalse; + break; + } + + case AP_UPD: + case PAP: + { + StgPAP* pap = (StgPAP *)p; + pap->fun = evacuate(pap->fun); + scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); + break; + } - q = (StgPtr)p; - next = q + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (q = (P_)((StgMutArrPtrs *)p)->payload; q < next; q++) { - (StgClosure *)*q = evacuate((StgClosure *)*q); - } - break; - } + case IND_OLDGEN: + // This might happen if for instance a MUT_CONS was pointing to a + // THUNK which has since been updated. The IND_OLDGEN will + // be on the mutable list anyway, so we don't need to do anything + // here. + break; - default: - barf("scavenge_one: strange object %d", (int)(info->type)); - } + default: + barf("scavenge_one: strange object %d", (int)(info->type)); + } - no_luck = failed_to_evac; - failed_to_evac = rtsFalse; - return (no_luck); + no_luck = failed_to_evac; + failed_to_evac = rtsFalse; + return (no_luck); } /* ----------------------------------------------------------------------------- @@ -2762,7 +3022,7 @@ scavenge_mut_once_list(generation *gen) } else { size = gen->steps[0].scan - start; } - fprintf(stderr,"evac IND_OLDGEN: %ld bytes\n", size * sizeof(W_)); + belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_)); } #endif @@ -2792,7 +3052,7 @@ scavenge_mut_once_list(generation *gen) * it from the mutable list if possible by promoting whatever it * points to. */ - if (scavenge_one((StgClosure *)((StgMutVar *)p)->var)) { + if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) { /* didn't manage to promote everything, so put the * MUT_CONS back on the list. */ @@ -3043,7 +3303,7 @@ scavenge_static(void) */ if (failed_to_evac) { failed_to_evac = rtsFalse; - scavenged_static_objects = STATIC_LINK(info,p); + scavenged_static_objects = IND_STATIC_LINK(p); ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list; oldest_gen->mut_once_list = (StgMutClosure *)ind; } @@ -3291,9 +3551,7 @@ static void scavenge_large(step *stp) { bdescr *bd; - StgPtr p, q; - const StgInfoTable* info; - nat saved_evac_gen = evac_gen; // used for temporarily changing evac_gen + StgPtr p; bd = stp->new_large_objects; @@ -3311,72 +3569,8 @@ scavenge_large(step *stp) stp->n_scavenged_large_blocks += bd->blocks; p = bd->start; - info = get_itbl((StgClosure *)p); - - // only certain objects can be "large"... 
- q = p; - switch (info->type) { - - case ARR_WORDS: - // nothing to follow - break; - - case MUT_ARR_PTRS: - { - // follow everything - StgPtr next; - - evac_gen = 0; // repeatedly mutable - recordMutable((StgMutClosure *)p); - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); - } - evac_gen = saved_evac_gen; - failed_to_evac = rtsFalse; - break; - } - - case MUT_ARR_PTRS_FROZEN: - { - // follow everything - StgPtr next; - - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); - } - break; - } - - case TSO: - { - StgTSO *tso = (StgTSO *)p; - - evac_gen = 0; // repeatedly mutable - scavengeTSO(tso); - recordMutable((StgMutClosure *)tso); - evac_gen = saved_evac_gen; - failed_to_evac = rtsFalse; - break; - } - - case AP_UPD: - case PAP: - { - StgPAP* pap = (StgPAP *)p; - pap->fun = evacuate(pap->fun); - scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); - break; - } - - default: - barf("scavenge_large: unknown/strange object %d", (int)(info->type)); - } - - if (failed_to_evac) { - failed_to_evac = rtsFalse; - mkMutCons((StgClosure *)q, &generations[evac_gen]); + if (scavenge_one(p)) { + mkMutCons((StgClosure *)p, stp->gen); } } } @@ -3439,15 +3633,14 @@ revertCAFs( void ) } void -scavengeCAFs( void ) +markCAFs( evac_fn evac ) { StgIndStatic *c; - evac_gen = 0; for (c = (StgIndStatic *)caf_list; c != NULL; c = (StgIndStatic *)c->static_link) { - c->indirectee = evacuate(c->indirectee); + evac(&c->indirectee); } } @@ -3484,7 +3677,7 @@ gcCAFs(void) ASSERT(info->type == IND_STATIC); if (STATIC_LINK(info,p) == NULL) { - IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04lx\n", (long)p)); + IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p)); // black hole it SET_INFO(p,&stg_BLACKHOLE_info); p = STATIC_LINK2(info,p); @@ -3498,7 +3691,7 @@ gcCAFs(void) } - // fprintf(stderr, "%d CAFs live\n", i); + // belch("%d CAFs live", i); } #endif @@ -3545,9 +3738,19 @@ threadLazyBlackHole(StgTSO *tso) if (bh->header.info != &stg_BLACKHOLE_BQ_info && bh->header.info != &stg_CAF_BLACKHOLE_info) { #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) - fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh); + belch("Unexpected lazy BHing required at 0x%04x",(int)bh); +#endif +#ifdef PROFILING + // @LDV profiling + // We pretend that bh is now dead. + LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh); #endif SET_INFO(bh,&stg_BLACKHOLE_info); +#ifdef PROFILING + // @LDV profiling + // We pretend that bh has just been created. 
+ LDV_recordCreate(bh); +#endif } update_frame = update_frame->link; @@ -3694,7 +3897,7 @@ threadSqueezeStack(StgTSO *tso) StgClosure *updatee_bypass = frame->updatee; #if DEBUG - IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame)); + IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame)); squeezes++; #endif @@ -3769,7 +3972,7 @@ threadSqueezeStack(StgTSO *tso) bh->header.info != &stg_BLACKHOLE_BQ_info && bh->header.info != &stg_CAF_BLACKHOLE_info) { #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) - fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh); + belch("Unexpected lazy BHing required at 0x%04x",(int)bh); #endif #ifdef DEBUG /* zero out the slop so that the sanity checker can tell @@ -3778,7 +3981,7 @@ threadSqueezeStack(StgTSO *tso) { StgInfoTable *info = get_itbl(bh); nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i; - /* don't zero out slop for a THUNK_SELECTOR, because it's layout + /* don't zero out slop for a THUNK_SELECTOR, because its layout * info is used for a different purpose, and it's exactly the * same size as a BLACKHOLE in any case. */ @@ -3789,7 +3992,20 @@ threadSqueezeStack(StgTSO *tso) } } #endif +#ifdef PROFILING + // @LDV profiling + // We pretend that bh is now dead. + LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh); +#endif + // + // Todo: maybe use SET_HDR() and remove LDV_recordCreate()? + // SET_INFO(bh,&stg_BLACKHOLE_info); +#ifdef PROFILING + // @LDV profiling + // We pretend that bh has just been created. + LDV_recordCreate(bh); +#endif } } @@ -3808,10 +4024,10 @@ threadSqueezeStack(StgTSO *tso) else next_frame_bottom = tso->sp - 1; -#if DEBUG +#if 0 IF_DEBUG(gc, - fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom, - displacement)) + belch("sliding [%p, %p] by %ld", sp, next_frame_bottom, + displacement)) #endif while (sp >= next_frame_bottom) { @@ -3825,9 +4041,9 @@ threadSqueezeStack(StgTSO *tso) tso->sp += displacement; tso->su = prev_frame; -#if DEBUG +#if 0 IF_DEBUG(gc, - fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n", + belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames", squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames)) #endif }
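
A closing note on the new heap-sizing hunk in GarbageCollect(): it is mostly bare arithmetic, so a worked example may help. The sketch below is a standalone illustration, not GHC code; live, max, gens and the other inputs are assumed block counts standing in for the RtsFlags fields of the same names, but the clamping expressions are the ones from the patch.

    #include <stdio.h>

    #define STG_MAX(a,b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        /* assumed inputs, all in blocks (stand-ins for RtsFlags fields) */
        unsigned live             = 300;   /* oldest generation live blocks */
        unsigned max              = 2000;  /* maximum heap size, 0 = none   */
        unsigned gens             = 3;     /* number of generations         */
        unsigned oldGenFactor     = 2;
        unsigned minOldGenSize    = 256;
        unsigned pcFreeHeap       = 3;     /* percent of heap kept free     */
        unsigned minAllocAreaSize = 128;
        int      compacted        = 0;     /* is the oldest gen compacted?  */

        /* default max size for all generations except zero */
        unsigned size = STG_MAX(live * oldGenFactor, minOldGenSize);

        /* minimum size for generation zero */
        unsigned min_alloc = STG_MAX((pcFreeHeap * max) / 200,
                                     minAllocAreaSize);

        if (max != 0) {
            if (compacted) {
                /* compaction avoids doubling the oldest gen's space */
                if ((size + (size - 1) * (gens - 2) * 2) + min_alloc > max)
                    size = (max - min_alloc) / ((gens - 1) * 2 - 1);
            } else {
                /* each older generation may need 2x its size during GC */
                if ((size * (gens - 1) * 2) + min_alloc > max)
                    size = (max - min_alloc) / ((gens - 1) * 2);
            }
        }

        printf("max_blocks for generations 1..%u: %u (min_alloc %u)\n",
               gens - 1, size, min_alloc);
        return 0;
    }

With these numbers the uncompacted branch clamps the size: 600 blocks per older generation would need 600 * (3 - 1) * 2 = 2400 blocks of to-space beside a 128-block allocation area, which overflows the 2000-block limit, so every generation except zero gets (2000 - 128) / 4 = 468 blocks instead.
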