X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=c0e1a4aaa3c5cab979bd42a92c8a73a89ac4ab92;hb=29e55dea299569979d4e73d64b709a97aaea36e8;hp=0475e4653ea8ae303dbd84864007ef26a51c94e0;hpb=bc51f1af3c837d6c5cdac8374fad987773240202;p=ghc-hetmet.git diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c index 0475e46..c0e1a4a 100644 --- a/ghc/rts/GC.c +++ b/ghc/rts/GC.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: GC.c,v 1.110 2001/07/26 14:29:26 simonmar Exp $ + * $Id: GC.c,v 1.143 2002/09/18 06:34:07 mthomas Exp $ * * (c) The GHC Team 1998-1999 * @@ -7,6 +7,7 @@ * * ---------------------------------------------------------------------------*/ +#include "PosixSource.h" #include "Rts.h" #include "RtsFlags.h" #include "RtsUtils.h" @@ -26,6 +27,7 @@ #include "Prelude.h" #include "ParTicky.h" // ToDo: move into Rts.h #include "GCCompact.h" +#include "Signals.h" #if defined(GRAN) || defined(PAR) # include "GranSimRts.h" # include "ParallelRts.h" @@ -40,7 +42,11 @@ #if defined(RTS_GTK_FRONTPANEL) #include "FrontPanel.h" #endif -#include + +#include "RetainerProfile.h" +#include "LdvProfile.h" + +#include /* STATIC OBJECT LIST. * @@ -76,8 +82,8 @@ * We build up a static object list while collecting generations 0..N, * which is then appended to the static object list of generation N+1. */ -StgClosure* static_objects; // live static objects -StgClosure* scavenged_static_objects; // static objects scavenged so far +static StgClosure* static_objects; // live static objects +StgClosure* scavenged_static_objects; // static objects scavenged so far /* N is the oldest generation being collected, where the generations * are numbered starting at 0. A major GC (indicated by the major_gc @@ -96,12 +102,17 @@ static nat evac_gen; /* Weak pointers */ StgWeak *old_weak_ptr_list; // also pending finaliser list -static rtsBool weak_done; // all done for this pass + +/* Which stage of processing various kinds of weak pointer are we at? + * (see traverse_weak_ptr_list() below for discussion). + */ +typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage; +static WeakStage weak_stage; /* List of all threads during GC */ static StgTSO *old_all_threads; -static StgTSO *resurrected_threads; +StgTSO *resurrected_threads; /* Flag indicating failure to evacuate an object to the desired * generation. @@ -110,17 +121,17 @@ static rtsBool failed_to_evac; /* Old to-space (used for two-space collector only) */ -bdescr *old_to_blocks; +static bdescr *old_to_blocks; /* Data used for allocation area sizing. 
*/ -lnat new_blocks; // blocks allocated during this GC -lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC +static lnat new_blocks; // blocks allocated during this GC +static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC /* Used to avoid long recursion due to selector thunks */ -lnat thunk_selector_depth = 0; -#define MAX_THUNK_SELECTOR_DEPTH 256 +static lnat thunk_selector_depth = 0; +#define MAX_THUNK_SELECTOR_DEPTH 8 /* ----------------------------------------------------------------------------- Static function declarations @@ -132,7 +143,9 @@ static void zero_static_object_list ( StgClosure* first_static ); static void zero_mutable_list ( StgMutClosure *first ); static rtsBool traverse_weak_ptr_list ( void ); -static void cleanup_weak_ptr_list ( StgWeak **list ); +static void mark_weak_ptr_list ( StgWeak **list ); + +static StgClosure * eval_thunk_selector ( nat field, StgSelector * p ); static void scavenge ( step * ); static void scavenge_mark_stack ( void ); @@ -142,7 +155,6 @@ static void scavenge_large ( step * ); static void scavenge_static ( void ); static void scavenge_mutable_list ( generation *g ); static void scavenge_mut_once_list ( generation *g ); -static void scavengeCAFs ( void ); #if 0 && defined(DEBUG) static void gcCAFs ( void ); @@ -216,6 +228,8 @@ pop_mark_stack(void) - free from-space in each step, and set from-space = to-space. + Locks held: sched_mutex + -------------------------------------------------------------------------- */ void @@ -236,6 +250,11 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) Now, Now)); #endif +#ifndef mingw32_TARGET_OS + // block signals + blockUserSignals(); +#endif + // tell the stats department that we've started a GC stat_startGC(); @@ -462,7 +481,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) } } - scavengeCAFs(); + /* follow roots from the CAF list (used by GHCi) + */ + evac_gen = 0; + markCAFs(mark_root); /* follow all the roots that the application knows about. */ @@ -487,9 +509,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) /* Mark the weak pointer list, and prepare to detect dead weak * pointers. */ + mark_weak_ptr_list(&weak_ptr_list); old_weak_ptr_list = weak_ptr_list; weak_ptr_list = NULL; - weak_done = rtsFalse; + weak_stage = WeakPtrs; /* The all_threads list is like the weak_ptr_list. * See traverse_weak_ptr_list() for the details. @@ -573,16 +596,31 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) if (flag) { goto loop; } - // must be last... + // must be last... invariant is that everything is fully + // scavenged at this point. if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something goto loop; } } - /* Final traversal of the weak pointer list (see comment by - * cleanUpWeakPtrList below). + /* Update the pointers from the "main thread" list - these are + * treated as weak pointers because we want to allow a main thread + * to get a BlockedOnDeadMVar exception in the same way as any other + * thread. Note that the threads should all have been retained by + * GC by virtue of being on the all_threads list, we're just + * updating pointers here. 
*/ - cleanup_weak_ptr_list(&weak_ptr_list); + { + StgMainThread *m; + StgTSO *tso; + for (m = main_threads; m != NULL; m = m->link) { + tso = (StgTSO *) isAlive((StgClosure *)m->tso); + if (tso == NULL) { + barf("main thread has been GC'd"); + } + m->tso = tso; + } + } #if defined(PAR) // Reconstruct the Global Address tables used in GUM @@ -604,9 +642,17 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) } } +#ifdef PROFILING + // We call processHeapClosureForDead() on every closure destroyed during + // the current garbage collection, so we invoke LdvCensusForDead(). + if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV + || RtsFlags.ProfFlags.bioSelector != NULL) + LdvCensusForDead(N); +#endif + // NO MORE EVACUATION AFTER THIS POINT! // Finally: compaction of the oldest generation. - if (major_gc && RtsFlags.GcFlags.compact) { + if (major_gc && oldest_gen->steps[0].is_compacted) { // save number of blocks for stats oldgen_saved_blocks = oldest_gen->steps[0].n_blocks; compact(get_roots); @@ -699,24 +745,8 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) stp->large_objects = stp->scavenged_large_objects; stp->n_large_blocks = stp->n_scavenged_large_blocks; - /* Set the maximum blocks for this generation, interpolating - * between the maximum size of the oldest and youngest - * generations. - * - * max_blocks = oldgen_max_blocks * G - * ---------------------- - * oldest_gen - */ - if (g != 0) { -#if 0 - generations[g].max_blocks = (oldest_gen->max_blocks * g) - / (RtsFlags.GcFlags.generations-1); -#endif - generations[g].max_blocks = oldest_gen->max_blocks; - } - - // for older generations... } else { + // for older generations... /* For older generations, we need to append the * scavenged_large_object list (i.e. large objects that have been @@ -735,30 +765,83 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) } } - /* Set the maximum blocks for the oldest generation, based on twice - * the amount of live data now, adjusted to fit the maximum heap - * size if necessary. + /* Reset the sizes of the older generations when we do a major + * collection. * - * This is an approximation, since in the worst case we'll need - * twice the amount of live data plus whatever space the other - * generations need. + * CURRENT STRATEGY: make all generations except zero the same size. + * We have to stay within the maximum heap size, and leave a certain + * percentage of the maximum heap size available to allocate into. 
*/ if (major_gc && RtsFlags.GcFlags.generations > 1) { - oldest_gen->max_blocks = - stg_max(oldest_gen->steps[0].n_blocks * RtsFlags.GcFlags.oldGenFactor, - RtsFlags.GcFlags.minOldGenSize); - if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) { - oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2; - if (((int)oldest_gen->max_blocks - - (int)oldest_gen->steps[0].n_blocks) < - (RtsFlags.GcFlags.pcFreeHeap * - RtsFlags.GcFlags.maxHeapSize / 200)) { - heapOverflow(); - } + nat live, size, min_alloc; + nat max = RtsFlags.GcFlags.maxHeapSize; + nat gens = RtsFlags.GcFlags.generations; + + // live in the oldest generations + live = oldest_gen->steps[0].n_blocks + + oldest_gen->steps[0].n_large_blocks; + + // default max size for all generations except zero + size = stg_max(live * RtsFlags.GcFlags.oldGenFactor, + RtsFlags.GcFlags.minOldGenSize); + + // minimum size for generation zero + min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200, + RtsFlags.GcFlags.minAllocAreaSize); + + // Auto-enable compaction when the residency reaches a + // certain percentage of the maximum heap size (default: 30%). + if (RtsFlags.GcFlags.generations > 1 && + (RtsFlags.GcFlags.compact || + (max > 0 && + oldest_gen->steps[0].n_blocks > + (RtsFlags.GcFlags.compactThreshold * max) / 100))) { + oldest_gen->steps[0].is_compacted = 1; +// fprintf(stderr,"compaction: on\n", live); + } else { + oldest_gen->steps[0].is_compacted = 0; +// fprintf(stderr,"compaction: off\n", live); + } + + // if we're going to go over the maximum heap size, reduce the + // size of the generations accordingly. The calculation is + // different if compaction is turned on, because we don't need + // to double the space required to collect the old generation. + if (max != 0) { + + // this test is necessary to ensure that the calculations + // below don't have any negative results - we're working + // with unsigned values here. + if (max < min_alloc) { + heapOverflow(); + } + + if (oldest_gen->steps[0].is_compacted) { + if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) { + size = (max - min_alloc) / ((gens - 1) * 2 - 1); + } + } else { + if ( (size * (gens - 1) * 2) + min_alloc > max ) { + size = (max - min_alloc) / ((gens - 1) * 2); + } + } + + if (size < live) { + heapOverflow(); + } + } + +#if 0 + fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live, + min_alloc, size, max); +#endif + + for (g = 0; g < gens; g++) { + generations[g].max_blocks = size; } } - // Guess the amount of live data for stats. + // Guess the amount of live data for stats. live = calcLive(); /* Free the small objects allocated via allocate(), since this will @@ -773,6 +856,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) alloc_HpLim = NULL; alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize; + // Start a new pinned_object_block + pinned_object_block = NULL; + /* Free the mark stack. */ if (mark_stack_bdescr != NULL) { @@ -806,9 +892,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) /* For a two-space collector, we need to resize the nursery. */ /* set up a new nursery. Allocate a nursery size based on a - * function of the amount of live data (currently a factor of 2, - * should be configurable (ToDo)). Use the blocks from the old - * nursery if possible, freeing up any left over blocks. + * function of the amount of live data (by default a factor of 2) + * Use the blocks from the old nursery if possible, freeing up any + * left over blocks. 
* * If we get near the maximum heap size, then adjust our nursery * size accordingly. If the nursery is the same size as the live @@ -817,12 +903,13 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) * * A normal 2-space collector would need 4L bytes to give the same * performance we get from 3L bytes, reducing to the same - * performance at 2L bytes. + * performance at 2L bytes. */ blocks = g0s0->n_to_blocks; - if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 > - RtsFlags.GcFlags.maxHeapSize ) { + if ( RtsFlags.GcFlags.maxHeapSize != 0 && + blocks * RtsFlags.GcFlags.oldGenFactor * 2 > + RtsFlags.GcFlags.maxHeapSize ) { long adjusted_blocks; // signed on purpose int pc_free; @@ -881,6 +968,11 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) } resizeNursery((nat)blocks); + + } else { + // we might have added extra large blocks to the nursery, so + // resize back to minAllocAreaSize again. + resizeNursery(RtsFlags.GcFlags.minAllocAreaSize); } } @@ -889,20 +981,29 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) if (major_gc) { gcCAFs(); } #endif +#ifdef PROFILING + // resetStaticObjectForRetainerProfiling() must be called before + // zeroing below. + resetStaticObjectForRetainerProfiling(); +#endif + // zero the scavenged static object list if (major_gc) { zero_static_object_list(scavenged_static_objects); } - /* Reset the nursery - */ + // Reset the nursery resetNurseries(); + RELEASE_LOCK(&sched_mutex); + // start any pending finalizers scheduleFinalizers(old_weak_ptr_list); // send exceptions to any threads which were about to die resurrectThreads(resurrected_threads); + + ACQUIRE_LOCK(&sched_mutex); // Update the stable pointer hash table. updateStablePtrTable(major_gc); @@ -920,7 +1021,6 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) // restore enclosing cost centre #ifdef PROFILING - heapCensus(); CCCS = prev_CCS; #endif @@ -936,6 +1036,11 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) // ok, GC over: tell the stats department what happened. stat_endGC(allocated, collected, live, copied, N); +#ifndef mingw32_TARGET_OS + // unblock signals again + unblockUserSignals(); +#endif + //PAR_TICKY_TP(); } @@ -958,6 +1063,30 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) older generations than the one we're collecting. This could probably be optimised by keeping per-generation lists of weak pointers, but for a few weak pointers this scheme will work. + + There are three distinct stages to processing weak pointers: + + - weak_stage == WeakPtrs + + We process all the weak pointers whos keys are alive (evacuate + their values and finalizers), and repeat until we can find no new + live keys. If no live keys are found in this pass, then we + evacuate the finalizers of all the dead weak pointers in order to + run them. + + - weak_stage == WeakThreads + + Now, we discover which *threads* are still alive. Pointers to + threads from the all_threads and main thread lists are the + weakest of all: a pointers from the finalizer of a dead weak + pointer can keep a thread alive. Any threads found to be unreachable + are evacuated and placed on the resurrected_threads list so we + can send them a signal later. + + - weak_stage == WeakDone + + No more evacuation is done. 
+ -------------------------------------------------------------------------- */ static rtsBool @@ -967,137 +1096,154 @@ traverse_weak_ptr_list(void) StgClosure *new; rtsBool flag = rtsFalse; - if (weak_done) { return rtsFalse; } - - /* doesn't matter where we evacuate values/finalizers to, since - * these pointers are treated as roots (iff the keys are alive). - */ - evac_gen = 0; - - last_w = &old_weak_ptr_list; - for (w = old_weak_ptr_list; w != NULL; w = next_w) { - - /* First, this weak pointer might have been evacuated. If so, - * remove the forwarding pointer from the weak_ptr_list. - */ - if (get_itbl(w)->type == EVACUATED) { - w = (StgWeak *)((StgEvacuated *)w)->evacuee; - *last_w = w; - } - - /* There might be a DEAD_WEAK on the list if finalizeWeak# was - * called on a live weak pointer object. Just remove it. - */ - if (w->header.info == &stg_DEAD_WEAK_info) { - next_w = ((StgDeadWeak *)w)->link; - *last_w = next_w; - continue; - } + switch (weak_stage) { - ASSERT(get_itbl(w)->type == WEAK); - - /* Now, check whether the key is reachable. - */ - if ((new = isAlive(w->key))) { - w->key = new; - // evacuate the value and finalizer - w->value = evacuate(w->value); - w->finalizer = evacuate(w->finalizer); - // remove this weak ptr from the old_weak_ptr list - *last_w = w->link; - // and put it on the new weak ptr list - next_w = w->link; - w->link = weak_ptr_list; - weak_ptr_list = w; - flag = rtsTrue; - IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key)); - continue; - } - else { - last_w = &(w->link); - next_w = w->link; - continue; - } - } + case WeakDone: + return rtsFalse; - /* Now deal with the all_threads list, which behaves somewhat like - * the weak ptr list. If we discover any threads that are about to - * become garbage, we wake them up and administer an exception. - */ - { - StgTSO *t, *tmp, *next, **prev; + case WeakPtrs: + /* doesn't matter where we evacuate values/finalizers to, since + * these pointers are treated as roots (iff the keys are alive). + */ + evac_gen = 0; + + last_w = &old_weak_ptr_list; + for (w = old_weak_ptr_list; w != NULL; w = next_w) { + + /* There might be a DEAD_WEAK on the list if finalizeWeak# was + * called on a live weak pointer object. Just remove it. + */ + if (w->header.info == &stg_DEAD_WEAK_info) { + next_w = ((StgDeadWeak *)w)->link; + *last_w = next_w; + continue; + } + + switch (get_itbl(w)->type) { - prev = &old_all_threads; - for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { + case EVACUATED: + next_w = (StgWeak *)((StgEvacuated *)w)->evacuee; + *last_w = next_w; + continue; - (StgClosure *)tmp = isAlive((StgClosure *)t); - - if (tmp != NULL) { - t = tmp; - } + case WEAK: + /* Now, check whether the key is reachable. + */ + new = isAlive(w->key); + if (new != NULL) { + w->key = new; + // evacuate the value and finalizer + w->value = evacuate(w->value); + w->finalizer = evacuate(w->finalizer); + // remove this weak ptr from the old_weak_ptr list + *last_w = w->link; + // and put it on the new weak ptr list + next_w = w->link; + w->link = weak_ptr_list; + weak_ptr_list = w; + flag = rtsTrue; + IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", + w, w->key)); + continue; + } + else { + last_w = &(w->link); + next_w = w->link; + continue; + } - ASSERT(get_itbl(t)->type == TSO); - switch (t->what_next) { - case ThreadRelocated: - next = t->link; - *prev = next; - continue; - case ThreadKilled: - case ThreadComplete: - // finshed or died. 
The thread might still be alive, but we - // don't keep it on the all_threads list. Don't forget to - // stub out its global_link field. - next = t->global_link; - t->global_link = END_TSO_QUEUE; - *prev = next; - continue; - default: - ; + default: + barf("traverse_weak_ptr_list: not WEAK"); + } } + + /* If we didn't make any changes, then we can go round and kill all + * the dead weak pointers. The old_weak_ptr list is used as a list + * of pending finalizers later on. + */ + if (flag == rtsFalse) { + for (w = old_weak_ptr_list; w; w = w->link) { + w->finalizer = evacuate(w->finalizer); + } - if (tmp == NULL) { - // not alive (yet): leave this thread on the old_all_threads list. - prev = &(t->global_link); - next = t->global_link; - continue; - } - else { - // alive: move this thread onto the all_threads list. - next = t->global_link; - t->global_link = all_threads; - all_threads = t; - *prev = next; - break; + // Next, move to the WeakThreads stage after fully + // scavenging the finalizers we've just evacuated. + weak_stage = WeakThreads; } - } - } - /* If we didn't make any changes, then we can go round and kill all - * the dead weak pointers. The old_weak_ptr list is used as a list - * of pending finalizers later on. - */ - if (flag == rtsFalse) { - cleanup_weak_ptr_list(&old_weak_ptr_list); - for (w = old_weak_ptr_list; w; w = w->link) { - w->finalizer = evacuate(w->finalizer); - } + return rtsTrue; - /* And resurrect any threads which were about to become garbage. - */ - { - StgTSO *t, *tmp, *next; - for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { - next = t->global_link; - (StgClosure *)tmp = evacuate((StgClosure *)t); - tmp->global_link = resurrected_threads; - resurrected_threads = tmp; + case WeakThreads: + /* Now deal with the all_threads list, which behaves somewhat like + * the weak ptr list. If we discover any threads that are about to + * become garbage, we wake them up and administer an exception. + */ + { + StgTSO *t, *tmp, *next, **prev; + + prev = &old_all_threads; + for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { + + (StgClosure *)tmp = isAlive((StgClosure *)t); + + if (tmp != NULL) { + t = tmp; + } + + ASSERT(get_itbl(t)->type == TSO); + switch (t->what_next) { + case ThreadRelocated: + next = t->link; + *prev = next; + continue; + case ThreadKilled: + case ThreadComplete: + // finshed or died. The thread might still be alive, but we + // don't keep it on the all_threads list. Don't forget to + // stub out its global_link field. + next = t->global_link; + t->global_link = END_TSO_QUEUE; + *prev = next; + continue; + default: + ; + } + + if (tmp == NULL) { + // not alive (yet): leave this thread on the + // old_all_threads list. + prev = &(t->global_link); + next = t->global_link; + } + else { + // alive: move this thread onto the all_threads list. + next = t->global_link; + t->global_link = all_threads; + all_threads = t; + *prev = next; + } + } } - } + + /* And resurrect any threads which were about to become garbage. 
+ */ + { + StgTSO *t, *tmp, *next; + for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { + next = t->global_link; + (StgClosure *)tmp = evacuate((StgClosure *)t); + tmp->global_link = resurrected_threads; + resurrected_threads = tmp; + } + } + + weak_stage = WeakDone; // *now* we're done, + return rtsTrue; // but one more round of scavenging, please - weak_done = rtsTrue; + default: + barf("traverse_weak_ptr_list"); } - return rtsTrue; } /* ----------------------------------------------------------------------------- @@ -1114,23 +1260,18 @@ traverse_weak_ptr_list(void) static void -cleanup_weak_ptr_list ( StgWeak **list ) +mark_weak_ptr_list ( StgWeak **list ) { StgWeak *w, **last_w; last_w = list; for (w = *list; w; w = w->link) { - - if (get_itbl(w)->type == EVACUATED) { - w = (StgWeak *)((StgEvacuated *)w)->evacuee; - *last_w = w; - } - - if ((Bdescr((P_)w)->flags & BF_EVACUATED) == 0) { + // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here + ASSERT(w->header.info == &stg_DEAD_WEAK_info + || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED); (StgClosure *)w = evacuate((StgClosure *)w); *last_w = w; - } - last_w = &(w->link); + last_w = &(w->link); } } @@ -1160,6 +1301,7 @@ isAlive(StgClosure *p) loop: bd = Bdescr((P_)p); + // ignore closures in generations that we're not collecting. if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) { return p; @@ -1246,6 +1388,10 @@ static __inline__ StgClosure * copy(StgClosure *src, nat size, step *stp) { P_ to, from, dest; +#ifdef PROFILING + // @LDV profiling + nat size_org = size; +#endif TICK_GC_WORDS_COPIED(size); /* Find out where we're going, using the handy "to" pointer in @@ -1275,6 +1421,11 @@ copy(StgClosure *src, nat size, step *stp) dest = stp->hp; stp->hp = to; upd_evacuee(src,(StgClosure *)dest); +#ifdef PROFILING + // We store the size of the just evacuated object in the LDV word so that + // the profiler can guess the position of the next object later. + SET_EVACUAEE_FOR_LDV(src, size_org); +#endif return (StgClosure *)dest; } @@ -1284,10 +1435,14 @@ copy(StgClosure *src, nat size, step *stp) */ -static __inline__ StgClosure * +static StgClosure * copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) { P_ dest, to, from; +#ifdef PROFILING + // @LDV profiling + nat size_to_copy_org = size_to_copy; +#endif TICK_GC_WORDS_COPIED(size_to_copy); if (stp->gen_no < evac_gen) { @@ -1309,6 +1464,16 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) dest = stp->hp; stp->hp += size_to_reserve; upd_evacuee(src,(StgClosure *)dest); +#ifdef PROFILING + // We store the size of the just evacuated object in the LDV word so that + // the profiler can guess the position of the next object later. + // size_to_copy_org is wrong because the closure already occupies size_to_reserve + // words. + SET_EVACUAEE_FOR_LDV(src, size_to_reserve); + // fill the slop + if (size_to_reserve - size_to_copy_org > 0) + FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org)); +#endif return (StgClosure *)dest; } @@ -1317,8 +1482,8 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) Evacuate a large object This just consists of removing the object from the (doubly-linked) - large_alloc_list, and linking it on to the (singly-linked) - new_large_objects list, from where it will be scavenged later. + step->large_objects list, and linking it on to the (singly-linked) + step->new_large_objects list, from where it will be scavenged later. 
Convention: bd->flags has BF_EVACUATED set for a large object that has been evacuated, or unset otherwise. @@ -1331,9 +1496,10 @@ evacuate_large(StgPtr p) bdescr *bd = Bdescr(p); step *stp; - // should point to the beginning of the block - ASSERT(((W_)p & BLOCK_MASK) == 0); - + // object must be at the beginning of the block (or be a ByteArray) + ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS || + (((W_)p & BLOCK_MASK) == 0)); + // already evacuated? if (bd->flags & BF_EVACUATED) { /* Don't forget to set the failed_to_evac flag if we didn't get @@ -1571,139 +1737,26 @@ loop: case THUNK_SELECTOR: { - const StgInfoTable* selectee_info; - StgClosure* selectee = ((StgSelector*)q)->selectee; + StgClosure *p; - selector_loop: - selectee_info = get_itbl(selectee); - switch (selectee_info->type) { - case CONSTR: - case CONSTR_1_0: - case CONSTR_0_1: - case CONSTR_2_0: - case CONSTR_1_1: - case CONSTR_0_2: - case CONSTR_STATIC: - { - StgWord offset = info->layout.selector_offset; - - // check that the size is in range - ASSERT(offset < - (StgWord32)(selectee_info->layout.payload.ptrs + - selectee_info->layout.payload.nptrs)); - - // perform the selection! - q = selectee->payload[offset]; - - /* if we're already in to-space, there's no need to continue - * with the evacuation, just update the source address with - * a pointer to the (evacuated) constructor field. - */ - if (HEAP_ALLOCED(q)) { - bdescr *bd = Bdescr((P_)q); - if (bd->flags & BF_EVACUATED) { - if (bd->gen_no < evac_gen) { - failed_to_evac = rtsTrue; - TICK_GC_FAILED_PROMOTION(); - } - return q; - } - } - - /* otherwise, carry on and evacuate this constructor field, - * (but not the constructor itself) - */ - goto loop; + if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) { + return copy(q,THUNK_SELECTOR_sizeW(),stp); } - case IND: - case IND_STATIC: - case IND_PERM: - case IND_OLDGEN: - case IND_OLDGEN_PERM: - selectee = ((StgInd *)selectee)->indirectee; - goto selector_loop; - - case EVACUATED: - selectee = ((StgEvacuated *)selectee)->evacuee; - goto selector_loop; - - case THUNK_SELECTOR: -# if 0 - /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or - something) to go into an infinite loop when the nightly - stage2 compiles PrelTup.lhs. */ - - /* we can't recurse indefinitely in evacuate(), so set a - * limit on the number of times we can go around this - * loop. - */ - if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) { - bdescr *bd; - bd = Bdescr((P_)selectee); - if (!bd->flags & BF_EVACUATED) { - thunk_selector_depth++; - selectee = evacuate(selectee); - thunk_selector_depth--; - goto selector_loop; - } - } - // otherwise, fall through... -# endif - - case AP_UPD: - case THUNK: - case THUNK_1_0: - case THUNK_0_1: - case THUNK_2_0: - case THUNK_1_1: - case THUNK_0_2: - case THUNK_STATIC: - case CAF_BLACKHOLE: - case SE_CAF_BLACKHOLE: - case SE_BLACKHOLE: - case BLACKHOLE: - case BLACKHOLE_BQ: - // not evaluated yet - break; - -#if defined(PAR) - // a copy of the top-level cases below - case RBH: // cf. 
BLACKHOLE_BQ - { - //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str); - to = copy(q,BLACKHOLE_sizeW(),stp); - //ToDo: derive size etc from reverted IP - //to = copy(q,size,stp); - // recordMutable((StgMutClosure *)to); - return to; - } - - case BLOCKED_FETCH: - ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE); - to = copy(q,sizeofW(StgBlockedFetch),stp); - return to; - -# ifdef DIST - case REMOTE_REF: -# endif - case FETCH_ME: - ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); - to = copy(q,sizeofW(StgFetchMe),stp); - return to; - - case FETCH_ME_BQ: - ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); - to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp); - return to; -#endif + p = eval_thunk_selector(info->layout.selector_offset, + (StgSelector *)q); - default: - barf("evacuate: THUNK_SELECTOR: strange selectee %d", - (int)(selectee_info->type)); - } + if (p == NULL) { + return copy(q,THUNK_SELECTOR_sizeW(),stp); + } else { + // q is still BLACKHOLE'd. + thunk_selector_depth++; + p = evacuate(p); + thunk_selector_depth--; + upd_evacuee(q,p); + return p; + } } - return copy(q,THUNK_SELECTOR_sizeW(),stp); case IND: case IND_OLDGEN: @@ -1785,7 +1838,7 @@ loop: */ if (evac_gen > 0) { // optimisation StgClosure *p = ((StgEvacuated*)q)->evacuee; - if (Bdescr((P_)p)->gen_no < evac_gen) { + if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) { failed_to_evac = rtsTrue; TICK_GC_FAILED_PROMOTION(); } @@ -1871,6 +1924,144 @@ loop: } /* ----------------------------------------------------------------------------- + Evaluate a THUNK_SELECTOR if possible. + + returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or + a closure pointer if we evaluated it and this is the result. Note + that "evaluating" the THUNK_SELECTOR doesn't necessarily mean + reducing it to HNF, just that we have eliminated the selection. + The result might be another thunk, or even another THUNK_SELECTOR. + + If the return value is non-NULL, the original selector thunk has + been BLACKHOLE'd, and should be updated with an indirection or a + forwarding pointer. If the return value is NULL, then the selector + thunk is unchanged. + -------------------------------------------------------------------------- */ + +static StgClosure * +eval_thunk_selector( nat field, StgSelector * p ) +{ + StgInfoTable *info; + const StgInfoTable *info_ptr; + StgClosure *selectee; + + selectee = p->selectee; + + // Save the real info pointer (NOTE: not the same as get_itbl()). + info_ptr = p->header.info; + + // If the THUNK_SELECTOR is in a generation that we are not + // collecting, then bail out early. We won't be able to save any + // space in any case, and updating with an indirection is trickier + // in an old gen. + if (Bdescr((StgPtr)p)->gen_no > N) { + return NULL; + } + + // BLACKHOLE the selector thunk, since it is now under evaluation. + // This is important to stop us going into an infinite loop if + // this selector thunk eventually refers to itself. 
+ SET_INFO(p,&stg_BLACKHOLE_info); + +selector_loop: + + info = get_itbl(selectee); + switch (info->type) { + case CONSTR: + case CONSTR_1_0: + case CONSTR_0_1: + case CONSTR_2_0: + case CONSTR_1_1: + case CONSTR_0_2: + case CONSTR_STATIC: + case CONSTR_NOCAF_STATIC: + // check that the size is in range + ASSERT(field < (StgWord32)(info->layout.payload.ptrs + + info->layout.payload.nptrs)); + + return selectee->payload[field]; + + case IND: + case IND_STATIC: + case IND_PERM: + case IND_OLDGEN: + case IND_OLDGEN_PERM: + selectee = ((StgInd *)selectee)->indirectee; + goto selector_loop; + + case EVACUATED: + // We don't follow pointers into to-space; the constructor + // has already been evacuated, so we won't save any space + // leaks by evaluating this selector thunk anyhow. + break; + + case THUNK_SELECTOR: + { + StgClosure *val; + + // check that we don't recurse too much, re-using the + // depth bound also used in evacuate(). + thunk_selector_depth++; + if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) { + break; + } + + val = eval_thunk_selector(info->layout.selector_offset, + (StgSelector *)selectee); + + thunk_selector_depth--; + + if (val == NULL) { + break; + } else { + // We evaluated this selector thunk, so update it with + // an indirection. NOTE: we don't use UPD_IND here, + // because we are guaranteed that p is in a generation + // that we are collecting, and we never want to put the + // indirection on a mutable list. + ((StgInd *)selectee)->indirectee = val; + SET_INFO(selectee,&stg_IND_info); + selectee = val; + goto selector_loop; + } + } + + case AP_UPD: + case THUNK: + case THUNK_1_0: + case THUNK_0_1: + case THUNK_2_0: + case THUNK_1_1: + case THUNK_0_2: + case THUNK_STATIC: + case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: + case BLACKHOLE: + case BLACKHOLE_BQ: +#if defined(PAR) + case RBH: + case BLOCKED_FETCH: +# ifdef DIST + case REMOTE_REF: +# endif + case FETCH_ME: + case FETCH_ME_BQ: +#endif + // not evaluated yet + break; + + default: + barf("eval_thunk_selector: strange selectee %d", + (int)(info->type)); + } + + // We didn't manage to evaluate this thunk; restore the old info pointer + SET_INFO(p, info_ptr); + return NULL; +} + +/* ----------------------------------------------------------------------------- move_TSO is called to update the TSO structure after it has been moved from one place to another. -------------------------------------------------------------------------- */ @@ -2042,6 +2233,8 @@ scavenge(step *stp) info = get_itbl((StgClosure *)p); ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info))); + ASSERT(thunk_selector_depth == 0); + q = p; switch (info->type) { @@ -2132,9 +2325,23 @@ scavenge(step *stp) } case IND_PERM: - if (stp->gen_no != 0) { - SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); - } + if (stp->gen->no != 0) { +#ifdef PROFILING + // @LDV profiling + // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an + // IND_OLDGEN_PERM closure is larger than an IND_PERM closure. + LDV_recordDead((StgClosure *)p, sizeofW(StgInd)); +#endif + // + // Todo: maybe use SET_HDR() and remove LDV_recordCreate()? + // + SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); +#ifdef PROFILING + // @LDV profiling + // We pretend that p has just been created. 
+ LDV_recordCreate((StgClosure *)p); +#endif + } // fall through case IND_OLDGEN_PERM: ((StgIndOldGen *)p)->indirectee = @@ -3453,15 +3660,14 @@ revertCAFs( void ) } void -scavengeCAFs( void ) +markCAFs( evac_fn evac ) { StgIndStatic *c; - evac_gen = 0; for (c = (StgIndStatic *)caf_list; c != NULL; c = (StgIndStatic *)c->static_link) { - c->indirectee = evacuate(c->indirectee); + evac(&c->indirectee); } } @@ -3561,7 +3767,17 @@ threadLazyBlackHole(StgTSO *tso) #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) belch("Unexpected lazy BHing required at 0x%04x",(int)bh); #endif +#ifdef PROFILING + // @LDV profiling + // We pretend that bh is now dead. + LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh); +#endif SET_INFO(bh,&stg_BLACKHOLE_info); +#ifdef PROFILING + // @LDV profiling + // We pretend that bh has just been created. + LDV_recordCreate(bh); +#endif } update_frame = update_frame->link; @@ -3792,7 +4008,7 @@ threadSqueezeStack(StgTSO *tso) { StgInfoTable *info = get_itbl(bh); nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i; - /* don't zero out slop for a THUNK_SELECTOR, because it's layout + /* don't zero out slop for a THUNK_SELECTOR, because its layout * info is used for a different purpose, and it's exactly the * same size as a BLACKHOLE in any case. */ @@ -3803,7 +4019,20 @@ threadSqueezeStack(StgTSO *tso) } } #endif +#ifdef PROFILING + // @LDV profiling + // We pretend that bh is now dead. + LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh); +#endif + // + // Todo: maybe use SET_HDR() and remove LDV_recordCreate()? + // SET_INFO(bh,&stg_BLACKHOLE_info); +#ifdef PROFILING + // @LDV profiling + // We pretend that bh has just been created. + LDV_recordCreate(bh); +#endif } }
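
For readers tracing the new major-GC sizing policy introduced above ("CURRENT STRATEGY: make all generations except zero the same size"), the following is a minimal standalone sketch of that arithmetic, not the RTS code itself. Only the formulas mirror the diff; the flag names (maxHeapSize, oldGenFactor, minOldGenSize, minAllocAreaSize, pcFreeHeap, compactThreshold) correspond to RtsFlags.GcFlags fields referenced in the patch, while the main() harness, the sample block counts, and the printf standing in for heapOverflow() are invented purely for illustration. It assumes at least two generations, as the real code only runs this path when generations > 1.

/* gen_sizing_sketch.c -- illustrative only; compile with any C compiler. */
#include <stdio.h>

#define stg_max(a,b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Hypothetical inputs, in blocks (values chosen for illustration). */
    unsigned int max              = 8192;  /* maxHeapSize, 0 = unlimited      */
    unsigned int gens             = 2;     /* number of generations (>= 2)    */
    unsigned int oldGenFactor     = 2;
    unsigned int minOldGenSize    = 256;
    unsigned int minAllocAreaSize = 128;
    unsigned int pcFreeHeap       = 3;     /* % of max heap kept free         */
    unsigned int compactThreshold = 30;    /* % residency that enables compaction */

    unsigned int n_blocks         = 1500;  /* oldest gen: ordinary blocks     */
    unsigned int n_large_blocks   = 100;   /* oldest gen: large-object blocks */

    unsigned int live, size, min_alloc;
    int is_compacted;

    /* live data in the oldest generation */
    live = n_blocks + n_large_blocks;

    /* default max size for every generation except zero */
    size = stg_max(live * oldGenFactor, minOldGenSize);

    /* minimum size for generation zero */
    min_alloc = stg_max((pcFreeHeap * max) / 200, minAllocAreaSize);

    /* auto-enable compaction when residency crosses the threshold */
    is_compacted = (max > 0 && n_blocks > (compactThreshold * max) / 100);

    if (max != 0) {
        if (max < min_alloc) {
            printf("heapOverflow()\n");   /* stub for the RTS call */
            return 1;
        }
        /* With compaction the oldest generation does not need to be doubled
         * during collection; without it, copying GC needs 2x the space. */
        if (is_compacted) {
            if ((size + (size - 1) * (gens - 2) * 2) + min_alloc > max)
                size = (max - min_alloc) / ((gens - 1) * 2 - 1);
        } else {
            if ((size * (gens - 1) * 2) + min_alloc > max)
                size = (max - min_alloc) / ((gens - 1) * 2);
        }
        if (size < live) {
            printf("heapOverflow()\n");   /* stub for the RTS call */
            return 1;
        }
    }

    printf("live=%u  min_alloc=%u  compaction=%s  max_blocks per generation=%u\n",
           live, min_alloc, is_compacted ? "on" : "off", size);
    return 0;
}

With the sample numbers above this prints live=1600, min_alloc=128, compaction off, and 3200 max_blocks per generation: residency (1500 blocks) is below 30% of the 8192-block limit, so compaction stays off, and 2 x 3200 + 128 still fits under the maximum heap size, so the default size is kept.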