X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=fa22b4e4f5cd1600694d4a38fc9ecd68acf05084;hb=3ddfdc19e74af725239b7dfdec776d1d07847fc2;hp=3311550567c895e3b104355af44d0789f96de8c2;hpb=a5dc9cdae04f5e0c1f2d13a53bb06c52f0a8c8c9;p=ghc-hetmet.git diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c index 3311550..fa22b4e 100644 --- a/ghc/rts/GC.c +++ b/ghc/rts/GC.c @@ -1,10 +1,31 @@ /* ----------------------------------------------------------------------------- - * $Id: GC.c,v 1.26 1999/02/05 15:25:07 simonm Exp $ + * $Id: GC.c,v 1.73 2000/03/16 17:27:12 simonmar Exp $ * - * Two-space garbage collector + * (c) The GHC Team 1998-1999 + * + * Generational garbage collector * * ---------------------------------------------------------------------------*/ +//@menu +//* Includes:: +//* STATIC OBJECT LIST:: +//* Static function declarations:: +//* Garbage Collect:: +//* Weak Pointers:: +//* Evacuation:: +//* Scavenging:: +//* Reverting CAFs:: +//* Sanity code for CAF garbage collection:: +//* Lazy black holing:: +//* Stack squeezing:: +//* Pausing a thread:: +//* Index:: +//@end menu + +//@node Includes, STATIC OBJECT LIST +//@subsection Includes + #include "Rts.h" #include "RtsFlags.h" #include "RtsUtils.h" @@ -17,13 +38,26 @@ #include "GC.h" #include "BlockAlloc.h" #include "Main.h" -#include "DebugProf.h" +#include "ProfHeap.h" #include "SchedAPI.h" #include "Weak.h" #include "StablePriv.h" +#include "Prelude.h" +#if defined(GRAN) || defined(PAR) +# include "GranSimRts.h" +# include "ParallelRts.h" +# include "FetchMe.h" +# if defined(DEBUG) +# include "Printer.h" +# include "ParallelDebug.h" +# endif +#endif StgCAF* enteredCAFs; +//@node STATIC OBJECT LIST, Static function declarations, Includes +//@subsection STATIC OBJECT LIST + /* STATIC OBJECT LIST. * * During GC: @@ -75,11 +109,16 @@ static rtsBool major_gc; */ static nat evac_gen; -/* WEAK POINTERS +/* Weak pointers */ static StgWeak *old_weak_ptr_list; /* also pending finaliser list */ static rtsBool weak_done; /* all done for this pass */ +/* List of all threads during GC + */ +static StgTSO *old_all_threads; +static StgTSO *resurrected_threads; + /* Flag indicating failure to evacuate an object to the desired * generation. */ @@ -89,27 +128,41 @@ static rtsBool failed_to_evac; */ bdescr *old_to_space; + +/* Data used for allocation area sizing. 
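+ *
+ * In outline: new_blocks counts to-space blocks as they are chained on
+ * during this GC, and g0s0_pcnt_kept is refreshed after each minor GC;
+ * together they drive the allocation-area sizing at the end of
+ * GarbageCollect(), roughly
+ *
+ *   blocks = ((suggested - needed) * 100) / (100 + g0s0_pcnt_kept);
+ *
+ * (a summary of the calculation below, not extra machinery).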
+ */ +lnat new_blocks; /* blocks allocated during this GC */ +lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */ + +//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST +//@subsection Static function declarations + /* ----------------------------------------------------------------------------- Static function declarations -------------------------------------------------------------------------- */ -static StgClosure *evacuate(StgClosure *q); -static void zeroStaticObjectList(StgClosure* first_static); -static rtsBool traverse_weak_ptr_list(void); -static void zeroMutableList(StgMutClosure *first); -static void revertDeadCAFs(void); +static StgClosure * evacuate ( StgClosure *q ); +static void zero_static_object_list ( StgClosure* first_static ); +static void zero_mutable_list ( StgMutClosure *first ); +static void revert_dead_CAFs ( void ); + +static rtsBool traverse_weak_ptr_list ( void ); +static void cleanup_weak_ptr_list ( StgWeak **list ); -static void scavenge_stack(StgPtr p, StgPtr stack_end); -static void scavenge_large(step *step); -static void scavenge(step *step); -static void scavenge_static(void); -static void scavenge_mutable_list(generation *g); -static void scavenge_mut_once_list(generation *g); +static void scavenge_stack ( StgPtr p, StgPtr stack_end ); +static void scavenge_large ( step *step ); +static void scavenge ( step *step ); +static void scavenge_static ( void ); +static void scavenge_mutable_list ( generation *g ); +static void scavenge_mut_once_list ( generation *g ); #ifdef DEBUG -static void gcCAFs(void); +static void gcCAFs ( void ); #endif +//@node Garbage Collect, Weak Pointers, Static function declarations +//@subsection Garbage Collect + /* ----------------------------------------------------------------------------- GarbageCollect @@ -132,18 +185,24 @@ static void gcCAFs(void); - free from-space in each step, and set from-space = to-space. -------------------------------------------------------------------------- */ +//@cindex GarbageCollect void GarbageCollect(void (*get_roots)(void)) { bdescr *bd; step *step; - lnat live, allocated, collected = 0; + lnat live, allocated, collected = 0, copied = 0; nat g, s; #ifdef PROFILING CostCentreStack *prev_CCS; #endif +#if defined(DEBUG) && defined(GRAN) + IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n", + Now, Now)) +#endif + /* tell the stats department that we've started a GC */ stat_startGC(); @@ -153,21 +212,8 @@ void GarbageCollect(void (*get_roots)(void)) CCCS = CCS_GC; #endif - /* We might have been called from Haskell land by _ccall_GC, in - * which case we need to call threadPaused() because the scheduler - * won't have done it. - */ - if (CurrentTSO) { threadPaused(CurrentTSO); } - - /* Approximate how much we allocated: number of blocks in the - * nursery + blocks allocated via allocate() - unused nusery blocks. - * This leaves a little slop at the end of each block, and doesn't - * take into account large objects (ToDo). 
- */ - allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes(); - for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) { - allocated -= BLOCK_SIZE_W; - } + /* Approximate how much we allocated */ + allocated = calcAllocated(); /* Figure out which generation to collect */ @@ -180,8 +226,10 @@ void GarbageCollect(void (*get_roots)(void)) major_gc = (N == RtsFlags.GcFlags.generations-1); /* check stack sanity *before* GC (ToDo: check all threads) */ - /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */ - IF_DEBUG(sanity, checkFreeListSanity()); +#if defined(GRAN) + // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity()); +#endif + IF_DEBUG(sanity, checkFreeListSanity()); /* Initialise the static object lists */ @@ -189,10 +237,10 @@ void GarbageCollect(void (*get_roots)(void)) scavenged_static_objects = END_OF_STATIC_LIST; /* zero the mutable list for the oldest generation (see comment by - * zeroMutableList below). + * zero_mutable_list below). */ if (major_gc) { - zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_once_list); + zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list); } /* Save the old to-space if we're doing a two-space collection @@ -202,6 +250,11 @@ void GarbageCollect(void (*get_roots)(void)) g0s0->to_space = NULL; } + /* Keep a count of how many new blocks we allocated during this GC + * (used for resizing the allocation area, later). + */ + new_blocks = 0; + /* Initialise to-space in all the generations/steps that we're * collecting. */ @@ -231,11 +284,12 @@ void GarbageCollect(void (*get_roots)(void)) step->hpLim = step->hp + BLOCK_SIZE_W; step->hp_bd = bd; step->to_space = bd; - step->to_blocks = 1; /* ???? */ + step->to_blocks = 1; step->scan = bd->start; step->scan_bd = bd; step->new_large_objects = NULL; step->scavenged_large_objects = NULL; + new_blocks++; /* mark the large objects as not evacuated yet */ for (bd = step->large_objects; bd; bd = bd->link) { bd->evacuated = 0; @@ -260,6 +314,7 @@ void GarbageCollect(void (*get_roots)(void)) step->hp_bd = bd; step->blocks = bd; step->n_blocks = 1; + new_blocks++; } /* Set the scan pointer for older generations: remember we * still have to scavenge objects that have been promoted. */ @@ -293,6 +348,8 @@ void GarbageCollect(void (*get_roots)(void)) /* Do the mut-once lists first */ for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { + IF_PAR_DEBUG(verbose, + printMutOnceList(&generations[g])); scavenge_mut_once_list(&generations[g]); evac_gen = g; for (st = generations[g].n_steps-1; st >= 0; st--) { @@ -301,6 +358,8 @@ void GarbageCollect(void (*get_roots)(void)) } for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { + IF_PAR_DEBUG(verbose, + printMutableList(&generations[g])); scavenge_mutable_list(&generations[g]); evac_gen = g; for (st = generations[g].n_steps-1; st >= 0; st--) { @@ -314,20 +373,33 @@ void GarbageCollect(void (*get_roots)(void)) evac_gen = 0; get_roots(); +#if defined(PAR) /* And don't forget to mark the TSO if we got here direct from * Haskell! */ + /* Not needed in a seq version? if (CurrentTSO) { CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO); } + */ + + /* Mark the entries in the GALA table of the parallel system */ + markLocalGAs(major_gc); +#endif /* Mark the weak pointer list, and prepare to detect dead weak * pointers. */ - markWeakList(); old_weak_ptr_list = weak_ptr_list; weak_ptr_list = NULL; weak_done = rtsFalse; + /* The all_threads list is like the weak_ptr_list. + * See traverse_weak_ptr_list() for the details. 
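+ *
+ * In outline: threads on old_all_threads that prove reachable are moved
+ * back onto all_threads; any still left once the weak-pointer traversal
+ * stabilises are evacuated onto resurrected_threads, and
+ * resurrectThreads() later sends each of them an exception.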
+ */ + old_all_threads = all_threads; + all_threads = END_TSO_QUEUE; + resurrected_threads = END_TSO_QUEUE; + /* Mark the stable pointer table. */ markStablePtrTable(major_gc); @@ -339,22 +411,7 @@ void GarbageCollect(void (*get_roots)(void)) * the CAF document. */ extern void markHugsObjects(void); -#if 0 - /* ToDo: This (undefined) function should contain the scavenge - * loop immediately below this block of code - but I'm not sure - * enough of the details to do this myself. - */ - scavengeEverything(); - /* revert dead CAFs and update enteredCAFs list */ - revertDeadCAFs(); -#endif markHugsObjects(); -#if 0 - /* This will keep the CAFs and the attached BCOs alive - * but the values will have been reverted - */ - scavengeEverything(); -#endif } #endif @@ -387,6 +444,9 @@ void GarbageCollect(void (*get_roots)(void)) loop2: for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) { for (st = generations[gen].n_steps-1; st >= 0 ; st--) { + if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) { + continue; + } step = &generations[gen].steps[st]; evac_gen = gen; if (step->hp_bd != step->scan_bd || step->scan < step->hp) { @@ -410,10 +470,18 @@ void GarbageCollect(void (*get_roots)(void)) } } - /* Now see which stable names are still alive + /* Final traversal of the weak pointer list (see comment by + * cleanUpWeakPtrList below). + */ + cleanup_weak_ptr_list(&weak_ptr_list); + + /* Now see which stable names are still alive. */ gcStablePtrTable(major_gc); + /* revert dead CAFs and update enteredCAFs list */ + revert_dead_CAFs(); + /* Set the maximum blocks for the oldest generation, based on twice * the amount of live data now, adjusted to fit the maximum heap * size if necessary. @@ -441,6 +509,7 @@ void GarbageCollect(void (*get_roots)(void)) /* run through all the generations/steps and tidy up */ + copied = new_blocks * BLOCK_SIZE_W; for (g = 0; g < RtsFlags.GcFlags.generations; g++) { if (g <= N) { @@ -455,6 +524,11 @@ void GarbageCollect(void (*get_roots)(void)) /* Tidy the end of the to-space chains */ step->hp_bd->free = step->hp; step->hp_bd->link = NULL; + /* stats information: how much we copied */ + if (g <= N) { + copied -= step->hp_bd->start + BLOCK_SIZE_W - + step->hp_bd->free; + } } /* for generations we collected... */ @@ -501,8 +575,11 @@ void GarbageCollect(void (*get_roots)(void)) * oldest_gen */ if (g != 0) { +#if 0 generations[g].max_blocks = (oldest_gen->max_blocks * g) / (RtsFlags.GcFlags.generations-1); +#endif + generations[g].max_blocks = oldest_gen->max_blocks; } /* for older generations... */ @@ -527,6 +604,18 @@ void GarbageCollect(void (*get_roots)(void)) /* Guess the amount of live data for stats. */ live = calcLive(); + /* Free the small objects allocated via allocate(), since this will + * all have been copied into G0S1 now. + */ + if (small_alloc_list != NULL) { + freeChain(small_alloc_list); + } + small_alloc_list = NULL; + alloc_blocks = 0; + alloc_Hp = NULL; + alloc_HpLim = NULL; + alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize; + /* Two-space collector: * Free the old to-space, and estimate the amount of live data. */ @@ -556,7 +645,7 @@ void GarbageCollect(void (*get_roots)(void)) * performance we get from 3L bytes, reducing to the same * performance at 2L bytes. 
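 *
 * (Illustrative numbers only: with maxHeapSize = 1000 blocks and
 * blocks = 450 live, adjusted_blocks = 1000 - 2*450 = 100, giving
 * pc_free = 10; if that falls below pcFreeHeap we give up with
 * heapOverflow(), exactly as the code below does.)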
*/ - blocks = g0s0->n_blocks; + blocks = g0s0->to_blocks; if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 > RtsFlags.GcFlags.maxHeapSize ) { @@ -564,7 +653,7 @@ void GarbageCollect(void (*get_roots)(void)) int pc_free; adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks); - IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks)); + IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks)); pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize; if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ { heapOverflow(); @@ -590,25 +679,28 @@ void GarbageCollect(void (*get_roots)(void)) nat needed = calcNeeded(); /* approx blocks needed at next GC */ /* Guess how much will be live in generation 0 step 0 next time. - * A good approximation is the amount of data that was live this - * time: this assumes (1) that the size of G0S0 will be roughly - * the same as last time, and (2) that the promotion rate will be - * constant. - * - * If we don't know how much was live in G0S0 (because there's no - * step 1), then assume 30% (which is usually an overestimate). + * A good approximation is the obtained by finding the + * percentage of g0s0 that was live at the last minor GC. */ - if (g0->n_steps == 1) { - needed += (g0s0->n_blocks * 30) / 100; - } else { - needed += g0->steps[1].n_blocks; + if (N == 0) { + g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks; } - /* Now we have a rough guess at the number of blocks needed for - * the next GC, subtract this from the user's suggested heap size - * and use the rest for the allocation area. + /* Estimate a size for the allocation area based on the + * information available. We might end up going slightly under + * or over the suggested heap size, but we should be pretty + * close on average. + * + * Formula: suggested - needed + * ---------------------------- + * 1 + g0s0_pcnt_kept/100 + * + * where 'needed' is the amount of memory needed at the next + * collection for collecting all steps except g0s0. */ - blocks = (int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed; + blocks = + (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) / + (100 + (int)g0s0_pcnt_kept); if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) { blocks = RtsFlags.GcFlags.minAllocAreaSize; @@ -618,41 +710,31 @@ void GarbageCollect(void (*get_roots)(void)) } } - /* revert dead CAFs and update enteredCAFs list */ - revertDeadCAFs(); - - /* mark the garbage collected CAFs as dead */ + /* mark the garbage collected CAFs as dead */ #ifdef DEBUG if (major_gc) { gcCAFs(); } #endif /* zero the scavenged static object list */ if (major_gc) { - zeroStaticObjectList(scavenged_static_objects); + zero_static_object_list(scavenged_static_objects); } /* Reset the nursery */ - for (bd = g0s0->blocks; bd; bd = bd->link) { - bd->free = bd->start; - ASSERT(bd->gen == g0); - ASSERT(bd->step == g0s0); - } - current_nursery = g0s0->blocks; + resetNurseries(); - /* Free the small objects allocated via allocate(), since this will - * all have been copied into G0S1 now. 
- */ - if (small_alloc_list != NULL) { - freeChain(small_alloc_list); - } - small_alloc_list = NULL; - alloc_blocks = 0; - alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize; +#if defined(PAR) + /* Reconstruct the Global Address tables used in GUM */ + RebuildGAtables(major_gc); +#endif - /* start any pending finalisers */ - scheduleFinalisers(old_weak_ptr_list); + /* start any pending finalizers */ + scheduleFinalizers(old_weak_ptr_list); + /* send exceptions to any threads which were about to die */ + resurrectThreads(resurrected_threads); + /* check sanity after GC */ IF_DEBUG(sanity, checkSanity(N)); @@ -666,6 +748,7 @@ void GarbageCollect(void (*get_roots)(void)) /* restore enclosing cost centre */ #ifdef PROFILING + heapCensus(); CCCS = prev_CCS; #endif @@ -673,9 +756,12 @@ void GarbageCollect(void (*get_roots)(void)) IF_DEBUG(sanity, memInventory()); /* ok, GC over: tell the stats department what happened. */ - stat_endGC(allocated, collected, live, N); + stat_endGC(allocated, collected, live, copied, N); } +//@node Weak Pointers, Evacuation, Garbage Collect +//@subsection Weak Pointers + /* ----------------------------------------------------------------------------- Weak Pointers @@ -690,11 +776,12 @@ void GarbageCollect(void (*get_roots)(void)) new live weak pointers, then all the currently unreachable ones are dead. - For generational GC: we just don't try to finalise weak pointers in + For generational GC: we just don't try to finalize weak pointers in older generations than the one we're collecting. This could probably be optimised by keeping per-generation lists of weak pointers, but for a few weak pointers this scheme will work. -------------------------------------------------------------------------- */ +//@cindex traverse_weak_ptr_list static rtsBool traverse_weak_ptr_list(void) @@ -705,7 +792,7 @@ traverse_weak_ptr_list(void) if (weak_done) { return rtsFalse; } - /* doesn't matter where we evacuate values/finalisers to, since + /* doesn't matter where we evacuate values/finalizers to, since * these pointers are treated as roots (iff the keys are alive). */ evac_gen = 0; @@ -713,11 +800,32 @@ traverse_weak_ptr_list(void) last_w = &old_weak_ptr_list; for (w = old_weak_ptr_list; w; w = next_w) { + /* First, this weak pointer might have been evacuated. If so, + * remove the forwarding pointer from the weak_ptr_list. + */ + if (get_itbl(w)->type == EVACUATED) { + w = (StgWeak *)((StgEvacuated *)w)->evacuee; + *last_w = w; + } + + /* There might be a DEAD_WEAK on the list if finalizeWeak# was + * called on a live weak pointer object. Just remove it. + */ + if (w->header.info == &DEAD_WEAK_info) { + next_w = ((StgDeadWeak *)w)->link; + *last_w = next_w; + continue; + } + + ASSERT(get_itbl(w)->type == WEAK); + + /* Now, check whether the key is reachable. + */ if ((new = isAlive(w->key))) { w->key = new; - /* evacuate the value and finaliser */ + /* evacuate the value and finalizer */ w->value = evacuate(w->value); - w->finaliser = evacuate(w->finaliser); + w->finalizer = evacuate(w->finalizer); /* remove this weak ptr from the old_weak_ptr list */ *last_w = w->link; /* and put it on the new weak ptr list */ @@ -734,16 +842,67 @@ traverse_weak_ptr_list(void) continue; } } - + + /* Now deal with the all_threads list, which behaves somewhat like + * the weak ptr list. If we discover any threads that are about to + * become garbage, we wake them up and administer an exception. 
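+ *
+ * Outline of the scan below: finished or killed threads are simply
+ * dropped from the list; a thread for which isAlive() returns non-NULL
+ * has already been evacuated, so it is unlinked and pushed onto
+ * all_threads; everything else stays on old_all_threads for the next
+ * pass.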
+ */ + { + StgTSO *t, *tmp, *next, **prev; + + prev = &old_all_threads; + for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { + + /* Threads which have finished or died get dropped from + * the list. + */ + switch (t->whatNext) { + case ThreadKilled: + case ThreadComplete: + next = t->global_link; + *prev = next; + continue; + default: + } + + /* Threads which have already been determined to be alive are + * moved onto the all_threads list. + */ + (StgClosure *)tmp = isAlive((StgClosure *)t); + if (tmp != NULL) { + next = tmp->global_link; + tmp->global_link = all_threads; + all_threads = tmp; + *prev = next; + } else { + prev = &(t->global_link); + next = t->global_link; + } + } + } + /* If we didn't make any changes, then we can go round and kill all * the dead weak pointers. The old_weak_ptr list is used as a list - * of pending finalisers later on. + * of pending finalizers later on. */ if (flag == rtsFalse) { + cleanup_weak_ptr_list(&old_weak_ptr_list); for (w = old_weak_ptr_list; w; w = w->link) { - w->value = evacuate(w->value); - w->finaliser = evacuate(w->finaliser); + w->finalizer = evacuate(w->finalizer); } + + /* And resurrect any threads which were about to become garbage. + */ + { + StgTSO *t, *tmp, *next; + for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { + next = t->global_link; + (StgClosure *)tmp = evacuate((StgClosure *)t); + tmp->global_link = resurrected_threads; + resurrected_threads = tmp; + } + } + weak_done = rtsTrue; } @@ -751,15 +910,53 @@ traverse_weak_ptr_list(void) } /* ----------------------------------------------------------------------------- + After GC, the live weak pointer list may have forwarding pointers + on it, because a weak pointer object was evacuated after being + moved to the live weak pointer list. We remove those forwarding + pointers here. + + Also, we don't consider weak pointer objects to be reachable, but + we must nevertheless consider them to be "live" and retain them. + Therefore any weak pointer objects which haven't as yet been + evacuated need to be evacuated now. + -------------------------------------------------------------------------- */ + +//@cindex cleanup_weak_ptr_list + +static void +cleanup_weak_ptr_list ( StgWeak **list ) +{ + StgWeak *w, **last_w; + + last_w = list; + for (w = *list; w; w = w->link) { + + if (get_itbl(w)->type == EVACUATED) { + w = (StgWeak *)((StgEvacuated *)w)->evacuee; + *last_w = w; + } + + if (Bdescr((P_)w)->evacuated == 0) { + (StgClosure *)w = evacuate((StgClosure *)w); + *last_w = w; + } + last_w = &(w->link); + } +} + +/* ----------------------------------------------------------------------------- isAlive determines whether the given closure is still alive (after a garbage collection) or not. It returns the new address of the closure if it is alive, or NULL otherwise. -------------------------------------------------------------------------- */ +//@cindex isAlive + StgClosure * isAlive(StgClosure *p) { - StgInfoTable *info; + const StgInfoTable *info; + nat size; while (1) { @@ -770,10 +967,14 @@ isAlive(StgClosure *p) * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs. */ +#if 1 || !defined(PAR) /* ignore closures in generations that we're not collecting. */ + /* In GUM we use this routine when rebuilding GA tables; for some + reason it has problems with the LOOKS_LIKE_STATIC macro -- HWL */ if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) { return p; } +#endif switch (info->type) { @@ -790,6 +991,33 @@ isAlive(StgClosure *p) /* alive! 
*/ return ((StgEvacuated *)p)->evacuee; + case BCO: + size = bco_sizeW((StgBCO*)p); + goto large; + + case ARR_WORDS: + size = arr_words_sizeW((StgArrWords *)p); + goto large; + + case MUT_ARR_PTRS: + case MUT_ARR_PTRS_FROZEN: + size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p); + goto large; + + case TSO: + if (((StgTSO *)p)->whatNext == ThreadRelocated) { + p = (StgClosure *)((StgTSO *)p)->link; + continue; + } + + size = tso_sizeW((StgTSO *)p); + large: + if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_) + && Bdescr((P_)p)->evacuated) + return p; + else + return NULL; + default: /* dead. */ return NULL; @@ -797,12 +1025,14 @@ isAlive(StgClosure *p) } } +//@cindex MarkRoot StgClosure * MarkRoot(StgClosure *root) { return evacuate(root); } +//@cindex addBlock static void addBlock(step *step) { bdescr *bd = allocBlock(); @@ -821,8 +1051,20 @@ static void addBlock(step *step) step->hpLim = step->hp + BLOCK_SIZE_W; step->hp_bd = bd; step->to_blocks++; + new_blocks++; } +//@cindex upd_evacuee + +static __inline__ void +upd_evacuee(StgClosure *p, StgClosure *dest) +{ + p->header.info = &EVACUATED_info; + ((StgEvacuated *)p)->evacuee = dest; +} + +//@cindex copy + static __inline__ StgClosure * copy(StgClosure *src, nat size, step *step) { @@ -835,7 +1077,11 @@ copy(StgClosure *src, nat size, step *step) * by evacuate()). */ if (step->gen->no < evac_gen) { +#ifdef NO_EAGER_PROMOTION + failed_to_evac = rtsTrue; +#else step = &generations[evac_gen].steps[0]; +#endif } /* chain a new block onto the to-space for the destination step if @@ -851,6 +1097,7 @@ copy(StgClosure *src, nat size, step *step) dest = step->hp; step->hp = to; + upd_evacuee(src,(StgClosure *)dest); return (StgClosure *)dest; } @@ -859,6 +1106,8 @@ copy(StgClosure *src, nat size, step *step) * used to optimise evacuation of BLACKHOLEs. */ +//@cindex copyPart + static __inline__ StgClosure * copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step) { @@ -866,7 +1115,11 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step) TICK_GC_WORDS_COPIED(size_to_copy); if (step->gen->no < evac_gen) { +#ifdef NO_EAGER_PROMOTION + failed_to_evac = rtsTrue; +#else step = &generations[evac_gen].steps[0]; +#endif } if (step->hp + size_to_reserve >= step->hpLim) { @@ -879,17 +1132,12 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step) dest = step->hp; step->hp += size_to_reserve; + upd_evacuee(src,(StgClosure *)dest); return (StgClosure *)dest; } -static __inline__ void -upd_evacuee(StgClosure *p, StgClosure *dest) -{ - StgEvacuated *q = (StgEvacuated *)p; - - SET_INFO(q,&EVACUATED_info); - q->evacuee = dest; -} +//@node Evacuation, Scavenging, Weak Pointers +//@subsection Evacuation /* ----------------------------------------------------------------------------- Evacuate a large object @@ -902,6 +1150,8 @@ upd_evacuee(StgClosure *p, StgClosure *dest) evacuated, or 0 otherwise. -------------------------------------------------------------------------- */ +//@cindex evacuate_large + static inline void evacuate_large(StgPtr p, rtsBool mutable) { @@ -938,7 +1188,11 @@ evacuate_large(StgPtr p, rtsBool mutable) */ step = bd->step->to; if (step->gen->no < evac_gen) { +#ifdef NO_EAGER_PROMOTION + failed_to_evac = rtsTrue; +#else step = &generations[evac_gen].steps[0]; +#endif } bd->step = step; @@ -960,6 +1214,8 @@ evacuate_large(StgPtr p, rtsBool mutable) the promotion until the next GC. 
-------------------------------------------------------------------------- */ +//@cindex mkMutCons + static StgClosure * mkMutCons(StgClosure *ptr, generation *gen) { @@ -1009,7 +1265,7 @@ mkMutCons(StgClosure *ptr, generation *gen) didn't manage to evacuate this object into evac_gen. -------------------------------------------------------------------------- */ - +//@cindex evacuate static StgClosure * evacuate(StgClosure *q) @@ -1020,7 +1276,7 @@ evacuate(StgClosure *q) const StgInfoTable *info; loop: - if (!LOOKS_LIKE_STATIC(q)) { + if (HEAP_ALLOCED(q)) { bd = Bdescr((P_)q); if (bd->gen->no > N) { /* Can't evacuate this object, because it's in a generation @@ -1036,55 +1292,73 @@ loop: } step = bd->step->to; } +#ifdef DEBUG + else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */ +#endif /* make sure the info pointer is into text space */ ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q)) || IS_HUGS_CONSTR_INFO(GET_INFO(q)))); - info = get_itbl(q); + /* + if (info->type==RBH) { + info = REVERT_INFOPTR(info); + IF_DEBUG(gc, + belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)", + q, info_type(q), info, info_type_by_ip(info))); + } + */ + switch (info -> type) { case BCO: - to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),step); - upd_evacuee(q,to); - return to; + { + nat size = bco_sizeW((StgBCO*)q); + + if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { + evacuate_large((P_)q, rtsFalse); + to = q; + } else { + /* just copy the block */ + to = copy(q,size,step); + } + return to; + } case MUT_VAR: ASSERT(q->header.info != &MUT_CONS_info); case MVAR: to = copy(q,sizeW_fromITBL(info),step); - upd_evacuee(q,to); recordMutable((StgMutClosure *)to); return to; - case STABLE_NAME: - stable_ptr_table[((StgStableName *)q)->sn].keep = rtsTrue; - to = copy(q,sizeofW(StgStableName),step); - upd_evacuee(q,to); - return to; - case FUN_1_0: case FUN_0_1: case CONSTR_1_0: case CONSTR_0_1: - to = copy(q,sizeofW(StgHeader)+1,step); - upd_evacuee(q,to); - return to; + return copy(q,sizeofW(StgHeader)+1,step); case THUNK_1_0: /* here because of MIN_UPD_SIZE */ case THUNK_0_1: - case FUN_1_1: - case FUN_0_2: - case FUN_2_0: case THUNK_1_1: case THUNK_0_2: case THUNK_2_0: +#ifdef NO_PROMOTE_THUNKS + if (bd->gen->no == 0 && + bd->step->no != 0 && + bd->step->no == bd->gen->n_steps-1) { + step = bd->step; + } +#endif + return copy(q,sizeofW(StgHeader)+2,step); + + case FUN_1_1: + case FUN_0_2: + case FUN_2_0: case CONSTR_1_1: case CONSTR_0_2: case CONSTR_2_0: - to = copy(q,sizeofW(StgHeader)+2,step); - upd_evacuee(q,to); - return to; + return copy(q,sizeofW(StgHeader)+2,step); case FUN: case THUNK: @@ -1095,19 +1369,17 @@ loop: case CAF_ENTERED: case WEAK: case FOREIGN: - to = copy(q,sizeW_fromITBL(info),step); - upd_evacuee(q,to); - return to; + case STABLE_NAME: + return copy(q,sizeW_fromITBL(info),step); case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: case BLACKHOLE: - to = copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step); - upd_evacuee(q,to); - return to; + return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step); case BLACKHOLE_BQ: to = copy(q,BLACKHOLE_sizeW(),step); - upd_evacuee(q,to); recordMutable((StgMutClosure *)to); return to; @@ -1127,11 +1399,11 @@ loop: case CONSTR_0_2: case CONSTR_STATIC: { - StgNat32 offset = info->layout.selector_offset; + StgWord32 offset = info->layout.selector_offset; /* check that the size is in range */ ASSERT(offset < - (StgNat32)(selectee_info->layout.payload.ptrs + + (StgWord32)(selectee_info->layout.payload.ptrs + 
selectee_info->layout.payload.nptrs)); /* perform the selection! */ @@ -1141,7 +1413,7 @@ loop: * with the evacuation, just update the source address with * a pointer to the (evacuated) constructor field. */ - if (IS_USER_PTR(q)) { + if (HEAP_ALLOCED(q)) { bdescr *bd = Bdescr((P_)q); if (bd->evacuated) { if (bd->gen->no < evac_gen) { @@ -1163,15 +1435,15 @@ loop: case IND_PERM: case IND_OLDGEN: case IND_OLDGEN_PERM: - selectee = stgCast(StgInd *,selectee)->indirectee; + selectee = ((StgInd *)selectee)->indirectee; goto selector_loop; case CAF_ENTERED: - selectee = stgCast(StgCAF *,selectee)->value; + selectee = ((StgCAF *)selectee)->value; goto selector_loop; case EVACUATED: - selectee = stgCast(StgEvacuated*,selectee)->evacuee; + selectee = ((StgEvacuated *)selectee)->evacuee; goto selector_loop; case THUNK: @@ -1185,18 +1457,19 @@ loop: /* aargh - do recursively???? */ case CAF_UNENTERED: case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: case BLACKHOLE: case BLACKHOLE_BQ: /* not evaluated yet */ break; default: - barf("evacuate: THUNK_SELECTOR: strange selectee"); + barf("evacuate: THUNK_SELECTOR: strange selectee %d", + (int)(selectee_info->type)); } } - to = copy(q,THUNK_SELECTOR_sizeW(),step); - upd_evacuee(q,to); - return to; + return copy(q,THUNK_SELECTOR_sizeW(),step); case IND: case IND_OLDGEN: @@ -1204,30 +1477,35 @@ loop: q = ((StgInd*)q)->indirectee; goto loop; - /* ToDo: optimise STATIC_LINK for known cases. - - FUN_STATIC : payload[0] - - THUNK_STATIC : payload[1] - - IND_STATIC : payload[1] - */ case THUNK_STATIC: + if (info->srt_len > 0 && major_gc && + THUNK_STATIC_LINK((StgClosure *)q) == NULL) { + THUNK_STATIC_LINK((StgClosure *)q) = static_objects; + static_objects = (StgClosure *)q; + } + return q; + case FUN_STATIC: - if (info->srt_len == 0) { /* small optimisation */ - return q; + if (info->srt_len > 0 && major_gc && + FUN_STATIC_LINK((StgClosure *)q) == NULL) { + FUN_STATIC_LINK((StgClosure *)q) = static_objects; + static_objects = (StgClosure *)q; } - /* fall through */ - case CONSTR_STATIC: + return q; + case IND_STATIC: - /* don't want to evacuate these, but we do want to follow pointers - * from SRTs - see scavenge_static. - */ + if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) { + IND_STATIC_LINK((StgClosure *)q) = static_objects; + static_objects = (StgClosure *)q; + } + return q; - /* put the object on the static list, if necessary. - */ + case CONSTR_STATIC: if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) { STATIC_LINK(info,(StgClosure *)q) = static_objects; static_objects = (StgClosure *)q; } - /* fall through */ + return q; case CONSTR_INTLIKE: case CONSTR_CHARLIKE: @@ -1248,15 +1526,13 @@ loop: case CATCH_FRAME: case SEQ_FRAME: /* shouldn't see these */ - barf("evacuate: stack frame\n"); + barf("evacuate: stack frame at %p\n", q); case AP_UPD: case PAP: /* these are special - the payload is a copy of a chunk of stack, tagging and all. */ - to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),step); - upd_evacuee(q,to); - return to; + return copy(q,pap_sizeW((StgPAP *)q),step); case EVACUATED: /* Already evacuated, just return the forwarding address. 
@@ -1269,7 +1545,7 @@ loop: if (evac_gen > 0) { /* optimisation */ StgClosure *p = ((StgEvacuated*)q)->evacuee; if (Bdescr((P_)p)->gen->no < evac_gen) { - /* fprintf(stderr,"evac failed!\n");*/ + IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p)); failed_to_evac = rtsTrue; TICK_GC_FAILED_PROMOTION(); } @@ -1278,23 +1554,21 @@ loop: case ARR_WORDS: { - nat size = arr_words_sizeW(stgCast(StgArrWords*,q)); + nat size = arr_words_sizeW((StgArrWords *)q); if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { evacuate_large((P_)q, rtsFalse); return q; } else { /* just copy the block */ - to = copy(q,size,step); - upd_evacuee(q,to); - return to; + return copy(q,size,step); } } case MUT_ARR_PTRS: case MUT_ARR_PTRS_FROZEN: { - nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q)); + nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q); if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { evacuate_large((P_)q, info->type == MUT_ARR_PTRS); @@ -1302,7 +1576,6 @@ loop: } else { /* just copy the block */ to = copy(q,size,step); - upd_evacuee(q,to); if (info->type == MUT_ARR_PTRS) { recordMutable((StgMutClosure *)to); } @@ -1312,10 +1585,17 @@ loop: case TSO: { - StgTSO *tso = stgCast(StgTSO *,q); + StgTSO *tso = (StgTSO *)q; nat size = tso_sizeW(tso); int diff; + /* Deal with redirected TSOs (a TSO that's had its stack enlarged). + */ + if (tso->whatNext == ThreadRelocated) { + q = (StgClosure *)tso->link; + goto loop; + } + /* Large TSOs don't get moved, so no relocation is required. */ if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { @@ -1336,20 +1616,53 @@ loop: new_tso->splim = (StgPtr)new_tso->splim + diff; relocate_TSO(tso, new_tso); - upd_evacuee(q,(StgClosure *)new_tso); recordMutable((StgMutClosure *)new_tso); return (StgClosure *)new_tso; } } +#if defined(PAR) + case RBH: // cf. BLACKHOLE_BQ + { + //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str); + to = copy(q,BLACKHOLE_sizeW(),step); + //ToDo: derive size etc from reverted IP + //to = copy(q,size,step); + recordMutable((StgMutClosure *)to); + IF_DEBUG(gc, + belch("@@ evacuate: RBH %p (%s) to %p (%s)", + q, info_type(q), to, info_type(to))); + return to; + } + case BLOCKED_FETCH: + ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE); + to = copy(q,sizeofW(StgBlockedFetch),step); + IF_DEBUG(gc, + belch("@@ evacuate: %p (%s) to %p (%s)", + q, info_type(q), to, info_type(to))); + return to; + case FETCH_ME: - fprintf(stderr,"evacuate: unimplemented/strange closure type\n"); - return q; + ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); + to = copy(q,sizeofW(StgFetchMe),step); + IF_DEBUG(gc, + belch("@@ evacuate: %p (%s) to %p (%s)", + q, info_type(q), to, info_type(to))); + return to; + + case FETCH_ME_BQ: + ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); + to = copy(q,sizeofW(StgFetchMeBlockingQueue),step); + IF_DEBUG(gc, + belch("@@ evacuate: %p (%s) to %p (%s)", + q, info_type(q), to, info_type(to))); + return to; +#endif default: - barf("evacuate: strange closure type"); + barf("evacuate: strange closure type %d", (int)(info->type)); } barf("evacuate"); @@ -1359,6 +1672,7 @@ loop: relocate_TSO is called just after a TSO has been copied from src to dest. It adjusts the update frame list for the new location. 
-------------------------------------------------------------------------- */ +//@cindex relocate_TSO StgTSO * relocate_TSO(StgTSO *src, StgTSO *dest) @@ -1399,7 +1713,7 @@ relocate_TSO(StgTSO *src, StgTSO *dest) break; default: - barf("relocate_TSO"); + barf("relocate_TSO %d", (int)(get_itbl(su)->type)); } break; } @@ -1407,6 +1721,11 @@ relocate_TSO(StgTSO *src, StgTSO *dest) return dest; } +//@node Scavenging, Reverting CAFs, Evacuation +//@subsection Scavenging + +//@cindex scavenge_srt + static inline void scavenge_srt(const StgInfoTable *info) { @@ -1416,11 +1735,50 @@ scavenge_srt(const StgInfoTable *info) * srt field in the info table. That's ok, because we'll * never dereference it. */ - srt = stgCast(StgClosure **,info->srt); + srt = (StgClosure **)(info->srt); srt_end = srt + info->srt_len; for (; srt < srt_end; srt++) { - evacuate(*srt); + /* Special-case to handle references to closures hiding out in DLLs, since + double indirections required to get at those. The code generator knows + which is which when generating the SRT, so it stores the (indirect) + reference to the DLL closure in the table by first adding one to it. + We check for this here, and undo the addition before evacuating it. + + If the SRT entry hasn't got bit 0 set, the SRT entry points to a + closure that's fixed at link-time, and no extra magic is required. + */ +#ifdef ENABLE_WIN32_DLL_SUPPORT + if ( (unsigned long)(*srt) & 0x1 ) { + evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1))); + } else { + evacuate(*srt); + } +#else + evacuate(*srt); +#endif + } +} + +/* ----------------------------------------------------------------------------- + Scavenge a TSO. + -------------------------------------------------------------------------- */ + +static void +scavengeTSO (StgTSO *tso) +{ + /* chase the link field for any TSOs on the same queue */ + (StgClosure *)tso->link = evacuate((StgClosure *)tso->link); + if ( tso->why_blocked == BlockedOnMVar + || tso->why_blocked == BlockedOnBlackHole + || tso->why_blocked == BlockedOnException) { + tso->block_info.closure = evacuate(tso->block_info.closure); + } + if ( tso->blocked_exceptions != NULL ) { + tso->blocked_exceptions = + (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions); } + /* scavenge this thread's stack */ + scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); } /* ----------------------------------------------------------------------------- @@ -1435,7 +1793,7 @@ scavenge_srt(const StgInfoTable *info) scavenging a mutable object where early promotion isn't such a good idea. 
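 *
 * (The idiom used throughout the cases below:
 *
 *    p->field = evacuate(p->field);
 *    if (failed_to_evac) {
 *        failed_to_evac = rtsFalse;
 *        recordOldToNewPtrs((StgMutClosure *)p);
 *    }
 *
 * with evac_gen temporarily set to 0 around mutable objects.)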
-------------------------------------------------------------------------- */ - +//@cindex scavenge static void scavenge(step *step) @@ -1469,11 +1827,16 @@ scavenge(step *step) || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p)))); info = get_itbl((StgClosure *)p); + /* + if (info->type==RBH) + info = REVERT_INFOPTR(info); + */ + switch (info -> type) { case BCO: { - StgBCO* bco = stgCast(StgBCO*,p); + StgBCO* bco = (StgBCO *)p; nat i; for (i = 0; i < bco->n_ptrs; i++) { bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i)); @@ -1554,10 +1917,6 @@ scavenge(step *step) case WEAK: case FOREIGN: case STABLE_NAME: - case IND_PERM: - case IND_OLDGEN_PERM: - case CAF_UNENTERED: - case CAF_ENTERED: { StgPtr end; @@ -1569,6 +1928,52 @@ scavenge(step *step) break; } + case IND_PERM: + if (step->gen->no != 0) { + SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info); + } + /* fall through */ + case IND_OLDGEN_PERM: + ((StgIndOldGen *)p)->indirectee = + evacuate(((StgIndOldGen *)p)->indirectee); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordOldToNewPtrs((StgMutClosure *)p); + } + p += sizeofW(StgIndOldGen); + break; + + case CAF_UNENTERED: + { + StgCAF *caf = (StgCAF *)p; + + caf->body = evacuate(caf->body); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordOldToNewPtrs((StgMutClosure *)p); + } else { + caf->mut_link = NULL; + } + p += sizeofW(StgCAF); + break; + } + + case CAF_ENTERED: + { + StgCAF *caf = (StgCAF *)p; + + caf->body = evacuate(caf->body); + caf->value = evacuate(caf->value); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordOldToNewPtrs((StgMutClosure *)p); + } else { + caf->mut_link = NULL; + } + p += sizeofW(StgCAF); + break; + } + case MUT_VAR: /* ignore MUT_CONSs */ if (((StgMutVar *)p)->header.info != &MUT_CONS_info) { @@ -1580,6 +1985,8 @@ scavenge(step *step) break; case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: case BLACKHOLE: p += BLACKHOLE_sizeW(); break; @@ -1638,7 +2045,7 @@ scavenge(step *step) * evacuate the function pointer too... */ { - StgPAP* pap = stgCast(StgPAP*,p); + StgPAP* pap = (StgPAP *)p; pap->fun = evacuate(pap->fun); scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); @@ -1648,7 +2055,7 @@ scavenge(step *step) case ARR_WORDS: /* nothing to follow */ - p += arr_words_sizeW(stgCast(StgArrWords*,p)); + p += arr_words_sizeW((StgArrWords *)p); break; case MUT_ARR_PTRS: @@ -1684,21 +2091,80 @@ scavenge(step *step) case TSO: { - StgTSO *tso; - - tso = (StgTSO *)p; + StgTSO *tso = (StgTSO *)p; evac_gen = 0; - /* chase the link field for any TSOs on the same queue */ - (StgClosure *)tso->link = evacuate((StgClosure *)tso->link); - /* scavenge this thread's stack */ - scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); + scavengeTSO(tso); evac_gen = saved_evac_gen; p += tso_sizeW(tso); break; } +#if defined(PAR) + case RBH: // cf. BLACKHOLE_BQ + { + // nat size, ptrs, nonptrs, vhs; + // char str[80]; + // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str); + StgRBH *rbh = (StgRBH *)p; + (StgClosure *)rbh->blocking_queue = + evacuate((StgClosure *)rbh->blocking_queue); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)rbh); + } + IF_DEBUG(gc, + belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", + p, info_type(p), (StgClosure *)rbh->blocking_queue)); + // ToDo: use size of reverted closure here! 
+ p += BLACKHOLE_sizeW(); + break; + } + case BLOCKED_FETCH: + { + StgBlockedFetch *bf = (StgBlockedFetch *)p; + /* follow the pointer to the node which is being demanded */ + (StgClosure *)bf->node = + evacuate((StgClosure *)bf->node); + /* follow the link to the rest of the blocking queue */ + (StgClosure *)bf->link = + evacuate((StgClosure *)bf->link); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)bf); + } + IF_DEBUG(gc, + belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", + bf, info_type((StgClosure *)bf), + bf->node, info_type(bf->node))); + p += sizeofW(StgBlockedFetch); + break; + } + case FETCH_ME: + IF_DEBUG(gc, + belch("@@ scavenge: HWL claims nothing to do for %p (%s)", + p, info_type((StgClosure *)p))); + p += sizeofW(StgFetchMe); + break; // nothing to do in this case + + case FETCH_ME_BQ: // cf. BLACKHOLE_BQ + { + StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; + (StgClosure *)fmbq->blocking_queue = + evacuate((StgClosure *)fmbq->blocking_queue); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)fmbq); + } + IF_DEBUG(gc, + belch("@@ scavenge: %p (%s) exciting, isn't it", + p, info_type((StgClosure *)p))); + p += sizeofW(StgFetchMeBlockingQueue); + break; + } +#endif + case EVACUATED: barf("scavenge: unimplemented/strange closure type\n"); @@ -1727,10 +2193,12 @@ scavenge(step *step) because they contain old-to-new generation pointers. Only certain objects can have this property. -------------------------------------------------------------------------- */ +//@cindex scavenge_one + static rtsBool scavenge_one(StgClosure *p) { - StgInfoTable *info; + const StgInfoTable *info; rtsBool no_luck; ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p)) @@ -1738,6 +2206,11 @@ scavenge_one(StgClosure *p) info = get_itbl(p); + /* ngoq moHqu'! + if (info->type==RBH) + info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure + */ + switch (info -> type) { case FUN: @@ -1763,7 +2236,6 @@ scavenge_one(StgClosure *p) case IND_PERM: case IND_OLDGEN_PERM: case CAF_UNENTERED: - case CAF_ENTERED: { StgPtr q, end; @@ -1775,6 +2247,8 @@ scavenge_one(StgClosure *p) } case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: case BLACKHOLE: break; @@ -1823,11 +2297,12 @@ scavenge_one(StgClosure *p) generations older than the one being collected) as roots. We also remove non-mutable objects from the mutable list at this point. 
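 *
 * (Per-object pattern, as in the cases below: evacuate whatever the
 * object points to; if failed_to_evac is set, keep the object with
 *
 *    p->mut_link = new_list; new_list = p;
 *
 * otherwise drop it from the list with p->mut_link = NULL.)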
-------------------------------------------------------------------------- */ +//@cindex scavenge_mut_once_list static void scavenge_mut_once_list(generation *gen) { - StgInfoTable *info; + const StgInfoTable *info; StgMutClosure *p, *next, *new_list; p = gen->mut_once_list; @@ -1844,6 +2319,10 @@ scavenge_mut_once_list(generation *gen) || IS_HUGS_CONSTR_INFO(GET_INFO(p)))); info = get_itbl(p); + /* + if (info->type==RBH) + info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure + */ switch(info->type) { case IND_OLDGEN: @@ -1855,7 +2334,8 @@ scavenge_mut_once_list(generation *gen) ((StgIndOldGen *)p)->indirectee = evacuate(((StgIndOldGen *)p)->indirectee); -#if 0 +#ifdef DEBUG + if (RtsFlags.DebugFlags.gc) /* Debugging code to print out the size of the thing we just * promoted */ @@ -1916,24 +2396,53 @@ scavenge_mut_once_list(generation *gen) } continue; + case CAF_ENTERED: + { + StgCAF *caf = (StgCAF *)p; + caf->body = evacuate(caf->body); + caf->value = evacuate(caf->value); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + p->mut_link = new_list; + new_list = p; + } else { + p->mut_link = NULL; + } + } + continue; + + case CAF_UNENTERED: + { + StgCAF *caf = (StgCAF *)p; + caf->body = evacuate(caf->body); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + p->mut_link = new_list; + new_list = p; + } else { + p->mut_link = NULL; + } + } + continue; + default: /* shouldn't have anything else on the mutables list */ - barf("scavenge_mut_once_list: strange object?"); + barf("scavenge_mut_once_list: strange object? %d", (int)(info->type)); } } gen->mut_once_list = new_list; } +//@cindex scavenge_mutable_list static void scavenge_mutable_list(generation *gen) { - StgInfoTable *info; - StgMutClosure *p, *next, *new_list; + const StgInfoTable *info; + StgMutClosure *p, *next; p = gen->saved_mut_list; - new_list = END_MUT_LIST; next = p->mut_link; evac_gen = 0; @@ -1946,6 +2455,10 @@ scavenge_mutable_list(generation *gen) || IS_HUGS_CONSTR_INFO(GET_INFO(p)))); info = get_itbl(p); + /* + if (info->type==RBH) + info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure + */ switch(info->type) { case MUT_ARR_PTRS_FROZEN: @@ -1955,6 +2468,10 @@ scavenge_mutable_list(generation *gen) { StgPtr end, q; + IF_DEBUG(gc, + belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS_FROZEN %p; size: %#x ; next: %p", + p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link)); + end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); evac_gen = gen->no; for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { @@ -1964,19 +2481,23 @@ scavenge_mutable_list(generation *gen) if (failed_to_evac) { failed_to_evac = rtsFalse; - p->mut_link = new_list; - new_list = p; + p->mut_link = gen->mut_list; + gen->mut_list = p; } continue; } case MUT_ARR_PTRS: /* follow everything */ - p->mut_link = new_list; - new_list = p; + p->mut_link = gen->mut_list; + gen->mut_list = p; { StgPtr end, q; + IF_DEBUG(gc, + belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS %p; size: %#x ; next: %p", + p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link)); + end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { (StgClosure *)*q = evacuate((StgClosure *)*q); @@ -1989,66 +2510,94 @@ scavenge_mutable_list(generation *gen) * it from the mutable list if possible by promoting whatever it * points to. 
*/ + IF_DEBUG(gc, + belch("@@ scavenge_mut_list: scavenging MUT_VAR %p; var: %p ; next: %p", + p, ((StgMutVar *)p)->var, p->mut_link)); + ASSERT(p->header.info != &MUT_CONS_info); ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); - p->mut_link = new_list; - new_list = p; + p->mut_link = gen->mut_list; + gen->mut_list = p; continue; case MVAR: { StgMVar *mvar = (StgMVar *)p; + + IF_DEBUG(gc, + belch("@@ scavenge_mut_list: scavenging MAVR %p; head: %p; tail: %p; value: %p ; next: %p", + mvar, mvar->head, mvar->tail, mvar->value, p->mut_link)); + (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); - p->mut_link = new_list; - new_list = p; + p->mut_link = gen->mut_list; + gen->mut_list = p; continue; } case TSO: - /* follow ptrs and remove this from the mutable list */ { StgTSO *tso = (StgTSO *)p; - /* Don't bother scavenging if this thread is dead - */ - if (!(tso->whatNext == ThreadComplete || - tso->whatNext == ThreadKilled)) { - /* Don't need to chase the link field for any TSOs on the - * same queue. Just scavenge this thread's stack - */ - scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); - } + scavengeTSO(tso); /* Don't take this TSO off the mutable list - it might still * point to some younger objects (because we set evac_gen to 0 * above). */ - tso->mut_link = new_list; - new_list = (StgMutClosure *)tso; + tso->mut_link = gen->mut_list; + gen->mut_list = (StgMutClosure *)tso; continue; } case BLACKHOLE_BQ: { StgBlockingQueue *bh = (StgBlockingQueue *)p; + + IF_DEBUG(gc, + belch("@@ scavenge_mut_list: scavenging BLACKHOLE_BQ (%p); next: %p", + p, p->mut_link)); + (StgClosure *)bh->blocking_queue = evacuate((StgClosure *)bh->blocking_queue); - p->mut_link = new_list; - new_list = p; + p->mut_link = gen->mut_list; + gen->mut_list = p; continue; } + /* Happens if a BLACKHOLE_BQ in the old generation is updated: + */ + case IND_OLDGEN: + case IND_OLDGEN_PERM: + /* Try to pull the indirectee into this generation, so we can + * remove the indirection from the mutable list. + */ + evac_gen = gen->no; + ((StgIndOldGen *)p)->indirectee = + evacuate(((StgIndOldGen *)p)->indirectee); + evac_gen = 0; + + if (failed_to_evac) { + failed_to_evac = rtsFalse; + p->mut_link = gen->mut_once_list; + gen->mut_once_list = p; + } else { + p->mut_link = NULL; + } + continue; + + // HWL: old PAR code deleted here + default: /* shouldn't have anything else on the mutables list */ - barf("scavenge_mut_list: strange object?"); + barf("scavenge_mutable_list: strange object? %d", (int)(info->type)); } } - - gen->mut_list = new_list; } +//@cindex scavenge_static + static void scavenge_static(void) { @@ -2064,7 +2613,10 @@ scavenge_static(void) while (p != END_OF_STATIC_LIST) { info = get_itbl(p); - + /* + if (info->type==RBH) + info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure + */ /* make sure the info pointer is into text space */ ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p)) || IS_HUGS_CONSTR_INFO(GET_INFO(p)))); @@ -2133,13 +2685,16 @@ scavenge_static(void) objects pointed to by it. We can use the same code for walking PAPs, since these are just sections of copied stack. 
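 *
 * (The small-bitmap walk used below, roughly: a clear bit marks a
 * pointer slot, so
 *
 *    while (bitmap != 0) {
 *        if ((bitmap & 1) == 0)
 *            (StgClosure *)*p = evacuate((StgClosure *)*p);
 *        p++;
 *        bitmap = bitmap >> 1;
 *    }
 * )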
-------------------------------------------------------------------------- */ +//@cindex scavenge_stack static void scavenge_stack(StgPtr p, StgPtr stack_end) { StgPtr q; const StgInfoTable* info; - StgNat32 bitmap; + StgWord32 bitmap; + + IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end)); /* * Each time around this loop, we are looking at a chunk of stack @@ -2148,24 +2703,22 @@ scavenge_stack(StgPtr p, StgPtr stack_end) */ while (p < stack_end) { - q = *stgCast(StgPtr*,p); + q = *(P_ *)p; /* If we've got a tag, skip over that many words on the stack */ - if (IS_ARG_TAG(stgCast(StgWord,q))) { + if (IS_ARG_TAG((W_)q)) { p += ARG_SIZE(q); p++; continue; } /* Is q a pointer to a closure? */ - if (! LOOKS_LIKE_GHC_INFO(q)) { - + if (! LOOKS_LIKE_GHC_INFO(q) ) { #ifdef DEBUG - if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */ - ASSERT(closure_STATIC(stgCast(StgClosure*,q))); - } - /* otherwise, must be a pointer into the allocation space. - */ + if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */ + ASSERT(closure_STATIC((StgClosure *)q)); + } + /* otherwise, must be a pointer into the allocation space. */ #endif (StgClosure *)*p = evacuate((StgClosure *)q); @@ -2178,21 +2731,31 @@ scavenge_stack(StgPtr p, StgPtr stack_end) * record. All activation records have 'bitmap' style layout * info. */ - info = get_itbl(stgCast(StgClosure*,p)); + info = get_itbl((StgClosure *)p); switch (info->type) { /* Dynamic bitmap: the mask is stored on the stack */ case RET_DYN: - bitmap = stgCast(StgRetDyn*,p)->liveness; - p = &payloadWord(stgCast(StgRetDyn*,p),0); + bitmap = ((StgRetDyn *)p)->liveness; + p = (P_)&((StgRetDyn *)p)->payload[0]; goto small_bitmap; /* probably a slow-entry point return address: */ case FUN: case FUN_STATIC: - p++; + { +#if 0 + StgPtr old_p = p; + p++; p++; + IF_DEBUG(sanity, + belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)", + old_p, p, old_p+1)); +#else + p++; /* what if FHS!=1 !? -- HWL */ +#endif goto follow_srt; + } /* Specialised code for update frames, since they're so common. * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE, @@ -2202,7 +2765,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) { StgUpdateFrame *frame = (StgUpdateFrame *)p; StgClosure *to; - StgClosureType type = get_itbl(frame->updatee)->type; + nat type = get_itbl(frame->updatee)->type; p += sizeofW(StgUpdateFrame); if (type == EVACUATED) { @@ -2217,36 +2780,57 @@ scavenge_stack(StgPtr p, StgPtr stack_end) } continue; } - step = bd->step->to; + + /* Don't promote blackholes */ + step = bd->step; + if (!(step->gen->no == 0 && + step->no != 0 && + step->no == step->gen->n_steps-1)) { + step = step->to; + } + switch (type) { case BLACKHOLE: case CAF_BLACKHOLE: to = copyPart(frame->updatee, BLACKHOLE_sizeW(), sizeofW(StgHeader), step); - upd_evacuee(frame->updatee,to); frame->updatee = to; continue; case BLACKHOLE_BQ: to = copy(frame->updatee, BLACKHOLE_sizeW(), step); - upd_evacuee(frame->updatee,to); frame->updatee = to; recordMutable((StgMutClosure *)to); continue; default: + /* will never be SE_{,CAF_}BLACKHOLE, since we + don't push an update frame for single-entry thunks. KSW 1999-01. 
*/ barf("scavenge_stack: UPDATE_FRAME updatee"); } } } /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */ - case RET_BCO: - case RET_SMALL: - case RET_VEC_SMALL: case STOP_FRAME: case CATCH_FRAME: case SEQ_FRAME: + { + // StgPtr old_p = p; // debugging only -- HWL + /* stack frames like these are ordinary closures and therefore may + contain setup-specific fixed-header words (as in GranSim!); + therefore, these cases should not use p++ but &(p->payload) -- HWL */ + // IF_DEBUG(gran, IF_DEBUG(sanity, printObj(p))); + bitmap = info->layout.bitmap; + + p = (StgPtr)&(((StgClosure *)p)->payload); + // IF_DEBUG(sanity, belch("HWL: scavenge_stack: (STOP|CATCH|SEQ)_FRAME adjusting p from %p to %p (instead of %p)", old_p, p, old_p+1)); + goto small_bitmap; + } + case RET_BCO: + case RET_SMALL: + case RET_VEC_SMALL: bitmap = info->layout.bitmap; p++; + /* this assumes that the payload starts immediately after the info-ptr */ small_bitmap: while (bitmap != 0) { if ((bitmap & 1) == 0) { @@ -2307,6 +2891,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) objects are (repeatedly) mutable, so most of the time evac_gen will be zero. --------------------------------------------------------------------------- */ +//@cindex scavenge_large static void scavenge_large(step *step) @@ -2330,7 +2915,7 @@ scavenge_large(step *step) dbl_link_onto(bd, &step->scavenged_large_objects); p = bd->start; - info = get_itbl(stgCast(StgClosure*,p)); + info = get_itbl((StgClosure *)p); switch (info->type) { @@ -2371,7 +2956,7 @@ scavenge_large(step *step) case BCO: { - StgBCO* bco = stgCast(StgBCO*,p); + StgBCO* bco = (StgBCO *)p; nat i; evac_gen = saved_evac_gen; for (i = 0; i < bco->n_ptrs; i++) { @@ -2382,16 +2967,9 @@ scavenge_large(step *step) } case TSO: - { - StgTSO *tso; - - tso = (StgTSO *)p; - /* chase the link field for any TSOs on the same queue */ - (StgClosure *)tso->link = evacuate((StgClosure *)tso->link); - /* scavenge this thread's stack */ - scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); + scavengeTSO((StgTSO *)p); + // HWL: old PAR code deleted here continue; - } default: barf("scavenge_large: unknown/strange object"); @@ -2399,8 +2977,10 @@ scavenge_large(step *step) } } +//@cindex zero_static_object_list + static void -zeroStaticObjectList(StgClosure* first_static) +zero_static_object_list(StgClosure* first_static) { StgClosure* p; StgClosure* link; @@ -2421,8 +3001,10 @@ zeroStaticObjectList(StgClosure* first_static) * It doesn't do any harm to zero all the mutable link fields on the * mutable list. 
*/ +//@cindex zero_mutable_list + static void -zeroMutableList(StgMutClosure *first) +zero_mutable_list( StgMutClosure *first ) { StgMutClosure *next, *c; @@ -2432,9 +3014,13 @@ zeroMutableList(StgMutClosure *first) } } +//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging +//@subsection Reverting CAFs + /* ----------------------------------------------------------------------------- Reverting CAFs -------------------------------------------------------------------------- */ +//@cindex RevertCAFs void RevertCAFs(void) { @@ -2444,41 +3030,38 @@ void RevertCAFs(void) enteredCAFs = caf->link; ASSERT(get_itbl(caf)->type == CAF_ENTERED); SET_INFO(caf,&CAF_UNENTERED_info); - caf->value = stgCast(StgClosure*,0xdeadbeef); - caf->link = stgCast(StgCAF*,0xdeadbeef); + caf->value = (StgClosure *)0xdeadbeef; + caf->link = (StgCAF *)0xdeadbeef; } + enteredCAFs = END_CAF_LIST; } -void revertDeadCAFs(void) +//@cindex revert_dead_CAFs + +void revert_dead_CAFs(void) { StgCAF* caf = enteredCAFs; enteredCAFs = END_CAF_LIST; while (caf != END_CAF_LIST) { - StgCAF* next = caf->link; - - switch(GET_INFO(caf)->type) { - case EVACUATED: - { - /* This object has been evacuated, it must be live. */ - StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee); - new->link = enteredCAFs; - enteredCAFs = new; - break; - } - case CAF_ENTERED: - { - SET_INFO(caf,&CAF_UNENTERED_info); - caf->value = stgCast(StgClosure*,0xdeadbeef); - caf->link = stgCast(StgCAF*,0xdeadbeef); - break; - } - default: - barf("revertDeadCAFs: enteredCAFs list corrupted"); - } - caf = next; + StgCAF *next, *new; + next = caf->link; + new = (StgCAF*)isAlive((StgClosure*)caf); + if (new) { + new->link = enteredCAFs; + enteredCAFs = new; + } else { + /* ASSERT(0); */ + SET_INFO(caf,&CAF_UNENTERED_info); + caf->value = (StgClosure*)0xdeadbeef; + caf->link = (StgCAF*)0xdeadbeef; + } + caf = next; } } +//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs +//@subsection Sanity code for CAF garbage collection + /* ----------------------------------------------------------------------------- Sanity code for CAF garbage collection. @@ -2492,6 +3075,8 @@ void revertDeadCAFs(void) -------------------------------------------------------------------------- */ #ifdef DEBUG +//@cindex gcCAFs + static void gcCAFs(void) { @@ -2529,6 +3114,9 @@ gcCAFs(void) } #endif +//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection +//@subsection Lazy black holing + /* ----------------------------------------------------------------------------- Lazy black holing. @@ -2536,6 +3124,7 @@ gcCAFs(void) some work, we have to run down the stack and black-hole all the closures referred to by update frames. 
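 *
 * (In outline, as threadLazyBlackHole below does: follow the su chain
 * from tso->su; for each UPDATE_FRAME whose updatee is not already a
 * BLACKHOLE_BQ or CAF_BLACKHOLE, overwrite the info pointer:
 *
 *    SET_INFO(bh, &BLACKHOLE_info);
 *
 * CATCH and SEQ frames are skipped via their link fields.)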
+//@cindex threadLazyBlackHole

 static void
 threadLazyBlackHole(StgTSO *tso)
@@ -2551,7 +3140,7 @@ threadLazyBlackHole(StgTSO *tso)
     switch (get_itbl(update_frame)->type) {

     case CATCH_FRAME:
-      update_frame = stgCast(StgCatchFrame*,update_frame)->link;
+      update_frame = ((StgCatchFrame *)update_frame)->link;
       break;

     case UPDATE_FRAME:
@@ -2570,6 +3159,9 @@ threadLazyBlackHole(StgTSO *tso)

	if (bh->header.info != &BLACKHOLE_BQ_info &&
	    bh->header.info != &CAF_BLACKHOLE_info) {
+#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
+	  fprintf(stderr,"Unexpected lazy BHing required at %p\n",(void *)bh);
+#endif
	  SET_INFO(bh,&BLACKHOLE_info);
	}

@@ -2577,7 +3169,7 @@ threadLazyBlackHole(StgTSO *tso)
       break;

     case SEQ_FRAME:
-      update_frame = stgCast(StgSeqFrame*,update_frame)->link;
+      update_frame = ((StgSeqFrame *)update_frame)->link;
       break;

     case STOP_FRAME:
@@ -2588,6 +3180,9 @@ threadLazyBlackHole(StgTSO *tso)
   }
 }

+//@node Stack squeezing, Pausing a thread, Lazy black holing
+//@subsection Stack squeezing
+
 /* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
+//@cindex threadSqueezeStack

 static void
 threadSqueezeStack(StgTSO *tso)
@@ -2605,6 +3201,14 @@ threadSqueezeStack(StgTSO *tso)
   StgUpdateFrame *prev_frame;			/* Temporally previous */
   StgPtr bottom;
   rtsBool prev_was_update_frame;
+#if DEBUG
+  StgUpdateFrame *top_frame;
+  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
+      bhs=0, squeezes=0;
+  void printObj( StgClosure *obj ); // from Printer.c
+
+  top_frame = tso->su;
+#endif

   bottom = &(tso->stack[tso->stack_size]);
   frame  = tso->su;
@@ -2624,11 +3228,36 @@ threadSqueezeStack(StgTSO *tso)
   */

   next_frame = NULL;
-  while ((P_)frame < bottom - 1) {  /* bottom - 1 is the STOP_FRAME */
+  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
+  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
+#if DEBUG
+    IF_DEBUG(sanity,
+	     if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
+	       printObj((StgClosure *)prev_frame);
+	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
+		    frame, prev_frame);
+	     })
+    switch (get_itbl(frame)->type) {
+    case UPDATE_FRAME: upd_frames++;
+		       if (frame->updatee->header.info == &BLACKHOLE_info)
+			 bhs++;
+		       break;
+    case STOP_FRAME:  stop_frames++;
+		      break;
+    case CATCH_FRAME: catch_frames++;
+		      break;
+    case SEQ_FRAME:   seq_frames++;
+		      break;
+    default:
+      printObj((StgClosure *)prev_frame);
+      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
+	   frame, prev_frame);
+    }
+#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &BLACKHOLE_info) {
        break;
@@ -2678,8 +3307,9 @@ threadSqueezeStack(StgTSO *tso)
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

-#if 0 /* DEBUG */
-      fprintf(stderr, "squeezing frame at %p\n", frame);
+#if DEBUG
+      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
+      squeezes++;
 #endif
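The first loop of threadSqueezeStack above is a classic in-place reversal of a singly-linked chain: the update frames are linked from the top of the stack towards the bottom, and the loop re-points each link field at the temporally previous frame so the squeezing pass can walk them bottom-up. The same pointer dance in isolation — Frame and reverse_chain are hypothetical stand-ins for the StgUpdateFrame chain, not the RTS types:

    #include <stdio.h>
    #include <stddef.h>

    /* hypothetical stand-in for StgUpdateFrame: frames linked top -> bottom */
    typedef struct Frame_ {
        int            id;
        struct Frame_ *link;    /* towards the stack bottom (older frames) */
    } Frame;

    /* reverse the chain in place, exactly the pointer dance in the loop
     * above: afterwards the links run from the bottom-most frame back up */
    static Frame *reverse_chain(Frame *frame)
    {
        Frame *next_frame = NULL;
        while (frame != NULL) {
            Frame *prev_frame = frame->link;
            frame->link = next_frame;
            next_frame = frame;
            frame = prev_frame;
        }
        return next_frame;      /* new head: the frame nearest the stack bottom */
    }

    int main(void)
    {
        Frame f3 = { 3, NULL }, f2 = { 2, &f3 }, f1 = { 1, &f2 };
        for (Frame *p = reverse_chain(&f1); p != NULL; p = p->link)
            printf("frame %d\n", p->id);    /* prints 3, 2, 1 */
        return 0;
    }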
      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: after all, we're
       * only constructing an unreachable chain of BLACKHOLE_BQs to
       * hold them anyway.  It's very likely to be
       * slower --SDM
       */
#if 0 /* do it properly... */
-	if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
+# if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
+#  error Unimplemented lazy BH warning.  (KSW 1999-01)
+# endif
+	if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
+	    || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
+	    ) {
	  /* Sigh.  It has one.  Don't lose those threads! */
	  if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
	    /* Urgh.  Two queues.  Merge them. */
@@ -2717,7 +3352,11 @@ threadSqueezeStack(StgTSO *tso)
 #endif

      TICK_UPD_SQUEEZED();
-      UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
+      /* wasn't there something about update squeezing and ticky to be
+       * sorted out?  oh yes: we aren't counting each enter properly
+       * in this case.  See the log somewhere.  KSW 1999-04-21
+       */
+      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;	/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

@@ -2730,8 +3369,12 @@ threadSqueezeStack(StgTSO *tso)
      */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
-	if (bh->header.info != &BLACKHOLE_BQ_info &&
+	if (bh->header.info != &BLACKHOLE_info &&
+	    bh->header.info != &BLACKHOLE_BQ_info &&
	    bh->header.info != &CAF_BLACKHOLE_info) {
+#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
+	  fprintf(stderr,"Unexpected lazy BHing required at %p\n",(void *)bh);
+#endif
	  SET_INFO(bh,&BLACKHOLE_info);
	}
      }
@@ -2751,9 +3394,10 @@ threadSqueezeStack(StgTSO *tso)
  else
    next_frame_bottom = tso->sp - 1;

-#if 0 /* DEBUG */
-  fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
-	  displacement);
+#if DEBUG
+  IF_DEBUG(gc,
+	   fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
+		   displacement))
 #endif

  while (sp >= next_frame_bottom) {
@@ -2767,8 +3411,16 @@ threadSqueezeStack(StgTSO *tso)

  tso->sp += displacement;
  tso->su = prev_frame;
+#if DEBUG
+  IF_DEBUG(gc,
+	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch-, %d seq-frames\n",
+		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
+#endif
 }
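The sliding step of threadSqueezeStack (its loop body falls between the hunks above) moves the live words between two squeezed-out frames towards the stack bottom, i.e. up in address terms, by `displacement` words; copying from the highest address downwards makes the overlapping copy safe, the same way memmove handles overlap. A sketch of that idea under stated assumptions — slide_words and the example layout are hypothetical, not the RTS code:

    #include <stdio.h>
    #include <stdint.h>

    typedef uintptr_t W_;    /* stand-in for the RTS's StgWord */

    /* slide the words in [next_frame_bottom, sp] up by `displacement`
     * words; iterating from the highest address first means source and
     * destination may overlap safely */
    static void slide_words(W_ *sp, W_ *next_frame_bottom, size_t displacement)
    {
        while (sp >= next_frame_bottom) {
            sp[displacement] = *sp;
            sp--;
        }
    }

    int main(void)
    {
        /* live words at indices 0..3; a dead two-word frame at 4..5 */
        W_ stack[8] = { 10, 11, 12, 13, 0, 0, 20, 21 };
        slide_words(&stack[3], &stack[0], 2);   /* words 0..3 move to 2..5 */
        for (int i = 0; i < 8; i++)
            printf("%d: %lu\n", i, (unsigned long)stack[i]);
        return 0;
    }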
+//@node Pausing a thread, Index, Stack squeezing
+//@subsection Pausing a thread
+
 /* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
+//@cindex threadPaused

 void
 threadPaused(StgTSO *tso)
@@ -2785,3 +3438,78 @@ threadPaused(StgTSO *tso)
   else
     threadLazyBlackHole(tso);
 }
+
+/* -----------------------------------------------------------------------------
+ * Debugging
+ * -------------------------------------------------------------------------- */
+
+#if DEBUG
+//@cindex printMutOnceList
+void
+printMutOnceList(generation *gen)
+{
+  StgMutClosure *p, *next;
+
+  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
+
+  /* read the link field before printing; never dereference END_MUT_LIST */
+  for (p = gen->mut_once_list; p != END_MUT_LIST; p = next) {
+    next = p->mut_link;
+    fprintf(stderr, "%p (%s), ",
+	    p, info_type((StgClosure *)p));
+  }
+  fputc('\n', stderr);
+}
+
+//@cindex printMutableList
+void
+printMutableList(generation *gen)
+{
+  StgMutClosure *p, *next;
+
+  fprintf(stderr, "@@ Mutable list %p: ", gen->saved_mut_list);
+
+  /* read the link field before printing; never dereference END_MUT_LIST */
+  for (p = gen->saved_mut_list; p != END_MUT_LIST; p = next) {
+    next = p->mut_link;
+    fprintf(stderr, "%p (%s), ",
+	    p, info_type((StgClosure *)p));
+  }
+  fputc('\n', stderr);
+}
+#endif /* DEBUG */
+
+//@node Index, , Pausing a thread
+//@subsection Index
+
+//@index
+//* GarbageCollect::  @cindex\s-+GarbageCollect
+//* MarkRoot::  @cindex\s-+MarkRoot
+//* RevertCAFs::  @cindex\s-+RevertCAFs
+//* addBlock::  @cindex\s-+addBlock
+//* cleanup_weak_ptr_list::  @cindex\s-+cleanup_weak_ptr_list
+//* copy::  @cindex\s-+copy
+//* copyPart::  @cindex\s-+copyPart
+//* evacuate::  @cindex\s-+evacuate
+//* evacuate_large::  @cindex\s-+evacuate_large
+//* gcCAFs::  @cindex\s-+gcCAFs
+//* isAlive::  @cindex\s-+isAlive
+//* mkMutCons::  @cindex\s-+mkMutCons
+//* relocate_TSO::  @cindex\s-+relocate_TSO
+//* revert_dead_CAFs::  @cindex\s-+revert_dead_CAFs
+//* scavenge::  @cindex\s-+scavenge
+//* scavenge_large::  @cindex\s-+scavenge_large
+//* scavenge_mut_once_list::  @cindex\s-+scavenge_mut_once_list
+//* scavenge_mutable_list::  @cindex\s-+scavenge_mutable_list
+//* scavenge_one::  @cindex\s-+scavenge_one
+//* scavenge_srt::  @cindex\s-+scavenge_srt
+//* scavenge_stack::  @cindex\s-+scavenge_stack
+//* scavenge_static::  @cindex\s-+scavenge_static
+//* threadLazyBlackHole::  @cindex\s-+threadLazyBlackHole
+//* threadPaused::  @cindex\s-+threadPaused
+//* threadSqueezeStack::  @cindex\s-+threadSqueezeStack
+//* traverse_weak_ptr_list::  @cindex\s-+traverse_weak_ptr_list
+//* upd_evacuee::  @cindex\s-+upd_evacuee
+//* zero_mutable_list::  @cindex\s-+zero_mutable_list
+//* zero_static_object_list::  @cindex\s-+zero_static_object_list
+//@end index