X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=120f02a76e25eb5fd5fd5f48303a18da1ad17c2b;hb=da47a29433290180fd15e39a13ab3bdfacaeea1b;hp=6383560c7cbda1eb6ab73d636ea3e36973d8e44a;hpb=0b0ee1f3d33b6c4d938aa8420749823e6aa5eb77;p=ghc-hetmet.git diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c index 6383560..120f02a 100644 --- a/ghc/rts/GC.c +++ b/ghc/rts/GC.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: GC.c,v 1.75 2000/03/23 14:30:13 simonmar Exp $ + * $Id: GC.c,v 1.102 2001/04/03 16:35:12 sewardj Exp $ * * (c) The GHC Team 1998-1999 * @@ -37,12 +37,14 @@ #include "Sanity.h" #include "GC.h" #include "BlockAlloc.h" +#include "MBlock.h" #include "Main.h" #include "ProfHeap.h" #include "SchedAPI.h" #include "Weak.h" #include "StablePriv.h" #include "Prelude.h" +#include "ParTicky.h" // ToDo: move into Rts.h #if defined(GRAN) || defined(PAR) # include "GranSimRts.h" # include "ParallelRts.h" @@ -52,8 +54,11 @@ # include "ParallelDebug.h" # endif #endif - -StgCAF* enteredCAFs; +#include "HsFFI.h" +#include "Linker.h" +#if defined(RTS_GTK_FRONTPANEL) +#include "FrontPanel.h" +#endif //@node STATIC OBJECT LIST, Static function declarations, Includes //@subsection STATIC OBJECT LIST @@ -128,12 +133,16 @@ static rtsBool failed_to_evac; */ bdescr *old_to_space; - /* Data used for allocation area sizing. */ lnat new_blocks; /* blocks allocated during this GC */ lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */ +/* Used to avoid long recursion due to selector thunks + */ +lnat thunk_selector_depth = 0; +#define MAX_THUNK_SELECTOR_DEPTH 256 + //@node Static function declarations, Garbage Collect, STATIC OBJECT LIST //@subsection Static function declarations @@ -144,14 +153,13 @@ lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */ static StgClosure * evacuate ( StgClosure *q ); static void zero_static_object_list ( StgClosure* first_static ); static void zero_mutable_list ( StgMutClosure *first ); -static void revert_dead_CAFs ( void ); static rtsBool traverse_weak_ptr_list ( void ); static void cleanup_weak_ptr_list ( StgWeak **list ); static void scavenge_stack ( StgPtr p, StgPtr stack_end ); -static void scavenge_large ( step *step ); -static void scavenge ( step *step ); +static void scavenge_large ( step * ); +static void scavenge ( step * ); static void scavenge_static ( void ); static void scavenge_mutable_list ( generation *g ); static void scavenge_mut_once_list ( generation *g ); @@ -160,6 +168,9 @@ static void scavenge_mut_once_list ( generation *g ); static void gcCAFs ( void ); #endif +void revertCAFs ( void ); +void scavengeCAFs ( void ); + //@node Garbage Collect, Weak Pointers, Static function declarations //@subsection Garbage Collect @@ -187,10 +198,10 @@ static void gcCAFs ( void ); -------------------------------------------------------------------------- */ //@cindex GarbageCollect -void GarbageCollect(void (*get_roots)(void)) +void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) { bdescr *bd; - step *step; + step *stp; lnat live, allocated, collected = 0, copied = 0; nat g, s; @@ -200,36 +211,52 @@ void GarbageCollect(void (*get_roots)(void)) #if defined(DEBUG) && defined(GRAN) IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n", - Now, Now)) + Now, Now)); #endif /* tell the stats department that we've started a GC */ stat_startGC(); + /* Init stats and print par specific (timing) info */ + PAR_TICKY_PAR_START(); + /* attribute any 
costs to CCS_GC */ #ifdef PROFILING prev_CCS = CCCS; CCCS = CCS_GC; #endif - /* Approximate how much we allocated */ + /* Approximate how much we allocated. + * Todo: only when generating stats? + */ allocated = calcAllocated(); /* Figure out which generation to collect */ - N = 0; - for (g = 0; g < RtsFlags.GcFlags.generations; g++) { - if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) { - N = g; + if (force_major_gc) { + N = RtsFlags.GcFlags.generations - 1; + major_gc = rtsTrue; + } else { + N = 0; + for (g = 0; g < RtsFlags.GcFlags.generations; g++) { + if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) { + N = g; + } } + major_gc = (N == RtsFlags.GcFlags.generations-1); + } + +#ifdef RTS_GTK_FRONTPANEL + if (RtsFlags.GcFlags.frontpanel) { + updateFrontPanelBeforeGC(N); } - major_gc = (N == RtsFlags.GcFlags.generations-1); +#endif /* check stack sanity *before* GC (ToDo: check all threads) */ #if defined(GRAN) // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity()); #endif - IF_DEBUG(sanity, checkFreeListSanity()); + IF_DEBUG(sanity, checkFreeListSanity()); /* Initialise the static object lists */ @@ -273,25 +300,25 @@ void GarbageCollect(void (*get_roots)(void)) * as necessary. */ bd = allocBlock(); - step = &generations[g].steps[s]; - ASSERT(step->gen->no == g); - ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue); + stp = &generations[g].steps[s]; + ASSERT(stp->gen->no == g); + ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue); bd->gen = &generations[g]; - bd->step = step; + bd->step = stp; bd->link = NULL; bd->evacuated = 1; /* it's a to-space block */ - step->hp = bd->start; - step->hpLim = step->hp + BLOCK_SIZE_W; - step->hp_bd = bd; - step->to_space = bd; - step->to_blocks = 1; - step->scan = bd->start; - step->scan_bd = bd; - step->new_large_objects = NULL; - step->scavenged_large_objects = NULL; + stp->hp = bd->start; + stp->hpLim = stp->hp + BLOCK_SIZE_W; + stp->hp_bd = bd; + stp->to_space = bd; + stp->to_blocks = 1; + stp->scan = bd->start; + stp->scan_bd = bd; + stp->new_large_objects = NULL; + stp->scavenged_large_objects = NULL; new_blocks++; /* mark the large objects as not evacuated yet */ - for (bd = step->large_objects; bd; bd = bd->link) { + for (bd = stp->large_objects; bd; bd = bd->link) { bd->evacuated = 0; } } @@ -302,28 +329,28 @@ void GarbageCollect(void (*get_roots)(void)) */ for (g = N+1; g < RtsFlags.GcFlags.generations; g++) { for (s = 0; s < generations[g].n_steps; s++) { - step = &generations[g].steps[s]; - if (step->hp_bd == NULL) { + stp = &generations[g].steps[s]; + if (stp->hp_bd == NULL) { bd = allocBlock(); bd->gen = &generations[g]; - bd->step = step; + bd->step = stp; bd->link = NULL; bd->evacuated = 0; /* *not* a to-space block */ - step->hp = bd->start; - step->hpLim = step->hp + BLOCK_SIZE_W; - step->hp_bd = bd; - step->blocks = bd; - step->n_blocks = 1; + stp->hp = bd->start; + stp->hpLim = stp->hp + BLOCK_SIZE_W; + stp->hp_bd = bd; + stp->blocks = bd; + stp->n_blocks = 1; new_blocks++; } /* Set the scan pointer for older generations: remember we * still have to scavenge objects that have been promoted. 
*/ - step->scan = step->hp; - step->scan_bd = step->hp_bd; - step->to_space = NULL; - step->to_blocks = 0; - step->new_large_objects = NULL; - step->scavenged_large_objects = NULL; + stp->scan = stp->hp; + stp->scan_bd = stp->hp_bd; + stp->to_space = NULL; + stp->to_blocks = 0; + stp->new_large_objects = NULL; + stp->scavenged_large_objects = NULL; } } @@ -368,6 +395,8 @@ void GarbageCollect(void (*get_roots)(void)) } } + scavengeCAFs(); + /* follow all the roots that the application knows about. */ evac_gen = 0; @@ -384,6 +413,8 @@ void GarbageCollect(void (*get_roots)(void)) /* Mark the entries in the GALA table of the parallel system */ markLocalGAs(major_gc); + /* Mark all entries on the list of pending fetches */ + markPendingFetches(major_gc); #endif /* Mark the weak pointer list, and prepare to detect dead weak @@ -426,6 +457,8 @@ void GarbageCollect(void (*get_roots)(void)) /* scavenge static objects */ if (major_gc && static_objects != END_OF_STATIC_LIST) { + IF_DEBUG(sanity, + checkStaticObjects()); scavenge_static(); } @@ -447,15 +480,15 @@ void GarbageCollect(void (*get_roots)(void)) if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) { continue; } - step = &generations[gen].steps[st]; + stp = &generations[gen].steps[st]; evac_gen = gen; - if (step->hp_bd != step->scan_bd || step->scan < step->hp) { - scavenge(step); + if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) { + scavenge(stp); flag = rtsTrue; goto loop2; } - if (step->new_large_objects != NULL) { - scavenge_large(step); + if (stp->new_large_objects != NULL) { + scavenge_large(stp); flag = rtsTrue; goto loop2; } @@ -479,9 +512,13 @@ void GarbageCollect(void (*get_roots)(void)) */ gcStablePtrTable(major_gc); - /* revert dead CAFs and update enteredCAFs list */ - revert_dead_CAFs(); - +#if defined(PAR) + /* Reconstruct the Global Address tables used in GUM */ + rebuildGAtables(major_gc); + IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/)); + IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/)); +#endif + /* Set the maximum blocks for the oldest generation, based on twice * the amount of live data now, adjusted to fit the maximum heap * size if necessary. @@ -518,35 +555,35 @@ void GarbageCollect(void (*get_roots)(void)) for (s = 0; s < generations[g].n_steps; s++) { bdescr *next; - step = &generations[g].steps[s]; + stp = &generations[g].steps[s]; if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) { /* Tidy the end of the to-space chains */ - step->hp_bd->free = step->hp; - step->hp_bd->link = NULL; + stp->hp_bd->free = stp->hp; + stp->hp_bd->link = NULL; /* stats information: how much we copied */ if (g <= N) { - copied -= step->hp_bd->start + BLOCK_SIZE_W - - step->hp_bd->free; + copied -= stp->hp_bd->start + BLOCK_SIZE_W - + stp->hp_bd->free; } } /* for generations we collected... */ if (g <= N) { - collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */ + collected += stp->n_blocks * BLOCK_SIZE_W; /* for stats */ /* free old memory and shift to-space into from-space for all * the collected steps (except the allocation area). These * freed blocks will probaby be quickly recycled. 
*/ if (!(g == 0 && s == 0)) { - freeChain(step->blocks); - step->blocks = step->to_space; - step->n_blocks = step->to_blocks; - step->to_space = NULL; - step->to_blocks = 0; - for (bd = step->blocks; bd != NULL; bd = bd->link) { + freeChain(stp->blocks); + stp->blocks = stp->to_space; + stp->n_blocks = stp->to_blocks; + stp->to_space = NULL; + stp->to_blocks = 0; + for (bd = stp->blocks; bd != NULL; bd = bd->link) { bd->evacuated = 0; /* now from-space */ } } @@ -556,15 +593,15 @@ void GarbageCollect(void (*get_roots)(void)) * collection from large_objects. Any objects left on * large_objects list are therefore dead, so we free them here. */ - for (bd = step->large_objects; bd != NULL; bd = next) { + for (bd = stp->large_objects; bd != NULL; bd = next) { next = bd->link; freeGroup(bd); bd = next; } - for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) { + for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) { bd->evacuated = 0; } - step->large_objects = step->scavenged_large_objects; + stp->large_objects = stp->scavenged_large_objects; /* Set the maximum blocks for this generation, interpolating * between the maximum size of the oldest and youngest @@ -589,14 +626,14 @@ void GarbageCollect(void (*get_roots)(void)) * scavenged_large_object list (i.e. large objects that have been * promoted during this GC) to the large_object list for that step. */ - for (bd = step->scavenged_large_objects; bd; bd = next) { + for (bd = stp->scavenged_large_objects; bd; bd = next) { next = bd->link; bd->evacuated = 0; - dbl_link_onto(bd, &step->large_objects); + dbl_link_onto(bd, &stp->large_objects); } /* add the new blocks we promoted during this GC */ - step->n_blocks += step->to_blocks; + stp->n_blocks += stp->to_blocks; } } } @@ -711,9 +748,11 @@ void GarbageCollect(void (*get_roots)(void)) } /* mark the garbage collected CAFs as dead */ -#ifdef DEBUG +#if 0 /* doesn't work at the moment */ +#if defined(DEBUG) if (major_gc) { gcCAFs(); } #endif +#endif /* zero the scavenged static object list */ if (major_gc) { @@ -724,11 +763,6 @@ void GarbageCollect(void (*get_roots)(void)) */ resetNurseries(); -#if defined(PAR) - /* Reconstruct the Global Address tables used in GUM */ - RebuildGAtables(major_gc); -#endif - /* start any pending finalizers */ scheduleFinalizers(old_weak_ptr_list); @@ -755,8 +789,16 @@ void GarbageCollect(void (*get_roots)(void)) /* check for memory leaks if sanity checking is on */ IF_DEBUG(sanity, memInventory()); +#ifdef RTS_GTK_FRONTPANEL + if (RtsFlags.GcFlags.frontpanel) { + updateFrontPanelAfterGC( N, live ); + } +#endif + /* ok, GC over: tell the stats department what happened. */ stat_endGC(allocated, collected, live, copied, N); + + //PAR_TICKY_TP(); } //@node Weak Pointers, Evacuation, Garbage Collect @@ -811,7 +853,7 @@ traverse_weak_ptr_list(void) /* There might be a DEAD_WEAK on the list if finalizeWeak# was * called on a live weak pointer object. Just remove it. */ - if (w->header.info == &DEAD_WEAK_info) { + if (w->header.info == &stg_DEAD_WEAK_info) { next_w = ((StgDeadWeak *)w)->link; *last_w = next_w; continue; @@ -857,12 +899,16 @@ traverse_weak_ptr_list(void) * the list. 
*/ switch (t->what_next) { + case ThreadRelocated: + next = t->link; + *prev = next; + continue; case ThreadKilled: case ThreadComplete: - next = t->global_link; - *prev = next; - continue; - default: + next = t->global_link; + *prev = next; + continue; + default: ; } /* Threads which have already been determined to be alive are @@ -967,14 +1013,10 @@ isAlive(StgClosure *p) * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs. */ -#if 1 || !defined(PAR) /* ignore closures in generations that we're not collecting. */ - /* In GUM we use this routine when rebuilding GA tables; for some - reason it has problems with the LOOKS_LIKE_STATIC macro -- HWL */ if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) { return p; } -#endif switch (info->type) { @@ -991,10 +1033,6 @@ isAlive(StgClosure *p) /* alive! */ return ((StgEvacuated *)p)->evacuee; - case BCO: - size = bco_sizeW((StgBCO*)p); - goto large; - case ARR_WORDS: size = arr_words_sizeW((StgArrWords *)p); goto large; @@ -1029,28 +1067,35 @@ isAlive(StgClosure *p) StgClosure * MarkRoot(StgClosure *root) { +# if 0 && defined(PAR) && defined(DEBUG) + StgClosure *foo = evacuate(root); + // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated); + ASSERT(isAlive(foo)); // must be in to-space + return foo; +# else return evacuate(root); +# endif } //@cindex addBlock -static void addBlock(step *step) +static void addBlock(step *stp) { bdescr *bd = allocBlock(); - bd->gen = step->gen; - bd->step = step; + bd->gen = stp->gen; + bd->step = stp; - if (step->gen->no <= N) { + if (stp->gen->no <= N) { bd->evacuated = 1; } else { bd->evacuated = 0; } - step->hp_bd->free = step->hp; - step->hp_bd->link = bd; - step->hp = bd->start; - step->hpLim = step->hp + BLOCK_SIZE_W; - step->hp_bd = bd; - step->to_blocks++; + stp->hp_bd->free = stp->hp; + stp->hp_bd->link = bd; + stp->hp = bd->start; + stp->hpLim = stp->hp + BLOCK_SIZE_W; + stp->hp_bd = bd; + stp->to_blocks++; new_blocks++; } @@ -1059,14 +1104,14 @@ static void addBlock(step *step) static __inline__ void upd_evacuee(StgClosure *p, StgClosure *dest) { - p->header.info = &EVACUATED_info; + p->header.info = &stg_EVACUATED_info; ((StgEvacuated *)p)->evacuee = dest; } //@cindex copy static __inline__ StgClosure * -copy(StgClosure *src, nat size, step *step) +copy(StgClosure *src, nat size, step *stp) { P_ to, from, dest; @@ -1076,27 +1121,27 @@ copy(StgClosure *src, nat size, step *step) * evacuate to an older generation, adjust it here (see comment * by evacuate()). */ - if (step->gen->no < evac_gen) { + if (stp->gen->no < evac_gen) { #ifdef NO_EAGER_PROMOTION failed_to_evac = rtsTrue; #else - step = &generations[evac_gen].steps[0]; + stp = &generations[evac_gen].steps[0]; #endif } /* chain a new block onto the to-space for the destination step if * necessary. 
*/ - if (step->hp + size >= step->hpLim) { - addBlock(step); + if (stp->hp + size >= stp->hpLim) { + addBlock(stp); } - for(to = step->hp, from = (P_)src; size>0; --size) { + for(to = stp->hp, from = (P_)src; size>0; --size) { *to++ = *from++; } - dest = step->hp; - step->hp = to; + dest = stp->hp; + stp->hp = to; upd_evacuee(src,(StgClosure *)dest); return (StgClosure *)dest; } @@ -1109,29 +1154,29 @@ copy(StgClosure *src, nat size, step *step) //@cindex copyPart static __inline__ StgClosure * -copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step) +copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) { P_ dest, to, from; TICK_GC_WORDS_COPIED(size_to_copy); - if (step->gen->no < evac_gen) { + if (stp->gen->no < evac_gen) { #ifdef NO_EAGER_PROMOTION failed_to_evac = rtsTrue; #else - step = &generations[evac_gen].steps[0]; + stp = &generations[evac_gen].steps[0]; #endif } - if (step->hp + size_to_reserve >= step->hpLim) { - addBlock(step); + if (stp->hp + size_to_reserve >= stp->hpLim) { + addBlock(stp); } - for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) { + for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) { *to++ = *from++; } - dest = step->hp; - step->hp += size_to_reserve; + dest = stp->hp; + stp->hp += size_to_reserve; upd_evacuee(src,(StgClosure *)dest); return (StgClosure *)dest; } @@ -1156,7 +1201,7 @@ static inline void evacuate_large(StgPtr p, rtsBool mutable) { bdescr *bd = Bdescr(p); - step *step; + step *stp; /* should point to the beginning of the block */ ASSERT(((W_)p & BLOCK_MASK) == 0); @@ -1173,12 +1218,12 @@ evacuate_large(StgPtr p, rtsBool mutable) return; } - step = bd->step; + stp = bd->step; /* remove from large_object list */ if (bd->back) { bd->back->link = bd->link; } else { /* first object in the list */ - step->large_objects = bd->link; + stp->large_objects = bd->link; } if (bd->link) { bd->link->back = bd->back; @@ -1186,19 +1231,19 @@ evacuate_large(StgPtr p, rtsBool mutable) /* link it on to the evacuated large object list of the destination step */ - step = bd->step->to; - if (step->gen->no < evac_gen) { + stp = bd->step->to; + if (stp->gen->no < evac_gen) { #ifdef NO_EAGER_PROMOTION failed_to_evac = rtsTrue; #else - step = &generations[evac_gen].steps[0]; + stp = &generations[evac_gen].steps[0]; #endif } - bd->step = step; - bd->gen = step->gen; - bd->link = step->new_large_objects; - step->new_large_objects = bd; + bd->step = stp; + bd->gen = stp->gen; + bd->link = stp->new_large_objects; + stp->new_large_objects = bd; bd->evacuated = 1; if (mutable) { @@ -1220,21 +1265,21 @@ static StgClosure * mkMutCons(StgClosure *ptr, generation *gen) { StgMutVar *q; - step *step; + step *stp; - step = &gen->steps[0]; + stp = &gen->steps[0]; /* chain a new block onto the to-space for the destination step if * necessary. 
*/ - if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) { - addBlock(step); + if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) { + addBlock(stp); } - q = (StgMutVar *)step->hp; - step->hp += sizeofW(StgMutVar); + q = (StgMutVar *)stp->hp; + stp->hp += sizeofW(StgMutVar); - SET_HDR(q,&MUT_CONS_info,CCS_GC); + SET_HDR(q,&stg_MUT_CONS_info,CCS_GC); q->var = ptr; recordOldToNewPtrs((StgMutClosure *)q); @@ -1272,7 +1317,7 @@ evacuate(StgClosure *q) { StgClosure *to; bdescr *bd = NULL; - step *step; + step *stp; const StgInfoTable *info; loop: @@ -1290,10 +1335,10 @@ loop: } return q; } - step = bd->step->to; + stp = bd->step->to; } #ifdef DEBUG - else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */ + else stp = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */ #endif /* make sure the info pointer is into text space */ @@ -1311,32 +1356,32 @@ loop: switch (info -> type) { - case BCO: - { - nat size = bco_sizeW((StgBCO*)q); - - if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { - evacuate_large((P_)q, rtsFalse); - to = q; - } else { - /* just copy the block */ - to = copy(q,size,step); - } - return to; - } - case MUT_VAR: - ASSERT(q->header.info != &MUT_CONS_info); + ASSERT(q->header.info != &stg_MUT_CONS_info); case MVAR: - to = copy(q,sizeW_fromITBL(info),step); + to = copy(q,sizeW_fromITBL(info),stp); recordMutable((StgMutClosure *)to); return to; + case CONSTR_0_1: + { + StgWord w = (StgWord)q->payload[0]; + if (q->header.info == Czh_con_info && + /* unsigned, so always true: (StgChar)w >= MIN_CHARLIKE && */ + (StgChar)w <= MAX_CHARLIKE) { + return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w); + } + if (q->header.info == Izh_con_info && + (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) { + return (StgClosure *)INTLIKE_CLOSURE((StgInt)w); + } + /* else, fall through ... 
*/ + } + case FUN_1_0: case FUN_0_1: case CONSTR_1_0: - case CONSTR_0_1: - return copy(q,sizeofW(StgHeader)+1,step); + return copy(q,sizeofW(StgHeader)+1,stp); case THUNK_1_0: /* here because of MIN_UPD_SIZE */ case THUNK_0_1: @@ -1347,10 +1392,10 @@ loop: if (bd->gen->no == 0 && bd->step->no != 0 && bd->step->no == bd->gen->n_steps-1) { - step = bd->step; + stp = bd->step; } #endif - return copy(q,sizeofW(StgHeader)+2,step); + return copy(q,sizeofW(StgHeader)+2,stp); case FUN_1_1: case FUN_0_2: @@ -1358,28 +1403,27 @@ loop: case CONSTR_1_1: case CONSTR_0_2: case CONSTR_2_0: - return copy(q,sizeofW(StgHeader)+2,step); + return copy(q,sizeofW(StgHeader)+2,stp); case FUN: case THUNK: case CONSTR: case IND_PERM: case IND_OLDGEN_PERM: - case CAF_UNENTERED: - case CAF_ENTERED: case WEAK: case FOREIGN: case STABLE_NAME: - return copy(q,sizeW_fromITBL(info),step); + case BCO: + return copy(q,sizeW_fromITBL(info),stp); case CAF_BLACKHOLE: case SE_CAF_BLACKHOLE: case SE_BLACKHOLE: case BLACKHOLE: - return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step); + return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp); case BLACKHOLE_BQ: - to = copy(q,BLACKHOLE_sizeW(),step); + to = copy(q,BLACKHOLE_sizeW(),stp); recordMutable((StgMutClosure *)to); return to; @@ -1438,14 +1482,34 @@ loop: selectee = ((StgInd *)selectee)->indirectee; goto selector_loop; - case CAF_ENTERED: - selectee = ((StgCAF *)selectee)->value; - goto selector_loop; - case EVACUATED: selectee = ((StgEvacuated *)selectee)->evacuee; goto selector_loop; + case THUNK_SELECTOR: +# if 0 + /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or + something) to go into an infinite loop when the nightly + stage2 compiles PrelTup.lhs. */ + + /* we can't recurse indefinitely in evacuate(), so set a + * limit on the number of times we can go around this + * loop. + */ + if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) { + bdescr *bd; + bd = Bdescr((P_)selectee); + if (!bd->evacuated) { + thunk_selector_depth++; + selectee = evacuate(selectee); + thunk_selector_depth--; + goto selector_loop; + } + } + /* otherwise, fall through... */ +# endif + + case AP_UPD: case THUNK: case THUNK_1_0: case THUNK_0_1: @@ -1453,9 +1517,6 @@ loop: case THUNK_1_1: case THUNK_0_2: case THUNK_STATIC: - case THUNK_SELECTOR: - /* aargh - do recursively???? */ - case CAF_UNENTERED: case CAF_BLACKHOLE: case SE_CAF_BLACKHOLE: case SE_BLACKHOLE: @@ -1464,12 +1525,43 @@ loop: /* not evaluated yet */ break; +#if defined(PAR) + /* a copy of the top-level cases below */ + case RBH: // cf. 
BLACKHOLE_BQ + { + //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str); + to = copy(q,BLACKHOLE_sizeW(),stp); + //ToDo: derive size etc from reverted IP + //to = copy(q,size,stp); + // recordMutable((StgMutClosure *)to); + return to; + } + + case BLOCKED_FETCH: + ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE); + to = copy(q,sizeofW(StgBlockedFetch),stp); + return to; + +# ifdef DIST + case REMOTE_REF: +# endif + case FETCH_ME: + ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); + to = copy(q,sizeofW(StgFetchMe),stp); + return to; + + case FETCH_ME_BQ: + ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); + to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp); + return to; +#endif + default: barf("evacuate: THUNK_SELECTOR: strange selectee %d", (int)(selectee_info->type)); } } - return copy(q,THUNK_SELECTOR_sizeW(),step); + return copy(q,THUNK_SELECTOR_sizeW(),stp); case IND: case IND_OLDGEN: @@ -1494,9 +1586,15 @@ loop: return q; case IND_STATIC: - if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) { - IND_STATIC_LINK((StgClosure *)q) = static_objects; - static_objects = (StgClosure *)q; + /* If q->saved_info != NULL, then it's a revertible CAF - it'll be + * on the CAF list, so don't do anything with it here (we'll + * scavenge it later). + */ + if (major_gc + && ((StgIndStatic *)q)->saved_info == NULL + && IND_STATIC_LINK((StgClosure *)q) == NULL) { + IND_STATIC_LINK((StgClosure *)q) = static_objects; + static_objects = (StgClosure *)q; } return q; @@ -1530,9 +1628,22 @@ loop: case AP_UPD: case PAP: - /* these are special - the payload is a copy of a chunk of stack, - tagging and all. */ - return copy(q,pap_sizeW((StgPAP *)q),step); + /* PAPs and AP_UPDs are special - the payload is a copy of a chunk + * of stack, tagging and all. + * + * They can be larger than a block in size. Both are only + * allocated via allocate(), so they should be chained on to the + * large_object list. + */ + { + nat size = pap_sizeW((StgPAP*)q); + if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { + evacuate_large((P_)q, rtsFalse); + return q; + } else { + return copy(q,size,stp); + } + } case EVACUATED: /* Already evacuated, just return the forwarding address. @@ -1561,7 +1672,7 @@ loop: return q; } else { /* just copy the block */ - return copy(q,size,step); + return copy(q,size,stp); } } @@ -1575,7 +1686,7 @@ loop: to = q; } else { /* just copy the block */ - to = copy(q,size,step); + to = copy(q,size,stp); if (info->type == MUT_ARR_PTRS) { recordMutable((StgMutClosure *)to); } @@ -1606,14 +1717,13 @@ loop: * list it contains. */ } else { - StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step); + StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp); diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */ /* relocate the stack pointers... */ new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff); new_tso->sp = (StgPtr)new_tso->sp + diff; - new_tso->splim = (StgPtr)new_tso->splim + diff; relocate_TSO(tso, new_tso); @@ -1626,9 +1736,9 @@ loop: case RBH: // cf. 
BLACKHOLE_BQ { //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str); - to = copy(q,BLACKHOLE_sizeW(),step); + to = copy(q,BLACKHOLE_sizeW(),stp); //ToDo: derive size etc from reverted IP - //to = copy(q,size,step); + //to = copy(q,size,stp); recordMutable((StgMutClosure *)to); IF_DEBUG(gc, belch("@@ evacuate: RBH %p (%s) to %p (%s)", @@ -1638,15 +1748,18 @@ loop: case BLOCKED_FETCH: ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE); - to = copy(q,sizeofW(StgBlockedFetch),step); + to = copy(q,sizeofW(StgBlockedFetch),stp); IF_DEBUG(gc, belch("@@ evacuate: %p (%s) to %p (%s)", q, info_type(q), to, info_type(to))); return to; +# ifdef DIST + case REMOTE_REF: +# endif case FETCH_ME: ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); - to = copy(q,sizeofW(StgFetchMe),step); + to = copy(q,sizeofW(StgFetchMe),stp); IF_DEBUG(gc, belch("@@ evacuate: %p (%s) to %p (%s)", q, info_type(q), to, info_type(to))); @@ -1654,7 +1767,7 @@ loop: case FETCH_ME_BQ: ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); - to = copy(q,sizeofW(StgFetchMeBlockingQueue),step); + to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp); IF_DEBUG(gc, belch("@@ evacuate: %p (%s) to %p (%s)", q, info_type(q), to, info_type(to))); @@ -1770,7 +1883,12 @@ scavengeTSO (StgTSO *tso) (StgClosure *)tso->link = evacuate((StgClosure *)tso->link); if ( tso->why_blocked == BlockedOnMVar || tso->why_blocked == BlockedOnBlackHole - || tso->why_blocked == BlockedOnException) { + || tso->why_blocked == BlockedOnException +#if defined(PAR) + || tso->why_blocked == BlockedOnGA + || tso->why_blocked == BlockedOnGA_NoSend +#endif + ) { tso->block_info.closure = evacuate(tso->block_info.closure); } if ( tso->blocked_exceptions != NULL ) { @@ -1796,15 +1914,15 @@ scavengeTSO (StgTSO *tso) //@cindex scavenge static void -scavenge(step *step) +scavenge(step *stp) { StgPtr p, q; const StgInfoTable *info; bdescr *bd; nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */ - p = step->scan; - bd = step->scan_bd; + p = stp->scan; + bd = stp->scan_bd; failed_to_evac = rtsFalse; @@ -1812,10 +1930,10 @@ scavenge(step *step) * evacuated objects */ - while (bd != step->hp_bd || p < step->hp) { + while (bd != stp->hp_bd || p < stp->hp) { /* If we're at the end of this block, move on to the next block */ - if (bd != step->hp_bd && p == bd->free) { + if (bd != stp->hp_bd && p == bd->free) { bd = bd->link; p = bd->start; continue; @@ -1834,17 +1952,6 @@ scavenge(step *step) switch (info -> type) { - case BCO: - { - StgBCO* bco = (StgBCO *)p; - nat i; - for (i = 0; i < bco->n_ptrs; i++) { - bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i)); - } - p += bco_sizeW(bco); - break; - } - case MVAR: /* treat MVars specially, because we don't want to evacuate the * mut_link field in the middle of the closure. 
@@ -1917,6 +2024,7 @@ scavenge(step *step) case WEAK: case FOREIGN: case STABLE_NAME: + case BCO: { StgPtr end; @@ -1929,8 +2037,8 @@ scavenge(step *step) } case IND_PERM: - if (step->gen->no != 0) { - SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info); + if (stp->gen->no != 0) { + SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); } /* fall through */ case IND_OLDGEN_PERM: @@ -1943,40 +2051,9 @@ scavenge(step *step) p += sizeofW(StgIndOldGen); break; - case CAF_UNENTERED: - { - StgCAF *caf = (StgCAF *)p; - - caf->body = evacuate(caf->body); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordOldToNewPtrs((StgMutClosure *)p); - } else { - caf->mut_link = NULL; - } - p += sizeofW(StgCAF); - break; - } - - case CAF_ENTERED: - { - StgCAF *caf = (StgCAF *)p; - - caf->body = evacuate(caf->body); - caf->value = evacuate(caf->value); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordOldToNewPtrs((StgMutClosure *)p); - } else { - caf->mut_link = NULL; - } - p += sizeofW(StgCAF); - break; - } - case MUT_VAR: /* ignore MUT_CONSs */ - if (((StgMutVar *)p)->header.info != &MUT_CONS_info) { + if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) { evac_gen = 0; ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); evac_gen = saved_evac_gen; @@ -2141,10 +2218,10 @@ scavenge(step *step) break; } +#ifdef DIST + case REMOTE_REF: +#endif case FETCH_ME: - IF_DEBUG(gc, - belch("@@ scavenge: HWL claims nothing to do for %p (%s)", - p, info_type((StgClosure *)p))); p += sizeofW(StgFetchMe); break; // nothing to do in this case @@ -2166,10 +2243,12 @@ scavenge(step *step) #endif case EVACUATED: - barf("scavenge: unimplemented/strange closure type\n"); + barf("scavenge: unimplemented/strange closure type %d @ %p", + info->type, p); default: - barf("scavenge"); + barf("scavenge: unimplemented/strange closure type %d @ %p", + info->type, p); } /* If we didn't manage to promote all the objects pointed to by @@ -2182,8 +2261,8 @@ scavenge(step *step) } } - step->scan_bd = bd; - step->scan = p; + stp->scan_bd = bd; + stp->scan = p; } /* ----------------------------------------------------------------------------- @@ -2235,7 +2314,6 @@ scavenge_one(StgClosure *p) case FOREIGN: case IND_PERM: case IND_OLDGEN_PERM: - case CAF_UNENTERED: { StgPtr q, end; @@ -2281,7 +2359,7 @@ scavenge_one(StgClosure *p) break; default: - barf("scavenge_one: strange object"); + barf("scavenge_one: strange object %d", (int)(info->type)); } no_luck = failed_to_evac; @@ -2386,7 +2464,7 @@ scavenge_mut_once_list(generation *gen) * it from the mutable list if possible by promoting whatever it * points to. */ - ASSERT(p->header.info == &MUT_CONS_info); + ASSERT(p->header.info == &stg_MUT_CONS_info); if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) { /* didn't manage to promote everything, so put the * MUT_CONS back on the list. 
@@ -2396,35 +2474,6 @@ scavenge_mut_once_list(generation *gen) } continue; - case CAF_ENTERED: - { - StgCAF *caf = (StgCAF *)p; - caf->body = evacuate(caf->body); - caf->value = evacuate(caf->value); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - p->mut_link = new_list; - new_list = p; - } else { - p->mut_link = NULL; - } - } - continue; - - case CAF_UNENTERED: - { - StgCAF *caf = (StgCAF *)p; - caf->body = evacuate(caf->body); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - p->mut_link = new_list; - new_list = p; - } else { - p->mut_link = NULL; - } - } - continue; - default: /* shouldn't have anything else on the mutables list */ barf("scavenge_mut_once_list: strange object? %d", (int)(info->type)); @@ -2468,10 +2517,6 @@ scavenge_mutable_list(generation *gen) { StgPtr end, q; - IF_DEBUG(gc, - belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS_FROZEN %p; size: %#x ; next: %p", - p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link)); - end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); evac_gen = gen->no; for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { @@ -2494,10 +2539,6 @@ scavenge_mutable_list(generation *gen) { StgPtr end, q; - IF_DEBUG(gc, - belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS %p; size: %#x ; next: %p", - p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link)); - end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { (StgClosure *)*q = evacuate((StgClosure *)*q); @@ -2510,11 +2551,7 @@ scavenge_mutable_list(generation *gen) * it from the mutable list if possible by promoting whatever it * points to. */ - IF_DEBUG(gc, - belch("@@ scavenge_mut_list: scavenging MUT_VAR %p; var: %p ; next: %p", - p, ((StgMutVar *)p)->var, p->mut_link)); - - ASSERT(p->header.info != &MUT_CONS_info); + ASSERT(p->header.info != &stg_MUT_CONS_info); ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); p->mut_link = gen->mut_list; gen->mut_list = p; @@ -2523,11 +2560,6 @@ scavenge_mutable_list(generation *gen) case MVAR: { StgMVar *mvar = (StgMVar *)p; - - IF_DEBUG(gc, - belch("@@ scavenge_mut_list: scavenging MAVR %p; head: %p; tail: %p; value: %p ; next: %p", - mvar, mvar->head, mvar->tail, mvar->value, p->mut_link)); - (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); @@ -2554,11 +2586,6 @@ scavenge_mutable_list(generation *gen) case BLACKHOLE_BQ: { StgBlockingQueue *bh = (StgBlockingQueue *)p; - - IF_DEBUG(gc, - belch("@@ scavenge_mut_list: scavenging BLACKHOLE_BQ (%p); next: %p", - p, p->mut_link)); - (StgClosure *)bh->blocking_queue = evacuate((StgClosure *)bh->blocking_queue); p->mut_link = gen->mut_list; @@ -2587,7 +2614,64 @@ scavenge_mutable_list(generation *gen) } continue; - // HWL: old PAR code deleted here +#if defined(PAR) + // HWL: check whether all of these are necessary + + case RBH: // cf. BLACKHOLE_BQ + { + // nat size, ptrs, nonptrs, vhs; + // char str[80]; + // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str); + StgRBH *rbh = (StgRBH *)p; + (StgClosure *)rbh->blocking_queue = + evacuate((StgClosure *)rbh->blocking_queue); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)rbh); + } + // ToDo: use size of reverted closure here! 
+ p += BLACKHOLE_sizeW(); + break; + } + + case BLOCKED_FETCH: + { + StgBlockedFetch *bf = (StgBlockedFetch *)p; + /* follow the pointer to the node which is being demanded */ + (StgClosure *)bf->node = + evacuate((StgClosure *)bf->node); + /* follow the link to the rest of the blocking queue */ + (StgClosure *)bf->link = + evacuate((StgClosure *)bf->link); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)bf); + } + p += sizeofW(StgBlockedFetch); + break; + } + +#ifdef DIST + case REMOTE_REF: + barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type)); +#endif + case FETCH_ME: + p += sizeofW(StgFetchMe); + break; // nothing to do in this case + + case FETCH_ME_BQ: // cf. BLACKHOLE_BQ + { + StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; + (StgClosure *)fmbq->blocking_queue = + evacuate((StgClosure *)fmbq->blocking_queue); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)fmbq); + } + p += sizeofW(StgFetchMeBlockingQueue); + break; + } +#endif default: /* shouldn't have anything else on the mutables list */ @@ -2652,7 +2736,7 @@ scavenge_static(void) case THUNK_STATIC: case FUN_STATIC: scavenge_srt(info); - /* fall through */ + break; case CONSTR_STATIC: { @@ -2667,12 +2751,12 @@ scavenge_static(void) } default: - barf("scavenge_static"); + barf("scavenge_static: strange closure %d", (int)(info->type)); } ASSERT(failed_to_evac == rtsFalse); - /* get the next static object from the list. Remeber, there might + /* get the next static object from the list. Remember, there might * be more stuff on this list now that we've done some evacuating! * (static_objects is a global) */ @@ -2694,7 +2778,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) const StgInfoTable* info; StgWord32 bitmap; - IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end)); + //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end)); /* * Each time around this loop, we are looking at a chunk of stack @@ -2773,7 +2857,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) continue; } else { bdescr *bd = Bdescr((P_)frame->updatee); - step *step; + step *stp; if (bd->gen->no > N) { if (bd->gen->no < evac_gen) { failed_to_evac = rtsTrue; @@ -2782,22 +2866,22 @@ scavenge_stack(StgPtr p, StgPtr stack_end) } /* Don't promote blackholes */ - step = bd->step; - if (!(step->gen->no == 0 && - step->no != 0 && - step->no == step->gen->n_steps-1)) { - step = step->to; + stp = bd->step; + if (!(stp->gen->no == 0 && + stp->no != 0 && + stp->no == stp->gen->n_steps-1)) { + stp = stp->to; } switch (type) { case BLACKHOLE: case CAF_BLACKHOLE: to = copyPart(frame->updatee, BLACKHOLE_sizeW(), - sizeofW(StgHeader), step); + sizeofW(StgHeader), stp); frame->updatee = to; continue; case BLACKHOLE_BQ: - to = copy(frame->updatee, BLACKHOLE_sizeW(), step); + to = copy(frame->updatee, BLACKHOLE_sizeW(), stp); frame->updatee = to; recordMutable((StgMutClosure *)to); continue; @@ -2866,7 +2950,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) } default: - barf("scavenge_stack: weird activation record found on stack.\n"); + barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type)); } } } @@ -2882,7 +2966,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) //@cindex scavenge_large static void -scavenge_large(step *step) +scavenge_large(step *stp) { bdescr *bd; StgPtr p; @@ -2890,17 +2974,17 @@ scavenge_large(step *step) nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */ evac_gen = 0; 
/* most objects are mutable */ - bd = step->new_large_objects; + bd = stp->new_large_objects; - for (; bd != NULL; bd = step->new_large_objects) { + for (; bd != NULL; bd = stp->new_large_objects) { /* take this object *off* the large objects list and put it on * the scavenged large objects list. This is so that we can * treat new_large_objects as a stack and push new objects on * the front when evacuating. */ - step->new_large_objects = bd->link; - dbl_link_onto(bd, &step->scavenged_large_objects); + stp->new_large_objects = bd->link; + dbl_link_onto(bd, &stp->scavenged_large_objects); p = bd->start; info = get_itbl((StgClosure *)p); @@ -2942,25 +3026,24 @@ scavenge_large(step *step) continue; } - case BCO: - { - StgBCO* bco = (StgBCO *)p; - nat i; - evac_gen = saved_evac_gen; - for (i = 0; i < bco->n_ptrs; i++) { - bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i)); - } - evac_gen = 0; - continue; - } - case TSO: scavengeTSO((StgTSO *)p); - // HWL: old PAR code deleted here continue; + case AP_UPD: + case PAP: + { + StgPAP* pap = (StgPAP *)p; + + evac_gen = saved_evac_gen; /* not really mutable */ + pap->fun = evacuate(pap->fun); + scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); + evac_gen = 0; + continue; + } + default: - barf("scavenge_large: unknown/strange object"); + barf("scavenge_large: unknown/strange object %d", (int)(info->type)); } } } @@ -2989,7 +3072,6 @@ zero_static_object_list(StgClosure* first_static) * It doesn't do any harm to zero all the mutable link fields on the * mutable list. */ -//@cindex zero_mutable_list static void zero_mutable_list( StgMutClosure *first ) @@ -3002,54 +3084,38 @@ zero_mutable_list( StgMutClosure *first ) } } -//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging -//@subsection Reverting CAFs - /* ----------------------------------------------------------------------------- Reverting CAFs -------------------------------------------------------------------------- */ -//@cindex RevertCAFs -void RevertCAFs(void) +void +revertCAFs( void ) { - while (enteredCAFs != END_CAF_LIST) { - StgCAF* caf = enteredCAFs; - - enteredCAFs = caf->link; - ASSERT(get_itbl(caf)->type == CAF_ENTERED); - SET_INFO(caf,&CAF_UNENTERED_info); - caf->value = (StgClosure *)0xdeadbeef; - caf->link = (StgCAF *)0xdeadbeef; - } - enteredCAFs = END_CAF_LIST; -} + StgIndStatic *c; -//@cindex revert_dead_CAFs + for (c = (StgIndStatic *)caf_list; c != NULL; + c = (StgIndStatic *)c->static_link) + { + c->header.info = c->saved_info; + c->saved_info = NULL; + /* could, but not necessary: c->static_link = NULL; */ + } + caf_list = NULL; +} -void revert_dead_CAFs(void) +void +scavengeCAFs( void ) { - StgCAF* caf = enteredCAFs; - enteredCAFs = END_CAF_LIST; - while (caf != END_CAF_LIST) { - StgCAF *next, *new; - next = caf->link; - new = (StgCAF*)isAlive((StgClosure*)caf); - if (new) { - new->link = enteredCAFs; - enteredCAFs = new; - } else { - /* ASSERT(0); */ - SET_INFO(caf,&CAF_UNENTERED_info); - caf->value = (StgClosure*)0xdeadbeef; - caf->link = (StgCAF*)0xdeadbeef; - } - caf = next; + StgIndStatic *c; + + evac_gen = 0; + for (c = (StgIndStatic *)caf_list; c != NULL; + c = (StgIndStatic *)c->static_link) + { + c->indirectee = evacuate(c->indirectee); } } -//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs -//@subsection Sanity code for CAF garbage collection - /* ----------------------------------------------------------------------------- Sanity code for CAF garbage collection. 
@@ -3086,7 +3152,7 @@ gcCAFs(void) if (STATIC_LINK(info,p) == NULL) { IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p)); /* black hole it */ - SET_INFO(p,&BLACKHOLE_info); + SET_INFO(p,&stg_BLACKHOLE_info); p = STATIC_LINK2(info,p); *pp = p; } @@ -3141,16 +3207,16 @@ threadLazyBlackHole(StgTSO *tso) * The blackhole made for a CAF is a CAF_BLACKHOLE, so they * don't interfere with this optimisation. */ - if (bh->header.info == &BLACKHOLE_info) { + if (bh->header.info == &stg_BLACKHOLE_info) { return; } - if (bh->header.info != &BLACKHOLE_BQ_info && - bh->header.info != &CAF_BLACKHOLE_info) { + if (bh->header.info != &stg_BLACKHOLE_BQ_info && + bh->header.info != &stg_CAF_BLACKHOLE_info) { #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh); #endif - SET_INFO(bh,&BLACKHOLE_info); + SET_INFO(bh,&stg_BLACKHOLE_info); } update_frame = update_frame->link; @@ -3230,16 +3296,20 @@ threadSqueezeStack(StgTSO *tso) frame, prev_frame); }) switch (get_itbl(frame)->type) { - case UPDATE_FRAME: upd_frames++; - if (frame->updatee->header.info == &BLACKHOLE_info) - bhs++; - break; - case STOP_FRAME: stop_frames++; - break; - case CATCH_FRAME: catch_frames++; - break; - case SEQ_FRAME: seq_frames++; - break; + case UPDATE_FRAME: + upd_frames++; + if (frame->updatee->header.info == &stg_BLACKHOLE_info) + bhs++; + break; + case STOP_FRAME: + stop_frames++; + break; + case CATCH_FRAME: + catch_frames++; + break; + case SEQ_FRAME: + seq_frames++; + break; default: barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n", frame, prev_frame); @@ -3247,7 +3317,7 @@ threadSqueezeStack(StgTSO *tso) } #endif if (get_itbl(frame)->type == UPDATE_FRAME - && frame->updatee->header.info == &BLACKHOLE_info) { + && frame->updatee->header.info == &stg_BLACKHOLE_info) { break; } } @@ -3313,11 +3383,11 @@ threadSqueezeStack(StgTSO *tso) # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) # error Unimplemented lazy BH warning. (KSW 1999-01) # endif - if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info - || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info + if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info + || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info ) { /* Sigh. It has one. Don't lose those threads! */ - if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) { + if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) { /* Urgh. Two queues. Merge them. */ P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue; @@ -3343,8 +3413,18 @@ threadSqueezeStack(StgTSO *tso) /* wasn't there something about update squeezing and ticky to be * sorted out? oh yes: we aren't counting each enter properly * in this case. See the log somewhere. KSW 1999-04-21 + * + * Check two things: that the two update frames don't point to + * the same object, and that the updatee_bypass isn't already an + * indirection. Both of these cases only happen when we're in a + * block hole-style loop (and there are multiple update frames + * on the stack pointing to the same closure), but they can both + * screw us up if we don't check. 
*/ - UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */ + if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) { + /* this wakes the threads up */ + UPD_IND_NOLOCK(updatee_bypass, updatee_keep); + } sp = (P_)frame - 1; /* sp = stuff to slide */ displacement += sizeofW(StgUpdateFrame); @@ -3357,13 +3437,31 @@ threadSqueezeStack(StgTSO *tso) */ if (is_update_frame) { StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee; - if (bh->header.info != &BLACKHOLE_info && - bh->header.info != &BLACKHOLE_BQ_info && - bh->header.info != &CAF_BLACKHOLE_info) { + if (bh->header.info != &stg_BLACKHOLE_info && + bh->header.info != &stg_BLACKHOLE_BQ_info && + bh->header.info != &stg_CAF_BLACKHOLE_info) { #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh); #endif - SET_INFO(bh,&BLACKHOLE_info); +#ifdef DEBUG + /* zero out the slop so that the sanity checker can tell + * where the next closure is. + */ + { + StgInfoTable *info = get_itbl(bh); + nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i; + /* don't zero out slop for a THUNK_SELECTOR, because it's layout + * info is used for a different purpose, and it's exactly the + * same size as a BLACKHOLE in any case. + */ + if (info->type != THUNK_SELECTOR) { + for (i = np; i < np + nw; i++) { + ((StgClosure *)bh)->payload[i] = 0; + } + } + } +#endif + SET_INFO(bh,&stg_BLACKHOLE_info); } } @@ -3417,7 +3515,6 @@ threadSqueezeStack(StgTSO *tso) * turned on. * -------------------------------------------------------------------------- */ //@cindex threadPaused - void threadPaused(StgTSO *tso) { @@ -3455,16 +3552,32 @@ printMutableList(generation *gen) { StgMutClosure *p, *next; - p = gen->saved_mut_list; + p = gen->mut_list; next = p->mut_link; - fprintf(stderr, "@@ Mutable list %p: ", gen->saved_mut_list); + fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list); for (; p != END_MUT_LIST; p = next, next = p->mut_link) { fprintf(stderr, "%p (%s), ", p, info_type((StgClosure *)p)); } fputc('\n', stderr); } + +//@cindex maybeLarge +static inline rtsBool +maybeLarge(StgClosure *closure) +{ + StgInfoTable *info = get_itbl(closure); + + /* closure types that may be found on the new_large_objects list; + see scavenge_large */ + return (info->type == MUT_ARR_PTRS || + info->type == MUT_ARR_PTRS_FROZEN || + info->type == TSO || + info->type == ARR_WORDS); +} + + #endif /* DEBUG */ //@node Index, , Pausing a thread @@ -3482,9 +3595,11 @@ printMutableList(generation *gen) //* evacuate_large:: @cindex\s-+evacuate_large //* gcCAFs:: @cindex\s-+gcCAFs //* isAlive:: @cindex\s-+isAlive +//* maybeLarge:: @cindex\s-+maybeLarge //* mkMutCons:: @cindex\s-+mkMutCons +//* printMutOnceList:: @cindex\s-+printMutOnceList +//* printMutableList:: @cindex\s-+printMutableList //* relocate_TSO:: @cindex\s-+relocate_TSO -//* revert_dead_CAFs:: @cindex\s-+revert_dead_CAFs //* scavenge:: @cindex\s-+scavenge //* scavenge_large:: @cindex\s-+scavenge_large //* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
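
Note: the copy()/addBlock() pair in this patch is the heart of to-space allocation: objects are copied with a bump pointer, and a fresh block is chained on whenever the current one cannot hold the next object. Below is a standalone sketch of that pattern; Block, add_block and gc_copy are illustrative names rather than RTS API, and the real copy() additionally handles generations, steps and eager promotion (the evac_gen check at its top).

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_WORDS 1024   /* words per to-space block; stands in for BLOCK_SIZE_W */

typedef struct Block {
    unsigned long data[BLOCK_WORDS];
    struct Block *link;
} Block;

static Block *hp_bd = NULL;                      /* current to-space block */
static unsigned long *hp = NULL, *hpLim = NULL;

/* chain a fresh block onto to-space, as addBlock() does in the patch */
static void add_block(void)
{
    Block *bd = malloc(sizeof(Block));
    if (bd == NULL) abort();
    bd->link = NULL;
    if (hp_bd != NULL) hp_bd->link = bd;
    hp_bd = bd;
    hp    = bd->data;
    hpLim = bd->data + BLOCK_WORDS;
}

/* copy 'size' words into to-space and return the new address; mirrors
 * the overflow test in copy(). Oversized objects (at or above
 * LARGE_OBJECT_THRESHOLD) take the large-object path instead, cf.
 * evacuate_large() and the new PAP/AP_UPD case, and never come here. */
static unsigned long *gc_copy(const unsigned long *src, size_t size)
{
    unsigned long *dest;
    assert(size < BLOCK_WORDS);
    if (hp == NULL || hp + size >= hpLim) {
        add_block();
    }
    dest = hp;
    memcpy(dest, src, size * sizeof *src);
    hp += size;
    /* the RTS would now overwrite the old object with a forwarding
     * pointer (upd_evacuee(), stg_EVACUATED_info) so that later
     * references find the copy; omitted in this sketch */
    return dest;
}

int main(void)
{
    unsigned long obj[3] = { 1, 2, 3 };
    unsigned long *to = gc_copy(obj, 3);
    printf("copied to %p: %lu %lu %lu\n", (void *)to, to[0], to[1], to[2]);
    return 0;
}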
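
Note: the new CONSTR_0_1 case in evacuate() avoids copying Char- and Int-like constructors altogether when the payload lies in a fixed range, returning a shared, statically allocated closure instead (CHARLIKE_CLOSURE/INTLIKE_CLOSURE). A minimal sketch of that interning idea follows, with illustrative bounds and table names; the RTS tables are prefabricated static closures, whereas this sketch fills its table lazily.

#include <stdio.h>

#define MIN_SMALL (-16)   /* illustrative; cf. MIN_INTLIKE/MAX_INTLIKE */
#define MAX_SMALL 255

typedef struct { long payload; } IntBox;

/* one preallocated box per small value, playing the role of the
 * static INTLIKE/CHARLIKE closure tables */
static IntBox small_ints[MAX_SMALL - MIN_SMALL + 1];

static IntBox *intern_int(long v)
{
    if (v >= MIN_SMALL && v <= MAX_SMALL) {
        small_ints[v - MIN_SMALL].payload = v;   /* idempotent store */
        return &small_ints[v - MIN_SMALL];       /* shared, never copied */
    }
    return NULL;   /* out of range: caller heap-allocates and copies as usual */
}

int main(void)
{
    IntBox *a = intern_int(42);
    IntBox *b = intern_int(42);
    printf("shared: %s\n", (a == b) ? "yes" : "no");   /* prints "yes" */
    return 0;
}

Since the replacement closures are static, later collections never copy them again: like other static closures they short-circuit through the LOOKS_LIKE_STATIC test in isAlive().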