X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=120f02a76e25eb5fd5fd5f48303a18da1ad17c2b;hb=9213df4a0c5a8f121832d36d6d3afbf1c80f1a0a;hp=d8f0410f998accbbb6cfbb9c5c0c06cb7bb063ec;hpb=ce32f3f61194e6008f74da877cccacebd7849d92;p=ghc-hetmet.git diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c index d8f0410..120f02a 100644 --- a/ghc/rts/GC.c +++ b/ghc/rts/GC.c @@ -1,10 +1,31 @@ /* ----------------------------------------------------------------------------- - * $Id: GC.c,v 1.3 1999/01/06 11:52:43 simonm Exp $ + * $Id: GC.c,v 1.102 2001/04/03 16:35:12 sewardj Exp $ * - * Two-space garbage collector + * (c) The GHC Team 1998-1999 + * + * Generational garbage collector * * ---------------------------------------------------------------------------*/ +//@menu +//* Includes:: +//* STATIC OBJECT LIST:: +//* Static function declarations:: +//* Garbage Collect:: +//* Weak Pointers:: +//* Evacuation:: +//* Scavenging:: +//* Reverting CAFs:: +//* Sanity code for CAF garbage collection:: +//* Lazy black holing:: +//* Stack squeezing:: +//* Pausing a thread:: +//* Index:: +//@end menu + +//@node Includes, STATIC OBJECT LIST +//@subsection Includes + #include "Rts.h" #include "RtsFlags.h" #include "RtsUtils.h" @@ -16,12 +37,31 @@ #include "Sanity.h" #include "GC.h" #include "BlockAlloc.h" +#include "MBlock.h" #include "Main.h" -#include "DebugProf.h" +#include "ProfHeap.h" #include "SchedAPI.h" #include "Weak.h" +#include "StablePriv.h" +#include "Prelude.h" +#include "ParTicky.h" // ToDo: move into Rts.h +#if defined(GRAN) || defined(PAR) +# include "GranSimRts.h" +# include "ParallelRts.h" +# include "FetchMe.h" +# if defined(DEBUG) +# include "Printer.h" +# include "ParallelDebug.h" +# endif +#endif +#include "HsFFI.h" +#include "Linker.h" +#if defined(RTS_GTK_FRONTPANEL) +#include "FrontPanel.h" +#endif -StgCAF* enteredCAFs; +//@node STATIC OBJECT LIST, Static function declarations, Includes +//@subsection STATIC OBJECT LIST /* STATIC OBJECT LIST. * @@ -74,31 +114,66 @@ static rtsBool major_gc; */ static nat evac_gen; -/* WEAK POINTERS +/* Weak pointers */ static StgWeak *old_weak_ptr_list; /* also pending finaliser list */ static rtsBool weak_done; /* all done for this pass */ +/* List of all threads during GC + */ +static StgTSO *old_all_threads; +static StgTSO *resurrected_threads; + +/* Flag indicating failure to evacuate an object to the desired + * generation. + */ +static rtsBool failed_to_evac; + +/* Old to-space (used for two-space collector only) + */ +bdescr *old_to_space; + +/* Data used for allocation area sizing. 
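+ * (new_blocks counts the to-space blocks chained on during this GC;
+ * g0s0_pcnt_kept records how much of g0s0 survived the last minor
+ * GC.  Both feed the allocation-area resizing logic at the end of
+ * GarbageCollect().)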
+ */ +lnat new_blocks; /* blocks allocated during this GC */ +lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */ + +/* Used to avoid long recursion due to selector thunks + */ +lnat thunk_selector_depth = 0; +#define MAX_THUNK_SELECTOR_DEPTH 256 + +//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST +//@subsection Static function declarations + /* ----------------------------------------------------------------------------- Static function declarations -------------------------------------------------------------------------- */ -static StgClosure *evacuate(StgClosure *q); -static void zeroStaticObjectList(StgClosure* first_static); -static rtsBool traverse_weak_ptr_list(void); -static void zeroMutableList(StgMutClosure *first); -static void revertDeadCAFs(void); +static StgClosure * evacuate ( StgClosure *q ); +static void zero_static_object_list ( StgClosure* first_static ); +static void zero_mutable_list ( StgMutClosure *first ); + +static rtsBool traverse_weak_ptr_list ( void ); +static void cleanup_weak_ptr_list ( StgWeak **list ); -static void scavenge_stack(StgPtr p, StgPtr stack_end); -static void scavenge_large(step *step); -static void scavenge(step *step); -static void scavenge_static(void); -static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen); +static void scavenge_stack ( StgPtr p, StgPtr stack_end ); +static void scavenge_large ( step * ); +static void scavenge ( step * ); +static void scavenge_static ( void ); +static void scavenge_mutable_list ( generation *g ); +static void scavenge_mut_once_list ( generation *g ); #ifdef DEBUG -static void gcCAFs(void); +static void gcCAFs ( void ); #endif +void revertCAFs ( void ); +void scavengeCAFs ( void ); + +//@node Garbage Collect, Weak Pointers, Static function declarations +//@subsection Garbage Collect + /* ----------------------------------------------------------------------------- GarbageCollect @@ -121,54 +196,66 @@ static void gcCAFs(void); - free from-space in each step, and set from-space = to-space. -------------------------------------------------------------------------- */ +//@cindex GarbageCollect -void GarbageCollect(void (*get_roots)(void)) +void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc ) { bdescr *bd; - step *step; - lnat live, allocated; + step *stp; + lnat live, allocated, collected = 0, copied = 0; nat g, s; #ifdef PROFILING CostCentreStack *prev_CCS; #endif +#if defined(DEBUG) && defined(GRAN) + IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n", + Now, Now)); +#endif + /* tell the stats department that we've started a GC */ stat_startGC(); + /* Init stats and print par specific (timing) info */ + PAR_TICKY_PAR_START(); + /* attribute any costs to CCS_GC */ #ifdef PROFILING prev_CCS = CCCS; CCCS = CCS_GC; #endif - /* We might have been called from Haskell land by _ccall_GC, in - * which case we need to call threadPaused() because the scheduler - * won't have done it. - */ - if (CurrentTSO) { threadPaused(CurrentTSO); } - - /* Approximate how much we allocated: number of blocks in the - * nursery + blocks allocated via allocate() - unused nusery blocks. - * This leaves a little slop at the end of each block, and doesn't - * take into account large objects (ToDo). + /* Approximate how much we allocated. + * Todo: only when generating stats? 
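+ * (calcAllocated() counts nursery blocks used plus bytes allocated
+ * via allocate(); it leaves a little slop at the end of each block,
+ * so the figure is an estimate rather than an exact byte count.)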
*/ - allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes(); - for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) { - allocated -= BLOCK_SIZE_W; - } + allocated = calcAllocated(); /* Figure out which generation to collect */ - for (g = 0; g < RtsFlags.GcFlags.generations; g++) { - if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) { - N = g; + if (force_major_gc) { + N = RtsFlags.GcFlags.generations - 1; + major_gc = rtsTrue; + } else { + N = 0; + for (g = 0; g < RtsFlags.GcFlags.generations; g++) { + if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) { + N = g; + } } + major_gc = (N == RtsFlags.GcFlags.generations-1); + } + +#ifdef RTS_GTK_FRONTPANEL + if (RtsFlags.GcFlags.frontpanel) { + updateFrontPanelBeforeGC(N); } - major_gc = (N == RtsFlags.GcFlags.generations-1); +#endif /* check stack sanity *before* GC (ToDo: check all threads) */ - /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */ +#if defined(GRAN) + // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity()); +#endif IF_DEBUG(sanity, checkFreeListSanity()); /* Initialise the static object lists @@ -177,42 +264,61 @@ void GarbageCollect(void (*get_roots)(void)) scavenged_static_objects = END_OF_STATIC_LIST; /* zero the mutable list for the oldest generation (see comment by - * zeroMutableList below). + * zero_mutable_list below). */ if (major_gc) { - zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list); + zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list); + } + + /* Save the old to-space if we're doing a two-space collection + */ + if (RtsFlags.GcFlags.generations == 1) { + old_to_space = g0s0->to_space; + g0s0->to_space = NULL; } + /* Keep a count of how many new blocks we allocated during this GC + * (used for resizing the allocation area, later). + */ + new_blocks = 0; + /* Initialise to-space in all the generations/steps that we're * collecting. */ for (g = 0; g <= N; g++) { + generations[g].mut_once_list = END_MUT_LIST; generations[g].mut_list = END_MUT_LIST; for (s = 0; s < generations[g].n_steps; s++) { + /* generation 0, step 0 doesn't need to-space */ - if (g == 0 && s == 0) { continue; } + if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) { + continue; + } + /* Get a free block for to-space. Extra blocks will be chained on * as necessary. */ bd = allocBlock(); - step = &generations[g].steps[s]; - ASSERT(step->gen->no == g); - ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue); + stp = &generations[g].steps[s]; + ASSERT(stp->gen->no == g); + ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue); bd->gen = &generations[g]; - bd->step = step; + bd->step = stp; bd->link = NULL; - step->hp = bd->start; - step->hpLim = step->hp + BLOCK_SIZE_W; - step->hp_bd = bd; - step->to_space = bd; - step->to_blocks = 1; /* ???? 
*/ - step->scan = bd->start; - step->scan_bd = bd; - step->new_large_objects = NULL; - step->scavenged_large_objects = NULL; + bd->evacuated = 1; /* it's a to-space block */ + stp->hp = bd->start; + stp->hpLim = stp->hp + BLOCK_SIZE_W; + stp->hp_bd = bd; + stp->to_space = bd; + stp->to_blocks = 1; + stp->scan = bd->start; + stp->scan_bd = bd; + stp->new_large_objects = NULL; + stp->scavenged_large_objects = NULL; + new_blocks++; /* mark the large objects as not evacuated yet */ - for (bd = step->large_objects; bd; bd = bd->link) { + for (bd = stp->large_objects; bd; bd = bd->link) { bd->evacuated = 0; } } @@ -223,59 +329,112 @@ void GarbageCollect(void (*get_roots)(void)) */ for (g = N+1; g < RtsFlags.GcFlags.generations; g++) { for (s = 0; s < generations[g].n_steps; s++) { - step = &generations[g].steps[s]; - if (step->hp_bd == NULL) { + stp = &generations[g].steps[s]; + if (stp->hp_bd == NULL) { bd = allocBlock(); bd->gen = &generations[g]; - bd->step = step; + bd->step = stp; bd->link = NULL; - step->hp = bd->start; - step->hpLim = step->hp + BLOCK_SIZE_W; - step->hp_bd = bd; - step->blocks = bd; - step->n_blocks = 1; + bd->evacuated = 0; /* *not* a to-space block */ + stp->hp = bd->start; + stp->hpLim = stp->hp + BLOCK_SIZE_W; + stp->hp_bd = bd; + stp->blocks = bd; + stp->n_blocks = 1; + new_blocks++; } /* Set the scan pointer for older generations: remember we * still have to scavenge objects that have been promoted. */ - step->scan = step->hp; - step->scan_bd = step->hp_bd; - step->to_space = NULL; - step->to_blocks = 0; - step->new_large_objects = NULL; - step->scavenged_large_objects = NULL; + stp->scan = stp->hp; + stp->scan_bd = stp->hp_bd; + stp->to_space = NULL; + stp->to_blocks = 0; + stp->new_large_objects = NULL; + stp->scavenged_large_objects = NULL; } } /* ----------------------------------------------------------------------- - * follow all the roots that the application knows about. - */ - evac_gen = 0; - get_roots(); - - /* follow all the roots that we know about: + * follow all the roots that we know about: * - mutable lists from each generation > N * we want to *scavenge* these roots, not evacuate them: they're not * going to move in this GC. + * Also: do them in reverse generation order. This is because we + * often want to promote objects that are pointed to by older + * generations early, so we don't have to repeatedly copy them. + * Doing the generations in reverse order ensures that we don't end + * up in the situation where we want to evac an object to gen 3 and + * it has already been evaced to gen 2. 
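+ * Concretely: each older generation's mut_list is first moved aside
+ * to saved_mut_list and rebuilt as we scavenge, so anything that
+ * still points into a younger generation afterwards gets re-recorded
+ * for the next GC.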
*/ - for (g = N+1; g < RtsFlags.GcFlags.generations; g++) { - generations[g].mut_list = - scavenge_mutable_list(generations[g].mut_list, g); + { + int st; + for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { + generations[g].saved_mut_list = generations[g].mut_list; + generations[g].mut_list = END_MUT_LIST; + } + + /* Do the mut-once lists first */ + for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { + IF_PAR_DEBUG(verbose, + printMutOnceList(&generations[g])); + scavenge_mut_once_list(&generations[g]); + evac_gen = g; + for (st = generations[g].n_steps-1; st >= 0; st--) { + scavenge(&generations[g].steps[st]); + } + } + + for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { + IF_PAR_DEBUG(verbose, + printMutableList(&generations[g])); + scavenge_mutable_list(&generations[g]); + evac_gen = g; + for (st = generations[g].n_steps-1; st >= 0; st--) { + scavenge(&generations[g].steps[st]); + } + } } - + + scavengeCAFs(); + + /* follow all the roots that the application knows about. + */ + evac_gen = 0; + get_roots(); + +#if defined(PAR) /* And don't forget to mark the TSO if we got here direct from * Haskell! */ + /* Not needed in a seq version? if (CurrentTSO) { CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO); } + */ + + /* Mark the entries in the GALA table of the parallel system */ + markLocalGAs(major_gc); + /* Mark all entries on the list of pending fetches */ + markPendingFetches(major_gc); +#endif /* Mark the weak pointer list, and prepare to detect dead weak * pointers. */ - markWeakList(); old_weak_ptr_list = weak_ptr_list; weak_ptr_list = NULL; weak_done = rtsFalse; + /* The all_threads list is like the weak_ptr_list. + * See traverse_weak_ptr_list() for the details. + */ + old_all_threads = all_threads; + all_threads = END_TSO_QUEUE; + resurrected_threads = END_TSO_QUEUE; + + /* Mark the stable pointer table. + */ + markStablePtrTable(major_gc); + #ifdef INTERPRETER { /* ToDo: To fix the caf leak, we need to make the commented out @@ -283,22 +442,7 @@ void GarbageCollect(void (*get_roots)(void)) * the CAF document. */ extern void markHugsObjects(void); -#if 0 - /* ToDo: This (undefined) function should contain the scavenge - * loop immediately below this block of code - but I'm not sure - * enough of the details to do this myself. - */ - scavengeEverything(); - /* revert dead CAFs and update enteredCAFs list */ - revertDeadCAFs(); -#endif markHugsObjects(); -#if 0 - /* This will keep the CAFs and the attached BCOs alive - * but the values will have been reverted - */ - scavengeEverything(); -#endif } #endif @@ -313,47 +457,45 @@ void GarbageCollect(void (*get_roots)(void)) /* scavenge static objects */ if (major_gc && static_objects != END_OF_STATIC_LIST) { + IF_DEBUG(sanity, + checkStaticObjects()); scavenge_static(); } - /* scavenge each step in generations 0..N */ - evac_gen = 0; /* just evac as normal */ - for (g = 0; g <= N; g++) { - for (s = 0; s < generations[g].n_steps; s++) { - step = &generations[g].steps[s]; - if (step->hp_bd != step->scan_bd || step->scan < step->hp) { - scavenge(step); - flag = rtsTrue; - } - if (step->new_large_objects != NULL) { - scavenge_large(step); - flag = rtsTrue; - } - } - } - if (flag) { goto loop; } - - /* Now scavenge all the older generations. Objects may have been + /* When scavenging the older generations: Objects may have been * evacuated from generations <= N into older generations, and we - * need to scavenge these objects. We're going to make sure that + * need to scavenge these objects. 
We're going to try to ensure that * any evacuations that occur move the objects into at least the - * same generation as the object being scavenged. + * same generation as the object being scavenged, otherwise we + * have to create new entries on the mutable list for the older + * generation. */ - for (g = N+1; g < RtsFlags.GcFlags.generations; g++) { - for (s = 0; s < generations[g].n_steps; s++) { - step = &generations[g].steps[s]; - evac_gen = g; /* evacuate to g at least */ - old_loop: - if (step->hp_bd != step->scan_bd || step->scan < step->hp) { - scavenge(step); - goto old_loop; - } - if (step->new_large_objects != NULL) { - scavenge_large(step); - goto old_loop; + + /* scavenge each step in generations 0..maxgen */ + { + int gen, st; + loop2: + for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) { + for (st = generations[gen].n_steps-1; st >= 0 ; st--) { + if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) { + continue; + } + stp = &generations[gen].steps[st]; + evac_gen = gen; + if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) { + scavenge(stp); + flag = rtsTrue; + goto loop2; + } + if (stp->new_large_objects != NULL) { + scavenge_large(stp); + flag = rtsTrue; + goto loop2; + } } } } + if (flag) { goto loop; } /* must be last... */ if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */ @@ -361,32 +503,89 @@ void GarbageCollect(void (*get_roots)(void)) } } + /* Final traversal of the weak pointer list (see comment by + * cleanUpWeakPtrList below). + */ + cleanup_weak_ptr_list(&weak_ptr_list); + + /* Now see which stable names are still alive. + */ + gcStablePtrTable(major_gc); + +#if defined(PAR) + /* Reconstruct the Global Address tables used in GUM */ + rebuildGAtables(major_gc); + IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/)); + IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/)); +#endif + + /* Set the maximum blocks for the oldest generation, based on twice + * the amount of live data now, adjusted to fit the maximum heap + * size if necessary. + * + * This is an approximation, since in the worst case we'll need + * twice the amount of live data plus whatever space the other + * generations need. 
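+ * In blocks, the code below computes
+ *   max_blocks = max(live * oldGenFactor, minOldGenSize)
+ * capped at maxHeapSize/2; e.g. 1000 live blocks with a factor of 2
+ * allow the oldest generation 2000 blocks before its next collection.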
+ */ + if (RtsFlags.GcFlags.generations > 1) { + if (major_gc) { + oldest_gen->max_blocks = + stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor, + RtsFlags.GcFlags.minOldGenSize); + if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) { + oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2; + if (((int)oldest_gen->max_blocks - + (int)oldest_gen->steps[0].to_blocks) < + (RtsFlags.GcFlags.pcFreeHeap * + RtsFlags.GcFlags.maxHeapSize / 200)) { + heapOverflow(); + } + } + } + } + /* run through all the generations/steps and tidy up */ - for (g = 0; g <= RtsFlags.GcFlags.generations; g++) { + copied = new_blocks * BLOCK_SIZE_W; + for (g = 0; g < RtsFlags.GcFlags.generations; g++) { + + if (g <= N) { + generations[g].collections++; /* for stats */ + } + for (s = 0; s < generations[g].n_steps; s++) { bdescr *next; - step = &generations[g].steps[s]; + stp = &generations[g].steps[s]; - if (!(g == 0 && s == 0)) { + if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) { /* Tidy the end of the to-space chains */ - step->hp_bd->free = step->hp; - step->hp_bd->link = NULL; + stp->hp_bd->free = stp->hp; + stp->hp_bd->link = NULL; + /* stats information: how much we copied */ + if (g <= N) { + copied -= stp->hp_bd->start + BLOCK_SIZE_W - + stp->hp_bd->free; + } } /* for generations we collected... */ if (g <= N) { + collected += stp->n_blocks * BLOCK_SIZE_W; /* for stats */ + /* free old memory and shift to-space into from-space for all * the collected steps (except the allocation area). These * freed blocks will probaby be quickly recycled. */ if (!(g == 0 && s == 0)) { - freeChain(step->blocks); - step->blocks = step->to_space; - step->n_blocks = step->to_blocks; - step->to_space = NULL; - step->to_blocks = 0; + freeChain(stp->blocks); + stp->blocks = stp->to_space; + stp->n_blocks = stp->to_blocks; + stp->to_space = NULL; + stp->to_blocks = 0; + for (bd = stp->blocks; bd != NULL; bd = bd->link) { + bd->evacuated = 0; /* now from-space */ + } } /* LARGE OBJECTS. The current live large objects are chained on @@ -394,22 +593,32 @@ void GarbageCollect(void (*get_roots)(void)) * collection from large_objects. Any objects left on * large_objects list are therefore dead, so we free them here. */ - for (bd = step->large_objects; bd != NULL; bd = next) { + for (bd = stp->large_objects; bd != NULL; bd = next) { next = bd->link; freeGroup(bd); bd = next; } - step->large_objects = step->scavenged_large_objects; - - /* Set the maximum blocks for this generation, - * using an arbitrary factor of the no. of blocks in step 0. + for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) { + bd->evacuated = 0; + } + stp->large_objects = stp->scavenged_large_objects; + + /* Set the maximum blocks for this generation, interpolating + * between the maximum size of the oldest and youngest + * generations. + * + * max_blocks = oldgen_max_blocks * G + * ---------------------- + * oldest_gen */ if (g != 0) { - generations[g].max_blocks = - stg_max(generations[g].steps[s].n_blocks * 2, - RtsFlags.GcFlags.minAllocAreaSize * 4); +#if 0 + generations[g].max_blocks = (oldest_gen->max_blocks * g) + / (RtsFlags.GcFlags.generations-1); +#endif + generations[g].max_blocks = oldest_gen->max_blocks; } - + /* for older generations... */ } else { @@ -417,51 +626,20 @@ void GarbageCollect(void (*get_roots)(void)) * scavenged_large_object list (i.e. large objects that have been * promoted during this GC) to the large_object list for that step. 
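+ * Each block is also re-flagged with bd->evacuated = 0 as it is
+ * moved, since from here on it is ordinary from-space.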
*/ - for (bd = step->scavenged_large_objects; bd; bd = next) { + for (bd = stp->scavenged_large_objects; bd; bd = next) { next = bd->link; - dbl_link_onto(bd, &step->large_objects); + bd->evacuated = 0; + dbl_link_onto(bd, &stp->large_objects); } /* add the new blocks we promoted during this GC */ - step->n_blocks += step->to_blocks; + stp->n_blocks += stp->to_blocks; } } } - /* revert dead CAFs and update enteredCAFs list */ - revertDeadCAFs(); - - /* mark the garbage collected CAFs as dead */ -#ifdef DEBUG - if (major_gc) { gcCAFs(); } -#endif - - /* zero the scavenged static object list */ - if (major_gc) { - zeroStaticObjectList(scavenged_static_objects); - } - - /* Reset the nursery - */ - for (bd = g0s0->blocks; bd; bd = bd->link) { - bd->free = bd->start; - ASSERT(bd->gen == g0); - ASSERT(bd->step == g0s0); - } - current_nursery = g0s0->blocks; - - live = 0; - for (g = 0; g <= RtsFlags.GcFlags.generations; g++) { - for (s = 0; s < generations[g].n_steps; s++) { - /* approximate amount of live data (doesn't take into account slop - * at end of each block). ToDo: this more accurately. - */ - if (g == 0 && s == 0) { continue; } - step = &generations[g].steps[s]; - live += step->n_blocks * BLOCK_SIZE_W + - ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_); - } - } + /* Guess the amount of live data for stats. */ + live = calcLive(); /* Free the small objects allocated via allocate(), since this will * all have been copied into G0S1 now. @@ -471,19 +649,130 @@ void GarbageCollect(void (*get_roots)(void)) } small_alloc_list = NULL; alloc_blocks = 0; + alloc_Hp = NULL; + alloc_HpLim = NULL; alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize; - /* check sanity after GC */ -#ifdef DEBUG - for (g = 0; g <= N; g++) { - for (s = 0; s < generations[g].n_steps; s++) { - if (g == 0 && s == 0) { continue; } - IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks)); + /* Two-space collector: + * Free the old to-space, and estimate the amount of live data. + */ + if (RtsFlags.GcFlags.generations == 1) { + nat blocks; + + if (old_to_space != NULL) { + freeChain(old_to_space); + } + for (bd = g0s0->to_space; bd != NULL; bd = bd->link) { + bd->evacuated = 0; /* now from-space */ + } + + /* For a two-space collector, we need to resize the nursery. */ + + /* set up a new nursery. Allocate a nursery size based on a + * function of the amount of live data (currently a factor of 2, + * should be configurable (ToDo)). Use the blocks from the old + * nursery if possible, freeing up any left over blocks. + * + * If we get near the maximum heap size, then adjust our nursery + * size accordingly. If the nursery is the same size as the live + * data (L), then we need 3L bytes. We can reduce the size of the + * nursery to bring the required memory down near 2L bytes. + * + * A normal 2-space collector would need 4L bytes to give the same + * performance we get from 3L bytes, reducing to the same + * performance at 2L bytes. 
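+ * Worked example: with L = 100 blocks of live data and a nursery of
+ * the same size, peak residency is roughly 300 blocks -- the live
+ * data, its to-space copy, and the nursery -- hence the 3L figure.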
+ */ + blocks = g0s0->to_blocks; + + if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 > + RtsFlags.GcFlags.maxHeapSize ) { + int adjusted_blocks; /* signed on purpose */ + int pc_free; + + adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks); + IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks)); + pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize; + if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ { + heapOverflow(); + } + blocks = adjusted_blocks; + + } else { + blocks *= RtsFlags.GcFlags.oldGenFactor; + if (blocks < RtsFlags.GcFlags.minAllocAreaSize) { + blocks = RtsFlags.GcFlags.minAllocAreaSize; + } + } + resizeNursery(blocks); + + } else { + /* Generational collector: + * If the user has given us a suggested heap size, adjust our + * allocation area to make best use of the memory available. + */ + + if (RtsFlags.GcFlags.heapSizeSuggestion) { + int blocks; + nat needed = calcNeeded(); /* approx blocks needed at next GC */ + + /* Guess how much will be live in generation 0 step 0 next time. + * A good approximation is the obtained by finding the + * percentage of g0s0 that was live at the last minor GC. + */ + if (N == 0) { + g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks; + } + + /* Estimate a size for the allocation area based on the + * information available. We might end up going slightly under + * or over the suggested heap size, but we should be pretty + * close on average. + * + * Formula: suggested - needed + * ---------------------------- + * 1 + g0s0_pcnt_kept/100 + * + * where 'needed' is the amount of memory needed at the next + * collection for collecting all steps except g0s0. + */ + blocks = + (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) / + (100 + (int)g0s0_pcnt_kept); + + if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) { + blocks = RtsFlags.GcFlags.minAllocAreaSize; + } + + resizeNursery((nat)blocks); } } - IF_DEBUG(sanity, checkFreeListSanity()); + + /* mark the garbage collected CAFs as dead */ +#if 0 /* doesn't work at the moment */ +#if defined(DEBUG) + if (major_gc) { gcCAFs(); } #endif +#endif + + /* zero the scavenged static object list */ + if (major_gc) { + zero_static_object_list(scavenged_static_objects); + } + + /* Reset the nursery + */ + resetNurseries(); + + /* start any pending finalizers */ + scheduleFinalizers(old_weak_ptr_list); + + /* send exceptions to any threads which were about to die */ + resurrectThreads(resurrected_threads); + /* check sanity after GC */ + IF_DEBUG(sanity, checkSanity(N)); + + /* extra GC trace info */ IF_DEBUG(gc, stat_describe_gens()); #ifdef DEBUG @@ -491,23 +780,30 @@ void GarbageCollect(void (*get_roots)(void)) /* heapCensus(to_space); */ /* ToDo */ #endif - /* start any pending finalisers */ - scheduleFinalisers(old_weak_ptr_list); - /* restore enclosing cost centre */ #ifdef PROFILING + heapCensus(); CCCS = prev_CCS; #endif - /* ok, GC over: tell the stats department what happened. */ - { - char s[512]; /* bleugh */ - sprintf(s, "(Gen: %d)", N); - stat_endGC(RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W, - 0, live, s); + /* check for memory leaks if sanity checking is on */ + IF_DEBUG(sanity, memInventory()); + +#ifdef RTS_GTK_FRONTPANEL + if (RtsFlags.GcFlags.frontpanel) { + updateFrontPanelAfterGC( N, live ); } +#endif + + /* ok, GC over: tell the stats department what happened. 
*/ + stat_endGC(allocated, collected, live, copied, N); + + //PAR_TICKY_TP(); } +//@node Weak Pointers, Evacuation, Garbage Collect +//@subsection Weak Pointers + /* ----------------------------------------------------------------------------- Weak Pointers @@ -522,145 +818,372 @@ void GarbageCollect(void (*get_roots)(void)) new live weak pointers, then all the currently unreachable ones are dead. - For generational GC: we just don't try to finalise weak pointers in + For generational GC: we just don't try to finalize weak pointers in older generations than the one we're collecting. This could probably be optimised by keeping per-generation lists of weak pointers, but for a few weak pointers this scheme will work. -------------------------------------------------------------------------- */ +//@cindex traverse_weak_ptr_list static rtsBool traverse_weak_ptr_list(void) { StgWeak *w, **last_w, *next_w; - StgClosure *target; - const StgInfoTable *info; + StgClosure *new; rtsBool flag = rtsFalse; if (weak_done) { return rtsFalse; } - /* doesn't matter where we evacuate values/finalisers to, since + /* doesn't matter where we evacuate values/finalizers to, since * these pointers are treated as roots (iff the keys are alive). */ evac_gen = 0; last_w = &old_weak_ptr_list; for (w = old_weak_ptr_list; w; w = next_w) { - target = w->key; - loop: - /* ignore weak pointers in older generations */ - if (!LOOKS_LIKE_STATIC(target) && Bdescr((P_)target)->gen->no > N) { - next_w = w->link; + + /* First, this weak pointer might have been evacuated. If so, + * remove the forwarding pointer from the weak_ptr_list. + */ + if (get_itbl(w)->type == EVACUATED) { + w = (StgWeak *)((StgEvacuated *)w)->evacuee; + *last_w = w; + } + + /* There might be a DEAD_WEAK on the list if finalizeWeak# was + * called on a live weak pointer object. Just remove it. + */ + if (w->header.info == &stg_DEAD_WEAK_info) { + next_w = ((StgDeadWeak *)w)->link; + *last_w = next_w; continue; } - info = get_itbl(target); - switch (info->type) { - - case IND: - case IND_STATIC: - case IND_PERM: - case IND_OLDGEN: /* rely on compatible layout with StgInd */ - case IND_OLDGEN_PERM: - /* follow indirections */ - target = ((StgInd *)target)->indirectee; - goto loop; + ASSERT(get_itbl(w)->type == WEAK); - case EVACUATED: - /* If key is alive, evacuate value and finaliser and - * place weak ptr on new weak ptr list. - */ - IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p\n", w)); - w->key = ((StgEvacuated *)target)->evacuee; + /* Now, check whether the key is reachable. + */ + if ((new = isAlive(w->key))) { + w->key = new; + /* evacuate the value and finalizer */ w->value = evacuate(w->value); - w->finaliser = evacuate(w->finaliser); - + w->finalizer = evacuate(w->finalizer); /* remove this weak ptr from the old_weak_ptr list */ *last_w = w->link; - /* and put it on the new weak ptr list */ next_w = w->link; w->link = weak_ptr_list; weak_ptr_list = w; flag = rtsTrue; - break; - - default: /* key is dead */ + IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key)); + continue; + } + else { last_w = &(w->link); next_w = w->link; - break; + continue; } } - + + /* Now deal with the all_threads list, which behaves somewhat like + * the weak ptr list. If we discover any threads that are about to + * become garbage, we wake them up and administer an exception. 
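+ * Such threads are collected on resurrected_threads and handed to
+ * resurrectThreads() once GC is complete; that call is what actually
+ * delivers the exception (see the end of GarbageCollect()).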
+ */ + { + StgTSO *t, *tmp, *next, **prev; + + prev = &old_all_threads; + for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { + + /* Threads which have finished or died get dropped from + * the list. + */ + switch (t->what_next) { + case ThreadRelocated: + next = t->link; + *prev = next; + continue; + case ThreadKilled: + case ThreadComplete: + next = t->global_link; + *prev = next; + continue; + default: ; + } + + /* Threads which have already been determined to be alive are + * moved onto the all_threads list. + */ + (StgClosure *)tmp = isAlive((StgClosure *)t); + if (tmp != NULL) { + next = tmp->global_link; + tmp->global_link = all_threads; + all_threads = tmp; + *prev = next; + } else { + prev = &(t->global_link); + next = t->global_link; + } + } + } + /* If we didn't make any changes, then we can go round and kill all * the dead weak pointers. The old_weak_ptr list is used as a list - * of pending finalisers later on. + * of pending finalizers later on. */ if (flag == rtsFalse) { + cleanup_weak_ptr_list(&old_weak_ptr_list); for (w = old_weak_ptr_list; w; w = w->link) { - w->value = evacuate(w->value); - w->finaliser = evacuate(w->finaliser); + w->finalizer = evacuate(w->finalizer); + } + + /* And resurrect any threads which were about to become garbage. + */ + { + StgTSO *t, *tmp, *next; + for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { + next = t->global_link; + (StgClosure *)tmp = evacuate((StgClosure *)t); + tmp->global_link = resurrected_threads; + resurrected_threads = tmp; + } } + weak_done = rtsTrue; } return rtsTrue; } -StgClosure * -MarkRoot(StgClosure *root) -{ - root = evacuate(root); - return root; -} +/* ----------------------------------------------------------------------------- + After GC, the live weak pointer list may have forwarding pointers + on it, because a weak pointer object was evacuated after being + moved to the live weak pointer list. We remove those forwarding + pointers here. + + Also, we don't consider weak pointer objects to be reachable, but + we must nevertheless consider them to be "live" and retain them. + Therefore any weak pointer objects which haven't as yet been + evacuated need to be evacuated now. + -------------------------------------------------------------------------- */ -static __inline__ StgClosure * -copy(StgClosure *src, W_ size, bdescr *bd) +//@cindex cleanup_weak_ptr_list + +static void +cleanup_weak_ptr_list ( StgWeak **list ) { - step *step; - P_ to, from, dest; + StgWeak *w, **last_w; - /* Find out where we're going, using the handy "to" pointer in - * the step of the source object. If it turns out we need to - * evacuate to an older generation, adjust it here (see comment - * by evacuate()). - */ - step = bd->step->to; - if (step->gen->no < evac_gen) { - step = &generations[evac_gen].steps[0]; - } + last_w = list; + for (w = *list; w; w = w->link) { - /* chain a new block onto the to-space for the destination step if - * necessary. 
- */ - if (step->hp + size >= step->hpLim) { - bdescr *bd = allocBlock(); - bd->gen = step->gen; - bd->step = step; - step->hp_bd->free = step->hp; - step->hp_bd->link = bd; - step->hp = bd->start; - step->hpLim = step->hp + BLOCK_SIZE_W; - step->hp_bd = bd; - step->to_blocks++; - } + if (get_itbl(w)->type == EVACUATED) { + w = (StgWeak *)((StgEvacuated *)w)->evacuee; + *last_w = w; + } - dest = step->hp; - step->hp += size; - for(to = dest, from = (P_)src; size>0; --size) { - *to++ = *from++; + if (Bdescr((P_)w)->evacuated == 0) { + (StgClosure *)w = evacuate((StgClosure *)w); + *last_w = w; + } + last_w = &(w->link); } - return (StgClosure *)dest; } -static __inline__ void +/* ----------------------------------------------------------------------------- + isAlive determines whether the given closure is still alive (after + a garbage collection) or not. It returns the new address of the + closure if it is alive, or NULL otherwise. + -------------------------------------------------------------------------- */ + +//@cindex isAlive + +StgClosure * +isAlive(StgClosure *p) +{ + const StgInfoTable *info; + nat size; + + while (1) { + + info = get_itbl(p); + + /* ToDo: for static closures, check the static link field. + * Problem here is that we sometimes don't set the link field, eg. + * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs. + */ + + /* ignore closures in generations that we're not collecting. */ + if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) { + return p; + } + + switch (info->type) { + + case IND: + case IND_STATIC: + case IND_PERM: + case IND_OLDGEN: /* rely on compatible layout with StgInd */ + case IND_OLDGEN_PERM: + /* follow indirections */ + p = ((StgInd *)p)->indirectee; + continue; + + case EVACUATED: + /* alive! */ + return ((StgEvacuated *)p)->evacuee; + + case ARR_WORDS: + size = arr_words_sizeW((StgArrWords *)p); + goto large; + + case MUT_ARR_PTRS: + case MUT_ARR_PTRS_FROZEN: + size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p); + goto large; + + case TSO: + if (((StgTSO *)p)->what_next == ThreadRelocated) { + p = (StgClosure *)((StgTSO *)p)->link; + continue; + } + + size = tso_sizeW((StgTSO *)p); + large: + if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_) + && Bdescr((P_)p)->evacuated) + return p; + else + return NULL; + + default: + /* dead. */ + return NULL; + } + } +} + +//@cindex MarkRoot +StgClosure * +MarkRoot(StgClosure *root) +{ +# if 0 && defined(PAR) && defined(DEBUG) + StgClosure *foo = evacuate(root); + // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated); + ASSERT(isAlive(foo)); // must be in to-space + return foo; +# else + return evacuate(root); +# endif +} + +//@cindex addBlock +static void addBlock(step *stp) +{ + bdescr *bd = allocBlock(); + bd->gen = stp->gen; + bd->step = stp; + + if (stp->gen->no <= N) { + bd->evacuated = 1; + } else { + bd->evacuated = 0; + } + + stp->hp_bd->free = stp->hp; + stp->hp_bd->link = bd; + stp->hp = bd->start; + stp->hpLim = stp->hp + BLOCK_SIZE_W; + stp->hp_bd = bd; + stp->to_blocks++; + new_blocks++; +} + +//@cindex upd_evacuee + +static __inline__ void upd_evacuee(StgClosure *p, StgClosure *dest) { - StgEvacuated *q = (StgEvacuated *)p; + p->header.info = &stg_EVACUATED_info; + ((StgEvacuated *)p)->evacuee = dest; +} + +//@cindex copy + +static __inline__ StgClosure * +copy(StgClosure *src, nat size, step *stp) +{ + P_ to, from, dest; + + TICK_GC_WORDS_COPIED(size); + /* Find out where we're going, using the handy "to" pointer in + * the step of the source object. 
If it turns out we need to + * evacuate to an older generation, adjust it here (see comment + * by evacuate()). + */ + if (stp->gen->no < evac_gen) { +#ifdef NO_EAGER_PROMOTION + failed_to_evac = rtsTrue; +#else + stp = &generations[evac_gen].steps[0]; +#endif + } - SET_INFO(q,&EVACUATED_info); - q->evacuee = dest; + /* chain a new block onto the to-space for the destination step if + * necessary. + */ + if (stp->hp + size >= stp->hpLim) { + addBlock(stp); + } + + for(to = stp->hp, from = (P_)src; size>0; --size) { + *to++ = *from++; + } + + dest = stp->hp; + stp->hp = to; + upd_evacuee(src,(StgClosure *)dest); + return (StgClosure *)dest; } +/* Special version of copy() for when we only want to copy the info + * pointer of an object, but reserve some padding after it. This is + * used to optimise evacuation of BLACKHOLEs. + */ + +//@cindex copyPart + +static __inline__ StgClosure * +copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) +{ + P_ dest, to, from; + + TICK_GC_WORDS_COPIED(size_to_copy); + if (stp->gen->no < evac_gen) { +#ifdef NO_EAGER_PROMOTION + failed_to_evac = rtsTrue; +#else + stp = &generations[evac_gen].steps[0]; +#endif + } + + if (stp->hp + size_to_reserve >= stp->hpLim) { + addBlock(stp); + } + + for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) { + *to++ = *from++; + } + + dest = stp->hp; + stp->hp += size_to_reserve; + upd_evacuee(src,(StgClosure *)dest); + return (StgClosure *)dest; +} + +//@node Evacuation, Scavenging, Weak Pointers +//@subsection Evacuation + /* ----------------------------------------------------------------------------- Evacuate a large object @@ -672,24 +1195,35 @@ upd_evacuee(StgClosure *p, StgClosure *dest) evacuated, or 0 otherwise. -------------------------------------------------------------------------- */ +//@cindex evacuate_large + static inline void -evacuate_large(StgPtr p) +evacuate_large(StgPtr p, rtsBool mutable) { bdescr *bd = Bdescr(p); - step *step; + step *stp; /* should point to the beginning of the block */ ASSERT(((W_)p & BLOCK_MASK) == 0); /* already evacuated? */ - if (bd->evacuated) { return; } + if (bd->evacuated) { + /* Don't forget to set the failed_to_evac flag if we didn't get + * the desired destination (see comments in evacuate()). 
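+ * The scavenging code reacts to failed_to_evac by putting the
+ * referring object on the older generation's mutable list (via
+ * recordOldToNewPtrs() or a MUT_CONS), deferring the promotion
+ * until the next GC.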
+ */ + if (bd->gen->no < evac_gen) { + failed_to_evac = rtsTrue; + TICK_GC_FAILED_PROMOTION(); + } + return; + } - step = bd->step; + stp = bd->step; /* remove from large_object list */ if (bd->back) { bd->back->link = bd->link; } else { /* first object in the list */ - step->large_objects = bd->link; + stp->large_objects = bd->link; } if (bd->link) { bd->link->back = bd->back; @@ -697,36 +1231,59 @@ evacuate_large(StgPtr p) /* link it on to the evacuated large object list of the destination step */ - step = bd->step->to; - if (step->gen->no < evac_gen) { - step = &generations[evac_gen].steps[0]; + stp = bd->step->to; + if (stp->gen->no < evac_gen) { +#ifdef NO_EAGER_PROMOTION + failed_to_evac = rtsTrue; +#else + stp = &generations[evac_gen].steps[0]; +#endif } - bd->step = step; - bd->gen = step->gen; - bd->link = step->new_large_objects; - step->new_large_objects = bd; + bd->step = stp; + bd->gen = stp->gen; + bd->link = stp->new_large_objects; + stp->new_large_objects = bd; bd->evacuated = 1; + + if (mutable) { + recordMutable((StgMutClosure *)p); + } } /* ----------------------------------------------------------------------------- - Evacuate a mutable object - - If we evacuate a mutable object to a generation that we're not - collecting, cons the object onto the older generation's mutable - list. + Adding a MUT_CONS to an older generation. + + This is necessary from time to time when we end up with an + old-to-new generation pointer in a non-mutable object. We defer + the promotion until the next GC. -------------------------------------------------------------------------- */ - -static inline void -evacuate_mutable(StgMutClosure *c) + +//@cindex mkMutCons + +static StgClosure * +mkMutCons(StgClosure *ptr, generation *gen) { - bdescr *bd; - - bd = Bdescr((P_)c); - if (bd->gen->no > N) { - c->mut_link = bd->gen->mut_list; - bd->gen->mut_list = c; + StgMutVar *q; + step *stp; + + stp = &gen->steps[0]; + + /* chain a new block onto the to-space for the destination step if + * necessary. + */ + if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) { + addBlock(stp); } + + q = (StgMutVar *)stp->hp; + stp->hp += sizeofW(StgMutVar); + + SET_HDR(q,&stg_MUT_CONS_info,CCS_GC); + q->var = ptr; + recordOldToNewPtrs((StgMutClosure *)q); + + return (StgClosure *)q; } /* ----------------------------------------------------------------------------- @@ -749,82 +1306,148 @@ evacuate_mutable(StgMutClosure *c) it now resides in. if M >= evac_gen do nothing - if M < evac_gen replace object with an indirection and evacuate - it to evac_gen. + if M < evac_gen set failed_to_evac flag to indicate that we + didn't manage to evacuate this object into evac_gen. -------------------------------------------------------------------------- */ +//@cindex evacuate - -static StgClosure *evacuate(StgClosure *q) +static StgClosure * +evacuate(StgClosure *q) { StgClosure *to; bdescr *bd = NULL; + step *stp; const StgInfoTable *info; loop: - if (!LOOKS_LIKE_STATIC(q)) { + if (HEAP_ALLOCED(q)) { bd = Bdescr((P_)q); - /* generation too old: leave it alone */ - if (bd->gen->no >= evac_gen && bd->gen->no > N) { - return q; - } + if (bd->gen->no > N) { + /* Can't evacuate this object, because it's in a generation + * older than the ones we're collecting. Let's hope that it's + * in evac_gen or older, or we will have to make an IND_OLDGEN object. 
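+ * This is the "M < evac_gen" case from the contract described
+ * above: the object stays where it is, and failed_to_evac tells
+ * the caller to track the old-to-new pointer instead.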
+ */ + if (bd->gen->no < evac_gen) { + /* nope */ + failed_to_evac = rtsTrue; + TICK_GC_FAILED_PROMOTION(); + } + return q; + } + stp = bd->step->to; } +#ifdef DEBUG + else stp = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */ +#endif /* make sure the info pointer is into text space */ ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q)) || IS_HUGS_CONSTR_INFO(GET_INFO(q)))); - info = get_itbl(q); + /* + if (info->type==RBH) { + info = REVERT_INFOPTR(info); + IF_DEBUG(gc, + belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)", + q, info_type(q), info, info_type_by_ip(info))); + } + */ + switch (info -> type) { - case BCO: - to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),bd); - upd_evacuee(q,to); - return to; - case MUT_VAR: + ASSERT(q->header.info != &stg_MUT_CONS_info); case MVAR: - to = copy(q,sizeW_fromITBL(info),bd); - upd_evacuee(q,to); - evacuate_mutable((StgMutClosure *)to); + to = copy(q,sizeW_fromITBL(info),stp); + recordMutable((StgMutClosure *)to); return to; + case CONSTR_0_1: + { + StgWord w = (StgWord)q->payload[0]; + if (q->header.info == Czh_con_info && + /* unsigned, so always true: (StgChar)w >= MIN_CHARLIKE && */ + (StgChar)w <= MAX_CHARLIKE) { + return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w); + } + if (q->header.info == Izh_con_info && + (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) { + return (StgClosure *)INTLIKE_CLOSURE((StgInt)w); + } + /* else, fall through ... */ + } + + case FUN_1_0: + case FUN_0_1: + case CONSTR_1_0: + return copy(q,sizeofW(StgHeader)+1,stp); + + case THUNK_1_0: /* here because of MIN_UPD_SIZE */ + case THUNK_0_1: + case THUNK_1_1: + case THUNK_0_2: + case THUNK_2_0: +#ifdef NO_PROMOTE_THUNKS + if (bd->gen->no == 0 && + bd->step->no != 0 && + bd->step->no == bd->gen->n_steps-1) { + stp = bd->step; + } +#endif + return copy(q,sizeofW(StgHeader)+2,stp); + + case FUN_1_1: + case FUN_0_2: + case FUN_2_0: + case CONSTR_1_1: + case CONSTR_0_2: + case CONSTR_2_0: + return copy(q,sizeofW(StgHeader)+2,stp); + case FUN: case THUNK: case CONSTR: case IND_PERM: case IND_OLDGEN_PERM: - case CAF_UNENTERED: - case CAF_ENTERED: case WEAK: case FOREIGN: - to = copy(q,sizeW_fromITBL(info),bd); - upd_evacuee(q,to); - return to; + case STABLE_NAME: + case BCO: + return copy(q,sizeW_fromITBL(info),stp); case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: case BLACKHOLE: - to = copy(q,BLACKHOLE_sizeW(),bd); - upd_evacuee(q,to); + return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp); + + case BLACKHOLE_BQ: + to = copy(q,BLACKHOLE_sizeW(),stp); + recordMutable((StgMutClosure *)to); return to; case THUNK_SELECTOR: { const StgInfoTable* selectee_info; - StgClosure* selectee = stgCast(StgSelector*,q)->selectee; - rtsBool evaced = rtsFalse; + StgClosure* selectee = ((StgSelector*)q)->selectee; selector_loop: selectee_info = get_itbl(selectee); switch (selectee_info->type) { case CONSTR: + case CONSTR_1_0: + case CONSTR_0_1: + case CONSTR_2_0: + case CONSTR_1_1: + case CONSTR_0_2: case CONSTR_STATIC: { - StgNat32 offset = info->layout.selector_offset; + StgWord32 offset = info->layout.selector_offset; /* check that the size is in range */ ASSERT(offset < - (StgNat32)(selectee_info->layout.payload.ptrs + + (StgWord32)(selectee_info->layout.payload.ptrs + selectee_info->layout.payload.nptrs)); /* perform the selection! */ @@ -834,8 +1457,15 @@ loop: * with the evacuation, just update the source address with * a pointer to the (evacuated) constructor field. 
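+ * As elsewhere, if the constructor landed in a generation younger
+ * than evac_gen we must still raise failed_to_evac before handing
+ * it back unchanged.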
*/ - if (IS_USER_PTR(q) && evaced) { - return q; + if (HEAP_ALLOCED(q)) { + bdescr *bd = Bdescr((P_)q); + if (bd->evacuated) { + if (bd->gen->no < evac_gen) { + failed_to_evac = rtsTrue; + TICK_GC_FAILED_PROMOTION(); + } + return q; + } } /* otherwise, carry on and evacuate this constructor field, @@ -849,35 +1479,89 @@ loop: case IND_PERM: case IND_OLDGEN: case IND_OLDGEN_PERM: - selectee = stgCast(StgInd *,selectee)->indirectee; - goto selector_loop; - - case CAF_ENTERED: - selectee = stgCast(StgCAF *,selectee)->value; + selectee = ((StgInd *)selectee)->indirectee; goto selector_loop; case EVACUATED: - evaced = rtsTrue; - selectee = stgCast(StgEvacuated*,selectee)->evacuee; + selectee = ((StgEvacuated *)selectee)->evacuee; goto selector_loop; + case THUNK_SELECTOR: +# if 0 + /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or + something) to go into an infinite loop when the nightly + stage2 compiles PrelTup.lhs. */ + + /* we can't recurse indefinitely in evacuate(), so set a + * limit on the number of times we can go around this + * loop. + */ + if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) { + bdescr *bd; + bd = Bdescr((P_)selectee); + if (!bd->evacuated) { + thunk_selector_depth++; + selectee = evacuate(selectee); + thunk_selector_depth--; + goto selector_loop; + } + } + /* otherwise, fall through... */ +# endif + + case AP_UPD: case THUNK: + case THUNK_1_0: + case THUNK_0_1: + case THUNK_2_0: + case THUNK_1_1: + case THUNK_0_2: case THUNK_STATIC: - case THUNK_SELECTOR: - /* aargh - do recursively???? */ - case CAF_UNENTERED: case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: case BLACKHOLE: + case BLACKHOLE_BQ: /* not evaluated yet */ break; +#if defined(PAR) + /* a copy of the top-level cases below */ + case RBH: // cf. BLACKHOLE_BQ + { + //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str); + to = copy(q,BLACKHOLE_sizeW(),stp); + //ToDo: derive size etc from reverted IP + //to = copy(q,size,stp); + // recordMutable((StgMutClosure *)to); + return to; + } + + case BLOCKED_FETCH: + ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE); + to = copy(q,sizeofW(StgBlockedFetch),stp); + return to; + +# ifdef DIST + case REMOTE_REF: +# endif + case FETCH_ME: + ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); + to = copy(q,sizeofW(StgFetchMe),stp); + return to; + + case FETCH_ME_BQ: + ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); + to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp); + return to; +#endif + default: - barf("evacuate: THUNK_SELECTOR: strange selectee"); + barf("evacuate: THUNK_SELECTOR: strange selectee %d", + (int)(selectee_info->type)); } } - to = copy(q,THUNK_SELECTOR_sizeW(),bd); - upd_evacuee(q,to); - return to; + return copy(q,THUNK_SELECTOR_sizeW(),stp); case IND: case IND_OLDGEN: @@ -885,30 +1569,41 @@ loop: q = ((StgInd*)q)->indirectee; goto loop; - /* ToDo: optimise STATIC_LINK for known cases. 
- - FUN_STATIC : payload[0] - - THUNK_STATIC : payload[1] - - IND_STATIC : payload[1] - */ case THUNK_STATIC: + if (info->srt_len > 0 && major_gc && + THUNK_STATIC_LINK((StgClosure *)q) == NULL) { + THUNK_STATIC_LINK((StgClosure *)q) = static_objects; + static_objects = (StgClosure *)q; + } + return q; + case FUN_STATIC: - if (info->srt_len == 0) { /* small optimisation */ - return q; + if (info->srt_len > 0 && major_gc && + FUN_STATIC_LINK((StgClosure *)q) == NULL) { + FUN_STATIC_LINK((StgClosure *)q) = static_objects; + static_objects = (StgClosure *)q; } - /* fall through */ - case CONSTR_STATIC: + return q; + case IND_STATIC: - /* don't want to evacuate these, but we do want to follow pointers - * from SRTs - see scavenge_static. + /* If q->saved_info != NULL, then it's a revertible CAF - it'll be + * on the CAF list, so don't do anything with it here (we'll + * scavenge it later). */ + if (major_gc + && ((StgIndStatic *)q)->saved_info == NULL + && IND_STATIC_LINK((StgClosure *)q) == NULL) { + IND_STATIC_LINK((StgClosure *)q) = static_objects; + static_objects = (StgClosure *)q; + } + return q; - /* put the object on the static list, if necessary. - */ + case CONSTR_STATIC: if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) { STATIC_LINK(info,(StgClosure *)q) = static_objects; static_objects = (StgClosure *)q; } - /* fall through */ + return q; case CONSTR_INTLIKE: case CONSTR_CHARLIKE: @@ -929,124 +1624,158 @@ loop: case CATCH_FRAME: case SEQ_FRAME: /* shouldn't see these */ - barf("evacuate: stack frame\n"); + barf("evacuate: stack frame at %p\n", q); case AP_UPD: case PAP: - /* these are special - the payload is a copy of a chunk of stack, - tagging and all. */ - to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),bd); - upd_evacuee(q,to); - return to; + /* PAPs and AP_UPDs are special - the payload is a copy of a chunk + * of stack, tagging and all. + * + * They can be larger than a block in size. Both are only + * allocated via allocate(), so they should be chained on to the + * large_object list. + */ + { + nat size = pap_sizeW((StgPAP*)q); + if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { + evacuate_large((P_)q, rtsFalse); + return q; + } else { + return copy(q,size,stp); + } + } case EVACUATED: /* Already evacuated, just return the forwarding address. * HOWEVER: if the requested destination generation (evac_gen) is * older than the actual generation (because the object was * already evacuated to a younger generation) then we have to - * re-evacuate it, replacing the old evacuated copy with an - * indirection to the new copy. + * set the failed_to_evac flag to indicate that we couldn't + * manage to promote the object to the desired generation. */ if (evac_gen > 0) { /* optimisation */ StgClosure *p = ((StgEvacuated*)q)->evacuee; - if (Bdescr((P_)p)->gen->no >= evac_gen) { - return p; - } else { - nat padding_wds = sizeW_fromITBL(get_itbl(p)) - sizeofW(StgInd); - StgClosure *new_p = evacuate(p); /* naughty recursive call */ - IF_DEBUG(gc, fprintf(stderr,"ouch! 
double evacuation\n")); - ((StgEvacuated*)q)->evacuee = new_p; - p->header.info = &IND_info; - memset((P_)p + sizeofW(StgInd), 0, padding_wds * sizeof(W_)); - return new_p; + if (Bdescr((P_)p)->gen->no < evac_gen) { + IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p)); + failed_to_evac = rtsTrue; + TICK_GC_FAILED_PROMOTION(); } } return ((StgEvacuated*)q)->evacuee; - case MUT_ARR_WORDS: case ARR_WORDS: - case ARR_PTRS: { - nat size = arr_words_sizeW(stgCast(StgArrWords*,q)); + nat size = arr_words_sizeW((StgArrWords *)q); if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { - evacuate_large((P_)q); + evacuate_large((P_)q, rtsFalse); return q; } else { /* just copy the block */ - to = copy(q,size,bd); - upd_evacuee(q,to); - return to; + return copy(q,size,stp); } } case MUT_ARR_PTRS: case MUT_ARR_PTRS_FROZEN: { - nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q)); + nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q); if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { - evacuate_large((P_)q); + evacuate_large((P_)q, info->type == MUT_ARR_PTRS); to = q; } else { /* just copy the block */ - to = copy(q,size,bd); - upd_evacuee(q,to); - } - if (info->type == MUT_ARR_PTRS) { - evacuate_mutable((StgMutClosure *)to); + to = copy(q,size,stp); + if (info->type == MUT_ARR_PTRS) { + recordMutable((StgMutClosure *)to); + } } return to; } case TSO: { - StgTSO *tso = stgCast(StgTSO *,q); + StgTSO *tso = (StgTSO *)q; nat size = tso_sizeW(tso); int diff; + /* Deal with redirected TSOs (a TSO that's had its stack enlarged). + */ + if (tso->what_next == ThreadRelocated) { + q = (StgClosure *)tso->link; + goto loop; + } + /* Large TSOs don't get moved, so no relocation is required. */ if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) { - evacuate_large((P_)q); - tso->mut_link = NULL; /* see below */ + evacuate_large((P_)q, rtsTrue); return q; /* To evacuate a small TSO, we need to relocate the update frame * list it contains. */ } else { - StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),bd); + StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp); diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */ /* relocate the stack pointers... */ new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff); new_tso->sp = (StgPtr)new_tso->sp + diff; - new_tso->splim = (StgPtr)new_tso->splim + diff; relocate_TSO(tso, new_tso); - upd_evacuee(q,(StgClosure *)new_tso); - - /* don't evac_mutable - these things are marked mutable as - * required. We *do* need to zero the mut_link field, though: - * this TSO might have been on the mutable list for this - * generation, but we're collecting this generation anyway so - * we didn't follow the mutable list. - */ - new_tso->mut_link = NULL; + recordMutable((StgMutClosure *)new_tso); return (StgClosure *)new_tso; } } +#if defined(PAR) + case RBH: // cf. 
BLACKHOLE_BQ + { + //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str); + to = copy(q,BLACKHOLE_sizeW(),stp); + //ToDo: derive size etc from reverted IP + //to = copy(q,size,stp); + recordMutable((StgMutClosure *)to); + IF_DEBUG(gc, + belch("@@ evacuate: RBH %p (%s) to %p (%s)", + q, info_type(q), to, info_type(to))); + return to; + } + case BLOCKED_FETCH: + ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE); + to = copy(q,sizeofW(StgBlockedFetch),stp); + IF_DEBUG(gc, + belch("@@ evacuate: %p (%s) to %p (%s)", + q, info_type(q), to, info_type(to))); + return to; + +# ifdef DIST + case REMOTE_REF: +# endif case FETCH_ME: - fprintf(stderr,"evacuate: unimplemented/strange closure type\n"); - return q; + ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); + to = copy(q,sizeofW(StgFetchMe),stp); + IF_DEBUG(gc, + belch("@@ evacuate: %p (%s) to %p (%s)", + q, info_type(q), to, info_type(to))); + return to; + + case FETCH_ME_BQ: + ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); + to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp); + IF_DEBUG(gc, + belch("@@ evacuate: %p (%s) to %p (%s)", + q, info_type(q), to, info_type(to))); + return to; +#endif default: - barf("evacuate: strange closure type"); + barf("evacuate: strange closure type %d", (int)(info->type)); } barf("evacuate"); @@ -1056,6 +1785,7 @@ loop: relocate_TSO is called just after a TSO has been copied from src to dest. It adjusts the update frame list for the new location. -------------------------------------------------------------------------- */ +//@cindex relocate_TSO StgTSO * relocate_TSO(StgTSO *src, StgTSO *dest) @@ -1096,7 +1826,7 @@ relocate_TSO(StgTSO *src, StgTSO *dest) break; default: - barf("relocate_TSO"); + barf("relocate_TSO %d", (int)(get_itbl(su)->type)); } break; } @@ -1104,6 +1834,11 @@ relocate_TSO(StgTSO *src, StgTSO *dest) return dest; } +//@node Scavenging, Reverting CAFs, Evacuation +//@subsection Scavenging + +//@cindex scavenge_srt + static inline void scavenge_srt(const StgInfoTable *info) { @@ -1113,52 +1848,109 @@ scavenge_srt(const StgInfoTable *info) * srt field in the info table. That's ok, because we'll * never dereference it. */ - srt = stgCast(StgClosure **,info->srt); + srt = (StgClosure **)(info->srt); srt_end = srt + info->srt_len; for (; srt < srt_end; srt++) { - evacuate(*srt); + /* Special-case to handle references to closures hiding out in DLLs, since + double indirections required to get at those. The code generator knows + which is which when generating the SRT, so it stores the (indirect) + reference to the DLL closure in the table by first adding one to it. + We check for this here, and undo the addition before evacuating it. + + If the SRT entry hasn't got bit 0 set, the SRT entry points to a + closure that's fixed at link-time, and no extra magic is required. + */ +#ifdef ENABLE_WIN32_DLL_SUPPORT + if ( (unsigned long)(*srt) & 0x1 ) { + evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1))); + } else { + evacuate(*srt); + } +#else + evacuate(*srt); +#endif } } +/* ----------------------------------------------------------------------------- + Scavenge a TSO. 
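+
+   A TSO's mutable fields -- the link field, the block_info closure
+   when the thread is blocked, and the blocked_exceptions queue -- are
+   evacuated here; the thread's stack is then walked with
+   scavenge_stack().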
+ -------------------------------------------------------------------------- */ + static void -scavenge(step *step) +scavengeTSO (StgTSO *tso) { - StgPtr p; + /* chase the link field for any TSOs on the same queue */ + (StgClosure *)tso->link = evacuate((StgClosure *)tso->link); + if ( tso->why_blocked == BlockedOnMVar + || tso->why_blocked == BlockedOnBlackHole + || tso->why_blocked == BlockedOnException +#if defined(PAR) + || tso->why_blocked == BlockedOnGA + || tso->why_blocked == BlockedOnGA_NoSend +#endif + ) { + tso->block_info.closure = evacuate(tso->block_info.closure); + } + if ( tso->blocked_exceptions != NULL ) { + tso->blocked_exceptions = + (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions); + } + /* scavenge this thread's stack */ + scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); +} + +/* ----------------------------------------------------------------------------- + Scavenge a given step until there are no more objects in this step + to scavenge. + + evac_gen is set by the caller to be either zero (for a step in a + generation < N) or G where G is the generation of the step being + scavenged. + + We sometimes temporarily change evac_gen back to zero if we're + scavenging a mutable object where early promotion isn't such a good + idea. + -------------------------------------------------------------------------- */ +//@cindex scavenge + +static void +scavenge(step *stp) +{ + StgPtr p, q; const StgInfoTable *info; bdescr *bd; + nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */ - p = step->scan; - bd = step->scan_bd; + p = stp->scan; + bd = stp->scan_bd; + + failed_to_evac = rtsFalse; /* scavenge phase - standard breadth-first scavenging of the * evacuated objects */ - while (bd != step->hp_bd || p < step->hp) { + while (bd != stp->hp_bd || p < stp->hp) { /* If we're at the end of this block, move on to the next block */ - if (bd != step->hp_bd && p == bd->free) { + if (bd != stp->hp_bd && p == bd->free) { bd = bd->link; p = bd->start; continue; } + q = p; /* save ptr to object */ + ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p)) || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p)))); info = get_itbl((StgClosure *)p); - switch (info -> type) { + /* + if (info->type==RBH) + info = REVERT_INFOPTR(info); + */ - case BCO: - { - StgBCO* bco = stgCast(StgBCO*,p); - nat i; - for (i = 0; i < bco->n_ptrs; i++) { - bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i)); - } - p += bco_sizeW(bco); - continue; - } + switch (info -> type) { case MVAR: /* treat MVars specially, because we don't want to evacuate the @@ -1166,13 +1958,63 @@ scavenge(step *step) */ { StgMVar *mvar = ((StgMVar *)p); + evac_gen = 0; (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); p += sizeofW(StgMVar); - continue; + evac_gen = saved_evac_gen; + break; } + case THUNK_2_0: + case FUN_2_0: + scavenge_srt(info); + case CONSTR_2_0: + ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]); + ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; + + case THUNK_1_0: + scavenge_srt(info); + ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */ + break; + + case FUN_1_0: + scavenge_srt(info); + case CONSTR_1_0: + ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + p += 
sizeofW(StgHeader) + 1; + break; + + case THUNK_0_1: + scavenge_srt(info); + p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */ + break; + + case FUN_0_1: + scavenge_srt(info); + case CONSTR_0_1: + p += sizeofW(StgHeader) + 1; + break; + + case THUNK_0_2: + case FUN_0_2: + scavenge_srt(info); + case CONSTR_0_2: + p += sizeofW(StgHeader) + 2; + break; + + case THUNK_1_1: + case FUN_1_1: + scavenge_srt(info); + case CONSTR_1_1: + ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; + case FUN: case THUNK: scavenge_srt(info); @@ -1181,11 +2023,8 @@ scavenge(step *step) case CONSTR: case WEAK: case FOREIGN: - case MUT_VAR: - case IND_PERM: - case IND_OLDGEN_PERM: - case CAF_UNENTERED: - case CAF_ENTERED: + case STABLE_NAME: + case BCO: { StgPtr end; @@ -1194,17 +2033,52 @@ scavenge(step *step) (StgClosure *)*p = evacuate((StgClosure *)*p); } p += info->layout.payload.nptrs; - continue; + break; + } + + case IND_PERM: + if (stp->gen->no != 0) { + SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); + } + /* fall through */ + case IND_OLDGEN_PERM: + ((StgIndOldGen *)p)->indirectee = + evacuate(((StgIndOldGen *)p)->indirectee); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordOldToNewPtrs((StgMutClosure *)p); } + p += sizeofW(StgIndOldGen); + break; + + case MUT_VAR: + /* ignore MUT_CONSs */ + if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) { + evac_gen = 0; + ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); + evac_gen = saved_evac_gen; + } + p += sizeofW(StgMutVar); + break; case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: case BLACKHOLE: + p += BLACKHOLE_sizeW(); + break; + + case BLACKHOLE_BQ: { - StgBlackHole *bh = (StgBlackHole *)p; + StgBlockingQueue *bh = (StgBlockingQueue *)p; (StgClosure *)bh->blocking_queue = evacuate((StgClosure *)bh->blocking_queue); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)bh); + } p += BLACKHOLE_sizeW(); - continue; + break; } case THUNK_SELECTOR: @@ -1212,7 +2086,7 @@ scavenge(step *step) StgSelector *s = (StgSelector *)p; s->selectee = evacuate(s->selectee); p += THUNK_SELECTOR_sizeW(); - continue; + break; } case IND: @@ -1248,112 +2122,420 @@ scavenge(step *step) * evacuate the function pointer too... */ { - StgPAP* pap = stgCast(StgPAP*,p); + StgPAP* pap = (StgPAP *)p; pap->fun = evacuate(pap->fun); scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); p += pap_sizeW(pap); - continue; + break; } case ARR_WORDS: - case MUT_ARR_WORDS: /* nothing to follow */ - p += arr_words_sizeW(stgCast(StgArrWords*,p)); - continue; + p += arr_words_sizeW((StgArrWords *)p); + break; + + case MUT_ARR_PTRS: + /* follow everything */ + { + StgPtr next; + + evac_gen = 0; /* repeatedly mutable */ + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + (StgClosure *)*p = evacuate((StgClosure *)*p); + } + evac_gen = saved_evac_gen; + break; + } + + case MUT_ARR_PTRS_FROZEN: + /* follow everything */ + { + StgPtr start = p, next; + + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + (StgClosure *)*p = evacuate((StgClosure *)*p); + } + if (failed_to_evac) { + /* we can do this easier... 
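The loop driving all of these cases is a classic Cheney scan: objects already copied to to-space are walked left to right, and everything they reference is evacuated in turn, so the region between the scan pointer and the allocation pointer acts as a work queue. A toy, self-contained model of that discipline, under the assumption of a fixed-size arena and two-field objects; none of these names are the RTS's.

    #include <stddef.h>
    #include <stdio.h>

    #define HEAP 256

    typedef struct Obj { struct Obj *child[2]; struct Obj *fwd; int v; } Obj;

    static Obj to_space[HEAP];
    static size_t hp;                  /* allocation pointer (next free slot) */

    static Obj *evac(Obj *o)           /* copy once, then forward */
    {
        if (o == NULL) return NULL;
        if (o->fwd)    return o->fwd;  /* plays the role of EVACUATED */
        to_space[hp] = *o;
        o->fwd = &to_space[hp];
        return &to_space[hp++];
    }

    static void scan(void)             /* breadth-first scavenge to the frontier */
    {
        for (size_t s = 0; s < hp; s++) {
            to_space[s].child[0] = evac(to_space[s].child[0]);
            to_space[s].child[1] = evac(to_space[s].child[1]);
        }
    }

    int main(void)
    {
        Obj a = { {NULL, NULL}, NULL, 1 };
        Obj b = { {&a, NULL},   NULL, 2 };
        Obj *root = evac(&b);
        scan();
        printf("%d %d\n", root->v, root->child[0]->v);   /* 2 1 */
        return 0;
    }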
*/ + recordMutable((StgMutClosure *)start); + failed_to_evac = rtsFalse; + } + break; + } + + case TSO: + { + StgTSO *tso = (StgTSO *)p; + evac_gen = 0; + scavengeTSO(tso); + evac_gen = saved_evac_gen; + p += tso_sizeW(tso); + break; + } + +#if defined(PAR) + case RBH: // cf. BLACKHOLE_BQ + { + // nat size, ptrs, nonptrs, vhs; + // char str[80]; + // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str); + StgRBH *rbh = (StgRBH *)p; + (StgClosure *)rbh->blocking_queue = + evacuate((StgClosure *)rbh->blocking_queue); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)rbh); + } + IF_DEBUG(gc, + belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", + p, info_type(p), (StgClosure *)rbh->blocking_queue)); + // ToDo: use size of reverted closure here! + p += BLACKHOLE_sizeW(); + break; + } + + case BLOCKED_FETCH: + { + StgBlockedFetch *bf = (StgBlockedFetch *)p; + /* follow the pointer to the node which is being demanded */ + (StgClosure *)bf->node = + evacuate((StgClosure *)bf->node); + /* follow the link to the rest of the blocking queue */ + (StgClosure *)bf->link = + evacuate((StgClosure *)bf->link); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)bf); + } + IF_DEBUG(gc, + belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", + bf, info_type((StgClosure *)bf), + bf->node, info_type(bf->node))); + p += sizeofW(StgBlockedFetch); + break; + } + +#ifdef DIST + case REMOTE_REF: +#endif + case FETCH_ME: + p += sizeofW(StgFetchMe); + break; // nothing to do in this case + + case FETCH_ME_BQ: // cf. BLACKHOLE_BQ + { + StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; + (StgClosure *)fmbq->blocking_queue = + evacuate((StgClosure *)fmbq->blocking_queue); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)fmbq); + } + IF_DEBUG(gc, + belch("@@ scavenge: %p (%s) exciting, isn't it", + p, info_type((StgClosure *)p))); + p += sizeofW(StgFetchMeBlockingQueue); + break; + } +#endif + + case EVACUATED: + barf("scavenge: unimplemented/strange closure type %d @ %p", + info->type, p); + + default: + barf("scavenge: unimplemented/strange closure type %d @ %p", + info->type, p); + } + + /* If we didn't manage to promote all the objects pointed to by + * the current object, then we have to designate this object as + * mutable (because it contains old-to-new generation pointers). + */ + if (failed_to_evac) { + mkMutCons((StgClosure *)q, &generations[evac_gen]); + failed_to_evac = rtsFalse; + } + } + + stp->scan_bd = bd; + stp->scan = p; +} + +/* ----------------------------------------------------------------------------- + Scavenge one object. + + This is used for objects that are temporarily marked as mutable + because they contain old-to-new generation pointers. Only certain + objects can have this property. + -------------------------------------------------------------------------- */ +//@cindex scavenge_one + +static rtsBool +scavenge_one(StgClosure *p) +{ + const StgInfoTable *info; + rtsBool no_luck; + + ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p)) + || IS_HUGS_CONSTR_INFO(GET_INFO(p)))); + + info = get_itbl(p); + + /* ngoq moHqu'! 
+ if (info->type==RBH) + info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure + */ + + switch (info -> type) { + + case FUN: + case FUN_1_0: /* hardly worth specialising these guys */ + case FUN_0_1: + case FUN_1_1: + case FUN_0_2: + case FUN_2_0: + case THUNK: + case THUNK_1_0: + case THUNK_0_1: + case THUNK_1_1: + case THUNK_0_2: + case THUNK_2_0: + case CONSTR: + case CONSTR_1_0: + case CONSTR_0_1: + case CONSTR_1_1: + case CONSTR_0_2: + case CONSTR_2_0: + case WEAK: + case FOREIGN: + case IND_PERM: + case IND_OLDGEN_PERM: + { + StgPtr q, end; + + end = (P_)p->payload + info->layout.payload.ptrs; + for (q = (P_)p->payload; q < end; q++) { + (StgClosure *)*q = evacuate((StgClosure *)*q); + } + break; + } + + case CAF_BLACKHOLE: + case SE_CAF_BLACKHOLE: + case SE_BLACKHOLE: + case BLACKHOLE: + break; + + case THUNK_SELECTOR: + { + StgSelector *s = (StgSelector *)p; + s->selectee = evacuate(s->selectee); + break; + } + + case AP_UPD: /* same as PAPs */ + case PAP: + /* Treat a PAP just like a section of stack, not forgetting to + * evacuate the function pointer too... + */ + { + StgPAP* pap = (StgPAP *)p; + + pap->fun = evacuate(pap->fun); + scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); + break; + } + + case IND_OLDGEN: + /* This might happen if for instance a MUT_CONS was pointing to a + * THUNK which has since been updated. The IND_OLDGEN will + * be on the mutable list anyway, so we don't need to do anything + * here. + */ + break; + + default: + barf("scavenge_one: strange object %d", (int)(info->type)); + } + + no_luck = failed_to_evac; + failed_to_evac = rtsFalse; + return (no_luck); +} + + +/* ----------------------------------------------------------------------------- + Scavenging mutable lists. + + We treat the mutable list of each generation > N (i.e. all the + generations older than the one being collected) as roots. We also + remove non-mutable objects from the mutable list at this point. + -------------------------------------------------------------------------- */ +//@cindex scavenge_mut_once_list - case ARR_PTRS: - /* follow everything */ - { - StgPtr next; +static void +scavenge_mut_once_list(generation *gen) +{ + const StgInfoTable *info; + StgMutClosure *p, *next, *new_list; - next = p + arr_ptrs_sizeW(stgCast(StgArrPtrs*,p)); - for (p = (P_)((StgArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); - } - continue; - } + p = gen->mut_once_list; + new_list = END_MUT_LIST; + next = p->mut_link; - case MUT_ARR_PTRS: - case MUT_ARR_PTRS_FROZEN: - /* follow everything */ - { - StgPtr next; + evac_gen = gen->no; + failed_to_evac = rtsFalse; - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); - } - continue; - } + for (; p != END_MUT_LIST; p = next, next = p->mut_link) { - case TSO: + /* make sure the info pointer is into text space */ + ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p)) + || IS_HUGS_CONSTR_INFO(GET_INFO(p)))); + + info = get_itbl(p); + /* + if (info->type==RBH) + info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure + */ + switch(info->type) { + + case IND_OLDGEN: + case IND_OLDGEN_PERM: + case IND_STATIC: + /* Try to pull the indirectee into this generation, so we can + * remove the indirection from the mutable list. 
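The no_luck/failed_to_evac dance in scavenge_one above is the heart of remembered-set maintenance: scavenge a single object and report whether every field could be promoted as far as evac_gen demanded. A toy, self-contained model of just that protocol; evac_model deliberately "fails" by leaving objects where they are, and all names are stand-ins.

    #include <stdio.h>

    typedef struct Obj { int gen; } Obj;

    static int evac_gen;         /* generation the caller wants targets in */
    static int failed_to_evac;   /* sticky flag, as in the RTS */

    static Obj *evac_model(Obj *o)
    {
        /* model: the object cannot be moved (think: already EVACUATED),
         * so promotion fails if it sits in too young a generation */
        if (o->gen < evac_gen)
            failed_to_evac = 1;
        return o;
    }

    /* toy scavenge_one: scavenge the fields of one object and report
     * whether any field resisted promotion, so the caller can keep the
     * object on a mutable ("remembered") list */
    static int scavenge_one_model(Obj **fields, int n)
    {
        int i, no_luck;
        failed_to_evac = 0;
        for (i = 0; i < n; i++)
            fields[i] = evac_model(fields[i]);
        no_luck = failed_to_evac;
        failed_to_evac = 0;
        return no_luck;
    }

    int main(void)
    {
        Obj young = {0}, old = {2};
        Obj *fields[2] = { &old, &young };
        evac_gen = 2;
        printf("%d\n", scavenge_one_model(fields, 2)); /* 1: young resisted */
        return 0;
    }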
+ */ + ((StgIndOldGen *)p)->indirectee = + evacuate(((StgIndOldGen *)p)->indirectee); + +#ifdef DEBUG + if (RtsFlags.DebugFlags.gc) + /* Debugging code to print out the size of the thing we just + * promoted + */ { - StgTSO *tso; - - tso = (StgTSO *)p; - /* chase the link field for any TSOs on the same queue */ - (StgClosure *)tso->link = evacuate((StgClosure *)tso->link); - /* scavenge this thread's stack */ - scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); - p += tso_sizeW(tso); - continue; + StgPtr start = gen->steps[0].scan; + bdescr *start_bd = gen->steps[0].scan_bd; + nat size = 0; + scavenge(&gen->steps[0]); + if (start_bd != gen->steps[0].scan_bd) { + size += (P_)BLOCK_ROUND_UP(start) - start; + start_bd = start_bd->link; + while (start_bd != gen->steps[0].scan_bd) { + size += BLOCK_SIZE_W; + start_bd = start_bd->link; + } + size += gen->steps[0].scan - + (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan); + } else { + size = gen->steps[0].scan - start; + } + fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_)); } +#endif - case BLOCKED_FETCH: - case FETCH_ME: - case EVACUATED: - barf("scavenge: unimplemented/strange closure type\n"); - + /* failed_to_evac might happen if we've got more than two + * generations, we're collecting only generation 0, the + * indirection resides in generation 2 and the indirectee is + * in generation 1. + */ + if (failed_to_evac) { + failed_to_evac = rtsFalse; + p->mut_link = new_list; + new_list = p; + } else { + /* the mut_link field of an IND_STATIC is overloaded as the + * static link field too (it just so happens that we don't need + * both at the same time), so we need to NULL it out when + * removing this object from the mutable list because the static + * link fields are all assumed to be NULL before doing a major + * collection. + */ + p->mut_link = NULL; + } + continue; + + case MUT_VAR: + /* MUT_CONS is a kind of MUT_VAR, except it that we try to remove + * it from the mutable list if possible by promoting whatever it + * points to. + */ + ASSERT(p->header.info == &stg_MUT_CONS_info); + if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) { + /* didn't manage to promote everything, so put the + * MUT_CONS back on the list. + */ + p->mut_link = new_list; + new_list = p; + } + continue; + default: - barf("scavenge"); + /* shouldn't have anything else on the mutables list */ + barf("scavenge_mut_once_list: strange object? %d", (int)(info->type)); } } - step->scan_bd = bd; - step->scan = p; -} - -/* ----------------------------------------------------------------------------- - Scavenging mutable lists. + gen->mut_once_list = new_list; +} - We treat the mutable list of each generation > N (i.e. all the - generations older than the one being collected) as roots. We also - remove non-mutable objects from the mutable list at this point. 
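Rebuilding new_list while walking, as scavenge_mut_once_list does, is simply an in-place filter over a singly-linked list: entries whose target was pulled up into this generation drop off for good, the rest stay for next time. A minimal stand-alone version of the idiom, where still_points_young stands in for the failed_to_evac outcome:

    #include <stdio.h>
    #include <stddef.h>

    typedef struct Mut { struct Mut *mut_link; int still_points_young; } Mut;

    static Mut *sweep_mut_once(Mut *list)
    {
        Mut *p, *next, *new_list = NULL;
        for (p = list; p != NULL; p = next) {
            next = p->mut_link;
            if (p->still_points_young) {      /* like failed_to_evac */
                p->mut_link = new_list;       /* keep: push onto rebuilt list */
                new_list = p;
            } else {
                p->mut_link = NULL;           /* off the list for good */
            }
        }
        return new_list;
    }

    int main(void)
    {
        Mut a = {NULL, 0}, b = {&a, 1}, c = {&b, 0};
        Mut *kept = sweep_mut_once(&c);
        printf("%d\n", kept == &b && kept->mut_link == NULL); /* 1 */
        return 0;
    }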
- -------------------------------------------------------------------------- */ +//@cindex scavenge_mutable_list -static StgMutClosure * -scavenge_mutable_list(StgMutClosure *p, nat gen) +static void +scavenge_mutable_list(generation *gen) { - StgInfoTable *info; - StgMutClosure *start; - StgMutClosure **prev; + const StgInfoTable *info; + StgMutClosure *p, *next; - evac_gen = 0; + p = gen->saved_mut_list; + next = p->mut_link; - prev = &start; - start = p; + evac_gen = 0; + failed_to_evac = rtsFalse; - for (; p != END_MUT_LIST; p = *prev) { + for (; p != END_MUT_LIST; p = next, next = p->mut_link) { /* make sure the info pointer is into text space */ ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p)) || IS_HUGS_CONSTR_INFO(GET_INFO(p)))); info = get_itbl(p); + /* + if (info->type==RBH) + info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure + */ switch(info->type) { case MUT_ARR_PTRS_FROZEN: /* remove this guy from the mutable list, but follow the ptrs - * anyway. + * anyway (and make sure they get promoted to this gen). */ - *prev = p->mut_link; - goto do_array; + { + StgPtr end, q; + + end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + evac_gen = gen->no; + for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { + (StgClosure *)*q = evacuate((StgClosure *)*q); + } + evac_gen = 0; + + if (failed_to_evac) { + failed_to_evac = rtsFalse; + p->mut_link = gen->mut_list; + gen->mut_list = p; + } + continue; + } case MUT_ARR_PTRS: /* follow everything */ - prev = &p->mut_link; - do_array: + p->mut_link = gen->mut_list; + gen->mut_list = p; { StgPtr end, q; @@ -1365,56 +2547,141 @@ scavenge_mutable_list(StgMutClosure *p, nat gen) } case MUT_VAR: + /* MUT_CONS is a kind of MUT_VAR, except that we try to remove + * it from the mutable list if possible by promoting whatever it + * points to. + */ + ASSERT(p->header.info != &stg_MUT_CONS_info); ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); - prev = &p->mut_link; + p->mut_link = gen->mut_list; + gen->mut_list = p; continue; + case MVAR: + { + StgMVar *mvar = (StgMVar *)p; + (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); + (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); + (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); + p->mut_link = gen->mut_list; + gen->mut_list = p; + continue; + } + case TSO: - /* follow ptrs and remove this from the mutable list */ { StgTSO *tso = (StgTSO *)p; - /* Don't bother scavenging if this thread is dead - */ - if (!(tso->whatNext == ThreadComplete || - tso->whatNext == ThreadKilled)) { - /* Don't need to chase the link field for any TSOs on the - * same queue. Just scavenge this thread's stack - */ - scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); - } + scavengeTSO(tso); /* Don't take this TSO off the mutable list - it might still * point to some younger objects (because we set evac_gen to 0 * above). 
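These lists do not appear by magic: whenever a mutable old-generation object is written, or ends up holding old-to-new pointers, recordMutable pushes it onto its generation's mut_list. A toy sketch of that push with stand-in types; the real recordMutable also finds the generation via the closure's block descriptor.

    #include <stdio.h>
    #include <stddef.h>

    typedef struct MutC { struct MutC *mut_link; } MutC;
    typedef struct { MutC *mut_list; } Generation;

    /* toy write barrier: remember an old-generation object so the next
     * minor GC scavenges it instead of tracing the whole old generation */
    static void record_mutable(Generation *gen, MutC *c)
    {
        c->mut_link = gen->mut_list;
        gen->mut_list = c;
    }

    int main(void)
    {
        Generation g1 = { NULL };
        MutC x, y;
        record_mutable(&g1, &x);   /* e.g. after a mutable-array write */
        record_mutable(&g1, &y);
        printf("%d\n", g1.mut_list == &y && y.mut_link == &x); /* 1 */
        return 0;
    }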
*/ - prev = &tso->mut_link; + tso->mut_link = gen->mut_list; + gen->mut_list = (StgMutClosure *)tso; continue; } + case BLACKHOLE_BQ: + { + StgBlockingQueue *bh = (StgBlockingQueue *)p; + (StgClosure *)bh->blocking_queue = + evacuate((StgClosure *)bh->blocking_queue); + p->mut_link = gen->mut_list; + gen->mut_list = p; + continue; + } + + /* Happens if a BLACKHOLE_BQ in the old generation is updated: + */ case IND_OLDGEN: case IND_OLDGEN_PERM: - case IND_STATIC: - /* Remove these from the mutable list - we can be sure that the - * objects they point to now reside in this generation because - * we set evac_gen here -> + /* Try to pull the indirectee into this generation, so we can + * remove the indirection from the mutable list. */ - evac_gen = gen; + evac_gen = gen->no; ((StgIndOldGen *)p)->indirectee = - evacuate(((StgIndOldGen *)p)->indirectee); + evacuate(((StgIndOldGen *)p)->indirectee); evac_gen = 0; - *prev = p->mut_link; - p->mut_link = NULL; /* paranoia? */ + + if (failed_to_evac) { + failed_to_evac = rtsFalse; + p->mut_link = gen->mut_once_list; + gen->mut_once_list = p; + } else { + p->mut_link = NULL; + } continue; - + +#if defined(PAR) + // HWL: check whether all of these are necessary + + case RBH: // cf. BLACKHOLE_BQ + { + // nat size, ptrs, nonptrs, vhs; + // char str[80]; + // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str); + StgRBH *rbh = (StgRBH *)p; + (StgClosure *)rbh->blocking_queue = + evacuate((StgClosure *)rbh->blocking_queue); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)rbh); + } + // ToDo: use size of reverted closure here! + p += BLACKHOLE_sizeW(); + break; + } + + case BLOCKED_FETCH: + { + StgBlockedFetch *bf = (StgBlockedFetch *)p; + /* follow the pointer to the node which is being demanded */ + (StgClosure *)bf->node = + evacuate((StgClosure *)bf->node); + /* follow the link to the rest of the blocking queue */ + (StgClosure *)bf->link = + evacuate((StgClosure *)bf->link); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)bf); + } + p += sizeofW(StgBlockedFetch); + break; + } + +#ifdef DIST + case REMOTE_REF: + barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type)); +#endif + case FETCH_ME: + p += sizeofW(StgFetchMe); + break; // nothing to do in this case + + case FETCH_ME_BQ: // cf. BLACKHOLE_BQ + { + StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; + (StgClosure *)fmbq->blocking_queue = + evacuate((StgClosure *)fmbq->blocking_queue); + if (failed_to_evac) { + failed_to_evac = rtsFalse; + recordMutable((StgMutClosure *)fmbq); + } + p += sizeofW(StgFetchMeBlockingQueue); + break; + } +#endif + default: /* shouldn't have anything else on the mutables list */ - barf("scavenge_mutable_object: non-mutable object?"); + barf("scavenge_mutable_list: strange object? 
%d", (int)(info->type)); } } - return start; } +//@cindex scavenge_static + static void scavenge_static(void) { @@ -1430,7 +2697,10 @@ scavenge_static(void) while (p != END_OF_STATIC_LIST) { info = get_itbl(p); - + /* + if (info->type==RBH) + info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure + */ /* make sure the info pointer is into text space */ ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p)) || IS_HUGS_CONSTR_INFO(GET_INFO(p)))); @@ -1448,13 +2718,25 @@ scavenge_static(void) { StgInd *ind = (StgInd *)p; ind->indirectee = evacuate(ind->indirectee); + + /* might fail to evacuate it, in which case we have to pop it + * back on the mutable list (and take it off the + * scavenged_static list because the static link and mut link + * pointers are one and the same). + */ + if (failed_to_evac) { + failed_to_evac = rtsFalse; + scavenged_static_objects = STATIC_LINK(info,p); + ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list; + oldest_gen->mut_once_list = (StgMutClosure *)ind; + } break; } case THUNK_STATIC: case FUN_STATIC: scavenge_srt(info); - /* fall through */ + break; case CONSTR_STATIC: { @@ -1469,10 +2751,12 @@ scavenge_static(void) } default: - barf("scavenge_static"); + barf("scavenge_static: strange closure %d", (int)(info->type)); } - /* get the next static object from the list. Remeber, there might + ASSERT(failed_to_evac == rtsFalse); + + /* get the next static object from the list. Remember, there might * be more stuff on this list now that we've done some evacuating! * (static_objects is a global) */ @@ -1485,13 +2769,16 @@ scavenge_static(void) objects pointed to by it. We can use the same code for walking PAPs, since these are just sections of copied stack. -------------------------------------------------------------------------- */ +//@cindex scavenge_stack static void scavenge_stack(StgPtr p, StgPtr stack_end) { StgPtr q; const StgInfoTable* info; - StgNat32 bitmap; + StgWord32 bitmap; + + //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end)); /* * Each time around this loop, we are looking at a chunk of stack @@ -1500,24 +2787,22 @@ scavenge_stack(StgPtr p, StgPtr stack_end) */ while (p < stack_end) { - q = *stgCast(StgPtr*,p); + q = *(P_ *)p; /* If we've got a tag, skip over that many words on the stack */ - if (IS_ARG_TAG(stgCast(StgWord,q))) { + if (IS_ARG_TAG((W_)q)) { p += ARG_SIZE(q); p++; continue; } /* Is q a pointer to a closure? */ - if (! LOOKS_LIKE_GHC_INFO(q)) { - + if (! LOOKS_LIKE_GHC_INFO(q) ) { #ifdef DEBUG - if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */ - ASSERT(closure_STATIC(stgCast(StgClosure*,q))); - } - /* otherwise, must be a pointer into the allocation space. - */ + if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */ + ASSERT(closure_STATIC((StgClosure *)q)); + } + /* otherwise, must be a pointer into the allocation space. */ #endif (StgClosure *)*p = evacuate((StgClosure *)q); @@ -1530,31 +2815,41 @@ scavenge_stack(StgPtr p, StgPtr stack_end) * record. All activation records have 'bitmap' style layout * info. 
*/ - info = get_itbl(stgCast(StgClosure*,p)); + info = get_itbl((StgClosure *)p); switch (info->type) { /* Dynamic bitmap: the mask is stored on the stack */ case RET_DYN: - bitmap = stgCast(StgRetDyn*,p)->liveness; - p = &payloadWord(stgCast(StgRetDyn*,p),0); + bitmap = ((StgRetDyn *)p)->liveness; + p = (P_)&((StgRetDyn *)p)->payload[0]; goto small_bitmap; /* probably a slow-entry point return address: */ case FUN: case FUN_STATIC: - p++; + { +#if 0 + StgPtr old_p = p; + p++; p++; + IF_DEBUG(sanity, + belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)", + old_p, p, old_p+1)); +#else + p++; /* what if FHS!=1 !? -- HWL */ +#endif goto follow_srt; + } /* Specialised code for update frames, since they're so common. - * We *know* the updatee points to a BLACKHOLE or CAF_BLACKHOLE, - * so just inline the code to evacuate it here. + * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE, + * or BLACKHOLE_BQ, so just inline the code to evacuate it here. */ case UPDATE_FRAME: { StgUpdateFrame *frame = (StgUpdateFrame *)p; StgClosure *to; - StgClosureType type = get_itbl(frame->updatee)->type; + nat type = get_itbl(frame->updatee)->type; p += sizeofW(StgUpdateFrame); if (type == EVACUATED) { @@ -1562,24 +2857,52 @@ scavenge_stack(StgPtr p, StgPtr stack_end) continue; } else { bdescr *bd = Bdescr((P_)frame->updatee); - ASSERT(type == BLACKHOLE || type == CAF_BLACKHOLE); - if (bd->gen->no >= evac_gen && bd->gen->no > N) { continue; } - to = copy(frame->updatee, BLACKHOLE_sizeW(), bd); - upd_evacuee(frame->updatee,to); - frame->updatee = to; - continue; + step *stp; + if (bd->gen->no > N) { + if (bd->gen->no < evac_gen) { + failed_to_evac = rtsTrue; + } + continue; + } + + /* Don't promote blackholes */ + stp = bd->step; + if (!(stp->gen->no == 0 && + stp->no != 0 && + stp->no == stp->gen->n_steps-1)) { + stp = stp->to; + } + + switch (type) { + case BLACKHOLE: + case CAF_BLACKHOLE: + to = copyPart(frame->updatee, BLACKHOLE_sizeW(), + sizeofW(StgHeader), stp); + frame->updatee = to; + continue; + case BLACKHOLE_BQ: + to = copy(frame->updatee, BLACKHOLE_sizeW(), stp); + frame->updatee = to; + recordMutable((StgMutClosure *)to); + continue; + default: + /* will never be SE_{,CAF_}BLACKHOLE, since we + don't push an update frame for single-entry thunks. KSW 1999-01. 
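copyPart earns its keep in the update-frame case above: an updatee that is about to be re-black-holed only needs its header preserved, so room is reserved for the whole closure but only the first few words are copied. A toy allocator showing the shape of that operation; the names and the flat word array are stand-ins, not the RTS allocator.

    #include <stdio.h>
    #include <string.h>

    #define W 64
    static unsigned long to_space[W];
    static int hp;

    /* reserve size_to_reserve words in to-space but copy only the
     * first size_to_copy words (the header) of the source closure */
    static unsigned long *copy_part(unsigned long *src,
                                    int size_to_reserve, int size_to_copy)
    {
        unsigned long *dest = &to_space[hp];
        hp += size_to_reserve;
        memcpy(dest, src, size_to_copy * sizeof *src);
        return dest;
    }

    int main(void)
    {
        unsigned long bh[4] = { 0xdead, 1, 2, 3 };   /* header + dead payload */
        unsigned long *to = copy_part(bh, 4, 1);
        printf("%lx %d\n", to[0], hp);               /* dead 4 */
        return 0;
    }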
*/
+	      barf("scavenge_stack: UPDATE_FRAME updatee");
+	    }
 	  }
 	}

       /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
-    case RET_BCO:
-    case RET_SMALL:
-    case RET_VEC_SMALL:
     case STOP_FRAME:
     case CATCH_FRAME:
     case SEQ_FRAME:
+    case RET_BCO:
+    case RET_SMALL:
+    case RET_VEC_SMALL:
       bitmap = info->layout.bitmap;
       p++;
+      /* this assumes that the payload starts immediately after the info-ptr */
     small_bitmap:
       while (bitmap != 0) {
 	if ((bitmap & 1) == 0) {
@@ -1597,6 +2920,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
     case RET_BIG:
     case RET_VEC_BIG:
       {
+	StgPtr q;
 	StgLargeBitmap *large_bitmap;
 	nat i;
@@ -1605,6 +2929,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)

 	for (i=0; i < large_bitmap->size; i++) {
 	  bitmap = large_bitmap->bitmap[i];
+	  q = p + sizeof(W_) * 8;
 	  while (bitmap != 0) {
 	    if ((bitmap & 1) == 0) {
 	      (StgClosure *)*p = evacuate((StgClosure *)*p);
 	    }
 	    p++;
 	    bitmap = bitmap >> 1;
 	  }
+	  if (i+1 < large_bitmap->size) {
+	    while (p < q) {
+	      (StgClosure *)*p = evacuate((StgClosure *)*p);
+	      p++;
+	    }
+	  }
 	}

 	/* and don't forget to follow the SRT */
@@ -1619,92 +2950,108 @@ scavenge_stack(StgPtr p, StgPtr stack_end)

     default:
-      barf("scavenge_stack: weird activation record found on stack.\n");
+      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
     }
   }
-}
+}

 /*-----------------------------------------------------------------------------
   scavenge the large object list.
+
+  evac_gen set by caller; similar games played with evac_gen as with
+  scavenge() - see comment at the top of scavenge().  Most large
+  objects are (repeatedly) mutable, so most of the time evac_gen will
+  be zero.
   --------------------------------------------------------------------------- */
+//@cindex scavenge_large

 static void
-scavenge_large(step *step)
+scavenge_large(step *stp)
 {
   bdescr *bd;
   StgPtr p;
   const StgInfoTable* info;
+  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

-  bd = step->new_large_objects;
-  evac_gen = step->gen->no;
+  evac_gen = 0;			/* most objects are mutable */
+  bd = stp->new_large_objects;

-  for (; bd != NULL; bd = step->new_large_objects) {
+  for (; bd != NULL; bd = stp->new_large_objects) {

     /* take this object *off* the large objects list and put it on
      * the scavenged large objects list.  This is so that we can
      * treat new_large_objects as a stack and push new objects on
      * the front when evacuating.
      */
-    step->new_large_objects = bd->link;
-    dbl_link_onto(bd, &step->scavenged_large_objects);
-    bd->evacuated = 0;	/* ready for next GC */
+    stp->new_large_objects = bd->link;
+    dbl_link_onto(bd, &stp->scavenged_large_objects);

     p = bd->start;
-    info = get_itbl(stgCast(StgClosure*,p));
+    info = get_itbl((StgClosure *)p);

     switch (info->type) {

     /* only certain objects can be "large"...
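The list surgery at the top of that loop is the whole trick: new_large_objects is treated as a stack, so blocks that evacuate_large pushes while we are still draining get handled in the same loop, and finished blocks move to the scavenged list. A minimal singly-linked model of the pop/push; the real code works on block descriptors and uses dbl_link_onto.

    #include <stdio.h>
    #include <stddef.h>

    typedef struct Bd { struct Bd *link; int id; } Bd;

    static void drain_large(Bd **new_large, Bd **scavenged)
    {
        Bd *bd;
        while ((bd = *new_large) != NULL) {
            *new_large = bd->link;     /* pop off the work stack   */
            bd->link = *scavenged;     /* push onto the done list  */
            *scavenged = bd;
            /* ... scavenge the object in bd here ... */
        }
    }

    int main(void)
    {
        Bd a = {NULL, 1}, b = {&a, 2};
        Bd *new_large = &b, *scavenged = NULL;
        drain_large(&new_large, &scavenged);
        printf("%d %d\n", scavenged->id, scavenged->link->id); /* 1 2 */
        return 0;
    }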
*/ case ARR_WORDS: - case MUT_ARR_WORDS: /* nothing to follow */ continue; - case ARR_PTRS: case MUT_ARR_PTRS: - case MUT_ARR_PTRS_FROZEN: /* follow everything */ { StgPtr next; - next = p + arr_ptrs_sizeW(stgCast(StgArrPtrs*,p)); - for (p = (P_)((StgArrPtrs *)p)->payload; p < next; p++) { + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { (StgClosure *)*p = evacuate((StgClosure *)*p); } continue; } - case BCO: + case MUT_ARR_PTRS_FROZEN: + /* follow everything */ { - StgBCO* bco = stgCast(StgBCO*,p); - nat i; - for (i = 0; i < bco->n_ptrs; i++) { - bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i)); + StgPtr start = p, next; + + evac_gen = saved_evac_gen; /* not really mutable */ + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + (StgClosure *)*p = evacuate((StgClosure *)*p); + } + evac_gen = 0; + if (failed_to_evac) { + recordMutable((StgMutClosure *)start); } continue; } case TSO: + scavengeTSO((StgTSO *)p); + continue; + + case AP_UPD: + case PAP: { - StgTSO *tso; + StgPAP* pap = (StgPAP *)p; - tso = (StgTSO *)p; - /* chase the link field for any TSOs on the same queue */ - (StgClosure *)tso->link = evacuate((StgClosure *)tso->link); - /* scavenge this thread's stack */ - scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); + evac_gen = saved_evac_gen; /* not really mutable */ + pap->fun = evacuate(pap->fun); + scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args); + evac_gen = 0; continue; } default: - barf("scavenge_large: unknown/strange object"); + barf("scavenge_large: unknown/strange object %d", (int)(info->type)); } } } +//@cindex zero_static_object_list + static void -zeroStaticObjectList(StgClosure* first_static) +zero_static_object_list(StgClosure* first_static) { StgClosure* p; StgClosure* link; @@ -1725,12 +3072,13 @@ zeroStaticObjectList(StgClosure* first_static) * It doesn't do any harm to zero all the mutable link fields on the * mutable list. */ + static void -zeroMutableList(StgMutClosure *first) +zero_mutable_list( StgMutClosure *first ) { StgMutClosure *next, *c; - for (c = first; c; c = next) { + for (c = first; c != END_MUT_LIST; c = next) { next = c->mut_link; c->mut_link = NULL; } @@ -1740,46 +3088,31 @@ zeroMutableList(StgMutClosure *first) Reverting CAFs -------------------------------------------------------------------------- */ -void RevertCAFs(void) +void +revertCAFs( void ) { - while (enteredCAFs != END_CAF_LIST) { - StgCAF* caf = enteredCAFs; - - enteredCAFs = caf->link; - ASSERT(get_itbl(caf)->type == CAF_ENTERED); - SET_INFO(caf,&CAF_UNENTERED_info); - caf->value = stgCast(StgClosure*,0xdeadbeef); - caf->link = stgCast(StgCAF*,0xdeadbeef); - } + StgIndStatic *c; + + for (c = (StgIndStatic *)caf_list; c != NULL; + c = (StgIndStatic *)c->static_link) + { + c->header.info = c->saved_info; + c->saved_info = NULL; + /* could, but not necessary: c->static_link = NULL; */ + } + caf_list = NULL; } -void revertDeadCAFs(void) +void +scavengeCAFs( void ) { - StgCAF* caf = enteredCAFs; - enteredCAFs = END_CAF_LIST; - while (caf != END_CAF_LIST) { - StgCAF* next = caf->link; - - switch(GET_INFO(caf)->type) { - case EVACUATED: - { - /* This object has been evacuated, it must be live. 
*/ - StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee); - new->link = enteredCAFs; - enteredCAFs = new; - break; - } - case CAF_ENTERED: - { - SET_INFO(caf,&CAF_UNENTERED_info); - caf->value = stgCast(StgClosure*,0xdeadbeef); - caf->link = stgCast(StgCAF*,0xdeadbeef); - break; - } - default: - barf("revertDeadCAFs: enteredCAFs list corrupted"); - } - caf = next; + StgIndStatic *c; + + evac_gen = 0; + for (c = (StgIndStatic *)caf_list; c != NULL; + c = (StgIndStatic *)c->static_link) + { + c->indirectee = evacuate(c->indirectee); } } @@ -1796,6 +3129,8 @@ void revertDeadCAFs(void) -------------------------------------------------------------------------- */ #ifdef DEBUG +//@cindex gcCAFs + static void gcCAFs(void) { @@ -1817,7 +3152,7 @@ gcCAFs(void) if (STATIC_LINK(info,p) == NULL) { IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p)); /* black hole it */ - SET_INFO(p,&BLACKHOLE_info); + SET_INFO(p,&stg_BLACKHOLE_info); p = STATIC_LINK2(info,p); *pp = p; } @@ -1833,6 +3168,9 @@ gcCAFs(void) } #endif +//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection +//@subsection Lazy black holing + /* ----------------------------------------------------------------------------- Lazy black holing. @@ -1840,12 +3178,13 @@ gcCAFs(void) some work, we have to run down the stack and black-hole all the closures referred to by update frames. -------------------------------------------------------------------------- */ +//@cindex threadLazyBlackHole static void threadLazyBlackHole(StgTSO *tso) { StgUpdateFrame *update_frame; - StgBlackHole *bh; + StgBlockingQueue *bh; StgPtr stack_end; stack_end = &tso->stack[tso->stack_size]; @@ -1855,32 +3194,36 @@ threadLazyBlackHole(StgTSO *tso) switch (get_itbl(update_frame)->type) { case CATCH_FRAME: - update_frame = stgCast(StgCatchFrame*,update_frame)->link; + update_frame = ((StgCatchFrame *)update_frame)->link; break; case UPDATE_FRAME: - bh = stgCast(StgBlackHole*,update_frame->updatee); + bh = (StgBlockingQueue *)update_frame->updatee; /* if the thunk is already blackholed, it means we've also * already blackholed the rest of the thunks on this stack, * so we can stop early. + * + * The blackhole made for a CAF is a CAF_BLACKHOLE, so they + * don't interfere with this optimisation. */ + if (bh->header.info == &stg_BLACKHOLE_info) { + return; + } - /* Don't for now: when we enter a CAF, we create a black hole on - * the heap and make the update frame point to it. Thus the - * above optimisation doesn't apply. - */ - if (bh->header.info != &BLACKHOLE_info - && bh->header.info != &CAF_BLACKHOLE_info) { - SET_INFO(bh,&BLACKHOLE_info); - bh->blocking_queue = END_TSO_QUEUE; + if (bh->header.info != &stg_BLACKHOLE_BQ_info && + bh->header.info != &stg_CAF_BLACKHOLE_info) { +#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) + fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh); +#endif + SET_INFO(bh,&stg_BLACKHOLE_info); } update_frame = update_frame->link; break; case SEQ_FRAME: - update_frame = stgCast(StgSeqFrame*,update_frame)->link; + update_frame = ((StgSeqFrame *)update_frame)->link; break; case STOP_FRAME: @@ -1891,6 +3234,9 @@ threadLazyBlackHole(StgTSO *tso) } } +//@node Stack squeezing, Pausing a thread, Lazy black holing +//@subsection Stack squeezing + /* ----------------------------------------------------------------------------- * Stack squeezing * @@ -1898,6 +3244,7 @@ threadLazyBlackHole(StgTSO *tso) * lazy black holing here. 
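The early return in threadLazyBlackHole above is what makes the lazy scheme cheap: update frames are pushed in order, so the first already-black-holed updatee proves every older frame was dealt with on a previous pause. A stand-alone toy of the walk, with frames reduced to a link plus an updatee tag and catch/seq/stop frames omitted:

    #include <stdio.h>
    #include <stddef.h>

    enum { THUNK, BLACKHOLE };

    typedef struct Frame { struct Frame *link; int *updatee_info; } Frame;

    static void lazy_blackhole(Frame *su)
    {
        for (; su != NULL; su = su->link) {
            if (*su->updatee_info == BLACKHOLE)
                return;                       /* the rest were done earlier */
            *su->updatee_info = BLACKHOLE;    /* like SET_INFO(bh, ...) */
        }
    }

    int main(void)
    {
        int t1 = THUNK, t2 = THUNK, t3 = BLACKHOLE, t4 = THUNK;
        Frame f3 = {NULL, &t4}, f2 = {&f3, &t3}, f1 = {&f2, &t2}, f0 = {&f1, &t1};
        lazy_blackhole(&f0);
        printf("%d %d %d %d\n", t1, t2, t3, t4);  /* 1 1 1 0 */
        return 0;
    }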
* * -------------------------------------------------------------------------- */ +//@cindex threadSqueezeStack static void threadSqueezeStack(StgTSO *tso) @@ -1908,6 +3255,14 @@ threadSqueezeStack(StgTSO *tso) StgUpdateFrame *prev_frame; /* Temporally previous */ StgPtr bottom; rtsBool prev_was_update_frame; +#if DEBUG + StgUpdateFrame *top_frame; + nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0, + bhs=0, squeezes=0; + void printObj( StgClosure *obj ); // from Printer.c + + top_frame = tso->su; +#endif bottom = &(tso->stack[tso->stack_size]); frame = tso->su; @@ -1922,16 +3277,49 @@ threadSqueezeStack(StgTSO *tso) * added to the stack, rather than the way we see them in this * walk. (It makes the next loop less confusing.) * - * Could stop if we find an update frame pointing to a black hole, - * but see comment in threadLazyBlackHole(). + * Stop if we find an update frame pointing to a black hole + * (see comment in threadLazyBlackHole()). */ next_frame = NULL; - while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */ + /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */ + while ((P_)frame < bottom - sizeofW(StgStopFrame)) { prev_frame = frame->link; frame->link = next_frame; next_frame = frame; frame = prev_frame; +#if DEBUG + IF_DEBUG(sanity, + if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) { + printObj((StgClosure *)prev_frame); + barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n", + frame, prev_frame); + }) + switch (get_itbl(frame)->type) { + case UPDATE_FRAME: + upd_frames++; + if (frame->updatee->header.info == &stg_BLACKHOLE_info) + bhs++; + break; + case STOP_FRAME: + stop_frames++; + break; + case CATCH_FRAME: + catch_frames++; + break; + case SEQ_FRAME: + seq_frames++; + break; + default: + barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n", + frame, prev_frame); + printObj((StgClosure *)prev_frame); + } +#endif + if (get_itbl(frame)->type == UPDATE_FRAME + && frame->updatee->header.info == &stg_BLACKHOLE_info) { + break; + } } /* Now, we're at the bottom. Frame points to the lowest update @@ -1977,8 +3365,9 @@ threadSqueezeStack(StgTSO *tso) StgClosure *updatee_keep = prev_frame->updatee; StgClosure *updatee_bypass = frame->updatee; -#if 0 /* DEBUG */ - fprintf(stderr, "squeezing frame at %p\n", frame); +#if DEBUG + IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame)); + squeezes++; #endif /* Deal with blocking queues. If both updatees have blocked @@ -1991,20 +3380,21 @@ threadSqueezeStack(StgTSO *tso) * slower --SDM */ #if 0 /* do it properly... */ - if (GET_INFO(updatee_bypass) == BLACKHOLE_info - || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info +# if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) +# error Unimplemented lazy BH warning. (KSW 1999-01) +# endif + if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info + || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info ) { /* Sigh. It has one. Don't lose those threads! */ - if (GET_INFO(updatee_keep) == BLACKHOLE_info - || GET_INFO(updatee_keep) == CAF_BLACKHOLE_info - ) { + if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) { /* Urgh. Two queues. Merge them. 
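Seen in isolation, the squeeze of two adjacent update frames is a three-line operation plus the two guards the surrounding code insists on: never short-circuit an updatee to itself, and never clobber an existing indirection. A toy version in which is_ind/ind_to stand in for the closure-header rewrite that UPD_IND_NOLOCK performs:

    #include <stdio.h>
    #include <stddef.h>

    typedef struct Cl { int is_ind; struct Cl *ind_to; } Cl;

    static void squeeze(Cl *updatee_bypass, Cl *updatee_keep)
    {
        if (updatee_bypass != updatee_keep && !updatee_bypass->is_ind) {
            updatee_bypass->is_ind = 1;         /* like UPD_IND_NOLOCK */
            updatee_bypass->ind_to = updatee_keep;
        }
    }

    int main(void)
    {
        Cl keep = {0, NULL}, bypass = {0, NULL};
        squeeze(&bypass, &keep);
        squeeze(&bypass, &keep);                /* second call is a no-op */
        printf("%d\n", bypass.is_ind && bypass.ind_to == &keep); /* 1 */
        return 0;
    }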
*/ - P_ keep_tso = ((StgBlackHole *)updatee_keep)->blocking_queue; + P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue; while (keep_tso->link != END_TSO_QUEUE) { keep_tso = keep_tso->link; } - keep_tso->link = ((StgBlackHole *)updatee_bypass)->blocking_queue; + keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue; } else { /* For simplicity, just swap the BQ for the BH */ @@ -2020,7 +3410,21 @@ threadSqueezeStack(StgTSO *tso) #endif TICK_UPD_SQUEEZED(); - UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */ + /* wasn't there something about update squeezing and ticky to be + * sorted out? oh yes: we aren't counting each enter properly + * in this case. See the log somewhere. KSW 1999-04-21 + * + * Check two things: that the two update frames don't point to + * the same object, and that the updatee_bypass isn't already an + * indirection. Both of these cases only happen when we're in a + * block hole-style loop (and there are multiple update frames + * on the stack pointing to the same closure), but they can both + * screw us up if we don't check. + */ + if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) { + /* this wakes the threads up */ + UPD_IND_NOLOCK(updatee_bypass, updatee_keep); + } sp = (P_)frame - 1; /* sp = stuff to slide */ displacement += sizeofW(StgUpdateFrame); @@ -2032,12 +3436,32 @@ threadSqueezeStack(StgTSO *tso) /* Do lazy black-holing. */ if (is_update_frame) { - StgBlackHole *bh = (StgBlackHole *)frame->updatee; - if (bh->header.info != &BLACKHOLE_info - && bh->header.info != &CAF_BLACKHOLE_info - ) { - SET_INFO(bh,&BLACKHOLE_info); - bh->blocking_queue = END_TSO_QUEUE; + StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee; + if (bh->header.info != &stg_BLACKHOLE_info && + bh->header.info != &stg_BLACKHOLE_BQ_info && + bh->header.info != &stg_CAF_BLACKHOLE_info) { +#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) + fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh); +#endif +#ifdef DEBUG + /* zero out the slop so that the sanity checker can tell + * where the next closure is. + */ + { + StgInfoTable *info = get_itbl(bh); + nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i; + /* don't zero out slop for a THUNK_SELECTOR, because it's layout + * info is used for a different purpose, and it's exactly the + * same size as a BLACKHOLE in any case. + */ + if (info->type != THUNK_SELECTOR) { + for (i = np; i < np + nw; i++) { + ((StgClosure *)bh)->payload[i] = 0; + } + } + } +#endif + SET_INFO(bh,&stg_BLACKHOLE_info); } } @@ -2056,9 +3480,10 @@ threadSqueezeStack(StgTSO *tso) else next_frame_bottom = tso->sp - 1; -#if 0 /* DEBUG */ - fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom, - displacement); +#if DEBUG + IF_DEBUG(gc, + fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom, + displacement)) #endif while (sp >= next_frame_bottom) { @@ -2072,8 +3497,16 @@ threadSqueezeStack(StgTSO *tso) tso->sp += displacement; tso->su = prev_frame; +#if DEBUG + IF_DEBUG(gc, + fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n", + squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames)) +#endif } +//@node Pausing a thread, Index, Stack squeezing +//@subsection Pausing a thread + /* ----------------------------------------------------------------------------- * Pausing a thread * @@ -2081,7 +3514,7 @@ threadSqueezeStack(StgTSO *tso) * here. 
We also take the opportunity to do stack squeezing if it's * turned on. * -------------------------------------------------------------------------- */ - +//@cindex threadPaused void threadPaused(StgTSO *tso) { @@ -2090,3 +3523,96 @@ threadPaused(StgTSO *tso) else threadLazyBlackHole(tso); } + +/* ----------------------------------------------------------------------------- + * Debugging + * -------------------------------------------------------------------------- */ + +#if DEBUG +//@cindex printMutOnceList +void +printMutOnceList(generation *gen) +{ + StgMutClosure *p, *next; + + p = gen->mut_once_list; + next = p->mut_link; + + fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list); + for (; p != END_MUT_LIST; p = next, next = p->mut_link) { + fprintf(stderr, "%p (%s), ", + p, info_type((StgClosure *)p)); + } + fputc('\n', stderr); +} + +//@cindex printMutableList +void +printMutableList(generation *gen) +{ + StgMutClosure *p, *next; + + p = gen->mut_list; + next = p->mut_link; + + fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list); + for (; p != END_MUT_LIST; p = next, next = p->mut_link) { + fprintf(stderr, "%p (%s), ", + p, info_type((StgClosure *)p)); + } + fputc('\n', stderr); +} + +//@cindex maybeLarge +static inline rtsBool +maybeLarge(StgClosure *closure) +{ + StgInfoTable *info = get_itbl(closure); + + /* closure types that may be found on the new_large_objects list; + see scavenge_large */ + return (info->type == MUT_ARR_PTRS || + info->type == MUT_ARR_PTRS_FROZEN || + info->type == TSO || + info->type == ARR_WORDS); +} + + +#endif /* DEBUG */ + +//@node Index, , Pausing a thread +//@subsection Index + +//@index +//* GarbageCollect:: @cindex\s-+GarbageCollect +//* MarkRoot:: @cindex\s-+MarkRoot +//* RevertCAFs:: @cindex\s-+RevertCAFs +//* addBlock:: @cindex\s-+addBlock +//* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list +//* copy:: @cindex\s-+copy +//* copyPart:: @cindex\s-+copyPart +//* evacuate:: @cindex\s-+evacuate +//* evacuate_large:: @cindex\s-+evacuate_large +//* gcCAFs:: @cindex\s-+gcCAFs +//* isAlive:: @cindex\s-+isAlive +//* maybeLarge:: @cindex\s-+maybeLarge +//* mkMutCons:: @cindex\s-+mkMutCons +//* printMutOnceList:: @cindex\s-+printMutOnceList +//* printMutableList:: @cindex\s-+printMutableList +//* relocate_TSO:: @cindex\s-+relocate_TSO +//* scavenge:: @cindex\s-+scavenge +//* scavenge_large:: @cindex\s-+scavenge_large +//* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list +//* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list +//* scavenge_one:: @cindex\s-+scavenge_one +//* scavenge_srt:: @cindex\s-+scavenge_srt +//* scavenge_stack:: @cindex\s-+scavenge_stack +//* scavenge_static:: @cindex\s-+scavenge_static +//* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole +//* threadPaused:: @cindex\s-+threadPaused +//* threadSqueezeStack:: @cindex\s-+threadSqueezeStack +//* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list +//* upd_evacuee:: @cindex\s-+upd_evacuee +//* zero_mutable_list:: @cindex\s-+zero_mutable_list +//* zero_static_object_list:: @cindex\s-+zero_static_object_list +//@end index