\section[PerformGC]{Wrapper for heap overflow}

\begin{code}
#include "rtsdefs.h"
\end{code}

@PerformGC@ is the wrapper for calls to @collectHeap@ in the storage
manager.  It performs the following actions:
\begin{enumerate}
\item Save live registers.
\item If black holing is required before garbage collection, black hole
  the update frames on the B stack and any live registers pointing at
  updatable closures --- possibly R1, if live and in update? --JSM
\item Call the garbage collector.
\item Restore registers.
\end{enumerate}
They either succeed or crash-and-burn; hence, they don't return anything.

@PerformGC@ saves the fixed STG registers and calls the garbage
collector.  It also black holes the B stack if this is required at
garbage collection time.

There's also a function @StgPerformGarbageCollection@ which does all of
the above and is used to force a full collection.

\begin{code}
#if defined(CONCURRENT)

EXTFUN(EnterNodeCode);	/* For reentering node after GC */
EXTFUN(CheckHeapCode);	/* For returning to thread after a context switch */
extern P_ AvailableStack;
# if defined(PAR)
EXTDATA_RO(FetchMe_info);
# endif

#else

static void BlackHoleUpdateStack(STG_NO_ARGS);

#endif /* CONCURRENT */

extern smInfo StorageMgrInfo;
extern void PrintTickyInfo(STG_NO_ARGS);

/* the real work is done by this function --- see wrappers at end */

void
RealPerformGC(liveness, reqsize, always_reenter_node, do_full_collection)
  W_ liveness;
  W_ reqsize;
  W_ always_reenter_node;
  rtsBool do_full_collection;
{
    I_ num_ptr_roots = 0; /* we bump this counter as we store roots;
			     de-bump it as we re-store them. */
#if defined(PROFILING)
    CostCentre Save_CCC;
#endif

    /* stop the profiling timer --------------------- */
#if defined(PROFILING)
/*  STOP_TIME_PROFILER; */
#endif

#ifdef CONCURRENT

    SAVE_Liveness = liveness;

    /*
    fprintf(stderr,"RealGC:liveness=0x%lx,reqsize=0x%lx,reenter=%lx,do_full=%d,context_switch=%ld\n",
	    liveness, reqsize,always_reenter_node,do_full_collection,context_switch);
    */

    /*
       Even on a uniprocessor, we may have to reenter node after a
       context switch.  Though it can't turn into a FetchMe, its shape
       may have changed (e.g. from a thunk to a data object).
     */
    if (always_reenter_node) {
	/* Avoid infinite loops at the same heap check */
	if (SAVE_Hp <= SAVE_HpLim && TSO_SWITCH(CurrentTSO) == TSO_PC2(CurrentTSO)) {
	    TSO_SWITCH(CurrentTSO) = NULL;
	    return;
	}
	/* Set up to re-enter Node, so as to be sure it's really there. */
	ASSERT(liveness & LIVENESS_R1);
	TSO_SWITCH(CurrentTSO) = TSO_PC2(CurrentTSO);
	TSO_PC2(CurrentTSO) = EnterNodeCode;
    }

    SAVE_Hp -= reqsize;

    if (context_switch && !do_full_collection
# if defined(PROFILING)
	&& !interval_expired
# endif
       ) {
	/* We're in a GC callWrapper, so the thread state is safe */
	TSO_ARG1(CurrentTSO) = reqsize;
	TSO_PC1(CurrentTSO) = CheckHeapCode;
# ifdef PAR
	if (RTSflags.ParFlags.granSimStats) {
	    TSO_EXECTIME(CurrentTSO) += CURRENT_TIME - TSO_BLOCKEDAT(CurrentTSO);
	}
# endif
# if defined(GRAN)
	ReSchedule(SAME_THREAD);
# else
	ReSchedule(1);
# endif
    }

# if defined(PROFILING)
    Save_CCC = CCC;
# endif
# if defined(PAR)
    SET_CCC_RTS(CC_GC,0,1);	/* without the sub_scc_count++ */
# endif

    ReallyPerformThreadGC(reqsize, do_full_collection);

#else	/* !CONCURRENT */

# if defined(PROFILING)
    Save_CCC = CCC;
    SET_CCC_RTS(CC_GC,0,1);	/* without the sub_scc_count++ */
# endif

    /* root saving ---------------------------------- */

# define __ENROOT_PTR_REG(cond,n) /* n == 1 <=> R1 */		\
	do { if ( cond ) {					\
	    StorageMgrInfo.roots[num_ptr_roots] = CAT2(MAIN_R,n).p; \
	    num_ptr_roots++;					\
	}} while (0)

    __ENROOT_PTR_REG(IS_LIVE_R1(liveness),1);
    __ENROOT_PTR_REG(IS_LIVE_R2(liveness),2);
    __ENROOT_PTR_REG(IS_LIVE_R3(liveness),3);
    __ENROOT_PTR_REG(IS_LIVE_R4(liveness),4);
    __ENROOT_PTR_REG(IS_LIVE_R5(liveness),5);
    __ENROOT_PTR_REG(IS_LIVE_R6(liveness),6);
    __ENROOT_PTR_REG(IS_LIVE_R7(liveness),7);
    __ENROOT_PTR_REG(IS_LIVE_R8(liveness),8);
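    /* For example, if only R1 and R4 are live, then liveness has just the
       IS_LIVE_R1 and IS_LIVE_R4 bits set, and the eight macro calls above
       boil down to (illustrative only):

	   StorageMgrInfo.roots[0] = MAIN_R1.p;
	   StorageMgrInfo.roots[1] = MAIN_R4.p;
	   num_ptr_roots = 2;

       The restore code at the end of this function pops these slots in the
       opposite order, so each live register gets back the (possibly
       relocated) address of its closure. */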
    /*
     * Before we garbage collect we may have to squeeze update frames and/or
     * black hole the update stack
     */
    if (! RTSflags.GcFlags.squeezeUpdFrames) {
	BlackHoleUpdateStack();

    } else { /* Squeeze and/or black hole update frames */
	I_ displacement;

	displacement = SqueezeUpdateFrames(stackInfo.botB + BREL(1), MAIN_SpB, MAIN_SuB);

	MAIN_SuB += BREL(displacement);
	MAIN_SpB += BREL(displacement);
	/* fprintf(stderr, "B size %d, squeezed out %d\n",
	   MAIN_SpB - stackInfo.botB, displacement); */
    }

    ASSERT(num_ptr_roots <= SM_MAXROOTS);
    StorageMgrInfo.rootno = num_ptr_roots;

    SAVE_Hp -= reqsize;	/* Move (SAVE_)Hp back to where it was */
			/* (heap is known to grow upwards) */
			/* we *do* have to do this, so reported stats will be right! */
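    /* A note on the heap-limit juggling below: the top hardHpOverflowSize
       words of the heap are kept as emergency headroom, and the hplim the
       mutator normally sees is the soft limit just below them:

	   |<----------------- heap ----------------->|
	   |    normal allocation     |   emergency   |
				      ^               ^
				    hplim         real limit

       Before calling the collector we raise hplim to the real limit;
       afterwards the soft limit is re-imposed.  (Schematic only; the exact
       policy is whatever collectHeap and the code below implement.) */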
    /* the main business ---------------------------- */

    blockUserSignals();

    {
      int GC_result;

      /* Restore hpLim to its "correct" setting */
      StorageMgrInfo.hplim += StorageMgrInfo.hardHpOverflowSize;

      GC_result = collectHeap(reqsize, &StorageMgrInfo, do_full_collection);

      if ( GC_result == GC_HARD_LIMIT_EXCEEDED ) {
	  OutOfHeapHook(reqsize * sizeof(W_), RTSflags.GcFlags.heapSize * sizeof(W_)); /*msg*/
	  shutdownHaskell();
	  EXIT(EXIT_FAILURE);

      } else if ( GC_result == GC_SOFT_LIMIT_EXCEEDED ) {
	  /* Allow ourselves to use emergency space */
	  /* Set hplim so that we'll GC when we hit the soft limit */
	  StorageMgrInfo.hplim -= StorageMgrInfo.hardHpOverflowSize;
	  raiseError( softHeapOverflowHandler );

      } else if ( GC_result == GC_SUCCESS ) {
	  /* Set hplim so that we'll GC when we hit the soft limit */
	  StorageMgrInfo.hplim -= StorageMgrInfo.hardHpOverflowSize;

      } else { /* This should not happen */
	  fprintf(stderr, "Panic: garbage collector returned %d; please report it as a bug to glasgow-haskell-bugs@dcs.gla.ac.uk\n", GC_result );
# if defined(TICKY_TICKY)
	  if (RTSflags.TickyFlags.showTickyStats) PrintTickyInfo();
# endif
	  abort();
      }
    }

    StorageMgrInfo.rootno = 0;	/* reset */

    SAVE_Hp += reqsize;	/* Semantics of GC ensures that a block of
			   `reqsize' is now available (and allocated)
			   [NB: sequential only] */

    /* root restoring ------------------------------- */
    /* must do all the restoring exactly backwards to the storing! */

    /* now the general regs, in *backwards* order */

# define __DEROOT_PTR_REG(cond,n) /* n == 1 <=> R1 */		\
	do { if ( cond ) {					\
	    num_ptr_roots--;					\
	    CAT2(MAIN_R,n).p = StorageMgrInfo.roots[num_ptr_roots]; \
	}} while (0)

    __DEROOT_PTR_REG(IS_LIVE_R8(liveness),8);
    __DEROOT_PTR_REG(IS_LIVE_R7(liveness),7);
    __DEROOT_PTR_REG(IS_LIVE_R6(liveness),6);
    __DEROOT_PTR_REG(IS_LIVE_R5(liveness),5);
    __DEROOT_PTR_REG(IS_LIVE_R4(liveness),4);
    __DEROOT_PTR_REG(IS_LIVE_R3(liveness),3);
    __DEROOT_PTR_REG(IS_LIVE_R2(liveness),2);
    __DEROOT_PTR_REG(IS_LIVE_R1(liveness),1);

    ASSERT(num_ptr_roots == 0); /* we have put it all back */

    unblockUserSignals();

#endif	/* !CONCURRENT */

#if defined(PROFILING)
    CCC = Save_CCC;
    RESTART_TIME_PROFILER;
#endif
}
\end{code}

This is a wrapper used for all standard, non-threaded, non-parallel GC
purposes.

\begin{code}
#ifdef HEAP_CHK_HYGIENE
I_ doHygieneCheck = 0;
#endif
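/* PerformGC receives the liveness mask, the request size and the
   re-enter-Node flag packed into a single argument word; the
   HEAP_OVERFLOW_* macros below unpack it again.  (The encoding itself is
   defined in the RTS headers, not here.) */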
void
PerformGC(args)
  W_ args;
{
    W_ liveness = HEAP_OVERFLOW_LIVENESS(args);
    W_ reqsize = HEAP_OVERFLOW_REQSIZE(args);
    W_ always_reenter_node = HEAP_OVERFLOW_REENTER(args);

#ifdef HEAP_CHK_HYGIENE
    if (doHygieneCheck) {
	checkHygiene();
	return;
    }
#endif
    RealPerformGC(liveness, reqsize, always_reenter_node, rtsFalse);
}

#if defined(CONCURRENT) && defined(GRAN)
/* This is directly called from the macro GRAN_RESCHEDULE out of the */
/* threaded world. -- HWL                                            */

void
PerformReschedule(liveness, always_reenter_node)
  W_ liveness;
  rtsBool always_reenter_node;
{
    rtsBool need_to_reschedule;

    /* Reset the global NeedToReSchedule --
       this is used only to communicate the fact that we should schedule
       a new thread rather than the existing one following a fetch.
    if (RTSflags.GranFlags.Light) {
      Yield(liveness);
    }

    ASSERT(!RTSflags.GranFlags.Light);
    */

    need_to_reschedule = NeedToReSchedule;
    NeedToReSchedule = rtsFalse;

    SAVE_Liveness = liveness;

    if (always_reenter_node) {
	/* Avoid infinite loops at the same context switch */
	if (/* (TSO_SWITCH(CurrentTSO) == TSO_PC2(CurrentTSO)) || */
	    (!need_to_reschedule &&
	     CurrentTime[CurrentProc]=CurrentTime[CurrentProc]) ||
	     IgnoreEvents
	   )) {
	    /* TSO_SWITCH(CurrentTSO) = NULL; */
	    return;
	}

	/* Set up to re-enter Node, so as to be sure it's really there. */

	ASSERT(liveness & LIVENESS_R1);
	/* TSO_SWITCH(CurrentTSO) = TSO_PC2(CurrentTSO); */
	TSO_PC2(CurrentTSO) = (void *) EnterNodeCode;
    }

    /* We're in a GC callWrapper, so the thread state is safe */
    TSO_ARG1(CurrentTSO) = 0;
    TSO_PC1(CurrentTSO) = EnterNodeCode;

    ReSchedule( (need_to_reschedule &&
		 !RTSflags.GranFlags.DoReScheduleOnFetch &&
		 !RTSflags.GranFlags.Light) ?
		CHANGE_THREAD : SAME_THREAD );
    /* In a block-on-fetch setup we must not use SAME_THREAD since that   */
    /* would continue the fetching TSO, which is still at the head of the */
    /* threadq                                                            */
    /* GrAnSim-Light always uses SAME_THREAD                              */
}
#endif

#ifndef PAR
/*
   This is a wrapper used when we want to do a full GC.

   One reason might be that we're about to enter a time-critical piece of
   code and want to reduce the risk of a GC during the run.  The motivating
   reason is that we want to force the GC to report any dead Malloc
   Pointers to us.

   Note: this should only be called using _ccall_GC_ which saves all
   registers in the usual place (i.e. the global save area) before the
   call and restores them afterwards.

   ToDo: put in a runtime check that _ccall_GC_ is in action. */

void
StgPerformGarbageCollection()
{
# if ! defined(__STG_GCC_REGS__)
    SaveAllStgRegs();	/* unregisterised case */
# endif

    RealPerformGC(0,0,0,rtsTrue);

# if ! defined(__STG_GCC_REGS__)
    RestoreAllStgRegs();	/* unregisterised case */
# endif
}
#endif /* !PAR */
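/* For instance, a Haskell-side binding would be expected to go through the
   garbage-collecting ccall mechanism, along the lines of

	performGC :: IO ()
	performGC = _ccall_GC_ StgPerformGarbageCollection

   (illustrative only -- the point is simply that a plain _ccall_ would not
   save the STG registers first, which the note above requires). */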
#if defined(CONCURRENT)

# if defined(GRAN)

#  if defined(DEPTH_FIRST_PRUNING)

/* Jim's spark pools are very similar to our processors, except that he
   uses a hard-wired constant.  This would be a mistake for us, since we
   won't always need this many pools. */

void
PruneSparks(STG_NO_ARGS)
{
    sparkq spark, prev, next;
    I_ proc, pool, prunedSparks;
    I_ tot_sparks[MAX_PROC], total_sparks = 0, tot = 0;

# if defined(GRAN_CHECK) && defined(GRAN)
    if ( RTSflags.GranFlags.debug & 0x40 )
      fprintf(stderr,"Pruning (depth-first) spark roots for GC ...\n");
# endif

    for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
      for(pool = 0; pool < SPARK_POOLS; ++pool) {
	/* ... walk the spark queue for this (proc,pool), keeping only
	   sparks for which SHOULD_SPARK still holds, counting the pruned
	   ones in prunedSparks and disposing of them ... */
	if ( RTSflags.GcFlags.giveStats && (prunedSparks>0) )
	  fprintf(RTSflags.GcFlags.statsFile,
		  "Pruning and disposing %lu sparks (_NS flag!) on proc %d (pool %d) in PruneSparks\n",
		  prunedSparks,proc,pool);
      } /* forall pool ... */
    }   /* forall proc ... */

# if defined(GRAN_CHECK) && defined(GRAN)
    if ( RTSflags.GcFlags.giveStats ) {
      fprintf(RTSflags.GcFlags.statsFile,
	      "Spark statistics (after pruning) (total sparks: %d; before pruning: %d):",
	      tot,total_sparks);
      for (proc=0; proc < RTSflags.GranFlags.proc; proc++) {
	fprintf(RTSflags.GcFlags.statsFile,"\tPE %d: %d ",proc,tot_sparks[proc]);
      }
      fprintf(RTSflags.GcFlags.statsFile,".\n");
    }
# endif
}

#  else /* !DEPTH_FIRST_PRUNING */

/* Auxiliary functions that are used in the GranSim version of PruneSparks */

static W_
arr_and (W_ arr[], I_ max)
{
    I_ i;
    W_ res;

    /* Doesn't work with max==0; but then, many things don't work in this */
    /* special case.                                                      */
    for (i=1, res = arr[0]; i<max; i++)
      res = (arr[i]>res) ? arr[i] : res;

    return (res);
}

/* In case of an excessive number of sparks, depth first pruning is a Bad */
/* Idea as we might end up with all remaining sparks on processor 0 and   */
/* none on the other processors.  So, this version uses breadth first     */
/* pruning. -- HWL                                                        */
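/* For example, with MAX_SPARKS = 4 and three sparks on each of PE 0 and
   PE 1, depth-first pruning would keep all three sparks of PE 0 plus one
   of PE 1, whereas breadth-first pruning keeps two sparks on each PE
   (hypothetical numbers, purely to illustrate the difference). */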
void
PruneSparks(STG_NO_ARGS)
{
    sparkq spark, prev,
	   prev_spark[MAX_PROC][SPARK_POOLS],
	   curr_spark[MAX_PROC][SPARK_POOLS];
    PROC proc;
    W_ allProcs = 0, endQueues[SPARK_POOLS], finishedQueues[SPARK_POOLS];
    I_ pool, total_sparks=0, prunedSparks[MAX_PROC][SPARK_POOLS];
    I_ tot_sparks[MAX_PROC], tot = 0;

# if defined(GRAN_CHECK) && defined(GRAN)
    if ( RTSflags.GranFlags.debug & 0x40 )
      fprintf(stderr,"Pruning (breadth-first) sparks for GC ...\n");
# endif

    /* Init */
    for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
      allProcs |= PE_NUMBER(proc);
      tot_sparks[proc] = 0;
      for(pool = 0; pool < SPARK_POOLS; ++pool) {
	prev_spark[proc][pool] = NULL;
	curr_spark[proc][pool] = PendingSparksHd[proc][pool];
	prunedSparks[proc][pool] = 0;
	endQueues[pool] = 0;
	finishedQueues[pool] = 0;
      }
    }

    /* Breadth first pruning */
    do {
      for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
	for(pool = 0; pool < SPARK_POOLS; ++pool) {
	  spark = curr_spark[proc][pool];
	  prev  = prev_spark[proc][pool];

	  if (spark == NULL) {		/* at the end of the queue already? */
	    if (! (endQueues[pool] & PE_NUMBER(proc)) ) {
	      endQueues[pool] |= PE_NUMBER(proc);
	      if (prev==NULL)
		PendingSparksHd[proc][pool] = NULL;
	      else
		SPARK_NEXT(prev) = NULL;
	      PendingSparksTl[proc][pool] = prev;
	    }
	    continue;
	  }

	  /* HACK! This clause should actually never happen  HWL */
	  if ( (SPARK_NODE(spark) == NULL) ||
	       (SPARK_NODE(spark) == Prelude_Z91Z93_closure) ) {
# if defined(GRAN_CHECK) && defined(GRAN)
	    if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
	      fprintf(RTSflags.GcFlags.statsFile,
		      "PruneSparks: Warning: spark @ 0x%lx points to NULL or Prelude_Z91Z93_closure\n",
		      spark);
# endif
	    /* prune it below */
	  } else if (SHOULD_SPARK(SPARK_NODE(spark))) {
	    if(++total_sparks <= MAX_SPARKS || MAX_SPARKS == 0) {
	      if ( RTSflags.GcFlags.giveStats )
		if (pool==ADVISORY_POOL) {
		  tot_sparks[proc]++;
		  tot++;
		}

	      /* Keep it */
	      if (prev_spark[proc][pool] == NULL)
		PendingSparksHd[proc][pool] = spark;
	      else
		SPARK_NEXT(prev_spark[proc][pool]) = spark;
	      SPARK_PREV(spark) = prev_spark[proc][pool];
	      prev_spark[proc][pool] = spark;
	      curr_spark[proc][pool] = SPARK_NEXT(spark);
	      continue;
	    } else { /* total_sparks > MAX_SPARKS */
	      /* Sparkq will end before the current spark */
	      if (prev == NULL)
		PendingSparksHd[proc][pool] = NULL;
	      else
		SPARK_NEXT(prev) = NULL;
	      PendingSparksTl[proc][pool] = prev;
	      endQueues[pool] |= PE_NUMBER(proc);
	      continue;
	    }
	  }

	  /* By now we know that the spark has to be pruned */
	  if(RTSflags.GranFlags.granSimStats_Sparks)
	    DumpRawGranEvent(CurrentProc,0,SP_PRUNED,
			     Prelude_Z91Z93_closure,SPARK_NODE(spark),0);

	  SPARK_NODE(spark) = Prelude_Z91Z93_closure;
	  curr_spark[proc][pool] = SPARK_NEXT(spark);
	  prunedSparks[proc][pool]++;
	  DisposeSpark(spark);
	} /* forall pool ... */
      }   /* forall proc ... */
    } while (arr_and(endQueues,SPARK_POOLS) != allProcs);

    /* Prune all sparks on all processors, starting with */
    /* curr_spark[proc][pool].                           */
    do {
      for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
	for(pool = 0; pool < SPARK_POOLS; ++pool) {
	  spark = curr_spark[proc][pool];

	  if ( spark != NULL ) {
	    if(RTSflags.GranFlags.granSimStats_Sparks)
	      DumpRawGranEvent(CurrentProc,0,SP_PRUNED,
			       Prelude_Z91Z93_closure,SPARK_NODE(spark),0);

	    SPARK_NODE(spark) = Prelude_Z91Z93_closure;
	    curr_spark[proc][pool] = SPARK_NEXT(spark);
	    prunedSparks[proc][pool]++;
	    DisposeSpark(spark);
	  } else {
	    finishedQueues[pool] |= PE_NUMBER(proc);
	  }
	} /* forall pool ... */
      }   /* forall proc ... */
    } while (arr_and(finishedQueues,SPARK_POOLS) != allProcs);

# if defined(GRAN_CHECK) && defined(GRAN)
    if ( RTSflags.GranFlags.debug & 0x1000) {
      for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
	for(pool = 0; pool < SPARK_POOLS; ++pool) {
	  if ( (RTSflags.GcFlags.giveStats) && (prunedSparks[proc][pool]>0)) {
	    fprintf(RTSflags.GcFlags.statsFile,
		    "Discarding %lu sparks on proc %d (pool %d) for GC purposes\n",
		    prunedSparks[proc][pool],proc,pool);
	  }
	}
      }

      if ( RTSflags.GcFlags.giveStats ) {
	fprintf(RTSflags.GcFlags.statsFile,
		"Spark statistics (after discarding) (total sparks = %d):",tot);
	for (proc=0; proc < RTSflags.GranFlags.proc; proc++) {
	  fprintf(RTSflags.GcFlags.statsFile,"\tPE %d: %d ",proc,tot_sparks[proc]);
	}
	fprintf(RTSflags.GcFlags.statsFile,".\n");
      }
    }
# endif
}

#  endif /* !DEPTH_FIRST_PRUNING */

# else /* !GRAN */

void
PruneSparks(STG_NO_ARGS)
{
    I_ pool;
    PP_ old;
    PP_ new;

    for (pool = 0; pool < SPARK_POOLS; pool++) {
	new = PendingSparksBase[pool];
	for (old = PendingSparksHd[pool]; old < PendingSparksTl[pool]; old++) {
	    if (SHOULD_SPARK(*old)) {	/* Keep it */
		*new++ = *old;
	    } else {
		if (DO_QP_PROF)
		    QP_Event0(threadId++, *old);
# if 0
		/* ToDo: Fix log entries for pruned sparks in GUM */
		if(RTSflags.GranFlags.granSimStats_Sparks)
		  /* DumpSparkGranEvent(SP_PRUNED, threadId++); */
		  DumpGranEvent(SP_PRUNED,Prelude_Z91Z93_closure);
					  ^^^^^^^^^^^ should be a TSO
# endif
	    }
	}
	PendingSparksHd[pool] = PendingSparksBase[pool];
	PendingSparksTl[pool] = new;
    }
}

# endif /* !GRAN */
\end{code}

This is the real GC wrapper for the threaded world.  No context switching
or other nonsense... just set up @StorageMgrInfo@ and perform a garbage
collection.
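The roots pushed for the threaded world are, schematically (the GranSim
and parallel variants add or drop entries as indicated):

\begin{verbatim}
  roots[0..]: RunnableThreadsHd/Tl   (one pair per PE under GranSim)
              WaitingThreadsHd/Tl    (non-GranSim only)
              CurrentTSO
              PendingFetches         (PAR only)
              StablePointerTable     (non-PAR only)
\end{verbatim}

They are popped again in exactly the reverse order once @collectHeap@
returns.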
\begin{code}
void handleTimerExpiry PROTO((rtsBool));

void
ReallyPerformThreadGC(reqsize, do_full_collection)
  W_ reqsize;
  rtsBool do_full_collection;
{
# if defined(GRAN)
    I_ proc;
#endif
    I_ num_ptr_roots = 0; /* we bump this counter as we store roots;
			     de-bump it as we re-store them. */
    P_ stack, tso, next;

    /* Discard the saved stack and TSO space.

       What's going on here:  TSOs and StkOs are on the mutables list
       (mutable things in the old generation).  Here, we change them to
       immutable, so that the scavenger (which checks all mutable objects)
       can detect their immutability and remove them from the list.
       Setting to MUTUPLE_VHS as the size is essentially saying "No
       pointers in here" (i.e., empty).

       Without this change of status, these objects might not really die,
       probably with some horrible disastrous consequence that we don't
       want to think about.  Will & Phil 95/10
    */

    for(stack = AvailableStack; stack != Prelude_Z91Z93_closure; stack = next) {
	next = STKO_LINK(stack);
	FREEZE_MUT_HDR(stack, ImMutArrayOfPtrs_info);
	MUTUPLE_CLOSURE_SIZE(stack) = MUTUPLE_VHS;
    }

    for(tso = AvailableTSO; tso != Prelude_Z91Z93_closure; tso = next) {
	next = TSO_LINK(tso);
	FREEZE_MUT_HDR(tso, ImMutArrayOfPtrs_info);
	MUTUPLE_CLOSURE_SIZE(tso) = MUTUPLE_VHS;
    }

    AvailableStack = AvailableTSO = Prelude_Z91Z93_closure;

    PruneSparks();

# if defined(GRAN)
    traverse_eventq_for_gc();	/* tidy up eventq for GC */

    /* Store head and tail of runnable lists as roots for GC */
    if (RTSflags.GranFlags.Light) {
	  StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd[0];
	  StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl[0];
    } else {
      for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
# if defined(GRAN_CHECK) && defined(GRAN)
	if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
	  fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsHd %d (proc: %d) -- 0x%lx\n",
		  num_ptr_roots,proc,RunnableThreadsHd[proc]);
# endif
	StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd[proc];

# if defined(GRAN_CHECK) && defined(GRAN)
	if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
	  fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsTl %d (proc: %d) -- 0x%lx\n",
		  num_ptr_roots,proc,RunnableThreadsTl[proc]);
# endif
	StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl[proc];
      } /* forall proc ... */
    }   /* RTSflags.GranFlags.Light */

    /* This is now done as part of collectHeap (see ../storage dir) */
    /* num_ptr_roots = SaveSparkRoots(num_ptr_roots); */
    /* num_ptr_roots = SaveEventRoots(num_ptr_roots); */

# else /* !GRAN */

    StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd;
    StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl;
    StorageMgrInfo.roots[num_ptr_roots++] = WaitingThreadsHd;
    StorageMgrInfo.roots[num_ptr_roots++] = WaitingThreadsTl;

# endif /* GRAN */

# if defined(GRAN_CHECK) && defined(GRAN)
    if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
      fprintf(RTSflags.GcFlags.statsFile,"Saving CurrentTSO %d -- 0x%lx\n",
	      num_ptr_roots,CurrentTSO);
# endif
    StorageMgrInfo.roots[num_ptr_roots++] = CurrentTSO;

# ifdef PAR
    StorageMgrInfo.roots[num_ptr_roots++] = PendingFetches;
# endif

# ifndef PAR
    StorageMgrInfo.roots[num_ptr_roots++] = StorageMgrInfo.StablePointerTable;
# endif

    StorageMgrInfo.rootno = num_ptr_roots;

    blockUserSignals();

    /* For VTALRM timer ticks to be handled correctly, we need to record
       that we are now about to enter GC, delaying the handling of timer
       expiry for delayed threads till after the GC.
    */
    handleTimerExpiry(rtsFalse);

    /* ====> The REAL THING happens here */
    if (collectHeap(reqsize, &StorageMgrInfo, do_full_collection) != GC_SUCCESS) {

	OutOfHeapHook(reqsize * sizeof(W_), RTSflags.GcFlags.heapSize * sizeof(W_)); /*msg*/

# if defined(TICKY_TICKY)
	if (RTSflags.TickyFlags.showTickyStats) PrintTickyInfo();
# endif
	EXIT(EXIT_FAILURE);
    }

    StorageMgrInfo.rootno = 0;	/* reset */

    /* root restoring ------------------------------- */
    /* must do all the restoring exactly backwards to the storing! */
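    /* Note that every root slot may now hold a *new* address: the
       collector may have moved the TSOs, stack objects and queues it was
       given.  In particular, once CurrentTSO has been popped below,
       CurrentRegTable must be recomputed from the relocated TSO rather
       than reused from before the collection. */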
# if defined(GRAN_CHECK) && defined(GRAN)
    if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
      fprintf(RTSflags.GcFlags.statsFile,
	      "Restoring CurrentTSO %d -- new: 0x%lx\n",
	      num_ptr_roots-1,StorageMgrInfo.roots[num_ptr_roots-1]);
# endif

# ifndef PAR
    StorageMgrInfo.StablePointerTable = StorageMgrInfo.roots[--num_ptr_roots];
# endif

# ifdef PAR
    PendingFetches = StorageMgrInfo.roots[--num_ptr_roots];
# endif

    CurrentTSO = StorageMgrInfo.roots[--num_ptr_roots];
    CurrentRegTable = TSO_INTERNAL_PTR(CurrentTSO);

# if !defined(GRAN)

    WaitingThreadsTl = StorageMgrInfo.roots[--num_ptr_roots];
    WaitingThreadsHd = StorageMgrInfo.roots[--num_ptr_roots];

    RunnableThreadsTl = StorageMgrInfo.roots[--num_ptr_roots];
    RunnableThreadsHd = StorageMgrInfo.roots[--num_ptr_roots];

# else /* GRAN */

    /* num_ptr_roots = RestoreEventRoots(num_ptr_roots); */
    /* num_ptr_roots = RestoreSparkRoots(num_ptr_roots); */

    /* NB: PROC is unsigned datatype i.e. (PROC)-1 > 0 ! */

    if (RTSflags.GranFlags.Light) {
	  RunnableThreadsTl[0] = StorageMgrInfo.roots[--num_ptr_roots];
	  RunnableThreadsHd[0] = StorageMgrInfo.roots[--num_ptr_roots];
    } else {
      for(proc = RTSflags.GranFlags.proc - 1;
	  (proc >= 0) && (proc < RTSflags.GranFlags.proc);
	  --proc) {
# if defined(GRAN_CHECK) && defined(GRAN)
	if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
	  fprintf(RTSflags.GcFlags.statsFile,
		  "Restoring RunnableThreadsTl %d (proc: %d) -- new: 0x%lx\n",
		  num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
# endif
	RunnableThreadsTl[proc] = StorageMgrInfo.roots[--num_ptr_roots];

# if defined(GRAN_CHECK) && defined(GRAN)
	if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
	  fprintf(RTSflags.GcFlags.statsFile,
		  "Restoring RunnableThreadsHd %d (proc: %d) -- new: 0x%lx\n",
		  num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
# endif
	RunnableThreadsHd[proc] = StorageMgrInfo.roots[--num_ptr_roots];
      } /* forall proc ... */
    }   /* RTSflags.GranFlags.Light */

# endif /* GRAN */

    /* Semantics of GC ensures that a block of `reqsize' is now available */
    SAVE_Hp += reqsize;

    /* Activate the handling of entries on the WaitingThreads queue again */
    handleTimerExpiry(rtsTrue);

    unblockUserSignals();
}

#endif /* CONCURRENT */
\end{code}

This routine rattles down the B stack, black-holing any pending updates
to avoid space leaks from them.

\begin{code}
#if !defined(CONCURRENT)

static void
BlackHoleUpdateStack(STG_NO_ARGS)
{
    P_ PtrToUpdateFrame;

    if (! RTSflags.GcFlags.lazyBlackHoling)
	return;

    PtrToUpdateFrame = MAIN_SuB;

    /* ToDo: There may be an optimisation here which stops at the first
	     BHed closure on the stack as all below must have been BHed */

    while (SUBTRACT_B_STK(PtrToUpdateFrame, stackInfo.botB) > 0) {

	UPD_BH(GRAB_UPDATEE(PtrToUpdateFrame), BH_UPD_info);

	/* Move PtrToUpdateFrame down B Stack */
	PtrToUpdateFrame = GRAB_SuB(PtrToUpdateFrame);
    }
}
#endif	/* !CONCURRENT */
\end{code}
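The ToDo note above suggests an early exit: once the walk reaches a frame
whose updatee is already a black hole, every frame further down must have
been black-holed by a previous run of this routine, so the loop could stop
there.  A minimal sketch of that variant, assuming an @INFO_PTR@-style test
for an already-black-holed closure (the exact test is not spelt out here):

\begin{verbatim}
    while (SUBTRACT_B_STK(PtrToUpdateFrame, stackInfo.botB) > 0) {
	P_ updatee = GRAB_UPDATEE(PtrToUpdateFrame);

	if (INFO_PTR(updatee) == (W_) BH_UPD_info)   /* hypothetical test */
	    break;	/* everything below is already black-holed */

	UPD_BH(updatee, BH_UPD_info);
	PtrToUpdateFrame = GRAB_SuB(PtrToUpdateFrame);
    }
\end{verbatim}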