1 \section[PerformGC]{Wrapper for heap overflow}
7 @PerformGC@ is the wrapper for calls to @collectHeap@ in the
8 storage manager. It performs the following actions:
10 \item Save live registers.
11 \item If black holing is required before garbage collection we must
12 black hole the update frames on the B stack and any live registers
13 pointing at updatable closures --- possibly R1, if live and in update? --JSM
14 \item Call the garbage collector.
15 \item Restore registers.
17 They either succeed or crash-and-burn; hence, they don't return
20 @PerformGC@ saves the fixed STG registers and calls the garbage
21 collector. It also black holes the B Stack if this is required at
22 garbage collection time.
24 There's also a function @PerformGCIO@ which does all the above and is
25 used to force a full collection.
28 #if defined(CONCURRENT)
29 EXTFUN(EnterNodeCode); /* For reentering node after GC */
30 EXTFUN(CheckHeapCode); /* For returning to thread after a context switch */
31 extern P_ AvailableStack;
33 EXTDATA_RO(FetchMe_info);
36 static void BlackHoleUpdateStack(STG_NO_ARGS);
37 #endif /* CONCURRENT */
39 extern smInfo StorageMgrInfo;
40 extern void PrintTickyInfo(STG_NO_ARGS);
42 /* the real work is done by this function --- see wrappers at end */
/* RealPerformGC: the common entry point for garbage collection.
   Saves the live pointer registers (selected by the `liveness' bitmask)
   into the storage manager's root table, runs collectHeap for at least
   `reqsize' words, then restores the registers in reverse order.
   NOTE(review): this view of the file is elided -- the return type, some
   declarations, braces and statements between the numbered lines are not
   visible here.  Code below is kept byte-identical to the original. */
45 RealPerformGC(liveness, reqsize, always_reenter_node, do_full_collection)
48 W_ always_reenter_node;
49 rtsBool do_full_collection;
51 I_ num_ptr_roots = 0; /* we bump this counter as we
52 store roots; de-bump it
53 as we re-store them. */
54 #if defined(PROFILING)
58 /* stop the profiling timer --------------------- */
59 #if defined(PROFILING)
60 /* STOP_TIME_PROFILER; */
/* Record the liveness mask so the collector knows which registers hold
   live pointers. */
65 SAVE_Liveness = liveness;
68 fprintf(stderr,"RealGC:liveness=0x%lx,reqsize=0x%lx,reenter=%lx,do_full=%d,context_switch=%ld\n",
69 liveness, reqsize,always_reenter_node,do_full_collection,context_switch);
73 Even on a uniprocessor, we may have to reenter node after a
74 context switch. Though it can't turn into a FetchMe, its shape
75 may have changed (e.g. from a thunk to a data object).
77 if (always_reenter_node) {
78 /* Avoid infinite loops at the same heap check */
79 if (SAVE_Hp <= SAVE_HpLim && TSO_SWITCH(CurrentTSO) == TSO_PC2(CurrentTSO)) {
80 TSO_SWITCH(CurrentTSO) = NULL;
83 /* Set up to re-enter Node, so as to be sure it's really there. */
84 ASSERT(liveness & LIVENESS_R1);
85 TSO_SWITCH(CurrentTSO) = TSO_PC2(CurrentTSO);
86 TSO_PC2(CurrentTSO) = EnterNodeCode;
/* A pending context switch (when no full collection was requested)
   suspends the current thread and reschedules instead of collecting. */
91 if (context_switch && !do_full_collection
92 # if defined(PROFILING)
96 /* We're in a GC callWrapper, so the thread state is safe */
97 TSO_ARG1(CurrentTSO) = reqsize;
98 TSO_PC1(CurrentTSO) = CheckHeapCode;
100 if (RTSflags.ParFlags.granSimStats) {
101 TSO_EXECTIME(CurrentTSO) += CURRENT_TIME - TSO_BLOCKEDAT(CurrentTSO);
105 ReSchedule(SAME_THREAD);
111 # if defined(PROFILING)
115 SET_CCC_RTS(CC_GC,0,1); /* without the sub_scc_count++ */
/* In the threaded (CONCURRENT) world the real work is delegated: */
118 ReallyPerformThreadGC(reqsize, do_full_collection);
120 #else /* !CONCURRENT */
122 # if defined(PROFILING)
124 SET_CCC_RTS(CC_GC,0,1); /* without the sub_scc_count++ */
127 /* root saving ---------------------------------- */
/* Store each live pointer register R1..R8 into StorageMgrInfo.roots so
   the collector can trace and update them. */
129 # define __ENROOT_PTR_REG(cond,n) /* n == 1 <=> R1 */ \
131 StorageMgrInfo.roots[num_ptr_roots] = CAT2(MAIN_R,n).p; \
135 __ENROOT_PTR_REG(IS_LIVE_R1(liveness),1);
136 __ENROOT_PTR_REG(IS_LIVE_R2(liveness),2);
137 __ENROOT_PTR_REG(IS_LIVE_R3(liveness),3);
138 __ENROOT_PTR_REG(IS_LIVE_R4(liveness),4);
139 __ENROOT_PTR_REG(IS_LIVE_R5(liveness),5);
140 __ENROOT_PTR_REG(IS_LIVE_R6(liveness),6);
141 __ENROOT_PTR_REG(IS_LIVE_R7(liveness),7);
142 __ENROOT_PTR_REG(IS_LIVE_R8(liveness),8);
145 * Before we garbage collect we may have to squeeze update frames and/or
146 * black hole the update stack
148 if (! RTSflags.GcFlags.squeezeUpdFrames) {
149 BlackHoleUpdateStack();
151 } else { /* Squeeze and/or black hole update frames */
154 displacement = SqueezeUpdateFrames(stackInfo.botB + BREL(1), MAIN_SpB, MAIN_SuB);
/* Shift the B-stack pointers by however much squeezing removed. */
156 MAIN_SuB += BREL(displacement);
157 MAIN_SpB += BREL(displacement);
158 /* fprintf(stderr, "B size %d, squeezed out %d\n", MAIN_SpB - stackInfo.botB,
162 ASSERT(num_ptr_roots <= SM_MAXROOTS);
163 StorageMgrInfo.rootno = num_ptr_roots;
166 /* Move (SAVE_)Hp back to where it was */
167 /* (heap is known to grow upwards) */
168 /* we *do* have to do this, so reported stats will be right! */
170 /* the main business ---------------------------- */
177 /* Restore hpLim to its "correct" setting */
178 StorageMgrInfo.hplim += StorageMgrInfo.hardHpOverflowSize;
/* Run the collector; the result code says which heap limit (if any)
   was exceeded. */
180 GC_result = collectHeap(reqsize, &StorageMgrInfo, do_full_collection);
182 if ( GC_result == GC_HARD_LIMIT_EXCEEDED ) {
183 OutOfHeapHook(reqsize * sizeof(W_), RTSflags.GcFlags.heapSize * sizeof(W_)); /*msg*/
187 } else if ( GC_result == GC_SOFT_LIMIT_EXCEEDED ) {
188 /* Allow ourselves to use emergency space */
189 /* Set hplim so that we'll GC when we hit the soft limit */
190 StorageMgrInfo.hplim -= StorageMgrInfo.hardHpOverflowSize;
191 raiseError( softHeapOverflowHandler );
193 } else if ( GC_result == GC_SUCCESS ) {
194 /* Set hplim so that we'll GC when we hit the soft limit */
195 StorageMgrInfo.hplim -= StorageMgrInfo.hardHpOverflowSize;
197 } else { /* This should not happen */
198 fprintf(stderr, "Panic: garbage collector returned %d please report it as a bug to glasgow-haskell-bugs@dcs.gla.ac.uk\n", GC_result );
200 # if defined(TICKY_TICKY)
201 if (RTSflags.TickyFlags.showTickyStats) PrintTickyInfo();
207 StorageMgrInfo.rootno = 0; /* reset */
210 /* Semantics of GC ensures that a block of
211 `reqsize' is now available (and allocated) [NB: sequential only] */
213 /* root restoring ------------------------------- */
214 /* must do all the restoring exactly backwards to the storing! */
216 /* now the general regs, in *backwards* order */
218 # define __DEROOT_PTR_REG(cond,n) /* n == 1 <=> R1 */ \
221 CAT2(MAIN_R,n).p = StorageMgrInfo.roots[num_ptr_roots]; \
/* Restore R8..R1 -- exact mirror image of the save order above. */
224 __DEROOT_PTR_REG(IS_LIVE_R8(liveness),8);
225 __DEROOT_PTR_REG(IS_LIVE_R7(liveness),7);
226 __DEROOT_PTR_REG(IS_LIVE_R6(liveness),6);
227 __DEROOT_PTR_REG(IS_LIVE_R5(liveness),5);
228 __DEROOT_PTR_REG(IS_LIVE_R4(liveness),4);
229 __DEROOT_PTR_REG(IS_LIVE_R3(liveness),3);
230 __DEROOT_PTR_REG(IS_LIVE_R2(liveness),2);
231 __DEROOT_PTR_REG(IS_LIVE_R1(liveness),1);
233 ASSERT(num_ptr_roots == 0); /* we have put it all back */
235 unblockUserSignals();
237 #endif /* !CONCURRENT */
239 #if defined(PROFILING)
242 RESTART_TIME_PROFILER;
247 This is a wrapper used for all standard, non-threaded, non-parallel GC
/* PerformGC: the standard, non-threaded, non-parallel GC wrapper.
   Unpacks the packed heap-overflow argument word and calls
   RealPerformGC with do_full_collection == rtsFalse.
   NOTE(review): the wrapper's own header (`PerformGC(args)') and its
   closing brace are elided from this view. */
#ifdef HEAP_CHK_HYGIENE
251 I_ doHygieneCheck = 0;
/* Unpack liveness mask, requested size and reenter flag from `args'. */
258 W_ liveness = HEAP_OVERFLOW_LIVENESS(args);
259 W_ reqsize = HEAP_OVERFLOW_REQSIZE(args);
260 W_ always_reenter_node = HEAP_OVERFLOW_REENTER(args);
262 #ifdef HEAP_CHK_HYGIENE
263 if (doHygieneCheck) {
/* Ordinary (non-full) collection. */
268 RealPerformGC(liveness, reqsize, always_reenter_node, rtsFalse);
271 #if defined(CONCURRENT) && defined(GRAN)
272 /* This is directly called from the macro GRAN_RESCHEDULE out of the */
273 /* threaded world. -- HWL */
/* PerformReschedule: called from the GRAN_RESCHEDULE macro in the
   threaded (GranSim) world.  Decides whether to continue the current
   thread or switch to another one, optionally arranging to re-enter
   Node first.  NOTE(review): return type, braces and some statements
   are elided from this view; code kept byte-identical. */
276 PerformReschedule(liveness, always_reenter_node)
278 rtsBool always_reenter_node;
281 rtsBool need_to_reschedule;
283 /* Reset the global NeedToReSchedule --
284 this is used only to communicate the fact that we should schedule
285 a new thread rather than the existing one following a fetch.
286 if (RTSflags.GranFlags.Light) {
290 ASSERT(!RTSflags.GranFlags.Light);
/* Latch-and-clear the global flag so it is consumed exactly once. */
293 need_to_reschedule = NeedToReSchedule;
294 NeedToReSchedule = rtsFalse;
296 SAVE_Liveness = liveness;
298 if (always_reenter_node) {
299 /* Avoid infinite loops at the same context switch */
300 if (/* (TSO_SWITCH(CurrentTSO) == TSO_PC2(CurrentTSO)) || */
301 (!need_to_reschedule &&
302 CurrentTime[CurrentProc]<EndOfTimeSlice &&
303 (TimeOfNextEvent==0 || TimeOfNextEvent>=CurrentTime[CurrentProc])
306 /* TSO_SWITCH(CurrentTSO) = NULL; */
310 /* Set up to re-enter Node, so as to be sure it's really there. */
311 ASSERT(liveness & LIVENESS_R1);
312 /* TSO_SWITCH(CurrentTSO) = TSO_PC2(CurrentTSO); */
313 TSO_PC2(CurrentTSO) = (void *) EnterNodeCode;
316 /* We're in a GC callWrapper, so the thread state is safe */
317 TSO_ARG1(CurrentTSO) = 0;
318 TSO_PC1(CurrentTSO) = EnterNodeCode;
/* CHANGE_THREAD only when a reschedule was requested and we are in a
   block-on-fetch (not reschedule-on-fetch, not Light) setup. */
319 ReSchedule( (need_to_reschedule &&
320 !RTSflags.GranFlags.DoReScheduleOnFetch &&
321 !RTSflags.GranFlags.Light) ?
322 CHANGE_THREAD : SAME_THREAD );
323 /* In a block-on-fetch setup we must not use SAME_THREAD since that */
324 /* would continue the fetching TSO, which is still at the head of the */
326 /* GrAnSim-Light always uses SAME_THREAD */
331 /* this is a wrapper used when we want to do a full GC.
333 One reason might be that we're about to enter a time-critical piece
334 of code and want to reduce the risk of a GC during the run. The
335 motivating reason is that we want to force the GC to report any
336 dead Malloc Pointers to us.
338 Note: this should only be called using _ccall_GC_ which saves all
339 registers in the usual place (ie the global save area) before the
340 call and restores them afterwards.
342 ToDo: put in a runtime check that _ccall_GC_ is in action. */
/* StgPerformGarbageCollection: force a full collection (e.g. to flush
   dead Malloc Pointers).  Must be called via _ccall_GC_ so the STG
   registers are already saved; in the unregisterised build we save and
   restore them explicitly here.  NOTE(review): surrounding braces are
   elided from this view. */
345 StgPerformGarbageCollection()
347 # if ! defined(__STG_GCC_REGS__)
348 SaveAllStgRegs(); /* unregisterised case */
/* liveness 0, reqsize 0, no re-enter, full collection */
351 RealPerformGC(0,0,0,rtsTrue);
353 # if ! defined(__STG_GCC_REGS__)
354 RestoreAllStgRegs(); /* unregisterised case */
359 #if defined(CONCURRENT)
363 # if defined(DEPTH_FIRST_PRUNING)
365 /* Jim's spark pools are very similar to our processors, except that
366 he uses a hard-wired constant. This would be a mistake for us,
367 since we won't always need this many pools.
/* PruneSparks (depth-first variant): walk every processor's spark
   pools, keep at most MAX_SPARKS sparks that are still worth running
   (SHOULD_SPARK), and prune the rest, logging pruned sparks when
   GranSim spark statistics are enabled.
   NOTE(review): braces and several statements are elided from this
   view; code kept byte-identical. */
370 PruneSparks(STG_NO_ARGS)
372 sparkq spark, prev, next;
373 I_ proc, pool, prunedSparks;
/* NOTE(review): stray double semicolon `;;' below -- harmless, but
   worth cleaning up in the full source. */
374 I_ tot_sparks[MAX_PROC], total_sparks = 0, tot = 0;;
376 # if defined(GRAN_CHECK) && defined(GRAN)
377 if ( RTSflags.GranFlags.debug & 0x40 )
378 fprintf(stderr,"Pruning (depth-first) spark roots for GC ...\n");
381 for(proc=0; proc<RTSflags.GranFlags.proc; ++proc) {
382 tot_sparks[proc] = 0;
385 for (pool = 0; pool < SPARK_POOLS; pool++) {
388 for(spark = PendingSparksHd[proc][pool];
391 next = SPARK_NEXT(spark);
/* MAX_SPARKS == 0 means "no limit". */
393 if(++total_sparks <= MAX_SPARKS || MAX_SPARKS == 0)
395 if ( RTSflags.GcFlags.giveStats )
/* NOTE(review): `i' here looks suspicious -- the loop variables are
   `proc'/`pool', and the breadth-first variant tests
   `pool==ADVISORY_POOL'; confirm against the full source. */
396 if (i==ADVISORY_POOL) {
401 /* HACK! This clause should actually never happen HWL */
402 if ( (SPARK_NODE(spark) == NULL) ||
403 (SPARK_NODE(spark) == Prelude_Z91Z93_closure) ) {
404 # if defined(GRAN_CHECK) && defined(GRAN)
405 if ( RTSflags.GcFlags.giveStats &&
406 (RTSflags.GranFlags.debug & 0x40) )
407 fprintf(RTSflags.GcFlags.statsFile,"PruneSparks: Warning: spark @ 0x%lx points to NULL or Prelude_Z91Z93_closure\n", spark);
/* Spark survives: relink it into the pool's queue. */
411 else if (SHOULD_SPARK(SPARK_NODE(spark))) {
414 PendingSparksHd[proc][pool] = spark;
416 SPARK_NEXT(prev) = spark;
417 SPARK_PREV(spark) = prev;
423 /* By now we know that the spark has to be pruned */
424 if(RTSflags.GranFlags.granSimStats_Sparks)
425 /* DumpRawGranEvent(CURRENT_PROC,SP_PRUNED,(W_) spark); */
426 DumpRawGranEvent(CurrentProc,0,SP_PRUNED,
427 Prelude_Z91Z93_closure,SPARK_NODE(spark),0);
431 } /* forall spark ... */
/* Terminate the rebuilt queue (empty vs non-empty cases). */
433 PendingSparksHd[proc][pool] = NULL;
435 SPARK_NEXT(prev) = NULL;
436 PendingSparksTl[proc][pool] = prev;
437 if ( (RTSflags.GcFlags.giveStats) &&
438 (RTSflags.GranFlags.debug & 0x1000) &&
440 fprintf(RTSflags.GcFlags.statsFile,"Pruning and disposing %lu sparks (_NS flag!) on proc %d (pool %d) in PruneSparks\n",
441 prunedSparks,proc,pool);
442 } /* forall pool ... */
443 } /* forall proc ... */
444 # if defined(GRAN_CHECK) && defined(GRAN)
445 if ( RTSflags.GcFlags.giveStats ) {
446 fprintf(RTSflags.GcFlags.statsFile,
447 "Spark statistics (after pruning) (total sparks: %d; before pruning: %d):",
449 for (proc=0; proc<RTSflags.GranFlags.proc; proc++) {
450 if (proc % 4 == 0) fprintf(RTSflags.GcFlags.statsFile,"\n> ");
451 fprintf(RTSflags.GcFlags.statsFile,"\tPE %d: %d ",proc,tot_sparks[proc]);
453 fprintf(RTSflags.GcFlags.statsFile,".\n");
458 # else /* !DEPTH_FIRST_PRUNING */
460 /* Auxiliary functions that are used in the GranSim version of PruneSparks */
463 arr_and(W_ arr[], I_ max)
468 /* Doesn't work with max==0; but then, many things don't work in this */
470 for (i=1, res = arr[0]; i<max; i++)
477 arr_max(W_ arr[], I_ max)
482 /* Doesn't work with max==0; but then, many things don't work in this */
484 for (i=1, res = arr[0]; i<max; i++)
485 res = (arr[i]>res) ? arr[i] : res;
490 /* In case of an excessive number of sparks, depth first pruning is a Bad */
491 /* Idea as we might end up with all remaining sparks on processor 0 and */
492 /* none on the other processors. So, this version uses breadth first */
493 /* pruning. -- HWL */
/* PruneSparks (breadth-first variant): visit one spark per
   (processor, pool) pair per round so the surviving MAX_SPARKS sparks
   are spread evenly across processors.  `endQueues'/`finishedQueues'
   are per-pool bitmasks of processors whose queues have been fully
   processed; the outer do/while loops run until arr_and(...) shows all
   processors are done.  NOTE(review): braces and several statements are
   elided from this view; code kept byte-identical. */
496 PruneSparks(STG_NO_ARGS)
499 prev_spark[MAX_PROC][SPARK_POOLS],
500 curr_spark[MAX_PROC][SPARK_POOLS];
503 endQueues[SPARK_POOLS], finishedQueues[SPARK_POOLS];
504 I_ pool, total_sparks=0,
505 prunedSparks[MAX_PROC][SPARK_POOLS];
/* NOTE(review): stray double semicolon `;;' below -- harmless. */
506 I_ tot_sparks[MAX_PROC], tot = 0;;
508 # if defined(GRAN_CHECK) && defined(GRAN)
509 if ( RTSflags.GranFlags.debug & 0x40 )
510 fprintf(stderr,"Pruning (breadth-first) sparks for GC ...\n");
/* Initialise per-(proc,pool) cursors and the all-processors mask. */
514 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
515 allProcs |= PE_NUMBER(proc);
516 tot_sparks[proc] = 0;
517 for(pool = 0; pool < SPARK_POOLS; ++pool) {
518 prev_spark[proc][pool] = NULL;
519 curr_spark[proc][pool] = PendingSparksHd[proc][pool];
520 prunedSparks[proc][pool] = 0;
522 finishedQueues[pool] = 0;
526 /* Breadth first pruning */
528 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
529 for(pool = 0; pool < SPARK_POOLS; ++pool) {
530 spark = curr_spark[proc][pool];
531 prev = prev_spark[proc][pool];
533 if (spark == NULL) { /* at the end of the queue already? */
534 if (! (endQueues[pool] & PE_NUMBER(proc)) ) {
535 endQueues[pool] |= PE_NUMBER(proc);
537 PendingSparksHd[proc][pool] = NULL;
539 SPARK_NEXT(prev) = NULL;
540 PendingSparksTl[proc][pool] = prev;
545 /* HACK! This clause should actually never happen HWL */
546 if ( (SPARK_NODE(spark) == NULL) ||
547 (SPARK_NODE(spark) == Prelude_Z91Z93_closure) ) {
548 # if defined(GRAN_CHECK) && defined(GRAN)
549 if ( RTSflags.GcFlags.giveStats &&
550 (RTSflags.GranFlags.debug & 0x40) )
551 fprintf(RTSflags.GcFlags.statsFile,"PruneSparks: Warning: spark @ 0x%lx points to NULL or Prelude_Z91Z93_closure\n", spark);
554 } else if (SHOULD_SPARK(SPARK_NODE(spark))) {
/* Spark survives (still under the MAX_SPARKS budget): relink it. */
555 if(++total_sparks <= MAX_SPARKS || MAX_SPARKS == 0) {
556 if ( RTSflags.GcFlags.giveStats )
557 if (pool==ADVISORY_POOL) {
563 if (prev_spark[proc][pool] == NULL)
564 PendingSparksHd[proc][pool] = spark;
566 SPARK_NEXT(prev_spark[proc][pool]) = spark;
567 SPARK_PREV(spark) = prev_spark[proc][pool];
568 prev_spark[proc][pool] = spark;
569 curr_spark[proc][pool] = SPARK_NEXT(spark);
571 } else { /* total_sparks > MAX_SPARKS */
572 /* Sparkq will end before the current spark */
574 PendingSparksHd[proc][pool] = NULL;
576 SPARK_NEXT(prev) = NULL;
577 PendingSparksTl[proc][pool] = prev;
578 endQueues[pool] |= PE_NUMBER(proc);
583 /* By now we know that the spark has to be pruned */
584 if(RTSflags.GranFlags.granSimStats_Sparks)
585 DumpRawGranEvent(CurrentProc,0,SP_PRUNED,
586 Prelude_Z91Z93_closure,SPARK_NODE(spark),0);
588 SPARK_NODE(spark) = Prelude_Z91Z93_closure;
589 curr_spark[proc][pool] = SPARK_NEXT(spark);
590 prunedSparks[proc][pool]++;
592 } /* forall pool ... */
593 } /* forall proc ... */
594 } while (arr_and(endQueues,SPARK_POOLS) != allProcs);
596 /* Prune all sparks on all processor starting with */
597 /* curr_spark[proc][pool]. */
/* Second pass: everything left past the budget cut is pruned. */
600 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
601 for(pool = 0; pool < SPARK_POOLS; ++pool) {
602 spark = curr_spark[proc][pool];
604 if ( spark != NULL ) {
605 if(RTSflags.GranFlags.granSimStats_Sparks)
606 DumpRawGranEvent(CurrentProc,0,SP_PRUNED,
607 Prelude_Z91Z93_closure,SPARK_NODE(spark),0);
609 SPARK_NODE(spark) = Prelude_Z91Z93_closure;
610 curr_spark[proc][pool] = SPARK_NEXT(spark);
612 prunedSparks[proc][pool]++;
615 finishedQueues[pool] |= PE_NUMBER(proc);
617 } /* forall pool ... */
618 } /* forall proc ... */
619 } while (arr_and(finishedQueues,SPARK_POOLS) != allProcs);
622 # if defined(GRAN_CHECK) && defined(GRAN)
623 if ( RTSflags.GranFlags.debug & 0x1000) {
624 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
625 for(pool = 0; pool < SPARK_POOLS; ++pool) {
626 if ( (RTSflags.GcFlags.giveStats) && (prunedSparks[proc][pool]>0)) {
627 fprintf(RTSflags.GcFlags.statsFile,
628 "Discarding %lu sparks on proc %d (pool %d) for GC purposes\n",
629 prunedSparks[proc][pool],proc,pool);
634 if ( RTSflags.GcFlags.giveStats ) {
635 fprintf(RTSflags.GcFlags.statsFile,
636 "Spark statistics (after discarding) (total sparks = %d):",tot);
637 for (proc=0; proc<RTSflags.GranFlags.proc; proc++) {
639 fprintf(RTSflags.GcFlags.statsFile,"\n> ");
640 fprintf(RTSflags.GcFlags.statsFile,
641 "\tPE %d: %d ",proc,tot_sparks[proc]);
643 fprintf(RTSflags.GcFlags.statsFile,".\n");
649 # endif /* !DEPTH_FIRST_PRUNING */
/* PruneSparks (GUM / non-GranSim variant): compact each flat spark
   pool in place, keeping only sparks that pass SHOULD_SPARK, then reset
   the head/tail pointers to the compacted region.
   NOTE(review): braces, `new'/`old' declarations and the
   keep-spark copy statement are elided from this view. */
654 PruneSparks(STG_NO_ARGS)
661 for (pool = 0; pool < SPARK_POOLS; pool++) {
662 new = PendingSparksBase[pool];
663 for (old = PendingSparksHd[pool]; old < PendingSparksTl[pool]; old++) {
664 if (SHOULD_SPARK(*old)) {
669 QP_Event0(threadId++, *old);
671 /* ToDo: Fix log entries for pruned sparks in GUM */
672 if(RTSflags.GranFlags.granSimStats_Sparks)
673 /* DumpSparkGranEvent(SP_PRUNED, threadId++);*/
674 DumpGranEvent(SP_PRUNED,Prelude_Z91Z93_closure);
/* NOTE(review): the `^^^' line below is a leftover reviewer annotation
   in the original source, pointing at the closure argument above. */
675 ^^^^^^^^^^^ should be a TSO
679 PendingSparksHd[pool] = PendingSparksBase[pool];
680 PendingSparksTl[pool] = new;
688 This is the real GC wrapper for the threaded world. No context
689 switching or other nonsense... just set up StorageMgrInfo and perform
690 a garbage collection.
693 void handleTimerExpiry PROTO((rtsBool));
/* ReallyPerformThreadGC: the real GC wrapper for the threaded world.
   Freezes the recycled TSO/StkO free lists so they can die, saves the
   scheduler's thread queues (plus CurrentTSO, PendingFetches and the
   stable-pointer table) as GC roots, runs collectHeap, and restores
   everything in exactly the reverse order.
   NOTE(review): braces, declarations and several statements are elided
   from this view; code kept byte-identical. */
696 ReallyPerformThreadGC(reqsize, do_full_collection)
698 rtsBool do_full_collection;
704 I_ num_ptr_roots = 0; /* we bump this counter as we store roots; de-bump it
705 as we re-store them. */
708 /* Discard the saved stack and TSO space.
709 What's going on here: TSOs and StkOs are on the mutables
710 list (mutable things in the old generation). Here, we change
711 them to immutable, so that the scavenger (which chks all
712 mutable objects) can detect their immutability and remove
713 them from the list. Setting to MUTUPLE_VHS as the size is
714 essentially saying "No pointers in here" (i.e., empty).
716 Without this change of status, these
717 objects might not really die, probably with some horrible
718 disastrous consequence that we don't want to think about.
722 for(stack = AvailableStack; stack != Prelude_Z91Z93_closure; stack = next) {
723 next = STKO_LINK(stack);
724 FREEZE_MUT_HDR(stack, ImMutArrayOfPtrs_info);
725 MUTUPLE_CLOSURE_SIZE(stack) = MUTUPLE_VHS;
728 for(tso = AvailableTSO; tso != Prelude_Z91Z93_closure; tso = next) {
729 next = TSO_LINK(tso);
730 FREEZE_MUT_HDR(tso, ImMutArrayOfPtrs_info);
731 MUTUPLE_CLOSURE_SIZE(tso) = MUTUPLE_VHS;
734 AvailableStack = AvailableTSO = Prelude_Z91Z93_closure;
739 traverse_eventq_for_gc(); /* tidy up eventq for GC */
741 /* Store head and tail of runnable lists as roots for GC */
742 if (RTSflags.GranFlags.Light) {
743 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd[0];
744 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl[0];
746 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
747 # if defined(GRAN_CHECK) && defined(GRAN)
748 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
749 fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsHd %d (proc: %d) -- 0x%lx\n",
750 num_ptr_roots,proc,RunnableThreadsHd[proc]);
753 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd[proc];
755 # if defined(GRAN_CHECK) && defined(GRAN)
756 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
757 fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsTl %d (proc: %d) -- 0x%lx\n",
758 num_ptr_roots,proc,RunnableThreadsTl[proc]);
760 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl[proc];
762 } /* forall proc ... */
763 } /* RTSflags.GranFlags.Light */
765 /* This is now done as part of collectHeap (see ../storage dir) */
766 /* num_ptr_roots = SaveSparkRoots(num_ptr_roots); */
767 /* num_ptr_roots = SaveEventRoots(num_ptr_roots); */
/* Non-GRAN threaded world: flat queue heads/tails are single roots. */
771 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd;
772 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl;
773 StorageMgrInfo.roots[num_ptr_roots++] = WaitingThreadsHd;
774 StorageMgrInfo.roots[num_ptr_roots++] = WaitingThreadsTl;
778 # if defined(GRAN_CHECK) && defined(GRAN)
779 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
780 fprintf(RTSflags.GcFlags.statsFile,"Saving CurrentTSO %d -- 0x%lx\n",
781 num_ptr_roots,CurrentTSO);
784 StorageMgrInfo.roots[num_ptr_roots++] = CurrentTSO;
787 StorageMgrInfo.roots[num_ptr_roots++] = PendingFetches;
791 StorageMgrInfo.roots[num_ptr_roots++] = StorageMgrInfo.StablePointerTable;
794 StorageMgrInfo.rootno = num_ptr_roots;
798 /* For VTALRM timer ticks to be handled correctly, we need to record that
799 we are now about to enter GC, delaying the handling of timer expiry
800 for delayed threads till after the GC.
802 handleTimerExpiry(rtsFalse);
804 /* ====> The REAL THING happens here */
805 if (collectHeap(reqsize, &StorageMgrInfo, do_full_collection) != GC_SUCCESS) {
807 OutOfHeapHook(reqsize * sizeof(W_), RTSflags.GcFlags.heapSize * sizeof(W_)); /*msg*/
809 # if defined(TICKY_TICKY)
810 if (RTSflags.TickyFlags.showTickyStats) PrintTickyInfo();
815 StorageMgrInfo.rootno = 0; /* reset */
817 /* root restoring ------------------------------- */
818 /* must do all the restoring exactly backwards to the storing! */
820 # if defined(GRAN_CHECK) && defined(GRAN)
821 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
822 fprintf(RTSflags.GcFlags.statsFile,
823 "Restoring CurrentTSO %d -- new: 0x%lx\n",
824 num_ptr_roots-1,StorageMgrInfo.roots[num_ptr_roots-1]);
/* Pop roots in strict LIFO order: stable-ptr table, fetches, TSO, ... */
828 StorageMgrInfo.StablePointerTable = StorageMgrInfo.roots[--num_ptr_roots];
832 PendingFetches = StorageMgrInfo.roots[--num_ptr_roots];
834 CurrentTSO = StorageMgrInfo.roots[--num_ptr_roots];
835 CurrentRegTable = TSO_INTERNAL_PTR(CurrentTSO);
839 WaitingThreadsTl = StorageMgrInfo.roots[--num_ptr_roots];
840 WaitingThreadsHd = StorageMgrInfo.roots[--num_ptr_roots];
842 RunnableThreadsTl = StorageMgrInfo.roots[--num_ptr_roots];
843 RunnableThreadsHd = StorageMgrInfo.roots[--num_ptr_roots];
847 /* num_ptr_roots = RestoreEventRoots(num_ptr_roots); */
848 /* num_ptr_roots = RestoreSparkRoots(num_ptr_roots); */
850 /* NB: PROC is unsigned datatype i.e. (PROC)-1 > 0 ! */
852 if (RTSflags.GranFlags.Light) {
853 RunnableThreadsTl[0] = StorageMgrInfo.roots[--num_ptr_roots] ;
854 RunnableThreadsHd[0] = StorageMgrInfo.roots[--num_ptr_roots] ;
/* Unsigned PROC forces the double-ended loop bound below. */
856 for(proc = RTSflags.GranFlags.proc - 1;
857 (proc >= 0) && (proc < RTSflags.GranFlags.proc) ;
859 # if defined(GRAN_CHECK) && defined(GRAN)
860 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
861 fprintf(RTSflags.GcFlags.statsFile,
862 "Restoring RunnableThreadsTl %d (proc: %d) -- new: 0x%lx\n",
863 num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
865 RunnableThreadsTl[proc] = StorageMgrInfo.roots[--num_ptr_roots];
867 # if defined(GRAN_CHECK) && defined(GRAN)
868 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
869 fprintf(RTSflags.GcFlags.statsFile,
870 "Restoring RunnableThreadsHd %d (proc: %d) -- new: 0x%lx\n",
871 num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
873 RunnableThreadsHd[proc] = StorageMgrInfo.roots[--num_ptr_roots];
874 } /* forall proc ... */
875 } /* RTSflags.GranFlags.Light */
879 /* Semantics of GC ensures that a block of `reqsize' is now available */
882 /* Activate the handling of entries on the WaitingThreads queue again */
883 handleTimerExpiry(rtsTrue);
885 unblockUserSignals();
888 #endif /* CONCURRENT */
892 This routine rattles down the B stack, black-holing any
893 pending updates to avoid space leaks from them.
896 #if !defined(CONCURRENT)
/* BlackHoleUpdateStack: walk down the B stack, overwriting the updatee
   of every pending update frame with a black hole, to stop those
   closures retaining dead data (space-leak avoidance).
   NOTE(review): braces, the PtrToUpdateFrame declaration and the body
   of the lazyBlackHoling test (presumably an early return) are elided
   from this view -- confirm against the full source. */
900 BlackHoleUpdateStack(STG_NO_ARGS)
904 if (! RTSflags.GcFlags.lazyBlackHoling)
/* Start from the topmost update frame and chase the SuB chain down. */
907 PtrToUpdateFrame = MAIN_SuB;
909 /* ToDo: There may be an optimisation here which stops at the first
910 BHed closure on the stack as all below must have been BHed */
912 while (SUBTRACT_B_STK(PtrToUpdateFrame, stackInfo.botB) > 0) {
914 UPD_BH(GRAB_UPDATEE(PtrToUpdateFrame), BH_UPD_info);
916 /* Move PtrToUpdateFrame down B Stack */
917 PtrToUpdateFrame = GRAB_SuB(PtrToUpdateFrame);
920 #endif /* !CONCURRENT */