1 \section[PerformGC]{Wrapper for heap overflow}
7 @PerformGC@ is the wrapper for calls to @collectHeap@ in the
8 storage manager. It performs the following actions:
10 \item Save live registers.
11 \item If black holing is required before garbage collection we must
12 black hole the update frames on the B stack and any live registers
13 pointing at updatable closures --- possibly R1, if live and in update? --JSM
14 \item Call the garbage collector.
15 \item Restore registers.
17 They either succeed or crash-and-burn; hence, they don't return
20 @PerformGC@ saves the fixed STG registers and calls the garbage
21 collector. It also black holes the B Stack if this is required at
22 garbage collection time.
24 There's also a function @PerformGCIO@ which does all the above and is
25 used to force a full collection.
28 #if defined(CONCURRENT)
29 EXTFUN(EnterNodeCode); /* For reentering node after GC */
30 EXTFUN(CheckHeapCode); /* For returning to thread after a context switch */
31 extern P_ AvailableStack;
33 EXTDATA_RO(FetchMe_info);
36 static void BlackHoleUpdateStack(STG_NO_ARGS);
37 #endif /* CONCURRENT */
39 extern smInfo StorageMgrInfo;
40 extern void PrintTickyInfo(STG_NO_ARGS);
42 /* the real work is done by this function --- see wrappers at end */
/* RealPerformGC: save the live pointer registers (and the stable-pointer
   table) as GC roots, tidy the B stack (black-hole or squeeze update
   frames), call collectHeap, then restore the roots in exactly the
   reverse order.  In the CONCURRENT build it may instead suspend the
   current TSO and ReSchedule, or hand off to ReallyPerformThreadGC.
   NOTE(review): this extract is missing source lines (declarations,
   braces, #else/#endif partners); comments below are hedged accordingly. */
45 RealPerformGC(liveness, reqsize, always_reenter_node, do_full_collection)
48 W_ always_reenter_node;
49 rtsBool do_full_collection;
51 I_ num_ptr_roots = 0; /* we bump this counter as we
52 store roots; de-bump it
53 as we re-store them. */
54 #if defined(PROFILING)
58 /* stop the profiling timer --------------------- */
59 #if defined(PROFILING)
60 /* STOP_TIME_PROFILER; */
/* Record the caller's liveness mask for later use (e.g. by GC callWrappers). */
65 SAVE_Liveness = liveness;
68 fprintf(stderr,"RealGC:liveness=0x%lx,reqsize=0x%lx,reenter=%lx,do_full=%d,context_switch=%ld\n",
69 liveness, reqsize,always_reenter_node,do_full_collection,context_switch);
73 Even on a uniprocessor, we may have to reenter node after a
74 context switch. Though it can't turn into a FetchMe, its shape
75 may have changed (e.g. from a thunk to a data object).
77 if (always_reenter_node) {
78 /* Avoid infinite loops at the same heap check */
79 if (SAVE_Hp <= SAVE_HpLim && TSO_SWITCH(CurrentTSO) == TSO_PC2(CurrentTSO)) {
80 TSO_SWITCH(CurrentTSO) = NULL;
83 /* Set up to re-enter Node, so as to be sure it's really there. */
84 ASSERT(liveness & LIVENESS_R1);
85 TSO_SWITCH(CurrentTSO) = TSO_PC2(CurrentTSO);
86 TSO_PC2(CurrentTSO) = EnterNodeCode;
/* Context switch requested and only a minor GC needed: park the heap-check
   arguments in the TSO and reschedule instead of collecting now. */
91 if (context_switch && !do_full_collection
92 # if defined(PROFILING)
96 /* We're in a GC callWrapper, so the thread state is safe */
97 TSO_ARG1(CurrentTSO) = reqsize;
98 TSO_PC1(CurrentTSO) = CheckHeapCode;
100 if (RTSflags.ParFlags.granSimStats) {
101 TSO_EXECTIME(CurrentTSO) += CURRENT_TIME - TSO_BLOCKEDAT(CurrentTSO);
105 ReSchedule(SAME_THREAD);
111 # if defined(PROFILING)
115 SET_CCC_RTS(CC_GC,0,1); /* without the sub_scc_count++ */
/* Threaded world: the thread-aware GC entry point does the real work. */
118 ReallyPerformThreadGC(reqsize, do_full_collection);
120 #else /* !CONCURRENT */
122 # if defined(PROFILING)
124 SET_CCC_RTS(CC_GC,0,1); /* without the sub_scc_count++ */
127 /* root saving ---------------------------------- */
129 # define __ENROOT_PTR_REG(cond,n) /* n == 1 <=> R1 */ \
131 StorageMgrInfo.roots[num_ptr_roots] = CAT2(MAIN_R,n).p; \
/* Save each live pointer register (R1..R8) as a GC root. */
135 __ENROOT_PTR_REG(IS_LIVE_R1(liveness),1);
136 __ENROOT_PTR_REG(IS_LIVE_R2(liveness),2);
137 __ENROOT_PTR_REG(IS_LIVE_R3(liveness),3);
138 __ENROOT_PTR_REG(IS_LIVE_R4(liveness),4);
139 __ENROOT_PTR_REG(IS_LIVE_R5(liveness),5);
140 __ENROOT_PTR_REG(IS_LIVE_R6(liveness),6);
141 __ENROOT_PTR_REG(IS_LIVE_R7(liveness),7);
142 __ENROOT_PTR_REG(IS_LIVE_R8(liveness),8);
145 * Before we garbage collect we may have to squeeze update frames and/or
146 * black hole the update stack
148 if (! RTSflags.GcFlags.squeezeUpdFrames) {
149 BlackHoleUpdateStack();
151 } else { /* Squeeze and/or black hole update frames */
154 displacement = SqueezeUpdateFrames(stackInfo.botB + BREL(1), MAIN_SpB, MAIN_SuB);
/* Squeezing moved the B stack contents; shift the stack pointers to match. */
156 MAIN_SuB += BREL(displacement);
157 MAIN_SpB += BREL(displacement);
158 /* fprintf(stderr, "B size %d, squeezed out %d\n", MAIN_SpB - stackInfo.botB,
162 /* Add the stable pointer table to the roots list */
164 StorageMgrInfo.roots[num_ptr_roots++] = StorageMgrInfo.StablePointerTable;
167 ASSERT(num_ptr_roots <= SM_MAXROOTS);
168 StorageMgrInfo.rootno = num_ptr_roots;
171 /* Move (SAVE_)Hp back to where it was */
172 /* (heap is known to grow upwards) */
173 /* we *do* have to do this, so reported stats will be right! */
175 /* the main business ---------------------------- */
182 /* Restore hpLim to its "correct" setting */
183 StorageMgrInfo.hplim += StorageMgrInfo.hardHpOverflowSize;
185 GC_result = collectHeap(reqsize, &StorageMgrInfo, do_full_collection);
187 if ( GC_result == GC_HARD_LIMIT_EXCEEDED ) {
188 OutOfHeapHook(reqsize * sizeof(W_), RTSflags.GcFlags.heapSize * sizeof(W_)); /*msg*/
192 } else if ( GC_result == GC_SOFT_LIMIT_EXCEEDED ) {
193 /* Allow ourselves to use emergency space */
194 /* Set hplim so that we'll GC when we hit the soft limit */
195 StorageMgrInfo.hplim -= StorageMgrInfo.hardHpOverflowSize;
196 raiseError( softHeapOverflowHandler );
198 } else if ( GC_result == GC_SUCCESS ) {
199 /* Set hplim so that we'll GC when we hit the soft limit */
200 StorageMgrInfo.hplim -= StorageMgrInfo.hardHpOverflowSize;
202 } else { /* This should not happen */
203 fprintf(stderr, "Panic: garbage collector returned %d please report it as a bug to glasgow-haskell-bugs@dcs.gla.ac.uk\n", GC_result );
205 # if defined(TICKY_TICKY)
206 if (RTSflags.TickyFlags.showTickyStats) PrintTickyInfo();
212 StorageMgrInfo.rootno = 0; /* reset */
215 /* Semantics of GC ensures that a block of
216 `reqsize' is now available (and allocated) [NB: sequential only] */
218 /* root restoring ------------------------------- */
219 /* must do all the restoring exactly backwards to the storing! */
221 /* remove the stable pointer table first */
223 StorageMgrInfo.StablePointerTable = StorageMgrInfo.roots[--num_ptr_roots];
226 /* now the general regs, in *backwards* order */
228 # define __DEROOT_PTR_REG(cond,n) /* n == 1 <=> R1 */ \
231 CAT2(MAIN_R,n).p = StorageMgrInfo.roots[num_ptr_roots]; \
234 __DEROOT_PTR_REG(IS_LIVE_R8(liveness),8);
235 __DEROOT_PTR_REG(IS_LIVE_R7(liveness),7);
236 __DEROOT_PTR_REG(IS_LIVE_R6(liveness),6);
237 __DEROOT_PTR_REG(IS_LIVE_R5(liveness),5);
238 __DEROOT_PTR_REG(IS_LIVE_R4(liveness),4);
239 __DEROOT_PTR_REG(IS_LIVE_R3(liveness),3);
240 __DEROOT_PTR_REG(IS_LIVE_R2(liveness),2);
241 __DEROOT_PTR_REG(IS_LIVE_R1(liveness),1);
243 ASSERT(num_ptr_roots == 0); /* we have put it all back */
245 unblockUserSignals();
247 #endif /* !CONCURRENT */
249 #if defined(PROFILING)
252 RESTART_TIME_PROFILER;
257 This is a wrapper used for all standard, non-threaded, non-parallel GC
/* NOTE(review): the function header for this wrapper (presumably
   PerformGC(args)) is missing from this extract.  It unpacks the packed
   heap-overflow word into liveness/reqsize/reenter fields and calls
   RealPerformGC with do_full_collection = rtsFalse. */
260 #ifdef HEAP_CHK_HYGIENE
261 I_ doHygieneCheck = 0;
/* Decode the fields packed into the single heap-overflow argument word. */
268 W_ liveness = HEAP_OVERFLOW_LIVENESS(args);
269 W_ reqsize = HEAP_OVERFLOW_REQSIZE(args);
270 W_ always_reenter_node = HEAP_OVERFLOW_REENTER(args);
272 #ifdef HEAP_CHK_HYGIENE
273 if (doHygieneCheck) {
278 RealPerformGC(liveness, reqsize, always_reenter_node, rtsFalse);
281 #if defined(CONCURRENT) && defined(GRAN)
282 /* This is directly called from the macro GRAN_RESCHEDULE out of the */
283 /* threaded world. -- HWL */
/* PerformReschedule: GranSim-only context-switch point.  Consumes the
   global NeedToReSchedule flag, optionally arranges for Node (R1) to be
   re-entered after the switch, and calls ReSchedule choosing
   CHANGE_THREAD vs SAME_THREAD from the GranSim flags. */
286 PerformReschedule(liveness, always_reenter_node)
288 rtsBool always_reenter_node;
291 rtsBool need_to_reschedule;
293 /* Reset the global NeedToReSchedule --
294 this is used only to communicate the fact that we should schedule
295 a new thread rather than the existing one following a fetch.
296 if (RTSflags.GranFlags.Light) {
300 ASSERT(!RTSflags.GranFlags.Light);
/* Latch and clear the global flag so a stale request cannot recur. */
303 need_to_reschedule = NeedToReSchedule;
304 NeedToReSchedule = rtsFalse;
306 SAVE_Liveness = liveness;
308 if (always_reenter_node) {
309 /* Avoid infinite loops at the same context switch */
310 if (/* (TSO_SWITCH(CurrentTSO) == TSO_PC2(CurrentTSO)) || */
311 (!need_to_reschedule &&
312 CurrentTime[CurrentProc]<EndOfTimeSlice &&
313 (TimeOfNextEvent==0 || TimeOfNextEvent>=CurrentTime[CurrentProc])
316 /* TSO_SWITCH(CurrentTSO) = NULL; */
320 /* Set up to re-enter Node, so as to be sure it's really there. */
321 ASSERT(liveness & LIVENESS_R1);
322 /* TSO_SWITCH(CurrentTSO) = TSO_PC2(CurrentTSO); */
323 TSO_PC2(CurrentTSO) = (void *) EnterNodeCode;
326 /* We're in a GC callWrapper, so the thread state is safe */
327 TSO_ARG1(CurrentTSO) = 0;
328 TSO_PC1(CurrentTSO) = EnterNodeCode;
329 ReSchedule( (need_to_reschedule &&
330 !RTSflags.GranFlags.DoReScheduleOnFetch &&
331 !RTSflags.GranFlags.Light) ?
332 CHANGE_THREAD : SAME_THREAD );
333 /* In a block-on-fetch setup we must not use SAME_THREAD since that */
334 /* would continue the fetching TSO, which is still at the head of the */
336 /* GrAnSim-Light always uses SAME_THREAD */
341 /* this is a wrapper used when we want to do a full GC.
343 One reason might be that we're about to enter a time-critical piece
344 of code and want to reduce the risk of a GC during the run. The
345 motivating reason is that we want to force the GC to report any
346 dead Malloc Pointers to us.
348 Note: this should only be called using _ccall_GC_ which saves all
349 registers in the usual place (ie the global save area) before the
350 call and restores them afterwards.
352 ToDo: put in a runtime check that _ccall_GC_ is in action. */
/* Forces a full collection: no live registers (liveness 0, reqsize 0,
   no node re-entry) and do_full_collection = rtsTrue. */
355 StgPerformGarbageCollection()
357 # if ! defined(__STG_GCC_REGS__)
358 SaveAllStgRegs(); /* unregisterised case */
361 RealPerformGC(0,0,0,rtsTrue);
363 # if ! defined(__STG_GCC_REGS__)
364 RestoreAllStgRegs(); /* unregisterised case */
369 #if defined(CONCURRENT)
373 # if defined(DEPTH_FIRST_PRUNING)
375 /* Jim's spark pools are very similar to our processors, except that
376 he uses a hard-wired constant. This would be a mistake for us,
377 since we won't always need this many pools.
/* PruneSparks (depth-first variant): walk every processor's spark pools,
   unlink sparks whose node is NULL/[] or fails SHOULD_SPARK, cap the
   total kept at MAX_SPARKS (0 = unlimited), and log pruned sparks to the
   GranSim event stream when enabled. */
380 PruneSparks(STG_NO_ARGS)
382 sparkq spark, prev, next;
383 I_ proc, pool, prunedSparks;
384 I_ tot_sparks[MAX_PROC], total_sparks = 0, tot = 0;; /* NOTE(review): stray double semicolon */
386 # if defined(GRAN_CHECK) && defined(GRAN)
387 if ( RTSflags.GranFlags.debug & 0x40 )
388 fprintf(stderr,"Pruning (depth-first) spark roots for GC ...\n");
391 for(proc=0; proc<RTSflags.GranFlags.proc; ++proc) {
392 tot_sparks[proc] = 0;
395 for (pool = 0; pool < SPARK_POOLS; pool++) {
398 for(spark = PendingSparksHd[proc][pool];
/* Remember the successor before any unlinking mutates the queue. */
401 next = SPARK_NEXT(spark);
403 if(++total_sparks <= MAX_SPARKS || MAX_SPARKS == 0)
405 if ( RTSflags.GcFlags.giveStats )
406 if (i==ADVISORY_POOL) { /* NOTE(review): `i' is not declared here; the breadth-first twin tests `pool==ADVISORY_POOL' -- looks like a bug, confirm */
411 /* HACK! This clause should actually never happen HWL */
412 if ( (SPARK_NODE(spark) == NULL) ||
413 (SPARK_NODE(spark) == Prelude_Z91Z93_closure) ) {
414 # if defined(GRAN_CHECK) && defined(GRAN)
415 if ( RTSflags.GcFlags.giveStats &&
416 (RTSflags.GranFlags.debug & 0x40) )
417 fprintf(RTSflags.GcFlags.statsFile,"PruneSparks: Warning: spark @ 0x%lx points to NULL or Prelude_Z91Z93_closure\n", spark);
421 else if (SHOULD_SPARK(SPARK_NODE(spark))) {
/* Keep this spark: relink it after the previous kept spark (or at head). */
424 PendingSparksHd[proc][pool] = spark;
426 SPARK_NEXT(prev) = spark;
427 SPARK_PREV(spark) = prev;
433 /* By now we know that the spark has to be pruned */
434 if(RTSflags.GranFlags.granSimStats_Sparks)
435 /* DumpRawGranEvent(CURRENT_PROC,SP_PRUNED,(W_) spark); */
436 DumpRawGranEvent(CurrentProc,0,SP_PRUNED,
437 Prelude_Z91Z93_closure,SPARK_NODE(spark),0);
441 } /* forall spark ... */
/* Terminate the rebuilt queue and fix up the tail pointer. */
443 PendingSparksHd[proc][pool] = NULL;
445 SPARK_NEXT(prev) = NULL;
446 PendingSparksTl[proc][pool] = prev;
447 if ( (RTSflags.GcFlags.giveStats) &&
448 (RTSflags.GranFlags.debug & 0x1000) &&
450 fprintf(RTSflags.GcFlags.statsFile,"Pruning and disposing %lu sparks (_NS flag!) on proc %d (pool %d) in PruneSparks\n",
451 prunedSparks,proc,pool);
452 } /* forall pool ... */
453 } /* forall proc ... */
454 # if defined(GRAN_CHECK) && defined(GRAN)
455 if ( RTSflags.GcFlags.giveStats ) {
456 fprintf(RTSflags.GcFlags.statsFile,
457 "Spark statistics (after pruning) (total sparks: %d; before pruning: %d):",
459 for (proc=0; proc<RTSflags.GranFlags.proc; proc++) {
460 if (proc % 4 == 0) fprintf(RTSflags.GcFlags.statsFile,"\n> ");
461 fprintf(RTSflags.GcFlags.statsFile,"\tPE %d: %d ",proc,tot_sparks[proc]);
463 fprintf(RTSflags.GcFlags.statsFile,".\n");
468 # else /* !DEPTH_FIRST_PRUNING */
470 /* Auxiliary functions that are used in the GranSim version of PruneSparks */
/* arr_and: fold the first `max' elements of `arr' into a single word
   (used below to test whether every pool's PE bitmask covers allProcs).
   NOTE(review): the fold expression itself is missing from this extract --
   presumably res &= arr[i], matching the name; confirm against the original. */
473 arr_and(W_ arr[], I_ max)
478 /* Doesn't work with max==0; but then, many things don't work in this */
480 for (i=1, res = arr[0]; i<max; i++)
/* arr_max: maximum of the first `max' elements of `arr'. */
487 arr_max(W_ arr[], I_ max)
492 /* Doesn't work with max==0; but then, many things don't work in this */
494 for (i=1, res = arr[0]; i<max; i++)
495 res = (arr[i]>res) ? arr[i] : res;
500 /* In case of an excessive number of sparks, depth first pruning is a Bad */
501 /* Idea as we might end up with all remaining sparks on processor 0 and */
502 /* none on the other processors. So, this version uses breadth first */
503 /* pruning. -- HWL */
/* PruneSparks (breadth-first variant): advance one spark at a time on
   every (proc, pool) queue in round-robin, keeping sparks until the
   global MAX_SPARKS cap is hit, so surviving sparks are spread evenly
   across processors.  endQueues/finishedQueues are per-pool bitmasks of
   PEs whose queues are done; the do/while loops run until every PE bit
   is set (tested via arr_and). */
506 PruneSparks(STG_NO_ARGS)
509 prev_spark[MAX_PROC][SPARK_POOLS],
510 curr_spark[MAX_PROC][SPARK_POOLS];
513 endQueues[SPARK_POOLS], finishedQueues[SPARK_POOLS];
514 I_ pool, total_sparks=0,
515 prunedSparks[MAX_PROC][SPARK_POOLS];
516 I_ tot_sparks[MAX_PROC], tot = 0;; /* NOTE(review): stray double semicolon */
518 # if defined(GRAN_CHECK) && defined(GRAN)
519 if ( RTSflags.GranFlags.debug & 0x40 )
520 fprintf(stderr,"Pruning (breadth-first) sparks for GC ...\n");
/* Initialise per-queue cursors and counters before the sweep. */
524 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
525 allProcs |= PE_NUMBER(proc);
526 tot_sparks[proc] = 0;
527 for(pool = 0; pool < SPARK_POOLS; ++pool) {
528 prev_spark[proc][pool] = NULL;
529 curr_spark[proc][pool] = PendingSparksHd[proc][pool];
530 prunedSparks[proc][pool] = 0;
532 finishedQueues[pool] = 0;
536 /* Breadth first pruning */
538 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
539 for(pool = 0; pool < SPARK_POOLS; ++pool) {
540 spark = curr_spark[proc][pool];
541 prev = prev_spark[proc][pool];
543 if (spark == NULL) { /* at the end of the queue already? */
544 if (! (endQueues[pool] & PE_NUMBER(proc)) ) {
545 endQueues[pool] |= PE_NUMBER(proc);
547 PendingSparksHd[proc][pool] = NULL;
549 SPARK_NEXT(prev) = NULL;
550 PendingSparksTl[proc][pool] = prev;
555 /* HACK! This clause should actually never happen HWL */
556 if ( (SPARK_NODE(spark) == NULL) ||
557 (SPARK_NODE(spark) == Prelude_Z91Z93_closure) ) {
558 # if defined(GRAN_CHECK) && defined(GRAN)
559 if ( RTSflags.GcFlags.giveStats &&
560 (RTSflags.GranFlags.debug & 0x40) )
561 fprintf(RTSflags.GcFlags.statsFile,"PruneSparks: Warning: spark @ 0x%lx points to NULL or Prelude_Z91Z93_closure\n", spark);
564 } else if (SHOULD_SPARK(SPARK_NODE(spark))) {
565 if(++total_sparks <= MAX_SPARKS || MAX_SPARKS == 0) {
566 if ( RTSflags.GcFlags.giveStats )
567 if (pool==ADVISORY_POOL) {
/* Keep the spark: splice it after the last kept spark and advance. */
573 if (prev_spark[proc][pool] == NULL)
574 PendingSparksHd[proc][pool] = spark;
576 SPARK_NEXT(prev_spark[proc][pool]) = spark;
577 SPARK_PREV(spark) = prev_spark[proc][pool];
578 prev_spark[proc][pool] = spark;
579 curr_spark[proc][pool] = SPARK_NEXT(spark);
581 } else { /* total_sparks > MAX_SPARKS */
582 /* Sparkq will end before the current spark */
584 PendingSparksHd[proc][pool] = NULL;
586 SPARK_NEXT(prev) = NULL;
587 PendingSparksTl[proc][pool] = prev;
588 endQueues[pool] |= PE_NUMBER(proc);
593 /* By now we know that the spark has to be pruned */
594 if(RTSflags.GranFlags.granSimStats_Sparks)
595 DumpRawGranEvent(CurrentProc,0,SP_PRUNED,
596 Prelude_Z91Z93_closure,SPARK_NODE(spark),0);
598 SPARK_NODE(spark) = Prelude_Z91Z93_closure;
599 curr_spark[proc][pool] = SPARK_NEXT(spark);
600 prunedSparks[proc][pool]++;
602 } /* forall pool ... */
603 } /* forall proc ... */
604 } while (arr_and(endQueues,SPARK_POOLS) != allProcs);
606 /* Prune all sparks on all processor starting with */
607 /* curr_spark[proc][pool]. */
/* Second sweep: everything past the cut-off point is discarded. */
610 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
611 for(pool = 0; pool < SPARK_POOLS; ++pool) {
612 spark = curr_spark[proc][pool];
614 if ( spark != NULL ) {
615 if(RTSflags.GranFlags.granSimStats_Sparks)
616 DumpRawGranEvent(CurrentProc,0,SP_PRUNED,
617 Prelude_Z91Z93_closure,SPARK_NODE(spark),0);
619 SPARK_NODE(spark) = Prelude_Z91Z93_closure;
620 curr_spark[proc][pool] = SPARK_NEXT(spark);
622 prunedSparks[proc][pool]++;
625 finishedQueues[pool] |= PE_NUMBER(proc);
627 } /* forall pool ... */
628 } /* forall proc ... */
629 } while (arr_and(finishedQueues,SPARK_POOLS) != allProcs);
632 # if defined(GRAN_CHECK) && defined(GRAN)
633 if ( RTSflags.GranFlags.debug & 0x1000) {
634 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
635 for(pool = 0; pool < SPARK_POOLS; ++pool) {
636 if ( (RTSflags.GcFlags.giveStats) && (prunedSparks[proc][pool]>0)) {
637 fprintf(RTSflags.GcFlags.statsFile,
638 "Discarding %lu sparks on proc %d (pool %d) for GC purposes\n",
639 prunedSparks[proc][pool],proc,pool);
644 if ( RTSflags.GcFlags.giveStats ) {
645 fprintf(RTSflags.GcFlags.statsFile,
646 "Spark statistics (after discarding) (total sparks = %d):",tot);
647 for (proc=0; proc<RTSflags.GranFlags.proc; proc++) {
649 fprintf(RTSflags.GcFlags.statsFile,"\n> ");
650 fprintf(RTSflags.GcFlags.statsFile,
651 "\tPE %d: %d ",proc,tot_sparks[proc]);
653 fprintf(RTSflags.GcFlags.statsFile,".\n");
659 # endif /* !DEPTH_FIRST_PRUNING */
/* PruneSparks (array-based variant, apparently the GUM build -- see the
   "GUM" ToDo below): compacts each pool's flat spark array in place,
   keeping entries that pass SHOULD_SPARK and resetting Hd/Tl to the
   compacted range. */
664 PruneSparks(STG_NO_ARGS)
671 for (pool = 0; pool < SPARK_POOLS; pool++) {
672 new = PendingSparksBase[pool];
673 for (old = PendingSparksHd[pool]; old < PendingSparksTl[pool]; old++) {
674 if (SHOULD_SPARK(*old)) {
679 QP_Event0(threadId++, *old);
681 /* ToDo: Fix log entries for pruned sparks in GUM */
682 if(RTSflags.GranFlags.granSimStats_Sparks)
683 /* DumpSparkGranEvent(SP_PRUNED, threadId++);*/
684 DumpGranEvent(SP_PRUNED,Prelude_Z91Z93_closure);
685 ^^^^^^^^^^^ should be a TSO
689 PendingSparksHd[pool] = PendingSparksBase[pool];
690 PendingSparksTl[pool] = new;
698 This is the real GC wrapper for the threaded world. No context
699 switching or other nonsense... just set up StorageMgrInfo and perform
700 a garbage collection.
703 void handleTimerExpiry PROTO((rtsBool));
/* ReallyPerformThreadGC: CONCURRENT-build GC entry point.  Frees the
   cached stack/TSO pools, roots the scheduler queues (per-processor
   under GranSim, single queues otherwise), CurrentTSO, PendingFetches
   and the stable-pointer table, calls collectHeap, then restores every
   root in exactly the reverse order. */
706 ReallyPerformThreadGC(reqsize, do_full_collection)
708 rtsBool do_full_collection;
714 I_ num_ptr_roots = 0; /* we bump this counter as we store roots; de-bump it
715 as we re-store them. */
718 /* Discard the saved stack and TSO space.
719 What's going on here: TSOs and StkOs are on the mutables
720 list (mutable things in the old generation). Here, we change
721 them to immutable, so that the scavenger (which chks all
722 mutable objects) can detect their immutability and remove
723 them from the list. Setting to MUTUPLE_VHS as the size is
724 essentially saying "No pointers in here" (i.e., empty).
726 Without this change of status, these
727 objects might not really die, probably with some horrible
728 disastrous consequence that we don't want to think about.
732 for(stack = AvailableStack; stack != Prelude_Z91Z93_closure; stack = next) {
733 next = STKO_LINK(stack);
734 FREEZE_MUT_HDR(stack, ImMutArrayOfPtrs_info);
735 MUTUPLE_CLOSURE_SIZE(stack) = MUTUPLE_VHS;
738 for(tso = AvailableTSO; tso != Prelude_Z91Z93_closure; tso = next) {
739 next = TSO_LINK(tso);
740 FREEZE_MUT_HDR(tso, ImMutArrayOfPtrs_info);
741 MUTUPLE_CLOSURE_SIZE(tso) = MUTUPLE_VHS;
744 AvailableStack = AvailableTSO = Prelude_Z91Z93_closure;
749 traverse_eventq_for_gc(); /* tidy up eventq for GC */
751 /* Store head and tail of runnable lists as roots for GC */
752 if (RTSflags.GranFlags.Light) {
753 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd[0];
754 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl[0];
756 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
757 # if defined(GRAN_CHECK) && defined(GRAN)
758 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
759 fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsHd %d (proc: %d) -- 0x%lx\n",
760 num_ptr_roots,proc,RunnableThreadsHd[proc]);
763 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd[proc];
765 # if defined(GRAN_CHECK) && defined(GRAN)
766 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
767 fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsTl %d (proc: %d) -- 0x%lx\n",
768 num_ptr_roots,proc,RunnableThreadsTl[proc]);
770 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl[proc];
772 } /* forall proc ... */
773 } /* RTSflags.GranFlags.Light */
775 /* This is now done as part of collectHeap (see ../storage dir) */
776 /* num_ptr_roots = SaveSparkRoots(num_ptr_roots); */
777 /* num_ptr_roots = SaveEventRoots(num_ptr_roots); */
/* Non-GranSim case: single runnable/waiting queues, rooted directly. */
781 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd;
782 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl;
783 StorageMgrInfo.roots[num_ptr_roots++] = WaitingThreadsHd;
784 StorageMgrInfo.roots[num_ptr_roots++] = WaitingThreadsTl;
788 # if defined(GRAN_CHECK) && defined(GRAN)
789 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
790 fprintf(RTSflags.GcFlags.statsFile,"Saving CurrentTSO %d -- 0x%lx\n",
791 num_ptr_roots,CurrentTSO);
794 StorageMgrInfo.roots[num_ptr_roots++] = CurrentTSO;
797 StorageMgrInfo.roots[num_ptr_roots++] = PendingFetches;
801 StorageMgrInfo.roots[num_ptr_roots++] = StorageMgrInfo.StablePointerTable;
804 StorageMgrInfo.rootno = num_ptr_roots;
808 /* For VTALRM timer ticks to be handled correctly, we need to record that
809 we are now about to enter GC, delaying the handling of timer expiry
810 for delayed threads till after the GC.
812 handleTimerExpiry(rtsFalse);
814 /* ====> The REAL THING happens here */
815 if (collectHeap(reqsize, &StorageMgrInfo, do_full_collection) != GC_SUCCESS) {
817 OutOfHeapHook(reqsize * sizeof(W_), RTSflags.GcFlags.heapSize * sizeof(W_)); /*msg*/
819 # if defined(TICKY_TICKY)
820 if (RTSflags.TickyFlags.showTickyStats) PrintTickyInfo();
825 StorageMgrInfo.rootno = 0; /* reset */
827 /* root restoring ------------------------------- */
828 /* must do all the restoring exactly backwards to the storing! */
830 # if defined(GRAN_CHECK) && defined(GRAN)
831 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
832 fprintf(RTSflags.GcFlags.statsFile,
833 "Restoring CurrentTSO %d -- new: 0x%lx\n",
834 num_ptr_roots-1,StorageMgrInfo.roots[num_ptr_roots-1]);
838 StorageMgrInfo.StablePointerTable = StorageMgrInfo.roots[--num_ptr_roots];
842 PendingFetches = StorageMgrInfo.roots[--num_ptr_roots];
844 CurrentTSO = StorageMgrInfo.roots[--num_ptr_roots];
/* CurrentTSO may have moved; refresh the cached register-table pointer. */
845 CurrentRegTable = TSO_INTERNAL_PTR(CurrentTSO);
849 WaitingThreadsTl = StorageMgrInfo.roots[--num_ptr_roots];
850 WaitingThreadsHd = StorageMgrInfo.roots[--num_ptr_roots];
852 RunnableThreadsTl = StorageMgrInfo.roots[--num_ptr_roots];
853 RunnableThreadsHd = StorageMgrInfo.roots[--num_ptr_roots];
857 /* num_ptr_roots = RestoreEventRoots(num_ptr_roots); */
858 /* num_ptr_roots = RestoreSparkRoots(num_ptr_roots); */
860 /* NB: PROC is unsigned datatype i.e. (PROC)-1 > 0 ! */
862 if (RTSflags.GranFlags.Light) {
863 RunnableThreadsTl[0] = StorageMgrInfo.roots[--num_ptr_roots] ;
864 RunnableThreadsHd[0] = StorageMgrInfo.roots[--num_ptr_roots] ;
/* Iterate processors in reverse; the double bound guards against the
   unsigned PROC type wrapping past zero (see NB above). */
866 for(proc = RTSflags.GranFlags.proc - 1;
867 (proc >= 0) && (proc < RTSflags.GranFlags.proc) ;
869 # if defined(GRAN_CHECK) && defined(GRAN)
870 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
871 fprintf(RTSflags.GcFlags.statsFile,
872 "Restoring RunnableThreadsTl %d (proc: %d) -- new: 0x%lx\n",
873 num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
875 RunnableThreadsTl[proc] = StorageMgrInfo.roots[--num_ptr_roots];
877 # if defined(GRAN_CHECK) && defined(GRAN)
878 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
879 fprintf(RTSflags.GcFlags.statsFile,
880 "Restoring RunnableThreadsHd %d (proc: %d) -- new: 0x%lx\n",
881 num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
883 RunnableThreadsHd[proc] = StorageMgrInfo.roots[--num_ptr_roots];
884 } /* forall proc ... */
885 } /* RTSflags.GranFlags.Light */
889 /* Semantics of GC ensures that a block of `reqsize' is now available */
892 /* Activate the handling of entries on the WaitingThreads queue again */
893 handleTimerExpiry(rtsTrue);
895 unblockUserSignals();
898 #endif /* CONCURRENT */
902 This routine rattles down the B stack, black-holing any
903 pending updates to avoid space leaks from them.
906 #if !defined(CONCURRENT)
/* BlackHoleUpdateStack: walk the chain of update frames from MAIN_SuB
   down to the stack bottom, overwriting each updatee with a black hole
   (UPD_BH with BH_UPD_info).  Skipped entirely when lazy black-holing
   is enabled (guard on RTSflags.GcFlags.lazyBlackHoling below --
   presumably an early return; the line is missing from this extract). */
910 BlackHoleUpdateStack(STG_NO_ARGS)
914 if (! RTSflags.GcFlags.lazyBlackHoling)
917 PtrToUpdateFrame = MAIN_SuB;
919 /* ToDo: There may be an optimisation here which stops at the first
920 BHed closure on the stack as all below must have been BHed */
922 while (SUBTRACT_B_STK(PtrToUpdateFrame, stackInfo.botB) > 0) {
924 UPD_BH(GRAB_UPDATEE(PtrToUpdateFrame), BH_UPD_info);
926 /* Move PtrToUpdateFrame down B Stack */
927 PtrToUpdateFrame = GRAB_SuB(PtrToUpdateFrame);
930 #endif /* !CONCURRENT */