1 \section[PerformGC]{Wrapper for heap overflow}
7 @PerformGC@ is the wrapper for calls to @collectHeap@ in the
8 storage manager. It performs the following actions:
10 \item Save live registers.
11 \item If black holing is required before garbage collection we must
12 black hole the update frames on the B stack and any live registers
13 pointing at updatable closures --- possibly R1, if live and in update? --JSM
14 \item Call the garbage collector.
15 \item Restore registers.
17 They either succeed or crash-and-burn; hence, they don't return
20 @PerformGC@ saves the fixed STG registers, and calls the garbage
21 collector. It also black holes the B Stack if this is required at
22 garbage collection time.
24 There's also a function @PerformGCIO@ which does all the above and is
25 used to force a full collection.
28 #if defined(CONCURRENT)
29 EXTFUN(EnterNodeCode); /* For reentering node after GC */
30 EXTFUN(CheckHeapCode); /* For returning to thread after a context switch */
31 extern P_ AvailableStack;
33 EXTDATA_RO(FetchMe_info);
36 static void BlackHoleUpdateStack(STG_NO_ARGS);
37 #endif /* CONCURRENT */
39 extern smInfo StorageMgrInfo;
40 extern void PrintTickyInfo(STG_NO_ARGS);
42 /* the real work is done by this function --- see wrappers at end */
45 RealPerformGC(liveness, reqsize, always_reenter_node, do_full_collection)
48 W_ always_reenter_node;
49 rtsBool do_full_collection;
51 I_ num_ptr_roots = 0; /* we bump this counter as we
52 store roots; de-bump it
53 as we re-store them. */
54 #if defined(PROFILING)
58 /* stop the profiling timer --------------------- */
59 #if defined(PROFILING)
60 /* STOP_TIME_PROFILER; */
65 SAVE_Liveness = liveness;
68 fprintf(stderr,"RealGC:liveness=0x%lx,reqsize=0x%lx,reenter=%lx,do_full=%d,context_switch=%ld\n",
69 liveness, reqsize,always_reenter_node,do_full_collection,context_switch);
73 Even on a uniprocessor, we may have to reenter node after a
74 context switch. Though it can't turn into a FetchMe, its shape
75 may have changed (e.g. from a thunk to a data object).
77 if (always_reenter_node) {
78 /* Avoid infinite loops at the same heap check */
79 if (SAVE_Hp <= SAVE_HpLim && TSO_SWITCH(CurrentTSO) == TSO_PC2(CurrentTSO)) {
80 TSO_SWITCH(CurrentTSO) = NULL;
83 /* Set up to re-enter Node, so as to be sure it's really there. */
84 ASSERT(liveness & LIVENESS_R1);
85 TSO_SWITCH(CurrentTSO) = TSO_PC2(CurrentTSO);
86 TSO_PC2(CurrentTSO) = EnterNodeCode;
91 if (context_switch && !do_full_collection
92 # if defined(PROFILING)
96 /* We're in a GC callWrapper, so the thread state is safe */
97 TSO_ARG1(CurrentTSO) = reqsize;
98 TSO_PC1(CurrentTSO) = CheckHeapCode;
100 if (RTSflags.ParFlags.granSimStats) {
101 TSO_EXECTIME(CurrentTSO) += CURRENT_TIME - TSO_BLOCKEDAT(CurrentTSO);
105 ReSchedule(SAME_THREAD);
111 # if defined(PROFILING)
115 SET_CCC_RTS(CC_GC,0,1); /* without the sub_scc_count++ */
118 ReallyPerformThreadGC(reqsize, do_full_collection);
120 #else /* !CONCURRENT */
122 # if defined(PROFILING)
124 SET_CCC_RTS(CC_GC,0,1); /* without the sub_scc_count++ */
127 /* root saving ---------------------------------- */
129 # define __ENROOT_PTR_REG(cond,n) /* n == 1 <=> R1 */ \
131 StorageMgrInfo.roots[num_ptr_roots] = CAT2(MAIN_R,n).p; \
135 __ENROOT_PTR_REG(IS_LIVE_R1(liveness),1);
136 __ENROOT_PTR_REG(IS_LIVE_R2(liveness),2);
137 __ENROOT_PTR_REG(IS_LIVE_R3(liveness),3);
138 __ENROOT_PTR_REG(IS_LIVE_R4(liveness),4);
139 __ENROOT_PTR_REG(IS_LIVE_R5(liveness),5);
140 __ENROOT_PTR_REG(IS_LIVE_R6(liveness),6);
141 __ENROOT_PTR_REG(IS_LIVE_R7(liveness),7);
142 __ENROOT_PTR_REG(IS_LIVE_R8(liveness),8);
145 * Before we garbage collect we may have to squeeze update frames and/or
146 * black hole the update stack
148 if (! RTSflags.GcFlags.squeezeUpdFrames) {
149 BlackHoleUpdateStack();
151 } else { /* Squeeze and/or black hole update frames */
154 displacement = SqueezeUpdateFrames(stackInfo.botB + BREL(1), MAIN_SpB, MAIN_SuB);
156 MAIN_SuB += BREL(displacement);
157 MAIN_SpB += BREL(displacement);
158 /* fprintf(stderr, "B size %d, squeezed out %d\n", MAIN_SpB - stackInfo.botB,
162 /* Add the stable pointer table to the roots list */
164 StorageMgrInfo.roots[num_ptr_roots++] = StorageMgrInfo.StablePointerTable;
167 ASSERT(num_ptr_roots <= SM_MAXROOTS);
168 StorageMgrInfo.rootno = num_ptr_roots;
171 /* Move (SAVE_)Hp back to where it was */
172 /* (heap is known to grow upwards) */
173 /* we *do* have to do this, so reported stats will be right! */
175 /* the main business ---------------------------- */
182 /* Restore hpLim to its "correct" setting */
183 StorageMgrInfo.hplim += StorageMgrInfo.hardHpOverflowSize;
185 GC_result = collectHeap(reqsize, &StorageMgrInfo, do_full_collection);
187 if ( GC_result == GC_HARD_LIMIT_EXCEEDED ) {
188 OutOfHeapHook(reqsize * sizeof(W_), RTSflags.GcFlags.heapSize * sizeof(W_)); /*msg*/
192 } else if ( GC_result == GC_SOFT_LIMIT_EXCEEDED ) {
193 /* Allow ourselves to use emergency space */
194 /* Set hplim so that we'll GC when we hit the soft limit */
195 StorageMgrInfo.hplim -= StorageMgrInfo.hardHpOverflowSize;
196 raiseError( softHeapOverflowHandler );
198 } else if ( GC_result == GC_SUCCESS ) {
199 /* Set hplim so that we'll GC when we hit the soft limit */
200 StorageMgrInfo.hplim -= StorageMgrInfo.hardHpOverflowSize;
202 } else { /* This should not happen */
203 fprintf(stderr, "Panic: garbage collector returned %d please report it as a bug to glasgow-haskell-bugs@dcs.gla.ac.uk\n", GC_result );
205 # if defined(TICKY_TICKY)
206 if (RTSflags.TickyFlags.showTickyStats) PrintTickyInfo();
212 StorageMgrInfo.rootno = 0; /* reset */
215 /* Semantics of GC ensures that a block of
216 `reqsize' is now available (and allocated) [NB: sequential only] */
218 /* root restoring ------------------------------- */
219 /* must do all the restoring exactly backwards to the storing! */
221 /* remove the stable pointer table first */
223 StorageMgrInfo.StablePointerTable = StorageMgrInfo.roots[--num_ptr_roots];
226 /* now the general regs, in *backwards* order */
228 # define __DEROOT_PTR_REG(cond,n) /* n == 1 <=> R1 */ \
231 CAT2(MAIN_R,n).p = StorageMgrInfo.roots[num_ptr_roots]; \
234 __DEROOT_PTR_REG(IS_LIVE_R8(liveness),8);
235 __DEROOT_PTR_REG(IS_LIVE_R7(liveness),7);
236 __DEROOT_PTR_REG(IS_LIVE_R6(liveness),6);
237 __DEROOT_PTR_REG(IS_LIVE_R5(liveness),5);
238 __DEROOT_PTR_REG(IS_LIVE_R4(liveness),4);
239 __DEROOT_PTR_REG(IS_LIVE_R3(liveness),3);
240 __DEROOT_PTR_REG(IS_LIVE_R2(liveness),2);
241 __DEROOT_PTR_REG(IS_LIVE_R1(liveness),1);
243 ASSERT(num_ptr_roots == 0); /* we have put it all back */
245 unblockUserSignals();
247 #endif /* !CONCURRENT */
249 #if defined(PROFILING)
252 RESTART_TIME_PROFILER;
257 This is a wrapper used for all standard, non-threaded, non-parallel GC
260 #ifdef HEAP_CHK_HYGIENE
261 I_ doHygieneCheck = 0;
268 W_ liveness = HEAP_OVERFLOW_LIVENESS(args);
269 W_ reqsize = HEAP_OVERFLOW_REQSIZE(args);
270 W_ always_reenter_node = HEAP_OVERFLOW_REENTER(args);
272 #ifdef HEAP_CHK_HYGIENE
273 if (doHygieneCheck) {
278 RealPerformGC(liveness, reqsize, always_reenter_node, rtsFalse);
281 #if defined(CONCURRENT) && defined(GRAN)
282 /* This is directly called from the macro GRAN_RESCHEDULE out of the */
283 /* threaded world. -- HWL */
286 PerformReschedule(liveness, always_reenter_node)
288 rtsBool always_reenter_node;
291 rtsBool need_to_reschedule;
293 #if 0 && defined(DEBUG)
294 fprintf(stderr,"PerfReS:liveness=0x%lx,reenter=%lx,,context_switch=%ld\n",
295 liveness, always_reenter_node, context_switch);
298 /* Reset the global NeedToReSchedule --
299 this is used only to communicate the fact that we should schedule
300 a new thread rather than the existing one following a fetch.
301 if (RTSflags.GranFlags.Light) {
305 ASSERT(!RTSflags.GranFlags.Light);
308 need_to_reschedule = NeedToReSchedule;
309 NeedToReSchedule = rtsFalse;
311 SAVE_Liveness = liveness;
313 if (always_reenter_node) {
314 /* Avoid infinite loops at the same context switch */
315 if (/* (TSO_SWITCH(CurrentTSO) == TSO_PC2(CurrentTSO)) || */
316 (!need_to_reschedule &&
317 CurrentTime[CurrentProc]<EndOfTimeSlice &&
318 (TimeOfNextEvent==0 || TimeOfNextEvent>=CurrentTime[CurrentProc])
321 /* TSO_SWITCH(CurrentTSO) = NULL; */
325 /* Set up to re-enter Node, so as to be sure it's really there. */
326 ASSERT(liveness & LIVENESS_R1);
327 /* TSO_SWITCH(CurrentTSO) = TSO_PC2(CurrentTSO); */
328 TSO_PC2(CurrentTSO) = (void *) EnterNodeCode;
331 /* We're in a GC callWrapper, so the thread state is safe */
332 TSO_ARG1(CurrentTSO) = 0;
333 TSO_PC1(CurrentTSO) = EnterNodeCode;
334 ReSchedule( (need_to_reschedule &&
335 !RTSflags.GranFlags.DoReScheduleOnFetch &&
336 !RTSflags.GranFlags.Light) ?
337 CHANGE_THREAD : SAME_THREAD );
338 /* In a block-on-fetch setup we must not use SAME_THREAD since that */
339 /* would continue the fetching TSO, which is still at the head of the */
341 /* GrAnSim-Light always uses SAME_THREAD */
346 /* this is a wrapper used when we want to do a full GC.
348 One reason might be that we're about to enter a time-critical piece
349 of code and want to reduce the risk of a GC during the run. The
350 motivating reason is that we want to force the GC to report any
351 dead Malloc Pointers to us.
353 Note: this should only be called using _ccall_GC_ which saves all
354 registers in the usual place (ie the global save area) before the
355 call and restores them afterwards.
357 ToDo: put in a runtime check that _ccall_GC_ is in action. */
360 StgPerformGarbageCollection()
362 # if ! defined(__STG_GCC_REGS__)
363 SaveAllStgRegs(); /* unregisterised case */
366 RealPerformGC(0,0,0,rtsTrue);
368 # if ! defined(__STG_GCC_REGS__)
369 RestoreAllStgRegs(); /* unregisterised case */
374 #if defined(CONCURRENT)
378 # if defined(DEPTH_FIRST_PRUNING)
380 /* Jim's spark pools are very similar to our processors, except that
381 he uses a hard-wired constant. This would be a mistake for us,
382 since we won't always need this many pools.
385 PruneSparks(STG_NO_ARGS)
387 sparkq spark, prev, next;
388 I_ proc, pool, prunedSparks;
389 I_ tot_sparks[MAX_PROC], total_sparks = 0, tot = 0;;
391 # if defined(GRAN_CHECK) && defined(GRAN)
392 if ( RTSflags.GranFlags.debug & 0x40 )
393 fprintf(stderr,"Pruning (depth-first) spark roots for GC ...\n");
396 for(proc=0; proc<RTSflags.GranFlags.proc; ++proc) {
397 tot_sparks[proc] = 0;
400 for (pool = 0; pool < SPARK_POOLS; pool++) {
403 for(spark = PendingSparksHd[proc][pool];
406 next = SPARK_NEXT(spark);
408 if(++total_sparks <= MAX_SPARKS || MAX_SPARKS == 0)
410 if ( RTSflags.GcFlags.giveStats )
411 if (i==ADVISORY_POOL) {
416 /* HACK! This clause should actually never happen HWL */
417 if ( (SPARK_NODE(spark) == NULL) ||
418 (SPARK_NODE(spark) == PrelBase_Z91Z93_closure) ) {
419 # if defined(GRAN_CHECK) && defined(GRAN)
420 if ( RTSflags.GcFlags.giveStats &&
421 (RTSflags.GranFlags.debug & 0x40) )
422 fprintf(RTSflags.GcFlags.statsFile,"PruneSparks: Warning: spark @ 0x%lx points to NULL or PrelBase_Z91Z93_closure\n", spark);
426 else if (SHOULD_SPARK(SPARK_NODE(spark))) {
429 PendingSparksHd[proc][pool] = spark;
431 SPARK_NEXT(prev) = spark;
432 SPARK_PREV(spark) = prev;
438 /* By now we know that the spark has to be pruned */
439 if(RTSflags.GranFlags.granSimStats_Sparks)
440 /* DumpRawGranEvent(CURRENT_PROC,SP_PRUNED,(W_) spark); */
441 DumpRawGranEvent(CurrentProc,0,SP_PRUNED,
442 PrelBase_Z91Z93_closure,SPARK_NODE(spark),0);
446 } /* forall spark ... */
448 PendingSparksHd[proc][pool] = NULL;
450 SPARK_NEXT(prev) = NULL;
451 PendingSparksTl[proc][pool] = prev;
452 if ( (RTSflags.GcFlags.giveStats) &&
453 (RTSflags.GranFlags.debug & 0x1000) &&
455 fprintf(RTSflags.GcFlags.statsFile,"Pruning and disposing %lu sparks (_NS flag!) on proc %d (pool %d) in PruneSparks\n",
456 prunedSparks,proc,pool);
457 } /* forall pool ... */
458 } /* forall proc ... */
459 # if defined(GRAN_CHECK) && defined(GRAN)
460 if ( RTSflags.GcFlags.giveStats ) {
461 fprintf(RTSflags.GcFlags.statsFile,
462 "Spark statistics (after pruning) (total sparks: %d; before pruning: %d):",
464 for (proc=0; proc<RTSflags.GranFlags.proc; proc++) {
465 if (proc % 4 == 0) fprintf(RTSflags.GcFlags.statsFile,"\n> ");
466 fprintf(RTSflags.GcFlags.statsFile,"\tPE %d: %d ",proc,tot_sparks[proc]);
468 fprintf(RTSflags.GcFlags.statsFile,".\n");
473 # else /* !DEPTH_FIRST_PRUNING */
475 /* Auxiliary functions that are used in the GranSim version of PruneSparks */
478 arr_and(W_ arr[], I_ max)
483 /* Doesn't work with max==0; but then, many things don't work in this */
485 for (i=1, res = arr[0]; i<max; i++)
492 arr_max(W_ arr[], I_ max)
497 /* Doesn't work with max==0; but then, many things don't work in this */
499 for (i=1, res = arr[0]; i<max; i++)
500 res = (arr[i]>res) ? arr[i] : res;
505 /* In case of an excessive number of sparks, depth first pruning is a Bad */
506 /* Idea as we might end up with all remaining sparks on processor 0 and */
507 /* none on the other processors. So, this version uses breadth first */
508 /* pruning. -- HWL */
511 PruneSparks(STG_NO_ARGS)
514 prev_spark[MAX_PROC][SPARK_POOLS],
515 curr_spark[MAX_PROC][SPARK_POOLS];
518 endQueues[SPARK_POOLS], finishedQueues[SPARK_POOLS];
519 I_ pool, total_sparks=0,
520 prunedSparks[MAX_PROC][SPARK_POOLS];
521 I_ tot_sparks[MAX_PROC], tot = 0;;
523 # if defined(GRAN_CHECK) && defined(GRAN)
524 if ( RTSflags.GranFlags.debug & 0x40 )
525 fprintf(stderr,"Pruning (breadth-first) sparks for GC ...\n");
529 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
530 allProcs |= PE_NUMBER(proc);
531 tot_sparks[proc] = 0;
532 for(pool = 0; pool < SPARK_POOLS; ++pool) {
533 prev_spark[proc][pool] = NULL;
534 curr_spark[proc][pool] = PendingSparksHd[proc][pool];
535 prunedSparks[proc][pool] = 0;
537 finishedQueues[pool] = 0;
541 /* Breadth first pruning */
543 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
544 for(pool = 0; pool < SPARK_POOLS; ++pool) {
545 spark = curr_spark[proc][pool];
546 prev = prev_spark[proc][pool];
548 if (spark == NULL) { /* at the end of the queue already? */
549 if (! (endQueues[pool] & PE_NUMBER(proc)) ) {
550 endQueues[pool] |= PE_NUMBER(proc);
552 PendingSparksHd[proc][pool] = NULL;
554 SPARK_NEXT(prev) = NULL;
555 PendingSparksTl[proc][pool] = prev;
560 /* HACK! This clause should actually never happen HWL */
561 if ( (SPARK_NODE(spark) == NULL) ||
562 (SPARK_NODE(spark) == PrelBase_Z91Z93_closure) ) {
563 # if defined(GRAN_CHECK) && defined(GRAN)
564 if ( RTSflags.GcFlags.giveStats &&
565 (RTSflags.GranFlags.debug & 0x40) )
566 fprintf(RTSflags.GcFlags.statsFile,"PruneSparks: Warning: spark @ 0x%lx points to NULL or PrelBase_Z91Z93_closure\n", spark);
569 } else if (SHOULD_SPARK(SPARK_NODE(spark))) {
570 if(++total_sparks <= MAX_SPARKS || MAX_SPARKS == 0) {
571 if ( RTSflags.GcFlags.giveStats )
572 if (pool==ADVISORY_POOL) {
578 if (prev_spark[proc][pool] == NULL)
579 PendingSparksHd[proc][pool] = spark;
581 SPARK_NEXT(prev_spark[proc][pool]) = spark;
582 SPARK_PREV(spark) = prev_spark[proc][pool];
583 prev_spark[proc][pool] = spark;
584 curr_spark[proc][pool] = SPARK_NEXT(spark);
586 } else { /* total_sparks > MAX_SPARKS */
587 /* Sparkq will end before the current spark */
589 PendingSparksHd[proc][pool] = NULL;
591 SPARK_NEXT(prev) = NULL;
592 PendingSparksTl[proc][pool] = prev;
593 endQueues[pool] |= PE_NUMBER(proc);
598 /* By now we know that the spark has to be pruned */
599 if(RTSflags.GranFlags.granSimStats_Sparks)
600 DumpRawGranEvent(CurrentProc,0,SP_PRUNED,
601 PrelBase_Z91Z93_closure,SPARK_NODE(spark),0);
603 SPARK_NODE(spark) = PrelBase_Z91Z93_closure;
604 curr_spark[proc][pool] = SPARK_NEXT(spark);
605 prunedSparks[proc][pool]++;
607 } /* forall pool ... */
608 } /* forall proc ... */
609 } while (arr_and(endQueues,SPARK_POOLS) != allProcs);
611 /* Prune all sparks on all processor starting with */
612 /* curr_spark[proc][pool]. */
615 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
616 for(pool = 0; pool < SPARK_POOLS; ++pool) {
617 spark = curr_spark[proc][pool];
619 if ( spark != NULL ) {
620 if(RTSflags.GranFlags.granSimStats_Sparks)
621 DumpRawGranEvent(CurrentProc,0,SP_PRUNED,
622 PrelBase_Z91Z93_closure,SPARK_NODE(spark),0);
624 SPARK_NODE(spark) = PrelBase_Z91Z93_closure;
625 curr_spark[proc][pool] = SPARK_NEXT(spark);
627 prunedSparks[proc][pool]++;
630 finishedQueues[pool] |= PE_NUMBER(proc);
632 } /* forall pool ... */
633 } /* forall proc ... */
634 } while (arr_and(finishedQueues,SPARK_POOLS) != allProcs);
637 # if defined(GRAN_CHECK) && defined(GRAN)
638 if ( RTSflags.GranFlags.debug & 0x1000) {
639 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
640 for(pool = 0; pool < SPARK_POOLS; ++pool) {
641 if ( (RTSflags.GcFlags.giveStats) && (prunedSparks[proc][pool]>0)) {
642 fprintf(RTSflags.GcFlags.statsFile,
643 "Discarding %lu sparks on proc %d (pool %d) for GC purposes\n",
644 prunedSparks[proc][pool],proc,pool);
649 if ( RTSflags.GcFlags.giveStats ) {
650 fprintf(RTSflags.GcFlags.statsFile,
651 "Spark statistics (after discarding) (total sparks = %d):",tot);
652 for (proc=0; proc<RTSflags.GranFlags.proc; proc++) {
654 fprintf(RTSflags.GcFlags.statsFile,"\n> ");
655 fprintf(RTSflags.GcFlags.statsFile,
656 "\tPE %d: %d ",proc,tot_sparks[proc]);
658 fprintf(RTSflags.GcFlags.statsFile,".\n");
664 # endif /* !DEPTH_FIRST_PRUNING */
669 PruneSparks(STG_NO_ARGS)
676 for (pool = 0; pool < SPARK_POOLS; pool++) {
677 new = PendingSparksBase[pool];
678 for (old = PendingSparksHd[pool]; old < PendingSparksTl[pool]; old++) {
679 if (SHOULD_SPARK(*old)) {
684 QP_Event0(threadId++, *old);
686 /* ToDo: Fix log entries for pruned sparks in GUM */
687 if(RTSflags.GranFlags.granSimStats_Sparks)
688 /* DumpSparkGranEvent(SP_PRUNED, threadId++);*/
689 DumpGranEvent(SP_PRUNED,PrelBase_Z91Z93_closure);
690 ^^^^^^^^^^^ should be a TSO
694 PendingSparksHd[pool] = PendingSparksBase[pool];
695 PendingSparksTl[pool] = new;
703 This is the real GC wrapper for the threaded world. No context
704 switching or other nonsense... just set up StorageMgrInfo and perform
705 a garbage collection.
708 void handleTimerExpiry PROTO((rtsBool));
711 ReallyPerformThreadGC(reqsize, do_full_collection)
713 rtsBool do_full_collection;
719 I_ num_ptr_roots = 0; /* we bump this counter as we store roots; de-bump it
720 as we re-store them. */
723 /* Discard the saved stack and TSO space.
724 What's going on here: TSOs and StkOs are on the mutables
725 list (mutable things in the old generation). Here, we change
726 them to immutable, so that the scavenger (which chks all
727 mutable objects) can detect their immutability and remove
728 them from the list. Setting to MUTUPLE_VHS as the size is
729 essentially saying "No pointers in here" (i.e., empty).
731 Without this change of status, these
732 objects might not really die, probably with some horrible
733 disastrous consequence that we don't want to think about.
737 for(stack = AvailableStack; stack != PrelBase_Z91Z93_closure; stack = next) {
738 next = STKO_LINK(stack);
739 FREEZE_MUT_HDR(stack, ImMutArrayOfPtrs_info);
740 MUTUPLE_CLOSURE_SIZE(stack) = MUTUPLE_VHS;
743 for(tso = AvailableTSO; tso != PrelBase_Z91Z93_closure; tso = next) {
744 next = TSO_LINK(tso);
745 FREEZE_MUT_HDR(tso, ImMutArrayOfPtrs_info);
746 MUTUPLE_CLOSURE_SIZE(tso) = MUTUPLE_VHS;
749 AvailableStack = AvailableTSO = PrelBase_Z91Z93_closure;
754 traverse_eventq_for_gc(); /* tidy up eventq for GC */
756 /* Store head and tail of runnable lists as roots for GC */
757 if (RTSflags.GranFlags.Light) {
758 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd[0];
759 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl[0];
761 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
762 # if defined(GRAN_CHECK) && defined(GRAN)
763 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
764 fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsHd %d (proc: %d) -- 0x%lx\n",
765 num_ptr_roots,proc,RunnableThreadsHd[proc]);
768 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd[proc];
770 # if defined(GRAN_CHECK) && defined(GRAN)
771 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
772 fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsTl %d (proc: %d) -- 0x%lx\n",
773 num_ptr_roots,proc,RunnableThreadsTl[proc]);
775 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl[proc];
777 } /* forall proc ... */
778 } /* RTSflags.GranFlags.Light */
780 /* This is now done as part of collectHeap (see ../storage dir) */
781 /* num_ptr_roots = SaveSparkRoots(num_ptr_roots); */
782 /* num_ptr_roots = SaveEventRoots(num_ptr_roots); */
786 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd;
787 StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl;
788 StorageMgrInfo.roots[num_ptr_roots++] = WaitingThreadsHd;
789 StorageMgrInfo.roots[num_ptr_roots++] = WaitingThreadsTl;
793 # if defined(GRAN_CHECK) && defined(GRAN)
794 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
795 fprintf(RTSflags.GcFlags.statsFile,"Saving CurrentTSO %d -- 0x%lx\n",
796 num_ptr_roots,CurrentTSO);
799 StorageMgrInfo.roots[num_ptr_roots++] = CurrentTSO;
802 StorageMgrInfo.roots[num_ptr_roots++] = PendingFetches;
806 StorageMgrInfo.roots[num_ptr_roots++] = StorageMgrInfo.StablePointerTable;
809 StorageMgrInfo.rootno = num_ptr_roots;
813 /* For VTALRM timer ticks to be handled correctly, we need to record that
814 we are now about to enter GC, delaying the handling of timer expiry
815 for delayed threads till after the GC.
817 handleTimerExpiry(rtsFalse);
819 /* ====> The REAL THING happens here */
820 if (collectHeap(reqsize, &StorageMgrInfo, do_full_collection) != GC_SUCCESS) {
822 OutOfHeapHook(reqsize * sizeof(W_), RTSflags.GcFlags.heapSize * sizeof(W_)); /*msg*/
824 # if defined(TICKY_TICKY)
825 if (RTSflags.TickyFlags.showTickyStats) PrintTickyInfo();
830 StorageMgrInfo.rootno = 0; /* reset */
832 /* root restoring ------------------------------- */
833 /* must do all the restoring exactly backwards to the storing! */
835 # if defined(GRAN_CHECK) && defined(GRAN)
836 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
837 fprintf(RTSflags.GcFlags.statsFile,
838 "Restoring CurrentTSO %d -- new: 0x%lx\n",
839 num_ptr_roots-1,StorageMgrInfo.roots[num_ptr_roots-1]);
843 StorageMgrInfo.StablePointerTable = StorageMgrInfo.roots[--num_ptr_roots];
847 PendingFetches = StorageMgrInfo.roots[--num_ptr_roots];
849 CurrentTSO = StorageMgrInfo.roots[--num_ptr_roots];
850 CurrentRegTable = TSO_INTERNAL_PTR(CurrentTSO);
854 WaitingThreadsTl = StorageMgrInfo.roots[--num_ptr_roots];
855 WaitingThreadsHd = StorageMgrInfo.roots[--num_ptr_roots];
857 RunnableThreadsTl = StorageMgrInfo.roots[--num_ptr_roots];
858 RunnableThreadsHd = StorageMgrInfo.roots[--num_ptr_roots];
862 /* num_ptr_roots = RestoreEventRoots(num_ptr_roots); */
863 /* num_ptr_roots = RestoreSparkRoots(num_ptr_roots); */
865 /* NB: PROC is unsigned datatype i.e. (PROC)-1 > 0 ! */
867 if (RTSflags.GranFlags.Light) {
868 RunnableThreadsTl[0] = StorageMgrInfo.roots[--num_ptr_roots] ;
869 RunnableThreadsHd[0] = StorageMgrInfo.roots[--num_ptr_roots] ;
871 for(proc = RTSflags.GranFlags.proc - 1;
872 (proc >= 0) && (proc < RTSflags.GranFlags.proc) ;
874 # if defined(GRAN_CHECK) && defined(GRAN)
875 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
876 fprintf(RTSflags.GcFlags.statsFile,
877 "Restoring RunnableThreadsTl %d (proc: %d) -- new: 0x%lx\n",
878 num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
880 RunnableThreadsTl[proc] = StorageMgrInfo.roots[--num_ptr_roots];
882 # if defined(GRAN_CHECK) && defined(GRAN)
883 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
884 fprintf(RTSflags.GcFlags.statsFile,
885 "Restoring RunnableThreadsHd %d (proc: %d) -- new: 0x%lx\n",
886 num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
888 RunnableThreadsHd[proc] = StorageMgrInfo.roots[--num_ptr_roots];
889 } /* forall proc ... */
890 } /* RTSflags.GranFlags.Light */
894 /* Semantics of GC ensures that a block of `reqsize' is now available */
897 /* Activate the handling of entries on the WaitingThreads queue again */
898 handleTimerExpiry(rtsTrue);
900 unblockUserSignals();
903 #endif /* CONCURRENT */
907 This routine rattles down the B stack, black-holing any
908 pending updates to avoid space leaks from them.
911 #if !defined(CONCURRENT)
915 BlackHoleUpdateStack(STG_NO_ARGS)
919 if (! RTSflags.GcFlags.lazyBlackHoling)
922 PtrToUpdateFrame = MAIN_SuB;
924 /* ToDo: There may be an optimisation here which stops at the first
925 BHed closure on the stack as all below must have been BHed */
927 while (SUBTRACT_B_STK(PtrToUpdateFrame, stackInfo.botB) > 0) {
929 UPD_BH(GRAB_UPDATEE(PtrToUpdateFrame), BH_UPD_info);
931 /* Move PtrToUpdateFrame down B Stack */
932 PtrToUpdateFrame = GRAB_SuB(PtrToUpdateFrame);
935 #endif /* !CONCURRENT */