/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "MarkStack.h"
#include "Capability.h"
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
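/* When the threaded RTS is built, this file is compiled twice: once with
 * PARALLEL_GC defined (the multi-threaded collector) and once without it
 * (the single-threaded collector).  The defines above give the
 * single-threaded copies distinct "1"-suffixed names, so that both
 * versions can be linked into the same RTS and selected at runtime.
 */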
/* -----------------------------------------------------------------------------
   Scavenging a TSO.
   -------------------------------------------------------------------------- */
scavengeTSO (StgTSO *tso)

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
    }

    debugTrace(DEBUG_gc, "scavenging thread %d", (int)tso->id);

    // update the pointer from the Task.
    if (tso->bound != NULL) {
        tso->bound->tso = tso;
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    evacuate((StgClosure **)&tso->blocked_exceptions);
    evacuate((StgClosure **)&tso->bq);

    // scavenge the current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    tso->dirty = gct->failed_to_evac;

    evacuate((StgClosure **)&tso->_link);
    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        evacuate(&tso->block_info.closure);
    }

    // in the THREADED_RTS, block_info.closure must always point to a
    // valid closure, because we assume this in throwTo().  In the
    // non-threaded RTS it might be an FD (for
    // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
    tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;

    if (tso->dirty == 0 && gct->failed_to_evac) {
        tso->flags |= TSO_LINK_DIRTY;
    } else {
        tso->flags &= ~TSO_LINK_DIRTY;
    }

    gct->eager_promotion = saved_eager;
/* -----------------------------------------------------------------------------
   Mutable arrays of pointers
   -------------------------------------------------------------------------- */
static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)

    any_failed = rtsFalse;
    p = (StgPtr)&a->payload[0];
    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
    {
        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    q = (StgPtr)&a->payload[a->ptrs];
    for (; p < q; p++) {
        evacuate((StgClosure**)p);
    }
    if (gct->failed_to_evac) {
        any_failed = rtsTrue;
        *mutArrPtrsCard(a,m) = 1;
        gct->failed_to_evac = rtsFalse;
    } else {
        *mutArrPtrsCard(a,m) = 0;
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
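/* Note on the card table: a MUT_ARR_PTRS closure carries one card byte per
 * 2^MUT_ARR_PTRS_CARD_BITS elements of payload (128 elements per card with
 * the usual setting of 7).  scavenge_mut_arr_ptrs() above walks the whole
 * array, setting a card to 1 whenever some element in that card still
 * points into a younger generation; scavenge_mut_arr_ptrs_marked() below
 * then revisits only the cards that are marked dirty.
 */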
// scavenge only the marked areas of a MUT_ARR_PTRS
static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)

    any_failed = rtsFalse;
    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
    {
        if (*mutArrPtrsCard(a,m) != 0) {
            p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
            q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
                        (StgPtr)&a->payload[a->ptrs]);
            for (; p < q; p++) {
                evacuate((StgClosure**)p);
            }
            if (gct->failed_to_evac) {
                any_failed = rtsTrue;
                gct->failed_to_evac = rtsFalse;
            } else {
                *mutArrPtrsCard(a,m) = 0;
            }
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        // walk the `size` argument slots, evacuating where the bit is 0
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        bitmap = bitmap >> 1;
    }
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)

    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        bitmap = bitmap >> 1;
    }

STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)

    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);

scavenge_AP (StgAP *ap)

    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
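/* StgPAP and StgAP have the same shape for scavenging purposes: a function
 * closure plus n_args payload words.  Both therefore share
 * scavenge_PAP_payload(), which picks the pointer bitmap out of the
 * function's info table (or out of the BCO's bitmap for interpreted code).
 */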
/* -----------------------------------------------------------------------------
   Scavenging SRTs
   -------------------------------------------------------------------------- */

/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
    bitmap = large_srt->l.bitmap[b];
    size = (nat)large_srt->l.size;
    p = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)

    bitmap = srt_bitmap;
    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_HOST_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those.  The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
            } else {
                evacuate(srt);
            }
#else
            evacuate(srt);
#endif
        }
        srt++;
        bitmap = bitmap >> 1;
    }
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)

    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)

    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
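/* SRTs contain only pointers to static closures, and static objects are
 * only ever collected in a major GC, which is why both functions above
 * return immediately when !major_gc.
 */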
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)

    rtsBool saved_eager_promotion;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
               bd->start, bd->gen_no, bd->u.scan);

    gct->evac_gen = bd->gen;
    saved_eager_promotion = gct->eager_promotion;
    gct->failed_to_evac = rtsFalse;

    ws = &gct->gens[bd->gen->no];

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

        ASSERT(bd->link == NULL);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        switch (info->type) {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            p += sizeofW(StgMVar);
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;

            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;

            scavenge_fun_srt(info);

            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;

            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;

            scavenge_fun_srt(info);

            p += sizeofW(StgHeader) + 1;

            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;

            scavenge_fun_srt(info);

            p += sizeofW(StgHeader) + 2;

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;

            scavenge_fun_srt(info);

            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;

            scavenge_fun_srt(info);

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;

            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);

            evacuate(&((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            p += sizeofW(StgMutVar);

            StgBlockingQueue *bq = (StgBlockingQueue *)p;

            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure**)&bq->owner);
            evacuate((StgClosure**)&bq->queue);
            evacuate((StgClosure**)&bq->link);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            } else {
                bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
            }
            p += sizeofW(StgBlockingQueue);

            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            p += THUNK_SELECTOR_sizeW();

            // A chunk of stack saved in a heap object
            StgAP_STACK *ap = (StgAP_STACK *)p;

            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;

            p = scavenge_PAP((StgPAP *)p);

            p = scavenge_AP((StgAP *)p);

            p += arr_words_sizeW((StgArrWords *)p);
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;

            p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
            break;

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:

            p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
            StgTSO *tso = (StgTSO *)p;

            gct->eager_promotion = rtsFalse;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable

            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);
        default:
            barf("scavenge: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        /*
         * We need to record the current object on the mutable list if
         *  (a) It is actually mutable, or
         *  (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, bd->gen_no);
            }
        }
    }

    gct->copied += ws->todo_free - bd->free;

    debugTrace(DEBUG_gc, "   scavenged %ld bytes",
               (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

    // update stats: this is a block that has been scavenged
    gct->scanned += bd->free - bd->u.scan;
    bd->u.scan = bd->free;

    if (bd != ws->todo_bd) {
        // we're not going to evac any more objects into
        // this block, so push it now.
        push_scanned_block(bd, ws);
    }
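/* scavenge_block() is the Cheney-style scan: the scan pointer p chases
 * bd->free, and anything that evacuate() copies into this same block
 * before p catches up gets scavenged in the same pass.  The extra
 * ws->todo_bd/ws->todo_free test in the loop condition is what allows us
 * to scavenge the very block we are simultaneously allocating into.
 */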
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
scavenge_mark_stack(void)

    rtsBool saved_eager_promotion;

    gct->evac_gen = oldest_gen;
    saved_eager_promotion = gct->eager_promotion;

    while ((p = pop_mark_stack())) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        switch (info->type) {

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);

            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);

            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_fun_srt(info);

            scavenge_thunk_srt(info);

            scavenge_fun_srt(info);

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }

            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);

            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.

            evacuate(&((StgInd *)p)->indirectee);
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
        }

            StgBlockingQueue *bq = (StgBlockingQueue *)p;

            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure**)&bq->owner);
            evacuate((StgClosure**)&bq->queue);
            evacuate((StgClosure**)&bq->link);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            } else {
                bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
            }

            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);

            // A chunk of stack saved in a heap object
            StgAP_STACK *ap = (StgAP_STACK *)p;

            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);

            scavenge_PAP((StgPAP *)p);

            scavenge_AP((StgAP *)p);
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;

            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:

            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
            scavengeTSO((StgTSO*)p);

            gct->eager_promotion = rtsFalse;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable

            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable

        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_gen) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
            }
        }
    } // while (p = pop_mark_stack())
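/* The mark stack is only in play when the oldest generation is being
 * compacted: objects there are not copied to a to-space that could be
 * scanned linearly, so evacuate() pushes each newly marked object and we
 * pop and scavenge them here instead, with evac_gen pinned to oldest_gen.
 */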
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
scavenge_one(StgPtr p)

    const StgInfoTable *info;
    rtsBool no_luck;
    rtsBool saved_eager_promotion;

    saved_eager_promotion = gct->eager_promotion;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }

    case FUN_1_0: // hardly worth specialising these guys

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure**)&bq->owner);
        evacuate((StgClosure**)&bq->queue);
        evacuate((StgClosure**)&bq->link);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
        } else {
            bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
        }
        break;
    }

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

        StgAP_STACK *ap = (StgAP_STACK *)p;

        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;

        p = scavenge_PAP((StgPAP *)p);

        p = scavenge_AP((StgAP *)p);
        // nothing to follow

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;

        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue;
        break;

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:

        // follow everything
        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
        scavengeTSO((StgTSO*)p);

        gct->eager_promotion = rtsFalse;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable

        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated.  See #3424.

        evacuate(&((StgInd *)p)->indirectee);

#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * scavenged */
        {
            StgPtr start = gen->scan;
            bdescr *start_bd = gen->scan_bd;

            if (start_bd != gen->scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->scan -
                    (P_)BLOCK_ROUND_DOWN(gen->scan);
            } else {
                size = gen->scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;

    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
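/* The result of scavenge_one() is rtsTrue iff some pointer in the object
 * could not be promoted out of a younger generation (failed_to_evac), in
 * which case the caller must keep the object on a mutable list and try
 * again at the next GC.
 */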
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
scavenge_mutable_list(bdescr *bd, generation *gen)

    gct->evac_gen = gen;
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                // can happen due to concurrent writeMutVars
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
            // Check whether this object is "clean", that is, it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as TSOs,
            // are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            case MUT_ARR_PTRS_DIRTY:
            {
                rtsBool saved_eager_promotion;
                saved_eager_promotion = gct->eager_promotion;
                gct->eager_promotion = rtsFalse;

                scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);

                if (gct->failed_to_evac) {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
                } else {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
                }

                gct->eager_promotion = saved_eager_promotion;
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            }
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
                    // Should be on the mutable list because its link
                    // field is dirty.  However, in parallel GC we may
                    // have a thread on multiple mutable lists, so
                    // this assertion would be invalid:
                    // ASSERT(tso->flags & TSO_LINK_DIRTY);

                    evacuate((StgClosure **)&tso->_link);
                    if (   tso->why_blocked == BlockedOnMVar
                        || tso->why_blocked == BlockedOnBlackHole
                        || tso->why_blocked == BlockedOnMsgThrowTo
                        || tso->why_blocked == NotBlocked
                        ) {
                        evacuate((StgClosure **)&tso->block_info.prev);
                    }
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }

            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
            }
scavenge_capability_mut_lists (Capability *cap)

    /* Mutable lists from each generation > N:
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
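/* The mutable lists scavenged here are the *saved* ones: at the start of
 * this GC each capability's mut_list was set aside in saved_mut_lists and
 * replaced with a fresh, empty list, so anything recorded as mutable
 * while we scavenge lands on the new list and survives the
 * freeChain_sync() above.
 */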
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   We scavenge the list of static objects that have been found reachable
   so far (gct->static_objects), evacuating the closures each one points
   to and moving the object itself onto the scavenged_static_objects
   list.  Static objects reached during evacuation are added to
   gct->static_objects, so the list can grow while we walk it.
   -------------------------------------------------------------------------- */
scavenge_static(void)

    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_gen = oldest_gen;

    /* keep going until we've scavenged all the objects on the linked
     * list... */
    while (1) {

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is per GC thread here: gct->static_objects)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        if (info->type == RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;

        switch (info->type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
            }
            break;
        }

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )

    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)

    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
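/* Both bitmap walkers above use the stack-layout convention: a 0 bit
 * means "pointer" (evacuate the slot) and a 1 bit means "non-pointer"
 * (step over it).  For example, size = 4 and bitmap = 0110 (binary)
 * evacuates slots 0 and 3 and skips slots 1 and 2.  Note that this is the
 * opposite sense to the SRT bitmaps earlier in this file, where a set bit
 * selects an SRT entry to follow.
 */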
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
scavenge_stack(StgPtr p, StgPtr stack_end)

    const StgRetInfoTable* info;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //   (thread A pushes an update frame for a thunk T; thread B
            //   also enters T, finishes first, and updates T with its
            //   value)
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we
            // check whether the value after evacuation is a BLACKHOLE,
            // and if not, we change the update frame to an stg_enter
            // frame that simply returns the value.  Hence, blackholing is
            // compulsory (otherwise we would have to check for thunks
            // too).
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
            // overwrite the updatee with an IND.  In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND.  Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
        {
            StgUpdateFrame *frame = (StgUpdateFrame *)p;
            StgClosure *v;

            evacuate(&frame->updatee);
            v = frame->updatee;
            if (GET_CLOSURE_TAG(v) != 0 ||
                (get_itbl(v)->type != BLACKHOLE)) {
                // blackholing is compulsory, see above.
                frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
            }
            ASSERT(v->header.info != &stg_TSO_info);
            p += sizeofW(StgUpdateFrame);
            continue;
        }
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;

            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }

        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area.  (see StgMacros.h,
        // HEAP_CHK_GEN).
        case RET_DYN:
        {
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p = (P_)&((StgRetDyn *)p)->payload[0];
            size = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
scavenge_large (gen_workspace *ws)

    gct->evac_gen = ws->gen;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat todo_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
        dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
        ws->gen->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->gen->no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest gen that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that has
   work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
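/* The order tried below, within each generation from oldest to youngest:
 * (1) finish our own partially-scanned todo block, (2) scavenge our
 * pending large objects, (3) take a block from our local todo queue; only
 * when all of that is exhausted do we (4) try to steal a todo block from
 * another thread (THREADED_RTS with work stealing only).
 */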
scavenge_find_work (void)

    rtsBool did_something, did_anything;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
        ws = &gct->gens[g];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_local_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }

#if defined(THREADED_RTS)
    if (work_stealing) {
        // look for work to steal
        for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
            if ((bd = steal_todo_block(g)) != NULL) {
                scavenge_block(bd);
                did_something = rtsTrue;
                break;
            }
        }

        if (did_something) {
            did_anything = rtsTrue;
            goto loop;
        }
    }
#endif

    // only return when there is no more work to do
    return did_anything;
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */
loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bd != NULL && !mark_stack_empty()) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;