+ /* Already evacuated, just return the forwarding address.
+ * HOWEVER: if the requested destination generation (evac_gen) is
+ * older than the actual generation (because the object was
+ * already evacuated to a younger generation) then we have to
+ * set the failed_to_evac flag to indicate that we couldn't
+ * manage to promote the object to the desired generation.
+ */
+ if (evac_gen > 0) { // optimisation
+ StgClosure *p = ((StgEvacuated*)q)->evacuee;
+ if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
+ failed_to_evac = rtsTrue;
+ TICK_GC_FAILED_PROMOTION();
+ }
+ }
+ return ((StgEvacuated*)q)->evacuee;
+
+ case ARR_WORDS:
+ // just copy the block
+ return copy(q,arr_words_sizeW((StgArrWords *)q),stp);
+
+ case MUT_ARR_PTRS:
+ case MUT_ARR_PTRS_FROZEN:
+ // just copy the block
+ return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
+
+ case TSO:
+ {
+ StgTSO *tso = (StgTSO *)q;
+
+ /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
+ */
+ if (tso->what_next == ThreadRelocated) {
+ q = (StgClosure *)tso->link;
+ goto loop;
+ }
+
+ /* To evacuate a small TSO, we need to relocate the update frame
+ * list it contains.
+ */
+ {
+ StgTSO *new_tso;
+ StgPtr p, q;
+
+ new_tso = (StgTSO *)copyPart((StgClosure *)tso,
+ tso_sizeW(tso),
+ sizeofW(StgTSO), stp);
+ move_TSO(tso, new_tso);
+ for (p = tso->sp, q = new_tso->sp;
+ p < tso->stack+tso->stack_size;) {
+ *q++ = *p++;
+ }
+
+ return (StgClosure *)new_tso;
+ }
+ }
+
+#if defined(PAR)
+ case RBH: // cf. BLACKHOLE_BQ
+ {
+ //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
+ to = copy(q,BLACKHOLE_sizeW(),stp);
+ //ToDo: derive size etc from reverted IP
+ //to = copy(q,size,stp);
+ IF_DEBUG(gc,
+ belch("@@ evacuate: RBH %p (%s) to %p (%s)",
+ q, info_type(q), to, info_type(to)));
+ return to;
+ }
+
+ case BLOCKED_FETCH:
+ ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
+ to = copy(q,sizeofW(StgBlockedFetch),stp);
+ IF_DEBUG(gc,
+ belch("@@ evacuate: %p (%s) to %p (%s)",
+ q, info_type(q), to, info_type(to)));
+ return to;
+
+# ifdef DIST
+ case REMOTE_REF:
+# endif
+ case FETCH_ME:
+ ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
+ to = copy(q,sizeofW(StgFetchMe),stp);
+ IF_DEBUG(gc,
+ belch("@@ evacuate: %p (%s) to %p (%s)",
+ q, info_type(q), to, info_type(to)));
+ return to;
+
+ case FETCH_ME_BQ:
+ ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
+ to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
+ IF_DEBUG(gc,
+ belch("@@ evacuate: %p (%s) to %p (%s)",
+ q, info_type(q), to, info_type(to)));
+ return to;
+#endif
+
+ default:
+ barf("evacuate: strange closure type %d", (int)(info->type));
+ }
+
+ barf("evacuate");
+}
+
+/* -----------------------------------------------------------------------------
+ Evaluate a THUNK_SELECTOR if possible.
+
+ returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or
+ a closure pointer if we evaluated it and this is the result. Note
+ that "evaluating" the THUNK_SELECTOR doesn't necessarily mean
+ reducing it to HNF, just that we have eliminated the selection.
+ The result might be another thunk, or even another THUNK_SELECTOR.
+
+ If the return value is non-NULL, the original selector thunk has
+ been BLACKHOLE'd, and should be updated with an indirection or a
+ forwarding pointer. If the return value is NULL, then the selector
+ thunk is unchanged.
+ -------------------------------------------------------------------------- */
+
+static StgClosure *
+eval_thunk_selector( nat field, StgSelector * p )
+{
+ StgInfoTable *info;
+ const StgInfoTable *info_ptr;
+ StgClosure *selectee;
+
+ selectee = p->selectee;
+
+ // Save the real info pointer (NOTE: not the same as get_itbl()).
+ info_ptr = p->header.info;
+
+ // If the THUNK_SELECTOR is in a generation that we are not
+ // collecting, then bail out early. We won't be able to save any
+ // space in any case, and updating with an indirection is trickier
+ // in an old gen.
+ if (Bdescr((StgPtr)p)->gen_no > N) {
+ return NULL;
+ }
+
+ // BLACKHOLE the selector thunk, since it is now under evaluation.
+ // This is important to stop us going into an infinite loop if
+ // this selector thunk eventually refers to itself.
+ SET_INFO(p,&stg_BLACKHOLE_info);
+
+selector_loop:
+
+ // We don't want to end up in to-space, because this causes
+ // problems when the GC later tries to evacuate the result of
+ // eval_thunk_selector(). There are various ways this could
+ // happen:
+ //
+ // - following an IND_STATIC
+ //
+ // - when the old generation is compacted, the mark phase updates
+ // from-space pointers to be to-space pointers, and we can't
+ // reliably tell which we're following (eg. from an IND_STATIC).
+ //
+ // So we use the block-descriptor test to find out if we're in
+ // to-space.
+ //
+ if (HEAP_ALLOCED(selectee) &&
+ Bdescr((StgPtr)selectee)->flags & BF_EVACUATED) {
+ goto bale_out;
+ }
+
+ info = get_itbl(selectee);
+ switch (info->type) {
+ case CONSTR:
+ case CONSTR_1_0:
+ case CONSTR_0_1:
+ case CONSTR_2_0:
+ case CONSTR_1_1:
+ case CONSTR_0_2:
+ case CONSTR_STATIC:
+ case CONSTR_NOCAF_STATIC:
+ // check that the size is in range
+ ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
+ info->layout.payload.nptrs));
+
+ // ToDo: shouldn't we test whether this pointer is in
+ // to-space?
+ return selectee->payload[field];
+
+ case IND:
+ case IND_PERM:
+ case IND_OLDGEN:
+ case IND_OLDGEN_PERM:
+ case IND_STATIC:
+ selectee = ((StgInd *)selectee)->indirectee;
+ goto selector_loop;
+
+ case EVACUATED:
+ // We don't follow pointers into to-space; the constructor
+ // has already been evacuated, so we won't save any space
+ // leaks by evaluating this selector thunk anyhow.
+ break;
+
+ case THUNK_SELECTOR:
+ {
+ StgClosure *val;
+
+ // check that we don't recurse too much, re-using the
+ // depth bound also used in evacuate().
+ thunk_selector_depth++;
+ if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
+ break;
+ }
+
+ val = eval_thunk_selector(info->layout.selector_offset,
+ (StgSelector *)selectee);
+
+ thunk_selector_depth--;
+
+ if (val == NULL) {
+ break;
+ } else {
+ // We evaluated this selector thunk, so update it with
+ // an indirection. NOTE: we don't use UPD_IND here,
+ // because we are guaranteed that p is in a generation
+ // that we are collecting, and we never want to put the
+ // indirection on a mutable list.
+#ifdef PROFILING
+ // For the purposes of LDV profiling, we have destroyed
+ // the original selector thunk.
+ SET_INFO(p, info_ptr);
+ LDV_recordDead_FILL_SLOP_DYNAMIC(selectee);
+#endif
+ ((StgInd *)selectee)->indirectee = val;
+ SET_INFO(selectee,&stg_IND_info);
+#ifdef PROFILING
+ // For the purposes of LDV profiling, we have created an
+ // indirection.
+ LDV_recordCreate(selectee);
+#endif
+ selectee = val;
+ goto selector_loop;
+ }
+ }
+
+ case AP:
+ case THUNK:
+ case THUNK_1_0:
+ case THUNK_0_1:
+ case THUNK_2_0:
+ case THUNK_1_1:
+ case THUNK_0_2:
+ case THUNK_STATIC:
+ case CAF_BLACKHOLE:
+ case SE_CAF_BLACKHOLE:
+ case SE_BLACKHOLE:
+ case BLACKHOLE:
+ case BLACKHOLE_BQ:
+#if defined(PAR)
+ case RBH:
+ case BLOCKED_FETCH:
+# ifdef DIST
+ case REMOTE_REF:
+# endif
+ case FETCH_ME:
+ case FETCH_ME_BQ:
+#endif
+ // not evaluated yet
+ break;
+
+ default:
+ barf("eval_thunk_selector: strange selectee %d",
+ (int)(info->type));
+ }
+
+bale_out:
+ // We didn't manage to evaluate this thunk; restore the old info pointer
+ SET_INFO(p, info_ptr);
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ move_TSO is called to update the TSO structure after it has been
+ moved from one place to another.
+ -------------------------------------------------------------------------- */
+
+void
+move_TSO (StgTSO *src, StgTSO *dest)
+{
+ ptrdiff_t diff;
+
+ // relocate the stack pointer...
+ diff = (StgPtr)dest - (StgPtr)src; // In *words*
+ dest->sp = (StgPtr)dest->sp + diff;
+}
+
+/* Similar to scavenge_large_bitmap(), but we don't write back the
+ * pointers we get back from evacuate().
+ */
+static void
+scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
+{
+ nat i, b, size;
+ StgWord bitmap;
+ StgClosure **p;
+
+ b = 0;
+ bitmap = large_srt->l.bitmap[b];
+ size = (nat)large_srt->l.size;
+ p = (StgClosure **)large_srt->srt;
+ for (i = 0; i < size; ) {
+ if ((bitmap & 1) != 0) {
+ evacuate(*p);
+ }
+ i++;
+ p++;
+ if (i % BITS_IN(W_) == 0) {
+ b++;
+ bitmap = large_srt->l.bitmap[b];
+ } else {
+ bitmap = bitmap >> 1;
+ }
+ }
+}
+
+/* evacuate the SRT. If srt_bitmap is zero, then there isn't an
+ * srt field in the info table. That's ok, because we'll
+ * never dereference it.
+ */
+static inline void
+scavenge_srt (StgClosure **srt, nat srt_bitmap)
+{
+ nat bitmap;
+ StgClosure **p;
+
+ bitmap = srt_bitmap;
+ p = srt;
+
+ if (bitmap == (StgHalfWord)(-1)) {
+ scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
+ return;
+ }
+
+ while (bitmap != 0) {
+ if ((bitmap & 1) != 0) {
+#ifdef ENABLE_WIN32_DLL_SUPPORT
+ // Special-case to handle references to closures hiding out in DLLs, since
+ // double indirections required to get at those. The code generator knows
+ // which is which when generating the SRT, so it stores the (indirect)
+ // reference to the DLL closure in the table by first adding one to it.
+ // We check for this here, and undo the addition before evacuating it.
+ //
+ // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
+ // closure that's fixed at link-time, and no extra magic is required.
+ if ( (unsigned long)(*srt) & 0x1 ) {
+ evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
+ } else {
+ evacuate(*p);
+ }
+#else
+ evacuate(*p);
+#endif
+ }
+ p++;
+ bitmap = bitmap >> 1;
+ }
+}
+
+
+static inline void
+scavenge_thunk_srt(const StgInfoTable *info)
+{
+ StgThunkInfoTable *thunk_info;
+
+ thunk_info = itbl_to_thunk_itbl(info);
+ scavenge_srt((StgClosure **)thunk_info->srt, thunk_info->i.srt_bitmap);
+}
+
+static inline void
+scavenge_fun_srt(const StgInfoTable *info)
+{
+ StgFunInfoTable *fun_info;
+
+ fun_info = itbl_to_fun_itbl(info);
+ scavenge_srt((StgClosure **)fun_info->srt, fun_info->i.srt_bitmap);
+}
+
+static inline void
+scavenge_ret_srt(const StgInfoTable *info)
+{
+ StgRetInfoTable *ret_info;
+
+ ret_info = itbl_to_ret_itbl(info);
+ scavenge_srt((StgClosure **)ret_info->srt, ret_info->i.srt_bitmap);
+}
+
+/* -----------------------------------------------------------------------------
+ Scavenge a TSO.
+ -------------------------------------------------------------------------- */
+
static void
scavengeTSO (StgTSO *tso)
{
    // Evacuate all the closures reachable from a TSO, then scavenge the
    // live portion of its stack (from sp up to the top of the stack array).

    // chase the link field for any TSOs on the same queue
    (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
#if defined(PAR)
        || tso->why_blocked == BlockedOnGA
        || tso->why_blocked == BlockedOnGA_NoSend
#endif
        ) {
        // presumably block_info.closure holds a heap closure only for
        // these blocking reasons (other variants of block_info would not
        // be closure pointers) — confirm against the TSO definition.
        tso->block_info.closure = evacuate(tso->block_info.closure);
    }
    if ( tso->blocked_exceptions != NULL ) {
        tso->blocked_exceptions =
            (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
    }

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
+
+/* -----------------------------------------------------------------------------
+ Blocks of function args occur on the stack (at the top) and
+ in PAPs.
+ -------------------------------------------------------------------------- */
+
static inline StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    // Scavenge a block of function arguments described by fun_info's
    // argument bitmap, evacuating the words whose bitmap bit is CLEAR
    // (a clear bit marks a pointer word: see the (bitmap & 1) == 0 test).
    // Returns the address just past the argument block.
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->fun_type) {
    case ARG_GEN:
        // General case: small bitmap stored inline in the info table.
        bitmap = BITMAP_BITS(fun_info->bitmap);
        size = BITMAP_SIZE(fun_info->bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        // Too many args for an inline bitmap: use the large-bitmap walker.
        size = ((StgLargeBitmap *)fun_info->bitmap)->size;
        scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size);
        p += size;
        break;
    default:
        // Standard argument patterns: bitmap comes from the static table
        // indexed by the function type.
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
+
static inline StgPtr
scavenge_PAP (StgPAP *pap)
{
    // Scavenge a PAP/AP: evacuate the function closure, then walk the
    // payload of saved arguments using the function's argument bitmap
    // (clear bit == pointer word, as in scavenge_arg_block).  Returns
    // the address just past the payload.
    StgPtr p;
    StgWord bitmap, size;
    StgFunInfoTable *fun_info;

    pap->fun = evacuate(pap->fun);
    fun_info = get_fun_itbl(pap->fun);
    ASSERT(fun_info->i.type != PAP);

    p = (StgPtr)pap->payload;
    size = pap->n_args;

    switch (fun_info->fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size);
        p += size;
        break;
    case ARG_BCO:
        // Byte-code function: the bitmap lives in the BCO itself.
        scavenge_large_bitmap((StgPtr)pap->payload, BCO_BITMAP(pap->fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]);
    small_bitmap:
        // size is re-assigned here so the goto from ARG_GEN lands with a
        // well-defined count (it is already pap->n_args; harmless repeat).
        size = pap->n_args;
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
+
+/* -----------------------------------------------------------------------------
+ Scavenge a given step until there are no more objects in this step
+ to scavenge.
+
+ evac_gen is set by the caller to be either zero (for a step in a
+ generation < N) or G where G is the generation of the step being
+ scavenged.
+
+ We sometimes temporarily change evac_gen back to zero if we're
+ scavenging a mutable object where early promotion isn't such a good
+ idea.
+ -------------------------------------------------------------------------- */
+
+static void
+scavenge(step *stp)
+{
+ StgPtr p, q;
+ StgInfoTable *info;
+ bdescr *bd;
+ nat saved_evac_gen = evac_gen;
+
+ p = stp->scan;
+ bd = stp->scan_bd;
+
+ failed_to_evac = rtsFalse;
+
+ /* scavenge phase - standard breadth-first scavenging of the
+ * evacuated objects
+ */
+
+ while (bd != stp->hp_bd || p < stp->hp) {
+
+ // If we're at the end of this block, move on to the next block
+ if (bd != stp->hp_bd && p == bd->free) {
+ bd = bd->link;
+ p = bd->start;
+ continue;
+ }
+
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
+ info = get_itbl((StgClosure *)p);
+
+ ASSERT(thunk_selector_depth == 0);
+
+ q = p;
+ switch (info->type) {
+
+ case MVAR:
+ /* treat MVars specially, because we don't want to evacuate the
+ * mut_link field in the middle of the closure.
+ */
+ {
+ StgMVar *mvar = ((StgMVar *)p);
+ evac_gen = 0;
+ (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
+ (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
+ (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)mvar);
+ failed_to_evac = rtsFalse; // mutable.
+ p += sizeofW(StgMVar);
+ break;
+ }
+
+ case FUN_2_0:
+ scavenge_fun_srt(info);
+ ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ p += sizeofW(StgHeader) + 2;
+ break;
+
+ case THUNK_2_0:
+ scavenge_thunk_srt(info);
+ case CONSTR_2_0:
+ ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ p += sizeofW(StgHeader) + 2;
+ break;
+
+ case THUNK_1_0:
+ scavenge_thunk_srt(info);
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
+ break;
+
+ case FUN_1_0:
+ scavenge_fun_srt(info);
+ case CONSTR_1_0:
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ p += sizeofW(StgHeader) + 1;
+ break;
+
+ case THUNK_0_1:
+ scavenge_thunk_srt(info);
+ p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
+ break;
+
+ case FUN_0_1:
+ scavenge_fun_srt(info);
+ case CONSTR_0_1:
+ p += sizeofW(StgHeader) + 1;
+ break;
+
+ case THUNK_0_2:
+ scavenge_thunk_srt(info);
+ p += sizeofW(StgHeader) + 2;
+ break;
+
+ case FUN_0_2:
+ scavenge_fun_srt(info);
+ case CONSTR_0_2:
+ p += sizeofW(StgHeader) + 2;
+ break;
+
+ case THUNK_1_1:
+ scavenge_thunk_srt(info);
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ p += sizeofW(StgHeader) + 2;
+ break;
+
+ case FUN_1_1:
+ scavenge_fun_srt(info);
+ case CONSTR_1_1:
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ p += sizeofW(StgHeader) + 2;
+ break;
+
+ case FUN:
+ scavenge_fun_srt(info);
+ goto gen_obj;
+
+ case THUNK:
+ scavenge_thunk_srt(info);
+ // fall through
+
+ gen_obj:
+ case CONSTR:
+ case WEAK:
+ case FOREIGN:
+ case STABLE_NAME:
+ {
+ StgPtr end;
+
+ end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ p += info->layout.payload.nptrs;
+ break;
+ }
+
+ case BCO: {
+ StgBCO *bco = (StgBCO *)p;
+ (StgClosure *)bco->instrs = evacuate((StgClosure *)bco->instrs);
+ (StgClosure *)bco->literals = evacuate((StgClosure *)bco->literals);
+ (StgClosure *)bco->ptrs = evacuate((StgClosure *)bco->ptrs);
+ (StgClosure *)bco->itbls = evacuate((StgClosure *)bco->itbls);
+ p += bco_sizeW(bco);
+ break;
+ }
+
+ case IND_PERM:
+ if (stp->gen->no != 0) {
+#ifdef PROFILING
+ // @LDV profiling
+ // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
+ // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
+ LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
+#endif
+ //
+ // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
+ //
+ SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that p has just been created.
+ LDV_recordCreate((StgClosure *)p);
+#endif
+ }
+ // fall through
+ case IND_OLDGEN_PERM:
+ ((StgIndOldGen *)p)->indirectee =
+ evacuate(((StgIndOldGen *)p)->indirectee);
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ recordOldToNewPtrs((StgMutClosure *)p);
+ }
+ p += sizeofW(StgIndOldGen);
+ break;
+
+ case MUT_VAR:
+ evac_gen = 0;
+ ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)p);
+ failed_to_evac = rtsFalse; // mutable anyhow
+ p += sizeofW(StgMutVar);
+ break;
+
+ case MUT_CONS:
+ // ignore these
+ failed_to_evac = rtsFalse; // mutable anyhow
+ p += sizeofW(StgMutVar);
+ break;
+
+ case CAF_BLACKHOLE:
+ case SE_CAF_BLACKHOLE:
+ case SE_BLACKHOLE:
+ case BLACKHOLE:
+ p += BLACKHOLE_sizeW();
+ break;
+
+ case BLACKHOLE_BQ:
+ {
+ StgBlockingQueue *bh = (StgBlockingQueue *)p;
+ (StgClosure *)bh->blocking_queue =
+ evacuate((StgClosure *)bh->blocking_queue);
+ recordMutable((StgMutClosure *)bh);
+ failed_to_evac = rtsFalse;
+ p += BLACKHOLE_sizeW();
+ break;
+ }
+
+ case THUNK_SELECTOR:
+ {
+ StgSelector *s = (StgSelector *)p;
+ s->selectee = evacuate(s->selectee);
+ p += THUNK_SELECTOR_sizeW();
+ break;
+ }
+
+ // A chunk of stack saved in a heap object
+ case AP_STACK:
+ {
+ StgAP_STACK *ap = (StgAP_STACK *)p;
+
+ ap->fun = evacuate(ap->fun);
+ scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
+ p = (StgPtr)ap->payload + ap->size;
+ break;
+ }
+
+ case PAP:
+ case AP:
+ p = scavenge_PAP((StgPAP *)p);
+ break;
+
+ case ARR_WORDS:
+ // nothing to follow
+ p += arr_words_sizeW((StgArrWords *)p);
+ break;
+
+ case MUT_ARR_PTRS:
+ // follow everything
+ {
+ StgPtr next;
+
+ evac_gen = 0; // repeatedly mutable
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)q);
+ failed_to_evac = rtsFalse; // mutable anyhow.
+ break;
+ }
+
+ case MUT_ARR_PTRS_FROZEN:
+ // follow everything
+ {
+ StgPtr next;
+
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ // it's tempting to recordMutable() if failed_to_evac is
+ // false, but that breaks some assumptions (eg. every
+ // closure on the mutable list is supposed to have the MUT
+ // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
+ break;
+ }
+
+ case TSO:
+ {
+ StgTSO *tso = (StgTSO *)p;
+ evac_gen = 0;
+ scavengeTSO(tso);
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)tso);
+ failed_to_evac = rtsFalse; // mutable anyhow.
+ p += tso_sizeW(tso);
+ break;
+ }
+
+#if defined(PAR)
+ case RBH: // cf. BLACKHOLE_BQ
+ {
+#if 0
+ nat size, ptrs, nonptrs, vhs;
+ char str[80];
+ StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
+#endif
+ StgRBH *rbh = (StgRBH *)p;
+ (StgClosure *)rbh->blocking_queue =
+ evacuate((StgClosure *)rbh->blocking_queue);
+ recordMutable((StgMutClosure *)to);
+ failed_to_evac = rtsFalse; // mutable anyhow.
+ IF_DEBUG(gc,
+ belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
+ p, info_type(p), (StgClosure *)rbh->blocking_queue));
+ // ToDo: use size of reverted closure here!
+ p += BLACKHOLE_sizeW();
+ break;
+ }
+
+ case BLOCKED_FETCH:
+ {
+ StgBlockedFetch *bf = (StgBlockedFetch *)p;
+ // follow the pointer to the node which is being demanded
+ (StgClosure *)bf->node =
+ evacuate((StgClosure *)bf->node);
+ // follow the link to the rest of the blocking queue
+ (StgClosure *)bf->link =
+ evacuate((StgClosure *)bf->link);
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)bf);
+ }
+ IF_DEBUG(gc,
+ belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
+ bf, info_type((StgClosure *)bf),
+ bf->node, info_type(bf->node)));
+ p += sizeofW(StgBlockedFetch);
+ break;
+ }
+
+#ifdef DIST
+ case REMOTE_REF:
+#endif
+ case FETCH_ME:
+ p += sizeofW(StgFetchMe);
+ break; // nothing to do in this case
+
+ case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
+ {
+ StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
+ (StgClosure *)fmbq->blocking_queue =
+ evacuate((StgClosure *)fmbq->blocking_queue);
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)fmbq);
+ }
+ IF_DEBUG(gc,
+ belch("@@ scavenge: %p (%s) exciting, isn't it",
+ p, info_type((StgClosure *)p)));
+ p += sizeofW(StgFetchMeBlockingQueue);
+ break;
+ }
+#endif
+
+ default:
+ barf("scavenge: unimplemented/strange closure type %d @ %p",
+ info->type, p);
+ }
+
+ /* If we didn't manage to promote all the objects pointed to by
+ * the current object, then we have to designate this object as
+ * mutable (because it contains old-to-new generation pointers).
+ */
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ mkMutCons((StgClosure *)q, &generations[evac_gen]);
+ }
+ }
+
+ stp->scan_bd = bd;
+ stp->scan = p;
+}
+
+/* -----------------------------------------------------------------------------
+ Scavenge everything on the mark stack.
+
+ This is slightly different from scavenge():
+ - we don't walk linearly through the objects, so the scavenger
+ doesn't need to advance the pointer on to the next object.
+ -------------------------------------------------------------------------- */
+
static void
scavenge_mark_stack(void)
{
    // Scavenge objects popped from the mark stack (compacting collector).
    // Unlike scavenge(), objects arrive individually, so there is no need
    // to advance a scan pointer past each object.
    StgPtr p, q;
    StgInfoTable *info;
    nat saved_evac_gen;

    // Everything reached from here lives in the oldest generation.
    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

linear_scan:
    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        // q remembers the start of the object for the failed_to_evac
        // bookkeeping and the "scavenged" mark at the bottom of the loop.
        q = p;
        switch (info->type) {

        case MVAR:
            /* treat MVars specially, because we don't want to evacuate the
             * mut_link field in the middle of the closure.
             */
        {
            StgMVar *mvar = ((StgMVar *)p);
            evac_gen = 0;
            (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
            (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
            (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse; // mutable.
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            // fall through
        case CONSTR_2_0:
            ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            // fall through
        case CONSTR_1_0:
        case CONSTR_1_1:
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            // no pointer fields at all
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
            scavenge_thunk_srt(info);
            // fall through

        gen_obj:
        case CONSTR:
        case WEAK:
        case FOREIGN:
        case STABLE_NAME:
        {
            StgPtr end;

            // Evacuate all the pointer fields of the payload.
            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            break;
        }

        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            (StgClosure *)bco->instrs = evacuate((StgClosure *)bco->instrs);
            (StgClosure *)bco->literals = evacuate((StgClosure *)bco->literals);
            (StgClosure *)bco->ptrs = evacuate((StgClosure *)bco->ptrs);
            (StgClosure *)bco->itbls = evacuate((StgClosure *)bco->itbls);
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            ((StgIndOldGen *)p)->indirectee =
                evacuate(((StgIndOldGen *)p)->indirectee);
            if (failed_to_evac) {
                recordOldToNewPtrs((StgMutClosure *)p);
            }
            failed_to_evac = rtsFalse;
            break;

        case MUT_VAR:
            evac_gen = 0;
            ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse;
            break;

        case MUT_CONS:
            // ignore these
            failed_to_evac = rtsFalse;
            break;

        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
        case ARR_WORDS:
            // no pointer fields to follow
            break;

        case BLACKHOLE_BQ:
        {
            StgBlockingQueue *bh = (StgBlockingQueue *)p;
            (StgClosure *)bh->blocking_queue =
                evacuate((StgClosure *)bh->blocking_queue);
            failed_to_evac = rtsFalse;
            break;
        }

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            s->selectee = evacuate(s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            ap->fun = evacuate(ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
        case AP:
            scavenge_PAP((StgPAP *)p);
            break;

        case MUT_ARR_PTRS:
            // follow everything
        {
            StgPtr next;

            evac_gen = 0;		// repeatedly mutable
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse; // mutable anyhow.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            break;
        }

        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            evac_gen = 0;
            scavengeTSO(tso);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse;
            break;
        }

#if defined(PAR)
        case RBH: // cf. BLACKHOLE_BQ
        {
#if 0
            nat size, ptrs, nonptrs, vhs;
            char str[80];
            StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
#endif
            StgRBH *rbh = (StgRBH *)p;
            (StgClosure *)rbh->blocking_queue =
                evacuate((StgClosure *)rbh->blocking_queue);
            recordMutable((StgMutClosure *)rbh);
            failed_to_evac = rtsFalse; // mutable anyhow.
            IF_DEBUG(gc,
                     belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
                           p, info_type(p), (StgClosure *)rbh->blocking_queue));
            break;
        }

        case BLOCKED_FETCH:
        {
            StgBlockedFetch *bf = (StgBlockedFetch *)p;
            // follow the pointer to the node which is being demanded
            (StgClosure *)bf->node =
                evacuate((StgClosure *)bf->node);
            // follow the link to the rest of the blocking queue
            (StgClosure *)bf->link =
                evacuate((StgClosure *)bf->link);
            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                recordMutable((StgMutClosure *)bf);
            }
            IF_DEBUG(gc,
                     belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                           bf, info_type((StgClosure *)bf),
                           bf->node, info_type(bf->node)));
            break;
        }

#ifdef DIST
        case REMOTE_REF:
#endif
        case FETCH_ME:
            break; // nothing to do in this case

        case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
        {
            StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
            (StgClosure *)fmbq->blocking_queue =
                evacuate((StgClosure *)fmbq->blocking_queue);
            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                recordMutable((StgMutClosure *)fmbq);
            }
            IF_DEBUG(gc,
                     belch("@@ scavenge: %p (%s) exciting, isn't it",
                           p, info_type((StgClosure *)p)));
            break;
        }
#endif // PAR

        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        // The object contained old-to-new pointers we couldn't promote:
        // put it on the mutable list of the target generation.
        if (failed_to_evac) {
            failed_to_evac = rtsFalse;
            mkMutCons((StgClosure *)q, &generations[evac_gen]);
        }

        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())

    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].blocks;
        oldgen_scan = oldgen_scan_bd->start;
    }

    if (oldgen_scan_bd) {
        // push a new thing on the mark stack
    loop:
        // find a closure that is marked but not scavenged, and start
        // from there.
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan,oldgen_scan_bd)) {
            oldgen_scan++;
        }

        if (oldgen_scan < oldgen_scan_bd->free) {

            // already scavenged?  (the "scavenged" bit is the mark bit of
            // the word after the object's start — see mark(q+1, ...) above)
            if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
                goto loop;
            }
            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
            goto linear_scan;
        }

        oldgen_scan_bd = oldgen_scan_bd->link;
        if (oldgen_scan_bd != NULL) {
            oldgen_scan = oldgen_scan_bd->start;
            goto loop;
        }
    }
}
+
+/* -----------------------------------------------------------------------------
+ Scavenge one object.
+
+ This is used for objects that are temporarily marked as mutable
+ because they contain old-to-new generation pointers. Only certain
+ objects can have this property.
+ -------------------------------------------------------------------------- */
+
+static rtsBool
+scavenge_one(StgPtr p)
+{
+ const StgInfoTable *info;
+ nat saved_evac_gen = evac_gen;
+ rtsBool no_luck;
+
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
+ info = get_itbl((StgClosure *)p);
+
+ switch (info->type) {
+
+ case FUN:
+ case FUN_1_0: // hardly worth specialising these guys
+ case FUN_0_1:
+ case FUN_1_1:
+ case FUN_0_2:
+ case FUN_2_0:
+ case THUNK:
+ case THUNK_1_0:
+ case THUNK_0_1:
+ case THUNK_1_1:
+ case THUNK_0_2:
+ case THUNK_2_0:
+ case CONSTR:
+ case CONSTR_1_0:
+ case CONSTR_0_1:
+ case CONSTR_1_1:
+ case CONSTR_0_2:
+ case CONSTR_2_0:
+ case WEAK:
+ case FOREIGN:
+ case IND_PERM:
+ case IND_OLDGEN_PERM:
+ {
+ StgPtr q, end;
+
+ end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
+ (StgClosure *)*q = evacuate((StgClosure *)*q);
+ }
+ break;
+ }
+
+ case CAF_BLACKHOLE:
+ case SE_CAF_BLACKHOLE:
+ case SE_BLACKHOLE:
+ case BLACKHOLE:
+ break;
+
+ case THUNK_SELECTOR:
+ {
+ StgSelector *s = (StgSelector *)p;
+ s->selectee = evacuate(s->selectee);
+ break;
+ }
+
+ case ARR_WORDS:
+ // nothing to follow
+ break;