+/* In the DEBUG case, we also zero out the slop of the old closure,
+ * so that the sanity checker can tell where the next closure is.
+ *
+ * Two important invariants: we should never try to update a closure
+ * to point to itself, and the closure being updated should not
+ * already have been updated (the mutable list will get messed up
+ * otherwise).
+ *
+ * NB. We do *not* do this in SMP mode, because when we have the
+ * possibility of multiple threads entering the same closure, zeroing
+ * the slop in one of the threads would have a disastrous effect on
+ * the other (seen in the wild!).
+ */
+#if !defined(DEBUG) || defined(SMP)
+
+/* Non-debug (or SMP) builds: slop-filling compiles away to nothing. */
+#define DEBUG_FILL_SLOP(p) /* nothing */
+
+#else /* DEBUG */
+
+#ifdef CMINUSMINUS
+
+/* C-- version of DEBUG_FILL_SLOP(p): zero out the payload ("slop")
+ * of closure p so the heap sanity checker can tell where the next
+ * closure begins (see the comment above).
+ * NB. C-- has no structured loop, so the payload is cleared with a
+ * label ("for:") and an explicit goto.
+ */
+#define DEBUG_FILL_SLOP(p) \
+ W_ inf; \
+ W_ sz; \
+ W_ i; \
+ inf = %GET_STD_INFO(p); \
+ /* selectors: clear only the selectee (payload word 0) */ \
+ if (%INFO_TYPE(inf) == HALF_W_(THUNK_SELECTOR)) { \
+ StgThunk_payload(p,0) = 0; \
+ } else { \
+ /* blackholes are never zeroed (see SMP note above) */ \
+ if (%INFO_TYPE(inf) != HALF_W_(BLACKHOLE)) { \
+ if (%INFO_TYPE(inf) == HALF_W_(AP_STACK)) { \
+ /* AP_STACK: stack words plus the non-header fixed fields */ \
+ sz = StgAP_STACK_size(p) + BYTES_TO_WDS(SIZEOF_StgAP_STACK_NoHdr); \
+ } else { \
+ /* generic closure: ptrs + nptrs payload words */ \
+ sz = TO_W_(%INFO_PTRS(inf)) + TO_W_(%INFO_NPTRS(inf)); \
+ } \
+ i = 0; \
+ for: \
+ if (i < sz) { \
+ StgThunk_payload(p,i) = 0; \
+ i = i + 1; \
+ goto for; \
+ } \
+ } }
+
+#else /* !CMINUSMINUS */
+
+/* C version of DEBUG_FILL_SLOP: same job as the C-- macro above.
+ * NOTE(review): the THUNK_SELECTOR case only clears the selectee
+ * under SMP -- but this whole branch is compiled only when SMP is
+ * *not* defined (see the #if at the top of this section), so that
+ * clause is dead code here, and the C version is asymmetric with the
+ * C-- version (which clears payload[0] unconditionally).  Confirm
+ * whether the asymmetry is intentional.
+ */
+INLINE_HEADER void
+DEBUG_FILL_SLOP(StgClosure *p)
+{
+ StgInfoTable *inf = get_itbl(p);
+ nat i, sz;
+
+ switch (inf->type) {
+ case BLACKHOLE:
+ return; /* never zero a blackhole (see SMP note above) */
+ case AP_STACK:
+ /* stack words plus the fixed fields, minus the header */
+ sz = ((StgAP_STACK *)p)->size + sizeofW(StgAP_STACK) - sizeofW(StgHeader);
+ break;
+ case THUNK_SELECTOR:
+#ifdef SMP
+ ((StgSelector *)p)->selectee = 0;
+#endif
+ return;
+ default:
+ /* generic closure: ptrs + nptrs payload words */
+ sz = inf->layout.payload.ptrs + inf->layout.payload.nptrs;
+ break;
+ }
+ for (i = 0; i < sz; i++) {
+ ((StgThunk *)p)->payload[i] = 0;
+ }
+}
+
+#endif /* CMINUSMINUS */
+#endif /* DEBUG */
+
+/* We have two versions of this macro (sadly), one for use in C-- code,
+ * and the other for C.
+ *
+ * The and_then argument is a performance hack so that we can paste in
+ * the continuation code directly. It helps shave a couple of
+ * instructions off the common case in the update code, which is
+ * worthwhile (the update code is often part of the inner loop).
+ * (except that gcc now appears to common up this code again and
+ * invert the optimisation. Grrrr --SDM).
+ */
+#ifdef CMINUSMINUS
+/* generation(n): address of the n'th entry of the generations array. */
+#define generation(n) (W_[generations] + n*SIZEOF_generation)
+/* updateWithIndirection(ind_info, p1, p2, and_then):
+ * overwrite closure p1 with an indirection to p2, then run and_then
+ * (pasted continuation -- see the comment above).  If p1 is in
+ * generation 0 a plain indirection (ind_info) suffices; otherwise p1
+ * is recorded on its generation's mutable list (via
+ * recordMutableGenLock) and gets stg_IND_OLDGEN_info instead.
+ * The ASSERT and cas lines are deliberately commented out but kept
+ * for reference.
+ */
+#define updateWithIndirection(ind_info, p1, p2, and_then) \
+ W_ bd; \
+ \
+/* ASSERT( p1 != p2 && !closure_IND(p1) ); \
+ */ LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1); \
+/* foreign "C" cas(p1 "ptr", 0, stg_WHITEHOLE_info); \
+ */ bd = Bdescr(p1); \
+ /* bd: block descriptor of p1, used to find its generation no. */ \
+ if (bdescr_gen_no(bd) == 0 :: CInt) { \
+ StgInd_indirectee(p1) = p2; \
+ SET_INFO(p1, ind_info); \
+ LDV_RECORD_CREATE(p1); \
+ TICK_UPD_NEW_IND(); \
+ and_then; \
+ } else { \
+ /* old generation: zero the slop, add p1 to the mutable list */ \
+ DEBUG_FILL_SLOP(p1); \
+ foreign "C" recordMutableGenLock(p1 "ptr", \
+ generation(TO_W_(bdescr_gen_no(bd))) "ptr"); \
+ StgInd_indirectee(p1) = p2; \
+ SET_INFO(p1, stg_IND_OLDGEN_info); \
+ LDV_RECORD_CREATE(p1); \
+ TICK_UPD_OLD_IND(); \
+ and_then; \
+ }
+#else
+#define updateWithIndirection(ind_info, p1, p2, and_then) \