/* -----------------------------------------------------------------------------
- * $Id: GCCompact.c,v 1.2 2001/07/24 14:29:13 simonmar Exp $
*
* (c) The GHC Team 2001
*
*
* ---------------------------------------------------------------------------*/
+#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
+#include "OSThreads.h"
#include "Storage.h"
#include "BlockAlloc.h"
#include "MBlock.h"
#include "GCCompact.h"
#include "Schedule.h"
-#include "StablePriv.h"
+#include "Apply.h"
+
+// Turn off inlining when debugging - it obfuscates things
+#ifdef DEBUG
+# undef STATIC_INLINE
+# define STATIC_INLINE static
+#endif
/* -----------------------------------------------------------------------------
Threading / unthreading pointers.
the chain with the new location of the object. We stop when we
reach the info pointer at the end.
- We use a trick to identify the info pointer, because the
- LOOKS_LIKE_GHC_INFO() macro involves a function call and can be
- expensive. The trick is that when swapping pointers for threading,
- we set the low bit of the original pointer, with the result that
- all the pointers in the chain have their low bits set except for
- the info pointer.
+ We use a trick to identify the info pointer: when swapping pointers
+ for threading, we set the low bit of the original pointer, with the
+ result that all the pointers in the chain have their low bits set
+ except for the info pointer.
-------------------------------------------------------------------------- */
-static inline void
+STATIC_INLINE void
thread( StgPtr p )
{
StgPtr q = (StgPtr)*p;
- ASSERT(!LOOKS_LIKE_GHC_INFO(q));
+ bdescr *bd;
+
+ // It doesn't look like a closure at the moment, because the info
+ // ptr is possibly threaded:
+ // ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
+
if (HEAP_ALLOCED(q)) {
- if (Bdescr(q)->gen_no > 0) {
+ bd = Bdescr(q);
+ // a handy way to discover whether the ptr is into the
+ // compacted area of the old gen, is that the EVACUATED flag
+ // is zero (it's non-zero for all the other areas of live
+ // memory).
+ if ((bd->flags & BF_EVACUATED) == 0) {
*p = (StgWord)*q;
*q = (StgWord)p + 1; // set the low bit
}
}
}
-static inline void
+STATIC_INLINE void
unthread( StgPtr p, StgPtr free )
{
- StgPtr q = (StgPtr)*p, r;
+ StgWord q = *p, r;
- while (((StgWord)q & 1) != 0) {
- (StgWord)q -= 1; // unset the low bit again
- r = (StgPtr)*q;
- *q = (StgWord)free;
+ while ((q & 1) != 0) {
+ q -= 1; // unset the low bit again
+ r = *((StgPtr)q);
+ *((StgPtr)q) = (StgWord)free;
q = r;
}
- *p = (StgWord)q;
+ *p = q;
}
-static inline StgInfoTable *
+STATIC_INLINE StgInfoTable *
get_threaded_info( StgPtr p )
{
StgPtr q = (P_)GET_INFO((StgClosure *)p);
while (((StgWord)q & 1) != 0) {
q = (P_)*((StgPtr)((StgWord)q-1));
}
+
+ ASSERT(LOOKS_LIKE_INFO_PTR(q));
return INFO_PTR_TO_STRUCT((StgInfoTable *)q);
}
// A word-aligned memmove will be faster for small objects than libc's or gcc's.
// Remember, the two regions *might* overlap, but: to <= from.
-static inline void
+STATIC_INLINE void
move(StgPtr to, StgPtr from, nat size)
{
for(; size > 0; --size) {
}
}
-static inline nat
+STATIC_INLINE nat
obj_sizeW( StgClosure *p, StgInfoTable *info )
{
switch (info->type) {
+ case THUNK_0_1:
+ case THUNK_1_0:
+ return sizeofW(StgThunk) + 1;
case FUN_0_1:
case CONSTR_0_1:
case FUN_1_0:
case CONSTR_1_0:
return sizeofW(StgHeader) + 1;
- case THUNK_0_1:
case THUNK_0_2:
+ case THUNK_1_1:
+ case THUNK_2_0:
+ return sizeofW(StgThunk) + 2;
case FUN_0_2:
case CONSTR_0_2:
- case THUNK_1_0:
- case THUNK_1_1:
case FUN_1_1:
case CONSTR_1_1:
- case THUNK_2_0:
case FUN_2_0:
case CONSTR_2_0:
- return sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
+ return sizeofW(StgHeader) + 2;
case THUNK_SELECTOR:
return THUNK_SELECTOR_sizeW();
- case AP_UPD:
+ case AP_STACK:
+ return ap_stack_sizeW((StgAP_STACK *)p);
+ case AP:
case PAP:
return pap_sizeW((StgPAP *)p);
case ARR_WORDS:
return arr_words_sizeW((StgArrWords *)p);
- case MUT_ARR_PTRS:
+ case MUT_ARR_PTRS_CLEAN:
+ case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
+ case MUT_ARR_PTRS_FROZEN0:
return mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
case TSO:
return tso_sizeW((StgTSO *)p);
+ case BCO:
+ return bco_sizeW((StgBCO *)p);
+ case TVAR_WAIT_QUEUE:
+ return sizeofW(StgTVarWaitQueue);
+ case TVAR:
+ return sizeofW(StgTVar);
+ case TREC_CHUNK:
+ return sizeofW(StgTRecChunk);
+ case TREC_HEADER:
+ return sizeofW(StgTRecHeader);
default:
return sizeW_fromITBL(info);
}
case IND_STATIC:
thread((StgPtr)&((StgInd *)p)->indirectee);
- break;
+ p = *IND_STATIC_LINK(p);
+ continue;
case THUNK_STATIC:
+ p = *THUNK_STATIC_LINK(p);
+ continue;
case FUN_STATIC:
+ p = *FUN_STATIC_LINK(p);
+ continue;
case CONSTR_STATIC:
- break;
+ p = *STATIC_LINK(info,p);
+ continue;
default:
barf("thread_static: strange closure %d", (int)(info->type));
}
- p = STATIC_LINK(info,p);
}
}
+// Thread every pointer slot of a payload described by a large bitmap.
+// By the same convention as the small bitmaps, a 0 bit marks a pointer
+// word (which gets threaded) and a 1 bit marks a non-pointer.  'size'
+// is the number of payload words to walk.
+STATIC_INLINE void
+thread_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
+{
+ nat i, b;
+ StgWord bitmap;
+
+ b = 0;
+ bitmap = large_bitmap->bitmap[b];
+ for (i = 0; i < size; ) {
+ if ((bitmap & 1) == 0) {
+ thread(p);
+ }
+ i++;
+ p++;
+ // advance to the next word of the bitmap once this one is used up
+ if (i % BITS_IN(W_) == 0) {
+ b++;
+ bitmap = large_bitmap->bitmap[b];
+ } else {
+ bitmap = bitmap >> 1;
+ }
+ }
+}
+
+// Thread the argument block of a function call, laid out according to
+// the function's argument descriptor (fun_info->f.fun_type): either a
+// bitmap stored in the info table (ARG_GEN), a large bitmap
+// (ARG_GEN_BIG), or one of the standard precomputed patterns in
+// stg_arg_bitmaps.  Returns the address just past the arguments.
+STATIC_INLINE StgPtr
+thread_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
+{
+ StgPtr p;
+ StgWord bitmap;
+ nat size;
+
+ p = (StgPtr)args;
+ switch (fun_info->f.fun_type) {
+ case ARG_GEN:
+ bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
+ size = BITMAP_SIZE(fun_info->f.b.bitmap);
+ goto small_bitmap;
+ case ARG_GEN_BIG:
+ size = GET_FUN_LARGE_BITMAP(fun_info)->size;
+ thread_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
+ p += size;
+ break;
+ default:
+ bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
+ size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
+ small_bitmap:
+ // 0 bit == pointer word, thread it; 1 bit == non-pointer, skip
+ while (size > 0) {
+ if ((bitmap & 1) == 0) {
+ thread(p);
+ }
+ p++;
+ bitmap = bitmap >> 1;
+ size--;
+ }
+ break;
+ }
+ return p;
+}
+
static void
thread_stack(StgPtr p, StgPtr stack_end)
{
- StgPtr q;
- const StgInfoTable* info;
- StgWord32 bitmap;
+ const StgRetInfoTable* info;
+ StgWord bitmap;
+ nat size;
// highly similar to scavenge_stack, but we do pointer threading here.
while (p < stack_end) {
- q = (StgPtr)*p;
- // If we've got a tag, skip over that many words on the stack
- if ( IS_ARG_TAG((W_)q) ) {
- p += ARG_SIZE(q);
- p++; continue;
- }
-
- // Is q a pointer to a closure?
- if ( !LOOKS_LIKE_GHC_INFO(q) ) {
- thread(p);
- p++;
- continue;
- }
-
- // Otherwise, q must be the info pointer of an activation
+ // *p must be the info pointer of an activation
// record. All activation records have 'bitmap' style layout
// info.
//
- info = get_itbl((StgClosure *)p);
+ info = get_ret_itbl((StgClosure *)p);
- switch (info->type) {
+ switch (info->i.type) {
// Dynamic bitmap: the mask is stored on the stack
case RET_DYN:
- bitmap = ((StgRetDyn *)p)->liveness;
+ {
+ StgWord dyn;
+ dyn = ((StgRetDyn *)p)->liveness;
+
+ // traverse the bitmap first
+ bitmap = RET_DYN_LIVENESS(dyn);
p = (P_)&((StgRetDyn *)p)->payload[0];
- goto small_bitmap;
+ size = RET_DYN_BITMAP_SIZE;
+ while (size > 0) {
+ if ((bitmap & 1) == 0) {
+ thread(p);
+ }
+ p++;
+ bitmap = bitmap >> 1;
+ size--;
+ }
- // probably a slow-entry point return address:
- case FUN:
- case FUN_STATIC:
- p++;
+ // skip over the non-ptr words
+ p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
+
+ // follow the ptr words
+ for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
+ thread(p);
+ p++;
+ }
continue;
+ }
- // small bitmap (< 32 entries, or 64 on a 64-bit machine)
+ // small bitmap (<= 32 entries, or 64 on a 64-bit machine)
+ case CATCH_RETRY_FRAME:
+ case CATCH_STM_FRAME:
+ case ATOMICALLY_FRAME:
case UPDATE_FRAME:
case STOP_FRAME:
case CATCH_FRAME:
- case SEQ_FRAME:
- case RET_BCO:
case RET_SMALL:
case RET_VEC_SMALL:
- bitmap = info->layout.bitmap;
+ bitmap = BITMAP_BITS(info->i.layout.bitmap);
+ size = BITMAP_SIZE(info->i.layout.bitmap);
p++;
- // this assumes that the payload starts immediately after the info-ptr
- small_bitmap:
- while (bitmap != 0) {
+ // NOTE: the payload starts immediately after the info-ptr, we
+ // don't have an StgHeader in the same sense as a heap closure.
+ while (size > 0) {
if ((bitmap & 1) == 0) {
thread(p);
}
p++;
bitmap = bitmap >> 1;
+ size--;
}
continue;
- // large bitmap (> 32 entries)
+ case RET_BCO: {
+ StgBCO *bco;
+ nat size;
+
+ p++;
+ bco = (StgBCO *)*p;
+ thread(p);
+ p++;
+ size = BCO_BITMAP_SIZE(bco);
+ thread_large_bitmap(p, BCO_BITMAP(bco), size);
+ p += size;
+ continue;
+ }
+
+ // large bitmap (> 32 entries, or 64 on a 64-bit machine)
case RET_BIG:
case RET_VEC_BIG:
- {
- StgPtr q;
- StgLargeBitmap *large_bitmap;
- nat i;
-
- large_bitmap = info->layout.large_bitmap;
p++;
+ size = GET_LARGE_BITMAP(&info->i)->size;
+ thread_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
+ p += size;
+ continue;
- for (i=0; i<large_bitmap->size; i++) {
- bitmap = large_bitmap->bitmap[i];
- q = p + sizeof(W_) * 8;
- while (bitmap != 0) {
- if ((bitmap & 1) == 0) {
- thread(p);
- }
- p++;
- bitmap = bitmap >> 1;
- }
- if (i+1 < large_bitmap->size) {
- while (p < q) {
- thread(p);
- p++;
- }
- }
- }
+ case RET_FUN:
+ {
+ StgRetFun *ret_fun = (StgRetFun *)p;
+ StgFunInfoTable *fun_info;
+
+ fun_info = itbl_to_fun_itbl(
+ get_threaded_info((StgPtr)ret_fun->fun));
+ // *before* threading it!
+ thread((StgPtr)&ret_fun->fun);
+ p = thread_arg_block(fun_info, ret_fun->payload);
continue;
}
default:
barf("thread_stack: weird activation record found on stack: %d",
- (int)(info->type));
+ (int)(info->i.type));
+ }
+ }
+}
+
+// Thread 'size' words of a PAP/AP payload, using the argument bitmap of
+// the applied function 'fun' (or, for ARG_BCO, the BCO's own bitmap) to
+// distinguish pointer from non-pointer words.  The function's info
+// pointer may itself be threaded at this point, hence
+// get_threaded_info.  Returns the address just past the payload.
+STATIC_INLINE StgPtr
+thread_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
+{
+ StgPtr p;
+ StgWord bitmap;
+ StgFunInfoTable *fun_info;
+
+ fun_info = itbl_to_fun_itbl(get_threaded_info((StgPtr)fun));
+ // a PAP's fun field never points to another PAP
+ ASSERT(fun_info->i.type != PAP);
+
+ p = (StgPtr)payload;
+
+ switch (fun_info->f.fun_type) {
+ case ARG_GEN:
+ bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
+ goto small_bitmap;
+ case ARG_GEN_BIG:
+ thread_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
+ p += size;
+ break;
+ case ARG_BCO:
+ thread_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
+ p += size;
+ break;
+ default:
+ bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
+ small_bitmap:
+ // 0 bit == pointer word, thread it; 1 bit == non-pointer, skip
+ while (size > 0) {
+ if ((bitmap & 1) == 0) {
+ thread(p);
+ }
+ p++;
+ bitmap = bitmap >> 1;
+ size--;
+ }
+ break;
+ }
+
+ return p;
+}
+
+// Thread a PAP closure: payload first (while pap->fun is still
+// readable for its argument bitmap), then the fun field itself.
+// Returns the address just past the PAP.
+STATIC_INLINE StgPtr
+thread_PAP (StgPAP *pap)
+{
+ StgPtr p;
+ p = thread_PAP_payload(pap->fun, pap->payload, pap->n_args);
+ thread((StgPtr)&pap->fun);
+ return p;
+}
+
+// Thread an AP closure; same layout/handling as a PAP (payload before
+// the fun field).  Returns the address just past the AP.
+STATIC_INLINE StgPtr
+thread_AP (StgAP *ap)
+{
+ StgPtr p;
+ p = thread_PAP_payload(ap->fun, ap->payload, ap->n_args);
+ thread((StgPtr)&ap->fun);
+ return p;
+}
+
+// Thread an AP_STACK closure: the fun field, then the captured stack
+// chunk in its payload.  Returns the address just past the closure.
+STATIC_INLINE StgPtr
+thread_AP_STACK (StgAP_STACK *ap)
+{
+ thread((StgPtr)&ap->fun);
+ thread_stack((P_)ap->payload, (P_)ap->payload + ap->size);
+ return (P_)ap + sizeofW(StgAP_STACK) + ap->size;
+}
+
+// Thread the pointer fields of a TSO (link fields, block_info when the
+// thread is blocked on a heap object, blocked_exceptions, trec) and
+// then its stack.  Returns the address just past the TSO.
+static StgPtr
+thread_TSO (StgTSO *tso)
+{
+ thread((StgPtr)&tso->link);
+ thread((StgPtr)&tso->global_link);
+
+ // block_info.closure is only a heap pointer for these blocking reasons
+ if ( tso->why_blocked == BlockedOnMVar
+ || tso->why_blocked == BlockedOnBlackHole
+ || tso->why_blocked == BlockedOnException
+#if defined(PAR)
+ || tso->why_blocked == BlockedOnGA
+ || tso->why_blocked == BlockedOnGA_NoSend
+#endif
+ ) {
+ thread((StgPtr)&tso->block_info.closure);
+ }
+ if ( tso->blocked_exceptions != NULL ) {
+ thread((StgPtr)&tso->blocked_exceptions);
+ }
+
+ thread((StgPtr)&tso->trec);
+
+ thread_stack(tso->sp, &(tso->stack[tso->stack_size]));
+ return (StgPtr)tso + tso_sizeW(tso);
+}
+
static void
update_fwd_large( bdescr *bd )
{
for (; bd != NULL; bd = bd->link) {
p = bd->start;
- unthread(p,p);
info = get_itbl((StgClosure *)p);
switch (info->type) {
// nothing to follow
continue;
- case MUT_ARR_PTRS:
+ case MUT_ARR_PTRS_CLEAN:
+ case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
+ case MUT_ARR_PTRS_FROZEN0:
// follow everything
{
StgPtr next;
}
case TSO:
- {
- StgTSO *tso = (StgTSO *)p;
- thread_stack(tso->sp, &(tso->stack[tso->stack_size]));
+ thread_TSO((StgTSO *)p);
+ continue;
+
+ case AP_STACK:
+ thread_AP_STACK((StgAP_STACK *)p);
continue;
- }
- case AP_UPD:
case PAP:
- {
- StgPAP* pap = (StgPAP *)p;
- thread((StgPtr)&pap->fun);
- thread_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
+ thread_PAP((StgPAP *)p);
continue;
- }
+
+ case TREC_CHUNK:
+ {
+ StgWord i;
+ StgTRecChunk *tc = (StgTRecChunk *)p;
+ TRecEntry *e = &(tc -> entries[0]);
+ thread((StgPtr)&tc->prev_chunk);
+ for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
+ thread((StgPtr)&e->tvar);
+ thread((StgPtr)&e->expected_value);
+ thread((StgPtr)&e->new_value);
+ }
+ continue;
+ }
default:
barf("update_fwd_large: unknown/strange object %d", (int)(info->type));
}
}
+// Thread all interior pointers of the closure at p, dispatching on the
+// closure type in 'info' (the closure's *unthreaded* info table, which
+// the caller must supply).  Returns the address of the next object in
+// the block, i.e. p plus this closure's size in words.
+STATIC_INLINE StgPtr
+thread_obj (StgInfoTable *info, StgPtr p)
+{
+ switch (info->type) {
+ case THUNK_0_1:
+ return p + sizeofW(StgThunk) + 1;
+
+ case FUN_0_1:
+ case CONSTR_0_1:
+ return p + sizeofW(StgHeader) + 1;
+
+ case FUN_1_0:
+ case CONSTR_1_0:
+ thread((StgPtr)&((StgClosure *)p)->payload[0]);
+ return p + sizeofW(StgHeader) + 1;
+
+ case THUNK_1_0:
+ thread((StgPtr)&((StgThunk *)p)->payload[0]);
+ return p + sizeofW(StgThunk) + 1;
+
+ case THUNK_0_2:
+ return p + sizeofW(StgThunk) + 2;
+
+ case FUN_0_2:
+ case CONSTR_0_2:
+ return p + sizeofW(StgHeader) + 2;
+
+ case THUNK_1_1:
+ thread((StgPtr)&((StgThunk *)p)->payload[0]);
+ return p + sizeofW(StgThunk) + 2;
+
+ case FUN_1_1:
+ case CONSTR_1_1:
+ thread((StgPtr)&((StgClosure *)p)->payload[0]);
+ return p + sizeofW(StgHeader) + 2;
+
+ case THUNK_2_0:
+ thread((StgPtr)&((StgThunk *)p)->payload[0]);
+ thread((StgPtr)&((StgThunk *)p)->payload[1]);
+ return p + sizeofW(StgThunk) + 2;
+
+ case FUN_2_0:
+ case CONSTR_2_0:
+ thread((StgPtr)&((StgClosure *)p)->payload[0]);
+ thread((StgPtr)&((StgClosure *)p)->payload[1]);
+ return p + sizeofW(StgHeader) + 2;
+
+ case BCO: {
+ StgBCO *bco = (StgBCO *)p;
+ thread((StgPtr)&bco->instrs);
+ thread((StgPtr)&bco->literals);
+ thread((StgPtr)&bco->ptrs);
+ thread((StgPtr)&bco->itbls);
+ return p + bco_sizeW(bco);
+ }
+
+ // generic layout: ptrs first, then nptrs, per the info table
+ case THUNK:
+ {
+ StgPtr end;
+
+ end = (P_)((StgThunk *)p)->payload +
+ info->layout.payload.ptrs;
+ for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
+ thread(p);
+ }
+ return p + info->layout.payload.nptrs;
+ }
+
+ case FUN:
+ case CONSTR:
+ case STABLE_NAME:
+ case IND_PERM:
+ case MUT_VAR_CLEAN:
+ case MUT_VAR_DIRTY:
+ case CAF_BLACKHOLE:
+ case SE_CAF_BLACKHOLE:
+ case SE_BLACKHOLE:
+ case BLACKHOLE:
+ {
+ StgPtr end;
+
+ end = (P_)((StgClosure *)p)->payload +
+ info->layout.payload.ptrs;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ thread(p);
+ }
+ return p + info->layout.payload.nptrs;
+ }
+
+ case WEAK:
+ {
+ StgWeak *w = (StgWeak *)p;
+ thread((StgPtr)&w->key);
+ thread((StgPtr)&w->value);
+ thread((StgPtr)&w->finalizer);
+ if (w->link != NULL) {
+ thread((StgPtr)&w->link);
+ }
+ return p + sizeofW(StgWeak);
+ }
+
+ case MVAR:
+ {
+ StgMVar *mvar = (StgMVar *)p;
+ thread((StgPtr)&mvar->head);
+ thread((StgPtr)&mvar->tail);
+ thread((StgPtr)&mvar->value);
+ return p + sizeofW(StgMVar);
+ }
+
+ case IND_OLDGEN:
+ case IND_OLDGEN_PERM:
+ thread((StgPtr)&((StgInd *)p)->indirectee);
+ return p + sizeofW(StgInd);
+
+ case THUNK_SELECTOR:
+ {
+ StgSelector *s = (StgSelector *)p;
+ thread((StgPtr)&s->selectee);
+ return p + THUNK_SELECTOR_sizeW();
+ }
+
+ case AP_STACK:
+ return thread_AP_STACK((StgAP_STACK *)p);
+
+ case PAP:
+ return thread_PAP((StgPAP *)p);
+
+ case AP:
+ return thread_AP((StgAP *)p);
+
+ case ARR_WORDS:
+ // no pointers in the payload, just skip over it
+ return p + arr_words_sizeW((StgArrWords *)p);
+
+ case MUT_ARR_PTRS_CLEAN:
+ case MUT_ARR_PTRS_DIRTY:
+ case MUT_ARR_PTRS_FROZEN:
+ case MUT_ARR_PTRS_FROZEN0:
+ // follow everything
+ {
+ StgPtr next;
+
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+ thread(p);
+ }
+ return p;
+ }
+
+ case TSO:
+ return thread_TSO((StgTSO *)p);
+
+ case TVAR_WAIT_QUEUE:
+ {
+ StgTVarWaitQueue *wq = (StgTVarWaitQueue *)p;
+ thread((StgPtr)&wq->waiting_tso);
+ thread((StgPtr)&wq->next_queue_entry);
+ thread((StgPtr)&wq->prev_queue_entry);
+ return p + sizeofW(StgTVarWaitQueue);
+ }
+
+ case TVAR:
+ {
+ StgTVar *tvar = (StgTVar *)p;
+ thread((StgPtr)&tvar->current_value);
+ thread((StgPtr)&tvar->first_wait_queue_entry);
+ return p + sizeofW(StgTVar);
+ }
+
+ case TREC_HEADER:
+ {
+ StgTRecHeader *trec = (StgTRecHeader *)p;
+ thread((StgPtr)&trec->enclosing_trec);
+ thread((StgPtr)&trec->current_chunk);
+ return p + sizeofW(StgTRecHeader);
+ }
+
+ case TREC_CHUNK:
+ {
+ StgWord i;
+ StgTRecChunk *tc = (StgTRecChunk *)p;
+ TRecEntry *e = &(tc -> entries[0]);
+ thread((StgPtr)&tc->prev_chunk);
+ for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
+ thread((StgPtr)&e->tvar);
+ thread((StgPtr)&e->expected_value);
+ thread((StgPtr)&e->new_value);
+ }
+ return p + sizeofW(StgTRecChunk);
+ }
+
+ default:
+ barf("update_fwd: unknown/strange object %d", (int)(info->type));
+ return NULL;
+ }
+}
+
static void
update_fwd( bdescr *blocks )
{
// linearly scan the objects in this block
while (p < bd->free) {
-
- /* unthread the info ptr */
- unthread(p,p);
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
-
- ASSERT(p && (LOOKS_LIKE_GHC_INFO(info)
- || IS_HUGS_CONSTR_INFO(info)));
-
- switch (info->type) {
- case FUN_0_1:
- case CONSTR_0_1:
- p += sizeofW(StgHeader) + 1;
- break;
-
- case FUN_1_0:
- case CONSTR_1_0:
- thread((StgPtr)&((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 1;
- break;
-
- case THUNK_1_0:
- thread((StgPtr)&((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
- break;
-
- case THUNK_0_1: // MIN_UPD_SIZE
- case THUNK_0_2:
- case FUN_0_2:
- case CONSTR_0_2:
- p += sizeofW(StgHeader) + 2;
- break;
-
- case THUNK_1_1:
- case FUN_1_1:
- case CONSTR_1_1:
- thread((StgPtr)&((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 2;
- break;
-
- case THUNK_2_0:
- case FUN_2_0:
- case CONSTR_2_0:
- thread((StgPtr)&((StgClosure *)p)->payload[0]);
- thread((StgPtr)&((StgClosure *)p)->payload[1]);
- p += sizeofW(StgHeader) + 2;
- break;
-
- case FUN:
- case THUNK:
- case CONSTR:
- case FOREIGN:
- case STABLE_NAME:
- case BCO:
- case IND_PERM:
- case MUT_VAR:
- case MUT_CONS:
- case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
- case BLACKHOLE:
- case BLACKHOLE_BQ:
- {
- StgPtr end;
-
- end = (P_)((StgClosure *)p)->payload +
- info->layout.payload.ptrs;
- for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- thread(p);
- }
- p += info->layout.payload.nptrs;
- break;
- }
-
- // the info table for a weak ptr lies about the number of ptrs
- // (because we have special GC routines for them, but we
- // want to use the standard evacuate code). So we have to
- // special case here.
- case WEAK:
- {
- StgWeak *w = (StgWeak *)p;
- thread((StgPtr)&w->key);
- thread((StgPtr)&w->value);
- thread((StgPtr)&w->finalizer);
- if (w->link != NULL) {
- thread((StgPtr)&w->link);
- }
- p += sizeofW(StgWeak);
- break;
- }
-
- // again, the info table for MVar isn't suitable here (it includes
- // the mut_link field as a pointer, and we don't want to
- // thread it).
- case MVAR:
- {
- StgMVar *mvar = (StgMVar *)p;
- thread((StgPtr)&mvar->head);
- thread((StgPtr)&mvar->tail);
- thread((StgPtr)&mvar->value);
- p += sizeofW(StgMVar);
- break;
- }
-
- // specialise this case, because we want to update the
- // mut_link field too.
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
- {
- StgIndOldGen *ind = (StgIndOldGen *)p;
- thread((StgPtr)&ind->indirectee);
- if (ind->mut_link != NULL) {
- thread((StgPtr)&ind->mut_link);
- }
- break;
- }
-
- case THUNK_SELECTOR:
- {
- StgSelector *s = (StgSelector *)p;
- thread((StgPtr)&s->selectee);
- p += THUNK_SELECTOR_sizeW();
- break;
- }
-
- case AP_UPD: // same as PAPs
- case PAP:
- {
- StgPAP* pap = (StgPAP *)p;
-
- thread((P_)&pap->fun);
- thread_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
- p += pap_sizeW(pap);
- break;
- }
-
- case ARR_WORDS:
- p += arr_words_sizeW((StgArrWords *)p);
- break;
-
- case MUT_ARR_PTRS:
- case MUT_ARR_PTRS_FROZEN:
- // follow everything
- {
- StgPtr next;
-
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- thread(p);
- }
- break;
- }
-
- case TSO:
- {
- StgTSO *tso = (StgTSO *)p;
- thread_stack(tso->sp, &(tso->stack[tso->stack_size]));
- thread((StgPtr)&tso->link);
- thread((StgPtr)&tso->global_link);
- p += tso_sizeW(tso);
- break;
- }
-
- default:
- barf("update_fwd: unknown/strange object %d", (int)(info->type));
- }
+ p = thread_obj(info, p);
}
}
}
update_fwd_compact( bdescr *blocks )
{
StgPtr p, q, free;
+#if 0
StgWord m;
+#endif
bdescr *bd, *free_bd;
StgInfoTable *info;
nat size;
info = get_threaded_info(p);
q = p;
- ASSERT(p && (LOOKS_LIKE_GHC_INFO(info)
- || IS_HUGS_CONSTR_INFO(info)));
- switch (info->type) {
- case FUN_0_1:
- case CONSTR_0_1:
- p += sizeofW(StgHeader) + 1;
- break;
-
- case FUN_1_0:
- case CONSTR_1_0:
- thread((StgPtr)&((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 1;
- break;
-
- case THUNK_1_0:
- thread((StgPtr)&((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
- break;
-
- case THUNK_0_1: // MIN_UPD_SIZE
- case THUNK_0_2:
- case FUN_0_2:
- case CONSTR_0_2:
- p += sizeofW(StgHeader) + 2;
- break;
-
- case THUNK_1_1:
- case FUN_1_1:
- case CONSTR_1_1:
- thread((StgPtr)&((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 2;
- break;
-
- case THUNK_2_0:
- case FUN_2_0:
- case CONSTR_2_0:
- thread((StgPtr)&((StgClosure *)p)->payload[0]);
- thread((StgPtr)&((StgClosure *)p)->payload[1]);
- p += sizeofW(StgHeader) + 2;
- break;
-
- case FUN:
- case THUNK:
- case CONSTR:
- case FOREIGN:
- case STABLE_NAME:
- case BCO:
- case IND_PERM:
- case MUT_VAR:
- case MUT_CONS:
- case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
- case BLACKHOLE:
- case BLACKHOLE_BQ:
- {
- StgPtr end;
-
- end = (P_)((StgClosure *)p)->payload +
- info->layout.payload.ptrs;
- for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- thread(p);
- }
- p += info->layout.payload.nptrs;
- break;
- }
-
- case WEAK:
- {
- StgWeak *w = (StgWeak *)p;
- thread((StgPtr)&w->key);
- thread((StgPtr)&w->value);
- thread((StgPtr)&w->finalizer);
- if (w->link != NULL) {
- thread((StgPtr)&w->link);
- }
- p += sizeofW(StgWeak);
- break;
- }
-
- case MVAR:
- {
- StgMVar *mvar = (StgMVar *)p;
- thread((StgPtr)&mvar->head);
- thread((StgPtr)&mvar->tail);
- thread((StgPtr)&mvar->value);
- p += sizeofW(StgMVar);
- break;
- }
-
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
- // specialise this case, because we want to update the
- // mut_link field too.
- {
- StgIndOldGen *ind = (StgIndOldGen *)p;
- thread((StgPtr)&ind->indirectee);
- if (ind->mut_link != NULL) {
- thread((StgPtr)&ind->mut_link);
- }
- p += sizeofW(StgIndOldGen);
- break;
- }
-
- case THUNK_SELECTOR:
- {
- StgSelector *s = (StgSelector *)p;
- thread((StgPtr)&s->selectee);
- p += THUNK_SELECTOR_sizeW();
- break;
- }
-
- case AP_UPD: // same as PAPs
- case PAP:
- {
- StgPAP* pap = (StgPAP *)p;
-
- thread((P_)&pap->fun);
- thread_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
- p += pap_sizeW(pap);
- break;
- }
-
- case ARR_WORDS:
- p += arr_words_sizeW((StgArrWords *)p);
- break;
-
- case MUT_ARR_PTRS:
- case MUT_ARR_PTRS_FROZEN:
- // follow everything
- {
- StgPtr next;
-
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- thread(p);
- }
- break;
- }
-
- case TSO:
- {
- StgTSO *tso = (StgTSO *)p;
- thread_stack(tso->sp, &(tso->stack[tso->stack_size]));
- thread((StgPtr)&tso->link);
- thread((StgPtr)&tso->global_link);
- p += tso_sizeW(tso);
- break;
- }
-
- default:
- barf("update_fwd: unknown/strange object %d", (int)(info->type));
- }
+ p = thread_obj(info, p);
size = p - q;
if (free + size > free_bd->start + BLOCK_SIZE_W) {
+ // unset the next bit in the bitmap to indicate that
+ // this object needs to be pushed into the next
+ // block. This saves us having to run down the
+ // threaded info pointer list twice during the next pass.
+ unmark(q+1,bd);
free_bd = free_bd->link;
free = free_bd->start;
+ } else {
+ ASSERT(is_marked(q+1,bd));
}
unthread(q,free);
}
}
-static void
-update_bkwd( bdescr *blocks )
-{
- StgPtr p;
- bdescr *bd;
- StgInfoTable *info;
-
- bd = blocks;
-
-#if defined(PAR)
- barf("update_bkwd: ToDo");
-#endif
-
- // cycle through all the blocks in the step
- for (; bd != NULL; bd = bd->link) {
- p = bd->start;
-
- // linearly scan the objects in this block
- while (p < bd->free) {
-
- // must unthread before we look at the info ptr...
- unthread(p,p);
-
- info = get_itbl((StgClosure *)p);
- ASSERT(p && (LOOKS_LIKE_GHC_INFO(info)
- || IS_HUGS_CONSTR_INFO(info)));
-
- p += obj_sizeW((StgClosure *)p,info);
- }
- }
-}
-
static nat
update_bkwd_compact( step *stp )
{
StgPtr p, free;
+#if 0
StgWord m;
+#endif
bdescr *bd, *free_bd;
StgInfoTable *info;
nat size, free_blocks;
- bd = free_bd = stp->blocks;
+ bd = free_bd = stp->old_blocks;
free = free_bd->start;
free_blocks = 1;
}
#endif
- // must unthread before we look at the info ptr...
- info = get_threaded_info(p);
-
- ASSERT(p && (LOOKS_LIKE_GHC_INFO(info)
- || IS_HUGS_CONSTR_INFO(info)));
-
- size = obj_sizeW((StgClosure *)p,info);
-
- if (free + size > free_bd->start + BLOCK_SIZE_W) {
+ if (!is_marked(p+1,bd)) {
// don't forget to update the free ptr in the block desc.
free_bd->free = free;
free_bd = free_bd->link;
}
unthread(p,free);
- move(free,p,size);
+ ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)p)->header.info));
+ info = get_itbl((StgClosure *)p);
+ size = obj_sizeW((StgClosure *)p,info);
- // Rebuild the mutable list for the old generation.
- // (the mut_once list is updated using threading, with
- // special cases for IND_OLDGEN and MUT_CONS above).
- if (ip_MUTABLE(info)) {
- recordMutable((StgMutClosure *)free);
+ if (free != p) {
+ move(free,p,size);
}
// relocate TSOs
freeChain(free_bd->link);
free_bd->link = NULL;
}
- stp->n_blocks = free_blocks;
return free_blocks;
-}
-
-static void
-update_bkwd_large( bdescr *blocks )
-{
- bdescr *bd;
-
- for (bd = blocks; bd != NULL; bd = bd->link ) {
- unthread(bd->start, bd->start);
- }
}
-
void
compact( void (*get_roots)(evac_fn) )
{
nat g, s, blocks;
step *stp;
- extern StgWeak *old_weak_ptr_list; // tmp
// 1. thread the roots
get_roots((evac_fn)thread);
// the weak pointer lists...
if (weak_ptr_list != NULL) {
- thread((StgPtr)&weak_ptr_list);
+ thread((StgPtr)(void *)&weak_ptr_list);
}
if (old_weak_ptr_list != NULL) {
- thread((StgPtr)&old_weak_ptr_list); // tmp
+ thread((StgPtr)(void *)&old_weak_ptr_list); // tmp
}
- // mutable lists (ToDo: all gens)
- thread((StgPtr)&oldest_gen->mut_list);
- thread((StgPtr)&oldest_gen->mut_once_list);
+ // mutable lists
+ for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
+ bdescr *bd;
+ StgPtr p;
+ for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
+ for (p = bd->start; p < bd->free; p++) {
+ thread(p);
+ }
+ }
+ }
// the global thread list
- thread((StgPtr)&all_threads);
+ thread((StgPtr)(void *)&all_threads);
+
+ // any threads resurrected during this GC
+ thread((StgPtr)(void *)&resurrected_threads);
+
+ // the task list
+ {
+ Task *task;
+ for (task = all_tasks; task != NULL; task = task->all_link) {
+ if (task->tso) {
+ thread((StgPtr)&task->tso);
+ }
+ }
+ }
// the static objects
thread_static(scavenged_static_objects);
// the stable pointer table
threadStablePtrTable((evac_fn)thread);
+ // the CAF list (used by GHCi)
+ markCAFs((evac_fn)thread);
+
// 2. update forward ptrs
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
+ if (g==0 && s ==0) continue;
stp = &generations[g].steps[s];
- IF_DEBUG(gc, fprintf(stderr,"update_fwd: %d.%d\n", stp->gen->no, stp->no););
+ IF_DEBUG(gc, debugBelch("update_fwd: %d.%d\n", stp->gen->no, stp->no););
- update_fwd(stp->to_blocks);
+ update_fwd(stp->blocks);
update_fwd_large(stp->scavenged_large_objects);
- if (g == RtsFlags.GcFlags.generations-1 && stp->blocks != NULL) {
- IF_DEBUG(gc, fprintf(stderr,"update_fwd: %d.%d (compact)\n", stp->gen->no, stp->no););
- update_fwd_compact(stp->blocks);
+ if (g == RtsFlags.GcFlags.generations-1 && stp->old_blocks != NULL) {
+ IF_DEBUG(gc, debugBelch("update_fwd: %d.%d (compact)\n", stp->gen->no, stp->no););
+ update_fwd_compact(stp->old_blocks);
}
}
}
// 3. update backward ptrs
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- stp = &generations[g].steps[s];
- IF_DEBUG(gc, fprintf(stderr,"update_bkwd: %d.%d\n", stp->gen->no, stp->no););
- update_bkwd(stp->to_blocks);
- update_bkwd_large(stp->scavenged_large_objects);
- if (g == RtsFlags.GcFlags.generations-1 && stp->blocks != NULL) {
- IF_DEBUG(gc, fprintf(stderr,"update_bkwd: %d.%d (compact)\n", stp->gen->no, stp->no););
- blocks = update_bkwd_compact(stp);
- IF_DEBUG(gc, fprintf(stderr,"update_bkwd: %d.%d (compact, old: %d blocks, now %d blocks)\n",
- stp->gen->no, stp->no,
- stp->n_blocks, blocks););
- stp->n_blocks = blocks;
- }
- }
+ stp = &oldest_gen->steps[0];
+ if (stp->old_blocks != NULL) {
+ blocks = update_bkwd_compact(stp);
+ IF_DEBUG(gc, debugBelch("update_bkwd: %d.%d (compact, old: %d blocks, now %d blocks)\n",
+ stp->gen->no, stp->no,
+ stp->n_old_blocks, blocks););
+ stp->n_old_blocks = blocks;
}
}