/* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.160 2003/09/23 15:31:02 simonmar Exp $
*
* (c) The GHC Team 1998-2003
*
* Generational garbage collector
*
* ---------------------------------------------------------------------------*/
#include "RtsUtils.h"
#include "Apply.h"
#include "Storage.h"
-#include "StoragePriv.h"
+#include "LdvProfile.h"
+#include "Updates.h"
#include "Stats.h"
#include "Schedule.h"
-#include "SchedAPI.h" // for ReverCAFs prototype
#include "Sanity.h"
#include "BlockAlloc.h"
#include "MBlock.h"
#include "ProfHeap.h"
#include "SchedAPI.h"
#include "Weak.h"
-#include "StablePriv.h"
#include "Prelude.h"
#include "ParTicky.h" // ToDo: move into Rts.h
#include "GCCompact.h"
#include "Signals.h"
+#include "STM.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
#endif
#include "RetainerProfile.h"
-#include "LdvProfile.h"
#include <string.h>
// Use a register argument for evacuate, if available.
#if __GNUC__ >= 2
-static StgClosure * evacuate (StgClosure *q) __attribute__((regparm(1)));
+#define REGPARM1 __attribute__((regparm(1)))
#else
-static StgClosure * evacuate (StgClosure *q);
+#define REGPARM1
#endif
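+// REGPARM1 asks gcc (2 or later) to pass evacuate's single argument
+// in a register, on targets where the regparm attribute is honoured;
+// with any other compiler it expands to nothing and the default
+// calling convention applies.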
+REGPARM1 static StgClosure * evacuate (StgClosure *q);
+
static void zero_static_object_list ( StgClosure* first_static );
static void zero_mutable_list ( StgMutClosure *first );
static bdescr *oldgen_scan_bd;
static StgPtr oldgen_scan;
-static inline rtsBool
+STATIC_INLINE rtsBool
mark_stack_empty(void)
{
return mark_sp == mark_stack;
}
-static inline rtsBool
+STATIC_INLINE rtsBool
mark_stack_full(void)
{
return mark_sp >= mark_splim;
}
-static inline void
+STATIC_INLINE void
reset_mark_stack(void)
{
mark_sp = mark_stack;
}
-static inline void
+STATIC_INLINE void
push_mark_stack(StgPtr p)
{
*mark_sp++ = p;
}
-static inline StgPtr
+STATIC_INLINE StgPtr
pop_mark_stack(void)
{
return *--mark_sp;
}
#endif
#if defined(DEBUG) && defined(GRAN)
- IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
+ IF_DEBUG(gc, debugBelch("@@ Starting garbage collection at %ld (%lx)\n",
Now, Now));
#endif
blockUserSignals();
#endif
+ // tell the STM to discard any cached closures it's hoping to re-use
+ stmPreGCHook();
+
// tell the stats department that we've started a GC
stat_startGC();
stp->bitmap = bitmap_bdescr;
bitmap = bitmap_bdescr->start;
- IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
+ IF_DEBUG(gc, debugBelch("bitmap_size: %d, bitmap: %p",
bitmap_size, bitmap););
// don't forget to fill it with zeros!
memset(bitmap, 0, bitmap_size);
- // for each block in this step, point to its bitmap from the
+ // For each block in this step, point to its bitmap from the
// block descriptor.
for (bd=stp->blocks; bd != NULL; bd = bd->link) {
bd->u.bitmap = bitmap;
bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
+
+ // Also at this point we set the BF_COMPACTED flag
+ // for this block. The invariant is that
+ // BF_COMPACTED is always unset, except during GC
+ // when it is set on those blocks which will be
+ // compacted.
+ bd->flags |= BF_COMPACTED;
}
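+ // (So during GC a test of the form "bd->flags & BF_COMPACTED"
+ // identifies exactly the blocks being compacted; the liveness
+ // check and evacuate() below rely on this.)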
}
}
*/
markStablePtrTable(mark_root);
-#ifdef INTERPRETER
- {
- /* ToDo: To fix the caf leak, we need to make the commented out
- * parts of this code do something sensible - as described in
- * the CAF document.
- */
- extern void markHugsObjects(void);
- markHugsObjects();
- }
-#endif
-
/* -------------------------------------------------------------------------
* Repeatedly scavenge all the areas we know about until there's no
* more scavenging to be done.
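* A rough sketch of the driver (elided from this excerpt): weak
* pointers can resurrect objects, so we iterate to a fixed point,
* along the lines of
*
*   loop:
*     if (traverse_weak_ptr_list()) goto loop;  // found more work
*     scavenge until each step's scan pointer catches up with
*       its allocation pointer;
*     if (anything new was evacuated) goto loop;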
// for a compacted step, just shift the new to-space
// onto the front of the now-compacted existing blocks.
for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
- bd->flags &= ~BF_EVACUATED; // now from-space
+ bd->flags &= ~BF_EVACUATED; // now from-space
}
// tack the new blocks on the end of the existing blocks
if (stp->blocks == NULL) {
if (next == NULL) {
bd->link = stp->to_blocks;
}
+ // NB. this step might not be compacted next
+ // time, so reset the BF_COMPACTED flags.
+ // They are set before GC if we're going to
+ // compact. (search for BF_COMPACTED above).
+ bd->flags &= ~BF_COMPACTED;
}
}
// add the new blocks to the block tally
stp->blocks = stp->to_blocks;
stp->n_blocks = stp->n_to_blocks;
for (bd = stp->blocks; bd != NULL; bd = bd->link) {
- bd->flags &= ~BF_EVACUATED; // now from-space
+ bd->flags &= ~BF_EVACUATED; // now from-space
}
}
stp->to_blocks = NULL;
oldest_gen->steps[0].n_blocks >
(RtsFlags.GcFlags.compactThreshold * max) / 100))) {
oldest_gen->steps[0].is_compacted = 1;
-// fprintf(stderr,"compaction: on\n", live);
+// debugBelch("compaction: on\n", live);
} else {
oldest_gen->steps[0].is_compacted = 0;
-// fprintf(stderr,"compaction: off\n", live);
+// debugBelch("compaction: off\n", live);
}
// if we're going to go over the maximum heap size, reduce the
}
#if 0
- fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
+ debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
min_alloc, size, max);
#endif
int pc_free;
adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
- IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
+ IF_DEBUG(gc, debugBelch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
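// Illustrative numbers: with maxHeapSize = 1000 blocks and
// blocks = 400, adjusted_blocks = 1000 - 2*400 = 200, so
// pc_free = 200 * 100 / 1000 = 20 (percent free).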
if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
heapOverflow();
w->link = weak_ptr_list;
weak_ptr_list = w;
flag = rtsTrue;
- IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p",
+ IF_DEBUG(weak, debugBelch("Weak pointer still alive at %p -> %p",
w, w->key));
continue;
}
default:
barf("traverse_weak_ptr_list");
+ return rtsTrue;
}
}
}
// check the mark bit for compacted steps
- if (bd->step->is_compacted && is_marked((P_)p,bd)) {
+ if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
return p;
}
*root = evacuate(*root);
}
-static __inline__ void
+STATIC_INLINE void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
// Source object must be in from-space:
ASSERT((Bdescr((P_)p)->flags & BF_EVACUATED) == 0);
// not true: (ToDo: perhaps it should be)
// ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
- p->header.info = &stg_EVACUATED_info;
+ SET_INFO(p, &stg_EVACUATED_info);
((StgEvacuated *)p)->evacuee = dest;
}
-static __inline__ StgClosure *
+STATIC_INLINE StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
P_ to, from, dest;
-------------------------------------------------------------------------- */
-static inline void
+STATIC_INLINE void
evacuate_large(StgPtr p)
{
bdescr *bd = Bdescr(p);
extra reads/writes than we save.
-------------------------------------------------------------------------- */
-static StgClosure *
+REGPARM1 static StgClosure *
evacuate(StgClosure *q)
{
StgClosure *to;
/* If the object is in a step that we're compacting, then we
* need to use an alternative evacuate procedure.
*/
- if (bd->step->is_compacted) {
+ if (bd->flags & BF_COMPACTED) {
if (!is_marked((P_)q,bd)) {
mark((P_)q,bd);
if (mark_stack_full()) {
case UPDATE_FRAME:
case STOP_FRAME:
case CATCH_FRAME:
+ case CATCH_STM_FRAME:
+ case CATCH_RETRY_FRAME:
+ case ATOMICALLY_FRAME:
// shouldn't see these
barf("evacuate: stack frame at %p\n", q);
//ToDo: derive size etc from reverted IP
//to = copy(q,size,stp);
IF_DEBUG(gc,
- belch("@@ evacuate: RBH %p (%s) to %p (%s)",
+ debugBelch("@@ evacuate: RBH %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
return to;
}
ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
to = copy(q,sizeofW(StgBlockedFetch),stp);
IF_DEBUG(gc,
- belch("@@ evacuate: %p (%s) to %p (%s)",
+ debugBelch("@@ evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
return to;
ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
to = copy(q,sizeofW(StgFetchMe),stp);
IF_DEBUG(gc,
- belch("@@ evacuate: %p (%s) to %p (%s)",
+ debugBelch("@@ evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
return to;
ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
IF_DEBUG(gc,
- belch("@@ evacuate: %p (%s) to %p (%s)",
+ debugBelch("@@ evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
return to;
#endif
+ case TREC_HEADER:
+ return copy(q,sizeofW(StgTRecHeader),stp);
+
+ case TVAR_WAIT_QUEUE:
+ return copy(q,sizeofW(StgTVarWaitQueue),stp);
+
+ case TVAR:
+ return copy(q,sizeofW(StgTVar),stp);
+
+ case TREC_CHUNK:
+ return copy(q,sizeofW(StgTRecChunk),stp);
+
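+ // The four STM closure types above all have statically known
+ // sizes, so a plain copy() into the target step suffices here;
+ // their pointer fields are followed later, in scavenge().
+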
default:
barf("evacuate: strange closure type %d", (int)(info->type));
}
thunk is unchanged.
-------------------------------------------------------------------------- */
+STATIC_INLINE rtsBool
+is_to_space ( StgClosure *p )
+{
+ bdescr *bd;
+
+ bd = Bdescr((StgPtr)p);
+ if (HEAP_ALLOCED(p) &&
+ ((bd->flags & BF_EVACUATED)
+ || ((bd->flags & BF_COMPACTED) &&
+ is_marked((P_)p,bd)))) {
+ return rtsTrue;
+ } else {
+ return rtsFalse;
+ }
+}
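+
+// Used by eval_thunk_selector() below: we bale out of the selection
+// whenever the selectee, or the field selected from it, turns out to
+// be in to-space already.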
+
static StgClosure *
eval_thunk_selector( nat field, StgSelector * p )
{
// eval_thunk_selector(). There are various ways this could
// happen:
//
- // - following an IND_STATIC
+ // 1. following an IND_STATIC
//
- // - when the old generation is compacted, the mark phase updates
- // from-space pointers to be to-space pointers, and we can't
- // reliably tell which we're following (eg. from an IND_STATIC).
+ // 2. when the old generation is compacted, the mark phase updates
+ // from-space pointers to be to-space pointers, and we can't
+ // reliably tell which we're following (eg. from an IND_STATIC).
//
- // So we use the block-descriptor test to find out if we're in
- // to-space.
+ // 3. compacting GC again: if we're looking at a constructor in
+ // the compacted generation, it might point directly to objects
+ // in to-space. We must bale out here, otherwise doing the selection
+ // will result in a to-space pointer being returned.
+ //
+ // (1) is dealt with using a BF_EVACUATED test on the
+ // selectee. (2) and (3): we can tell if we're looking at an
+ // object in the compacted generation that might point to
+ // to-space objects by testing that (a) it is BF_COMPACTED, (b)
+ // the compacted generation is being collected, and (c) the
+ // object is marked. Only a marked object may have pointers that
+ // point to to-space objects, because that happens when
+ // scavenging.
//
- if (HEAP_ALLOCED(selectee) &&
- Bdescr((StgPtr)selectee)->flags & BF_EVACUATED) {
+ // The to-space test is now embodied in the is_to_space() inline
+ // function, as it is re-used below.
+ //
+ if (is_to_space(selectee)) {
goto bale_out;
}
ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
info->layout.payload.nptrs));
- // ToDo: shouldn't we test whether this pointer is in
- // to-space?
- return selectee->payload[field];
+ // Select the right field from the constructor, and check
+ // that the result isn't in to-space. It might be in
+ // to-space if, for example, this constructor contains
+ // pointers to younger-gen objects (and is on the mut-once
+ // list).
+ //
+ {
+ StgClosure *q;
+ q = selectee->payload[field];
+ if (is_to_space(q)) {
+ goto bale_out;
+ } else {
+ return q;
+ }
+ }
case IND:
case IND_PERM:
// For the purposes of LDV profiling, we have destroyed
// the original selector thunk.
SET_INFO(p, info_ptr);
- LDV_recordDead_FILL_SLOP_DYNAMIC(selectee);
+ LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(selectee);
#endif
((StgInd *)selectee)->indirectee = val;
SET_INFO(selectee,&stg_IND_info);
-#ifdef PROFILING
+
// For the purposes of LDV profiling, we have created an
// indirection.
- LDV_recordCreate(selectee);
-#endif
+ LDV_RECORD_CREATE(selectee);
+
selectee = val;
goto selector_loop;
}
* srt field in the info table. That's ok, because we'll
* never dereference it.
*/
-static inline void
+STATIC_INLINE void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
nat bitmap;
}
-static inline void
+STATIC_INLINE void
scavenge_thunk_srt(const StgInfoTable *info)
{
StgThunkInfoTable *thunk_info;
thunk_info = itbl_to_thunk_itbl(info);
- scavenge_srt((StgClosure **)thunk_info->srt, thunk_info->i.srt_bitmap);
+ scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}
-static inline void
+STATIC_INLINE void
scavenge_fun_srt(const StgInfoTable *info)
{
StgFunInfoTable *fun_info;
fun_info = itbl_to_fun_itbl(info);
- scavenge_srt((StgClosure **)fun_info->srt, fun_info->i.srt_bitmap);
+ scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
-static inline void
+STATIC_INLINE void
scavenge_ret_srt(const StgInfoTable *info)
{
StgRetInfoTable *ret_info;
ret_info = itbl_to_ret_itbl(info);
- scavenge_srt((StgClosure **)ret_info->srt, ret_info->i.srt_bitmap);
+ scavenge_srt((StgClosure **)GET_SRT(ret_info), ret_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
(StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
}
+ // scavenge the current transaction record
+ (StgClosure *)tso->trec = evacuate((StgClosure *)tso->trec);
+
// scavenge this thread's stack
scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
in PAPs.
-------------------------------------------------------------------------- */
-static inline StgPtr
+STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
StgPtr p;
nat size;
p = (StgPtr)args;
- switch (fun_info->fun_type) {
+ switch (fun_info->f.fun_type) {
case ARG_GEN:
- bitmap = BITMAP_BITS(fun_info->bitmap);
- size = BITMAP_SIZE(fun_info->bitmap);
+ bitmap = BITMAP_BITS(fun_info->f.bitmap);
+ size = BITMAP_SIZE(fun_info->f.bitmap);
goto small_bitmap;
case ARG_GEN_BIG:
- size = ((StgLargeBitmap *)fun_info->bitmap)->size;
- scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size);
+ size = GET_FUN_LARGE_BITMAP(fun_info)->size;
+ scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
p += size;
break;
default:
- bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]);
- size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->fun_type]);
+ bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
+ size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
small_bitmap:
while (size > 0) {
if ((bitmap & 1) == 0) {
return p;
}
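// NB. in these argument bitmaps a clear bit marks a pointer word,
// which gets evacuated; a set bit marks a non-pointer word, which
// is simply skipped over.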
-static inline StgPtr
+STATIC_INLINE StgPtr
scavenge_PAP (StgPAP *pap)
{
StgPtr p;
p = (StgPtr)pap->payload;
size = pap->n_args;
- switch (fun_info->fun_type) {
+ switch (fun_info->f.fun_type) {
case ARG_GEN:
- bitmap = BITMAP_BITS(fun_info->bitmap);
+ bitmap = BITMAP_BITS(fun_info->f.bitmap);
goto small_bitmap;
case ARG_GEN_BIG:
- scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size);
+ scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
p += size;
break;
case ARG_BCO:
p += size;
break;
default:
- bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]);
+ bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
small_bitmap:
size = pap->n_args;
while (size > 0) {
LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
//
- // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
+ // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
//
SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
-#ifdef PROFILING
- // @LDV profiling
+
// We pretend that p has just been created.
- LDV_recordCreate((StgClosure *)p);
-#endif
+ LDV_RECORD_CREATE((StgClosure *)p);
}
// fall through
case IND_OLDGEN_PERM:
{
StgPtr next;
+ // Set the mut_link field to NULL, so that we will put this
+ // array back on the mutable list if it is subsequently thawed
+ // by unsafeThaw#.
+ ((StgMutArrPtrs*)p)->mut_link = NULL;
+
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
recordMutable((StgMutClosure *)to);
failed_to_evac = rtsFalse; // mutable anyhow.
IF_DEBUG(gc,
- belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
+ debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue));
// ToDo: use size of reverted closure here!
p += BLACKHOLE_sizeW();
recordMutable((StgMutClosure *)bf);
}
IF_DEBUG(gc,
- belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
+ debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
bf, info_type((StgClosure *)bf),
bf->node, info_type(bf->node)));
p += sizeofW(StgBlockedFetch);
recordMutable((StgMutClosure *)fmbq);
}
IF_DEBUG(gc,
- belch("@@ scavenge: %p (%s) exciting, isn't it",
+ debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
p, info_type((StgClosure *)p)));
p += sizeofW(StgFetchMeBlockingQueue);
break;
}
#endif
+ case TVAR_WAIT_QUEUE:
+ {
+ StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
+ evac_gen = 0;
+ (StgClosure *)wq->waiting_tso = evacuate((StgClosure*)wq->waiting_tso);
+ (StgClosure *)wq->next_queue_entry = evacuate((StgClosure*)wq->next_queue_entry);
+ (StgClosure *)wq->prev_queue_entry = evacuate((StgClosure*)wq->prev_queue_entry);
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)wq);
+ failed_to_evac = rtsFalse; // mutable
+ p += sizeofW(StgTVarWaitQueue);
+ break;
+ }
+
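+ // The same pattern recurs for the remaining STM closure types
+ // below: these objects are always mutable, so their fields are
+ // evacuated with evac_gen = 0, the closure goes back on the
+ // mutable list via recordMutable(), and failed_to_evac is reset.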
+ case TVAR:
+ {
+ StgTVar *tvar = ((StgTVar *) p);
+ evac_gen = 0;
+ (StgClosure *)tvar->current_value = evacuate((StgClosure*)tvar->current_value);
+ (StgClosure *)tvar->first_wait_queue_entry = evacuate((StgClosure*)tvar->first_wait_queue_entry);
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)tvar);
+ failed_to_evac = rtsFalse; // mutable
+ p += sizeofW(StgTVar);
+ break;
+ }
+
+ case TREC_HEADER:
+ {
+ StgTRecHeader *trec = ((StgTRecHeader *) p);
+ evac_gen = 0;
+ (StgClosure *)trec->enclosing_trec = evacuate((StgClosure*)trec->enclosing_trec);
+ (StgClosure *)trec->current_chunk = evacuate((StgClosure*)trec->current_chunk);
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)trec);
+ failed_to_evac = rtsFalse; // mutable
+ p += sizeofW(StgTRecHeader);
+ break;
+ }
+
+ case TREC_CHUNK:
+ {
+ StgWord i;
+ StgTRecChunk *tc = ((StgTRecChunk *) p);
+ TRecEntry *e = &(tc -> entries[0]);
+ evac_gen = 0;
+ (StgClosure *)tc->prev_chunk = evacuate((StgClosure*)tc->prev_chunk);
+ for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
+ (StgClosure *)e->tvar = evacuate((StgClosure*)e->tvar);
+ (StgClosure *)e->expected_value = evacuate((StgClosure*)e->expected_value);
+ (StgClosure *)e->new_value = evacuate((StgClosure*)e->new_value);
+ }
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)tc);
+ failed_to_evac = rtsFalse; // mutable
+ p += sizeofW(StgTRecChunk);
+ break;
+ }
+
default:
barf("scavenge: unimplemented/strange closure type %d @ %p",
info->type, p);
{
StgPtr next;
+ // Set the mut_link field to NULL, so that we will put this
+ // array on the mutable list if it is subsequently thawed
+ // by unsafeThaw#.
+ ((StgMutArrPtrs*)p)->mut_link = NULL;
+
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
recordMutable((StgMutClosure *)rbh);
failed_to_evac = rtsFalse; // mutable anyhow.
IF_DEBUG(gc,
- belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
+ debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue));
break;
}
recordMutable((StgMutClosure *)bf);
}
IF_DEBUG(gc,
- belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
+ debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
bf, info_type((StgClosure *)bf),
bf->node, info_type(bf->node)));
break;
recordMutable((StgMutClosure *)fmbq);
}
IF_DEBUG(gc,
- belch("@@ scavenge: %p (%s) exciting, isn't it",
+ debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
p, info_type((StgClosure *)p)));
break;
}
#endif // PAR
+ case TVAR_WAIT_QUEUE:
+ {
+ StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
+ evac_gen = 0;
+ (StgClosure *)wq->waiting_tso = evacuate((StgClosure*)wq->waiting_tso);
+ (StgClosure *)wq->next_queue_entry = evacuate((StgClosure*)wq->next_queue_entry);
+ (StgClosure *)wq->prev_queue_entry = evacuate((StgClosure*)wq->prev_queue_entry);
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)wq);
+ failed_to_evac = rtsFalse; // mutable
+ break;
+ }
+
+ case TVAR:
+ {
+ StgTVar *tvar = ((StgTVar *) p);
+ evac_gen = 0;
+ (StgClosure *)tvar->current_value = evacuate((StgClosure*)tvar->current_value);
+ (StgClosure *)tvar->first_wait_queue_entry = evacuate((StgClosure*)tvar->first_wait_queue_entry);
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)tvar);
+ failed_to_evac = rtsFalse; // mutable
+ break;
+ }
+
+ case TREC_CHUNK:
+ {
+ StgWord i;
+ StgTRecChunk *tc = ((StgTRecChunk *) p);
+ TRecEntry *e = &(tc -> entries[0]);
+ evac_gen = 0;
+ (StgClosure *)tc->prev_chunk = evacuate((StgClosure*)tc->prev_chunk);
+ for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
+ (StgClosure *)e->tvar = evacuate((StgClosure*)e->tvar);
+ (StgClosure *)e->expected_value = evacuate((StgClosure*)e->expected_value);
+ (StgClosure *)e->new_value = evacuate((StgClosure*)e->new_value);
+ }
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)tc);
+ failed_to_evac = rtsFalse; // mutable
+ break;
+ }
+
+ case TREC_HEADER:
+ {
+ StgTRecHeader *trec = ((StgTRecHeader *) p);
+ evac_gen = 0;
+ (StgClosure *)trec->enclosing_trec = evacuate((StgClosure*)trec->enclosing_trec);
+ (StgClosure *)trec->current_chunk = evacuate((StgClosure*)trec->current_chunk);
+ evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)trec);
+ failed_to_evac = rtsFalse; // mutable
+ break;
+ }
+
default:
barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
info->type, p);
// start a new linear scan if the mark stack overflowed at some point
if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
- IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
+ IF_DEBUG(gc, debugBelch("scavenge_mark_stack: starting linear scan"));
mark_stack_overflowed = rtsFalse;
oldgen_scan_bd = oldest_gen->steps[0].blocks;
oldgen_scan = oldgen_scan_bd->start;
// follow everything
StgPtr next;
+ // Set the mut_link field to NULL, so that we will put this
+ // array on the mutable list if it is subsequently thawed
+ // by unsafeThaw#.
+ ((StgMutArrPtrs*)p)->mut_link = NULL;
+
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
} else {
size = gen->steps[0].scan - start;
}
- belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
+ debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
}
#endif
(StgClosure *)*q = evacuate((StgClosure *)*q);
}
evac_gen = 0;
+ // Set the mut_link field to NULL, so that we will put this
+ // array back on the mutable list if it is subsequently thawed
+ // by unsafeThaw#.
p->mut_link = NULL;
if (failed_to_evac) {
failed_to_evac = rtsFalse;
}
#endif
+ case TVAR_WAIT_QUEUE:
+ {
+ StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
+ (StgClosure *)wq->waiting_tso = evacuate((StgClosure*)wq->waiting_tso);
+ (StgClosure *)wq->next_queue_entry = evacuate((StgClosure*)wq->next_queue_entry);
+ (StgClosure *)wq->prev_queue_entry = evacuate((StgClosure*)wq->prev_queue_entry);
+ p->mut_link = gen->mut_list;
+ gen->mut_list = p;
+ continue;
+ }
+
+ case TVAR:
+ {
+ StgTVar *tvar = ((StgTVar *) p);
+ (StgClosure *)tvar->current_value = evacuate((StgClosure*)tvar->current_value);
+ (StgClosure *)tvar->first_wait_queue_entry = evacuate((StgClosure*)tvar->first_wait_queue_entry);
+ p->mut_link = gen->mut_list;
+ gen->mut_list = p;
+ continue;
+ }
+
+ case TREC_CHUNK:
+ {
+ StgWord i;
+ StgTRecChunk *tc = ((StgTRecChunk *) p);
+ TRecEntry *e = &(tc -> entries[0]);
+ (StgClosure *)tc->prev_chunk = evacuate((StgClosure*)tc->prev_chunk);
+ for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
+ (StgClosure *)e->tvar = evacuate((StgClosure*)e->tvar);
+ (StgClosure *)e->expected_value = evacuate((StgClosure*)e->expected_value);
+ (StgClosure *)e->new_value = evacuate((StgClosure*)e->new_value);
+ }
+ p->mut_link = gen->mut_list;
+ gen->mut_list = p;
+ continue;
+ }
+
+ case TREC_HEADER:
+ {
+ StgTRecHeader *trec = ((StgTRecHeader *) p);
+ (StgClosure *)trec->enclosing_trec = evacuate((StgClosure*)trec->enclosing_trec);
+ (StgClosure *)trec->current_chunk = evacuate((StgClosure*)trec->current_chunk);
+ p->mut_link = gen->mut_list;
+ gen->mut_list = p;
+ continue;
+ }
+
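+ // Note the contrast with the scavenge() cases above: an object
+ // reached via the mutable list is already known to be mutable, so
+ // each case here just evacuates its fields and re-links p onto
+ // gen->mut_list.
+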
default:
// shouldn't have anything else on the mutables list
barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
}
}
-static inline StgPtr
+STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
while (size > 0) {
StgWord bitmap;
nat size;
- //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
+ //IF_DEBUG(sanity, debugBelch(" scavenging stack between %p and %p", p, stack_end));
/*
* Each time around this loop, we are looking at a chunk of stack
continue;
// small bitmap (< 32 entries, or 64 on a 64-bit machine)
+ case CATCH_STM_FRAME:
+ case CATCH_RETRY_FRAME:
+ case ATOMICALLY_FRAME:
case STOP_FRAME:
case CATCH_FRAME:
case RET_SMALL:
p = scavenge_small_bitmap(p, size, bitmap);
follow_srt:
- scavenge_srt((StgClosure **)info->srt, info->i.srt_bitmap);
+ scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
continue;
case RET_BCO: {
{
nat size;
- size = info->i.layout.large_bitmap->size;
+ size = GET_LARGE_BITMAP(&info->i)->size;
p++;
- scavenge_large_bitmap(p, info->i.layout.large_bitmap, size);
+ scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
p += size;
// and don't forget to follow the SRT
goto follow_srt;
dyn = ((StgRetDyn *)p)->liveness;
// traverse the bitmap first
- bitmap = GET_LIVENESS(dyn);
+ bitmap = RET_DYN_LIVENESS(dyn);
p = (P_)&((StgRetDyn *)p)->payload[0];
size = RET_DYN_BITMAP_SIZE;
p = scavenge_small_bitmap(p, size, bitmap);
// skip over the non-ptr words
- p += GET_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
+ p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
// follow the ptr words
- for (size = GET_PTRS(dyn); size > 0; size--) {
+ for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
p++;
}
for (c = (StgIndStatic *)caf_list; c != NULL;
c = (StgIndStatic *)c->static_link)
{
- c->header.info = c->saved_info;
+ SET_INFO(c, c->saved_info);
c->saved_info = NULL;
// could, but not necessary: c->static_link = NULL;
}
ASSERT(info->type == IND_STATIC);
if (STATIC_LINK(info,p) == NULL) {
- IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
+ IF_DEBUG(gccafs, debugBelch("CAF gc'd at 0x%04lx", (long)p));
// black hole it
SET_INFO(p,&stg_BLACKHOLE_info);
p = STATIC_LINK2(info,p);
}
- // belch("%d CAFs live", i);
+ // debugBelch("%d CAFs live", i);
}
#endif
if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
- belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
+ debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
#ifdef PROFILING
// @LDV profiling
LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
#endif
SET_INFO(bh,&stg_BLACKHOLE_info);
-#ifdef PROFILING
- // @LDV profiling
+
// We pretend that bh has just been created.
- LDV_recordCreate(bh);
-#endif
+ LDV_RECORD_CREATE(bh);
}
frame = (StgClosure *) ((StgUpdateFrame *)frame + 1);
bh->header.info != &stg_BLACKHOLE_BQ_info &&
bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
- belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
+ debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
#ifdef DEBUG
/* zero out the slop so that the sanity checker can tell
* same size as a BLACKHOLE in any case.
*/
if (bh_info->type != THUNK_SELECTOR) {
- for (i = np; i < np + nw; i++) {
- ((StgClosure *)bh)->payload[i] = 0;
+ for (i = 0; i < np + nw; i++) {
+ ((StgClosure *)bh)->payload[i] = INVALID_OBJECT;
}
}
}
// We pretend that bh is now dead.
LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
#endif
- // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
+ // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
SET_INFO(bh,&stg_BLACKHOLE_info);
-#ifdef PROFILING
+
// We pretend that bh has just been created.
- LDV_recordCreate(bh);
-#endif
+ LDV_RECORD_CREATE(bh);
}
prev_was_update_frame = rtsTrue;
void *gap_start, *next_gap_start, *gap_end;
nat chunk_size;
- next_gap_start = (void *)gap + sizeof(StgUpdateFrame);
+ next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
sp = next_gap_start;
while ((StgPtr)gap > tso->sp) {
// we're working in *bytes* now...
gap_start = next_gap_start;
- gap_end = gap_start - gap->gap_size * sizeof(W_);
+ gap_end = (void*) ((unsigned char*)gap_start - gap->gap_size * sizeof(W_));
gap = gap->next_gap;
- next_gap_start = (void *)gap + sizeof(StgUpdateFrame);
+ next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
- chunk_size = gap_end - next_gap_start;
- sp -= chunk_size;
+ chunk_size = (unsigned char*)gap_end - (unsigned char*)next_gap_start;
+ (unsigned char*)sp -= chunk_size;
memmove(sp, next_gap_start, chunk_size);
}
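// Each iteration slides the live chunk lying between two adjacent
// gaps down over the gap space accumulated so far. memmove rather
// than memcpy, because source and destination can overlap.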
p = gen->mut_once_list;
next = p->mut_link;
- fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
+ debugBelch("@@ Mut once list %p: ", gen->mut_once_list);
for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
- fprintf(stderr, "%p (%s), ",
+ debugBelch("%p (%s), ",
p, info_type((StgClosure *)p));
}
- fputc('\n', stderr);
+ debugBelch("\n");
}
void
p = gen->mut_list;
next = p->mut_link;
- fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
+ debugBelch("@@ Mutable list %p: ", gen->mut_list);
for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
- fprintf(stderr, "%p (%s), ",
+ debugBelch("%p (%s), ",
p, info_type((StgClosure *)p));
}
- fputc('\n', stderr);
+ debugBelch("\n");
}
-static inline rtsBool
+STATIC_INLINE rtsBool
maybeLarge(StgClosure *closure)
{
StgInfoTable *info = get_itbl(closure);