/* -----------------------------------------------------------------------------
*
- * (c) The GHC Team 1998-2006
+ * (c) The GHC Team 1998-2008
*
* Generational garbage collector: evacuation functions
*
#include "MBlock.h"
#include "Evac.h"
#include "GC.h"
+#include "GCThread.h"
#include "GCUtils.h"
#include "Compact.h"
#include "Prelude.h"
#include "LdvProfile.h"
+#include "Trace.h"
+
+#if defined(PROF_SPIN) && defined(THREADED_RTS) && defined(PARALLEL_GC)
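+// Diagnostic counter: the number of times a GC thread spun on a
+// WHITEHOLEd closure while another thread was copying it (see the
+// WHITEHOLE loop in copyPart() below).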
+StgWord64 whitehole_spin = 0;
+#endif
+
+#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
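+// The threaded RTS compiles this file twice: once with PARALLEL_GC
+// for multi-threaded collections, and once without it to produce a
+// sequential version, renamed here to evacuate1(), for collections
+// performed by a single GC thread.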
+#define evacuate(p) evacuate1(p)
+#define HEAP_ALLOCED_GC(p) HEAP_ALLOCED(p)
+#endif
+
+#if !defined(PARALLEL_GC)
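+// With no parallel GC there is no locking to avoid, so the _nolock
+// variant is just plain copy_tag().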
+#define copy_tag_nolock(p, info, src, size, stp, tag) \
+ copy_tag(p, info, src, size, stp, tag)
+#endif
/* Used to avoid long recursion due to selector thunks
*/
-lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 16
-static StgClosure * eval_thunk_selector (StgSelector * p, rtsBool);
+static void eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool);
+STATIC_INLINE void evacuate_large(StgPtr p);
+
+/* -----------------------------------------------------------------------------
+ Allocate some space in which to copy an object.
+ -------------------------------------------------------------------------- */
-STATIC_INLINE void
-upd_evacuee(StgClosure *p, StgClosure *dest)
+STATIC_INLINE StgPtr
+alloc_for_copy (nat size, step *stp)
{
- // not true: (ToDo: perhaps it should be)
- // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
- SET_INFO(p, &stg_EVACUATED_info);
- ((StgEvacuated *)p)->evacuee = dest;
+ StgPtr to;
+ step_workspace *ws;
+
+ /* Find out where we're going, using the handy "to" pointer in
+ * the step of the source object. If it turns out we need to
+ * evacuate to an older generation, adjust it here (see comment
+ * by evacuate()).
+ */
+ if (stp < gct->evac_step) {
+ if (gct->eager_promotion) {
+ stp = gct->evac_step;
+ } else {
+ gct->failed_to_evac = rtsTrue;
+ }
+ }
+
+ ws = &gct->steps[stp->abs_no];
+ // this compiles to a single mem access to stp->abs_no only
+
+ /* chain a new block onto the to-space for the destination step if
+ * necessary.
+ */
+ to = ws->todo_free;
+ if (to + size > ws->todo_lim) {
+ to = todo_block_full(size, ws);
+ }
+ ws->todo_free = to + size;
+ ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);
+
+ return to;
}
+/* -----------------------------------------------------------------------------
+ The evacuate() code
+ -------------------------------------------------------------------------- */
-STATIC_INLINE StgClosure *
-copy_tag(StgClosure *src, nat size, step *stp,StgWord tag)
+STATIC_INLINE GNUC_ATTR_HOT void
+copy_tag(StgClosure **p, const StgInfoTable *info,
+ StgClosure *src, nat size, step *stp, StgWord tag)
{
- StgPtr to, from;
- nat i;
-#ifdef PROFILING
- // @LDV profiling
- nat size_org = size;
-#endif
+ StgPtr to, from;
+ nat i;
- TICK_GC_WORDS_COPIED(size);
- /* Find out where we're going, using the handy "to" pointer in
- * the step of the source object. If it turns out we need to
- * evacuate to an older generation, adjust it here (see comment
- * by evacuate()).
- */
- if (stp->gen_no < evac_gen) {
- if (eager_promotion) {
- stp = &generations[evac_gen].steps[0];
- } else {
- failed_to_evac = rtsTrue;
- }
- }
-
- /* chain a new block onto the to-space for the destination step if
- * necessary.
- */
- if (stp->hp + size >= stp->hpLim) {
- gc_alloc_block(stp);
- }
+ to = alloc_for_copy(size,stp);
+
+ TICK_GC_WORDS_COPIED(size);
- to = stp->hp;
- from = (StgPtr)src;
- stp->hp = to + size;
- for (i = 0; i < size; i++) { // unroll for small i
- to[i] = from[i];
- }
+ from = (StgPtr)src;
+ to[0] = (W_)info;
+ for (i = 1; i < size; i++) { // unroll for small i
+ to[i] = from[i];
+ }
- /* retag pointer before updating EVACUATE closure and returning */
- to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
+// if (to+size+2 < bd->start + BLOCK_SIZE_W) {
+// __builtin_prefetch(to + size + 2, 1);
+// }
- upd_evacuee((StgClosure *)from,(StgClosure *)to);
+#if defined(PARALLEL_GC)
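+    // Claim the closure by atomically swapping its info pointer for a
+    // forwarding pointer.  If the cas() fails, another thread
+    // evacuated src first, so we re-run evacuate() to pick up the
+    // forwarding pointer it installed.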
+ {
+ const StgInfoTable *new_info;
+ new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
+ if (new_info != info) {
+ return evacuate(p); // does the failed_to_evac stuff
+ } else {
+ *p = TAG_CLOSURE(tag,(StgClosure*)to);
+ }
+ }
+#else
+ src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
+ *p = TAG_CLOSURE(tag,(StgClosure*)to);
+#endif
#ifdef PROFILING
- // We store the size of the just evacuated object in the LDV word so that
- // the profiler can guess the position of the next object later.
- SET_EVACUAEE_FOR_LDV(from, size_org);
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ SET_EVACUAEE_FOR_LDV(from, size);
#endif
- return (StgClosure *)to;
}
-// Same as copy() above, except the object will be allocated in memory
-// that will not be scavenged. Used for object that have no pointer
-// fields.
-STATIC_INLINE StgClosure *
-copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
+#if defined(PARALLEL_GC)
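+// As copy_tag(), but without the atomic cas() on the info pointer.
+// Two threads racing here may each make their own copy of the object;
+// since this variant is only used for immutable closures, that wastes
+// a little space but is otherwise harmless.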
+STATIC_INLINE void
+copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
+ StgClosure *src, nat size, step *stp, StgWord tag)
{
- StgPtr to, from;
- nat i;
-#ifdef PROFILING
- // @LDV profiling
- nat size_org = size;
-#endif
-
- TICK_GC_WORDS_COPIED(size);
- /* Find out where we're going, using the handy "to" pointer in
- * the step of the source object. If it turns out we need to
- * evacuate to an older generation, adjust it here (see comment
- * by evacuate()).
- */
- if (stp->gen_no < evac_gen) {
- if (eager_promotion) {
- stp = &generations[evac_gen].steps[0];
- } else {
- failed_to_evac = rtsTrue;
- }
- }
+ StgPtr to, from;
+ nat i;
- /* chain a new block onto the to-space for the destination step if
- * necessary.
- */
- if (stp->scavd_hp + size >= stp->scavd_hpLim) {
- gc_alloc_scavd_block(stp);
- }
-
- to = stp->scavd_hp;
- from = (StgPtr)src;
- stp->scavd_hp = to + size;
- for (i = 0; i < size; i++) { // unroll for small i
- to[i] = from[i];
- }
+ to = alloc_for_copy(size,stp);
+ *p = TAG_CLOSURE(tag,(StgClosure*)to);
+ src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
+
+ TICK_GC_WORDS_COPIED(size);
- /* retag pointer before updating EVACUATE closure and returning */
- to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
+ from = (StgPtr)src;
+ to[0] = (W_)info;
+ for (i = 1; i < size; i++) { // unroll for small i
+ to[i] = from[i];
+ }
- upd_evacuee((StgClosure *)from,(StgClosure *)to);
+// if (to+size+2 < bd->start + BLOCK_SIZE_W) {
+// __builtin_prefetch(to + size + 2, 1);
+// }
#ifdef PROFILING
- // We store the size of the just evacuated object in the LDV word so that
- // the profiler can guess the position of the next object later.
- SET_EVACUAEE_FOR_LDV(from, size_org);
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ SET_EVACUAEE_FOR_LDV(from, size);
#endif
- return (StgClosure *)to;
}
+#endif
/* Special version of copy() for when we only want to copy the info
* pointer of an object, but reserve some padding after it. This is
* used to optimise evacuation of BLACKHOLEs.
*/
-
-
-static StgClosure *
-copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
+static rtsBool
+copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
- P_ dest, to, from;
-#ifdef PROFILING
- // @LDV profiling
- nat size_to_copy_org = size_to_copy;
+ StgPtr to, from;
+ nat i;
+ StgWord info;
+
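+    // Lock the closure first: swap in a WHITEHOLE and spin while
+    // another thread holds it.  If the info word turns out to be a
+    // forwarding pointer, the object has already been copied, so we
+    // restore the info word and let evacuate() follow the forward.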
+#if defined(PARALLEL_GC)
+spin:
+ info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
+ if (info == (W_)&stg_WHITEHOLE_info) {
+#ifdef PROF_SPIN
+ whitehole_spin++;
+#endif
+ goto spin;
+ }
+ if (IS_FORWARDING_PTR(info)) {
+ src->header.info = (const StgInfoTable *)info;
+ evacuate(p); // does the failed_to_evac stuff
+ return rtsFalse;
+ }
+#else
+ info = (W_)src->header.info;
#endif
- TICK_GC_WORDS_COPIED(size_to_copy);
- if (stp->gen_no < evac_gen) {
- if (eager_promotion) {
- stp = &generations[evac_gen].steps[0];
- } else {
- failed_to_evac = rtsTrue;
- }
- }
+ to = alloc_for_copy(size_to_reserve, stp);
+ *p = (StgClosure *)to;
- if (stp->hp + size_to_reserve >= stp->hpLim) {
- gc_alloc_block(stp);
- }
+ TICK_GC_WORDS_COPIED(size_to_copy);
- for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
- *to++ = *from++;
- }
-
- dest = stp->hp;
- stp->hp += size_to_reserve;
- upd_evacuee(src,(StgClosure *)dest);
+ from = (StgPtr)src;
+ to[0] = info;
+ for (i = 1; i < size_to_copy; i++) { // unroll for small i
+ to[i] = from[i];
+ }
+
+#if defined(PARALLEL_GC)
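+    // Ensure the payload written above is visible to other threads
+    // before we publish the forwarding pointer below.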
+ write_barrier();
+#endif
+ src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
+
#ifdef PROFILING
- // We store the size of the just evacuated object in the LDV word so that
- // the profiler can guess the position of the next object later.
- // size_to_copy_org is wrong because the closure already occupies size_to_reserve
- // words.
- SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
- // fill the slop
- if (size_to_reserve - size_to_copy_org > 0)
- LDV_FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
+ // fill the slop
+ if (size_to_reserve - size_to_copy > 0)
+ LDV_FILL_SLOP(to + size_to_copy, (int)(size_to_reserve - size_to_copy));
#endif
- return (StgClosure *)dest;
+
+ return rtsTrue;
}
/* Copy wrappers that don't tag the closure after copying */
-STATIC_INLINE StgClosure *
-copy(StgClosure *src, nat size, step *stp)
+STATIC_INLINE GNUC_ATTR_HOT void
+copy(StgClosure **p, const StgInfoTable *info,
+ StgClosure *src, nat size, step *stp)
{
- return copy_tag(src,size,stp,0);
-}
-
-STATIC_INLINE StgClosure *
-copy_noscav(StgClosure *src, nat size, step *stp)
-{
- return copy_noscav_tag(src,size,stp,0);
+ copy_tag(p,info,src,size,stp,0);
}
/* -----------------------------------------------------------------------------
   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
-------------------------------------------------------------------------- */
-
STATIC_INLINE void
evacuate_large(StgPtr p)
{
bdescr *bd = Bdescr(p);
- step *stp;
-
- // object must be at the beginning of the block (or be a ByteArray)
- ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
- (((W_)p & BLOCK_MASK) == 0));
+ step *stp, *new_stp;
+ step_workspace *ws;
+
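+    // Take the step's spin lock: several GC threads may race to move
+    // this block between the step's large-object lists.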
+ stp = bd->step;
+ ACQUIRE_SPIN_LOCK(&stp->sync_large_objects);
// already evacuated?
if (bd->flags & BF_EVACUATED) {
- /* Don't forget to set the failed_to_evac flag if we didn't get
+ /* Don't forget to set the gct->failed_to_evac flag if we didn't get
* the desired destination (see comments in evacuate()).
*/
- if (bd->gen_no < evac_gen) {
- failed_to_evac = rtsTrue;
- TICK_GC_FAILED_PROMOTION();
+ if (stp < gct->evac_step) {
+ gct->failed_to_evac = rtsTrue;
+ TICK_GC_FAILED_PROMOTION();
}
+ RELEASE_SPIN_LOCK(&stp->sync_large_objects);
return;
}
- stp = bd->step;
// remove from large_object list
if (bd->u.back) {
bd->u.back->link = bd->link;
/* link it on to the evacuated large object list of the destination step
*/
- stp = bd->step->to;
- if (stp->gen_no < evac_gen) {
- if (eager_promotion) {
- stp = &generations[evac_gen].steps[0];
+ new_stp = stp->to;
+ if (new_stp < gct->evac_step) {
+ if (gct->eager_promotion) {
+ new_stp = gct->evac_step;
} else {
- failed_to_evac = rtsTrue;
+ gct->failed_to_evac = rtsTrue;
}
}
- bd->step = stp;
- bd->gen_no = stp->gen_no;
- bd->link = stp->new_large_objects;
- stp->new_large_objects = bd;
+ ws = &gct->steps[new_stp->abs_no];
+
bd->flags |= BF_EVACUATED;
+ bd->step = new_stp;
+ bd->gen_no = new_stp->gen_no;
+
+ // If this is a block of pinned objects, we don't have to scan
+ // these objects, because they aren't allowed to contain any
+ // pointers. For these blocks, we skip the scavenge stage and put
+ // them straight on the scavenged_large_objects list.
+ if (bd->flags & BF_PINNED) {
+ ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS);
+ dbl_link_onto(bd, &ws->step->scavenged_large_objects);
+ ws->step->n_scavenged_large_blocks += bd->blocks;
+ } else {
+ bd->link = ws->todo_large_objects;
+ ws->todo_large_objects = bd;
+ }
+
+ RELEASE_SPIN_LOCK(&stp->sync_large_objects);
}
-/* -----------------------------------------------------------------------------
+/* ----------------------------------------------------------------------------
Evacuate
This is called (eventually) for every live object in the system.
The caller to evacuate specifies a desired generation in the
- evac_gen global variable. The following conditions apply to
+ gct->evac_step thread-local variable. The following conditions apply to
evacuating an object which resides in generation M when we're
collecting up to generation N
- if M >= evac_gen
+ if M >= gct->evac_step
if M > N do nothing
else evac to step->to
- if M < evac_gen evac to evac_gen, step 0
+ if M < gct->evac_step evac to gct->evac_step, step 0
if the object is already evacuated, then we check which generation
it now resides in.
- if M >= evac_gen do nothing
- if M < evac_gen set failed_to_evac flag to indicate that we
- didn't manage to evacuate this object into evac_gen.
+ if M >= gct->evac_step do nothing
+ if M < gct->evac_step set gct->failed_to_evac flag to indicate that we
+ didn't manage to evacuate this object into gct->evac_step.
OPTIMISATION NOTES:
   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
- -------------------------------------------------------------------------- */
+ ------------------------------------------------------------------------- */
-REGPARM1 StgClosure *
-evacuate(StgClosure *q)
+REGPARM1 GNUC_ATTR_HOT void
+evacuate(StgClosure **p)
{
bdescr *bd = NULL;
step *stp;
+ StgClosure *q;
const StgInfoTable *info;
StgWord tag;
+ q = *p;
+
loop:
/* The tag and the pointer are split, to be merged after evacing */
tag = GET_CLOSURE_TAG(q);
ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
- if (!HEAP_ALLOCED(q)) {
+ if (!HEAP_ALLOCED_GC(q)) {
- if (!major_gc) return TAG_CLOSURE(tag,q);
+ if (!major_gc) return;
info = get_itbl(q);
switch (info->type) {
case THUNK_STATIC:
- if (info->srt_bitmap != 0 &&
- *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
- *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
- static_objects = (StgClosure *)q;
+ if (info->srt_bitmap != 0) {
+ if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
+#ifndef THREADED_RTS
+ *THUNK_STATIC_LINK((StgClosure *)q) = gct->static_objects;
+ gct->static_objects = (StgClosure *)q;
+#else
+ StgPtr link;
+ link = (StgPtr)cas((StgPtr)THUNK_STATIC_LINK((StgClosure *)q),
+ (StgWord)NULL,
+ (StgWord)gct->static_objects);
+ if (link == NULL) {
+ gct->static_objects = (StgClosure *)q;
+ }
+#endif
+ }
}
- return q;
-
+ return;
+
case FUN_STATIC:
- if (info->srt_bitmap != 0 &&
+ if (info->srt_bitmap != 0 &&
*FUN_STATIC_LINK((StgClosure *)q) == NULL) {
- *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
- static_objects = (StgClosure *)q;
+#ifndef THREADED_RTS
+ *FUN_STATIC_LINK((StgClosure *)q) = gct->static_objects;
+ gct->static_objects = (StgClosure *)q;
+#else
+ StgPtr link;
+ link = (StgPtr)cas((StgPtr)FUN_STATIC_LINK((StgClosure *)q),
+ (StgWord)NULL,
+ (StgWord)gct->static_objects);
+ if (link == NULL) {
+ gct->static_objects = (StgClosure *)q;
+ }
+#endif
}
- return q;
+ return;
case IND_STATIC:
/* If q->saved_info != NULL, then it's a revertible CAF - it'll be
* on the CAF list, so don't do anything with it here (we'll
* scavenge it later).
*/
- if (((StgIndStatic *)q)->saved_info == NULL
- && *IND_STATIC_LINK((StgClosure *)q) == NULL) {
- *IND_STATIC_LINK((StgClosure *)q) = static_objects;
- static_objects = (StgClosure *)q;
+ if (((StgIndStatic *)q)->saved_info == NULL) {
+ if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
+#ifndef THREADED_RTS
+ *IND_STATIC_LINK((StgClosure *)q) = gct->static_objects;
+ gct->static_objects = (StgClosure *)q;
+#else
+ StgPtr link;
+ link = (StgPtr)cas((StgPtr)IND_STATIC_LINK((StgClosure *)q),
+ (StgWord)NULL,
+ (StgWord)gct->static_objects);
+ if (link == NULL) {
+ gct->static_objects = (StgClosure *)q;
+ }
+#endif
+ }
}
- return q;
+ return;
case CONSTR_STATIC:
if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
- *STATIC_LINK(info,(StgClosure *)q) = static_objects;
- static_objects = (StgClosure *)q;
- /* I am assuming that static_objects pointers are not
- * written to other objects, and thus, no need to retag. */
- }
- return TAG_CLOSURE(tag,q);
+#ifndef THREADED_RTS
+ *STATIC_LINK(info,(StgClosure *)q) = gct->static_objects;
+ gct->static_objects = (StgClosure *)q;
+#else
+ StgPtr link;
+ link = (StgPtr)cas((StgPtr)STATIC_LINK(info,(StgClosure *)q),
+ (StgWord)NULL,
+ (StgWord)gct->static_objects);
+ if (link == NULL) {
+ gct->static_objects = (StgClosure *)q;
+ }
+#endif
+ }
+ /* I am assuming that static_objects pointers are not
+ * written to other objects, and thus, no need to retag. */
+ return;
case CONSTR_NOCAF_STATIC:
/* no need to put these on the static linked list, they don't need
* to be scavenged.
*/
- return TAG_CLOSURE(tag,q);
+ return;
default:
barf("evacuate(static): strange closure type %d", (int)(info->type));
bd = Bdescr((P_)q);
- if (bd->gen_no > N) {
- /* Can't evacuate this object, because it's in a generation
- * older than the ones we're collecting. Let's hope that it's
- * in evac_gen or older, or we will have to arrange to track
- * this pointer using the mutable list.
- */
- if (bd->gen_no < evac_gen) {
- // nope
- failed_to_evac = rtsTrue;
- TICK_GC_FAILED_PROMOTION();
- }
- return TAG_CLOSURE(tag,q);
- }
-
- if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {
+ if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {
- /* pointer into to-space: just return it. This normally
- * shouldn't happen, but alllowing it makes certain things
- * slightly easier (eg. the mutable list can contain the same
- * object twice, for example).
- */
+ // pointer into to-space: just return it. It might be a pointer
+ // into a generation that we aren't collecting (> N), or it
+ // might just be a pointer into to-space. The latter doesn't
+ // happen often, but allowing it makes certain things a bit
+ // easier; e.g. scavenging an object is idempotent, so it's OK to
+ // have an object on the mutable list multiple times.
if (bd->flags & BF_EVACUATED) {
- if (bd->gen_no < evac_gen) {
- failed_to_evac = rtsTrue;
+ // We aren't copying this object, so we have to check
+ // whether it is already in the target generation. (this is
+ // the write barrier).
+ if (bd->step < gct->evac_step) {
+ gct->failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
- return TAG_CLOSURE(tag,q);
+ return;
}
/* evacuate large objects by re-linking them onto a different list.
info = get_itbl(q);
if (info->type == TSO &&
((StgTSO *)q)->what_next == ThreadRelocated) {
- q = (StgClosure *)((StgTSO *)q)->link;
+ q = (StgClosure *)((StgTSO *)q)->_link;
+ *p = q;
goto loop;
}
evacuate_large((P_)q);
- return TAG_CLOSURE(tag,q);
+ return;
}
/* If the object is in a step that we're compacting, then we
* need to use an alternative evacuate procedure.
*/
- if (bd->flags & BF_COMPACTED) {
- if (!is_marked((P_)q,bd)) {
- mark((P_)q,bd);
- if (mark_stack_full()) {
- mark_stack_overflowed = rtsTrue;
- reset_mark_stack();
- }
- push_mark_stack((P_)q);
- }
- return TAG_CLOSURE(tag,q);
+ if (!is_marked((P_)q,bd)) {
+ mark((P_)q,bd);
+ if (mark_stack_full()) {
+ debugTrace(DEBUG_gc,"mark stack overflowed");
+ mark_stack_overflowed = rtsTrue;
+ reset_mark_stack();
+ }
+ push_mark_stack((P_)q);
}
+ return;
}
stp = bd->step->to;
- info = get_itbl(q);
-
- switch (info->type) {
+ info = q->header.info;
+ if (IS_FORWARDING_PTR(info))
+ {
+ /* Already evacuated, just return the forwarding address.
+ * HOWEVER: if the requested destination generation (gct->evac_step) is
+ * older than the actual generation (because the object was
+ * already evacuated to a younger generation) then we have to
+ * set the gct->failed_to_evac flag to indicate that we couldn't
+ * manage to promote the object to the desired generation.
+ */
+ /*
+ * Optimisation: the check is fairly expensive, but we can often
+ * shortcut it if either the required generation is 0, or the
+     * current object (the one already forwarded) is in a high enough
+     * generation.  We know that a forwarding pointer always points to an
+     * object in the same or an older generation.  stp is the lowest step that the
+ * current object would be evacuated to, so we only do the full
+ * check if stp is too low.
+ */
+ StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
+ *p = TAG_CLOSURE(tag,e);
+ if (stp < gct->evac_step) { // optimisation
+ if (Bdescr((P_)e)->step < gct->evac_step) {
+ gct->failed_to_evac = rtsTrue;
+ TICK_GC_FAILED_PROMOTION();
+ }
+ }
+ return;
+ }
+
+ switch (INFO_PTR_TO_STRUCT(info)->type) {
+
+ case WHITEHOLE:
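+	// Locked by another thread (mid-copy, or a selector thunk under
+	// evaluation); spin until the info pointer is restored or
+	// becomes a forwarding pointer.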
+ goto loop;
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
- case MVAR:
- return copy(q,sizeW_fromITBL(info),stp);
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
+ copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp);
+ return;
case CONSTR_0_1:
{
StgWord w = (StgWord)q->payload[0];
- if (q->header.info == Czh_con_info &&
+ if (info == Czh_con_info &&
// unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
(StgChar)w <= MAX_CHARLIKE) {
- return TAG_CLOSURE(tag,
- (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
- );
+ *p = TAG_CLOSURE(tag,
+ (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
+ );
}
- if (q->header.info == Izh_con_info &&
+ else if (info == Izh_con_info &&
(StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
- return TAG_CLOSURE(tag,
+ *p = TAG_CLOSURE(tag,
(StgClosure *)INTLIKE_CLOSURE((StgInt)w)
);
}
- // else
- return copy_noscav_tag(q,sizeofW(StgHeader)+1,stp,tag);
+ else {
+ copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag);
+ }
+ return;
}
case FUN_0_1:
case FUN_1_0:
case CONSTR_1_0:
- return copy_tag(q,sizeofW(StgHeader)+1,stp,tag);
+ copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag);
+ return;
case THUNK_1_0:
case THUNK_0_1:
- return copy(q,sizeofW(StgThunk)+1,stp);
+ copy(p,info,q,sizeofW(StgThunk)+1,stp);
+ return;
case THUNK_1_1:
case THUNK_2_0:
stp = bd->step;
}
#endif
- return copy(q,sizeofW(StgThunk)+2,stp);
+ copy(p,info,q,sizeofW(StgThunk)+2,stp);
+ return;
case FUN_1_1:
case FUN_2_0:
case FUN_0_2:
case CONSTR_1_1:
case CONSTR_2_0:
- return copy_tag(q,sizeofW(StgHeader)+2,stp,tag);
+ copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag);
+ return;
case CONSTR_0_2:
- return copy_noscav_tag(q,sizeofW(StgHeader)+2,stp,tag);
+ copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag);
+ return;
case THUNK:
- return copy(q,thunk_sizeW_fromITBL(info),stp);
+ copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp);
+ return;
case FUN:
case IND_PERM:
case IND_OLDGEN_PERM:
+ case CONSTR:
+ copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag);
+ return;
+
case WEAK:
case STABLE_NAME:
- case CONSTR:
- return copy_tag(q,sizeW_fromITBL(info),stp,tag);
+ copy_tag(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag);
+ return;
case BCO:
- return copy(q,bco_sizeW((StgBCO *)q),stp);
+ copy(p,info,q,bco_sizeW((StgBCO *)q),stp);
+ return;
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
- return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
+ copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
+ return;
case THUNK_SELECTOR:
- return eval_thunk_selector((StgSelector *)q, rtsTrue);
+ eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
+ return;
case IND:
case IND_OLDGEN:
// follow chains of indirections, don't evacuate them
q = ((StgInd*)q)->indirectee;
+ *p = q;
goto loop;
case RET_BCO:
barf("evacuate: stack frame at %p\n", q);
case PAP:
- return copy(q,pap_sizeW((StgPAP*)q),stp);
+ copy(p,info,q,pap_sizeW((StgPAP*)q),stp);
+ return;
case AP:
- return copy(q,ap_sizeW((StgAP*)q),stp);
+ copy(p,info,q,ap_sizeW((StgAP*)q),stp);
+ return;
case AP_STACK:
- return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);
-
- case EVACUATED:
- /* Already evacuated, just return the forwarding address.
- * HOWEVER: if the requested destination generation (evac_gen) is
- * older than the actual generation (because the object was
- * already evacuated to a younger generation) then we have to
- * set the failed_to_evac flag to indicate that we couldn't
- * manage to promote the object to the desired generation.
- */
- /*
- * Optimisation: the check is fairly expensive, but we can often
- * shortcut it if either the required generation is 0, or the
- * current object (the EVACUATED) is in a high enough generation.
- * We know that an EVACUATED always points to an object in the
- * same or an older generation. stp is the lowest step that the
- * current object would be evacuated to, so we only do the full
- * check if stp is too low.
- */
- if (evac_gen > 0 && stp->gen_no < evac_gen) { // optimisation
- StgClosure *p = ((StgEvacuated*)q)->evacuee;
- if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
- failed_to_evac = rtsTrue;
- TICK_GC_FAILED_PROMOTION();
- }
- }
- return ((StgEvacuated*)q)->evacuee;
+ copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
+ return;
case ARR_WORDS:
// just copy the block
- return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);
+ copy(p,info,q,arr_words_sizeW((StgArrWords *)q),stp);
+ return;
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
// just copy the block
- return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
+ copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
+ return;
case TSO:
{
/* Deal with redirected TSOs (a TSO that's had its stack enlarged).
*/
if (tso->what_next == ThreadRelocated) {
- q = (StgClosure *)tso->link;
+ q = (StgClosure *)tso->_link;
+ *p = q;
goto loop;
}
*/
{
StgTSO *new_tso;
- StgPtr p, q;
-
- new_tso = (StgTSO *)copyPart((StgClosure *)tso,
- tso_sizeW(tso),
- sizeofW(StgTSO), stp);
- move_TSO(tso, new_tso);
- for (p = tso->sp, q = new_tso->sp;
- p < tso->stack+tso->stack_size;) {
- *q++ = *p++;
- }
-
- return (StgClosure *)new_tso;
+ StgPtr r, s;
+ rtsBool mine;
+
+ mine = copyPart(p,(StgClosure *)tso, tso_sizeW(tso),
+ sizeofW(StgTSO), stp);
+ if (mine) {
+ new_tso = (StgTSO *)*p;
+ move_TSO(tso, new_tso);
+ for (r = tso->sp, s = new_tso->sp;
+ r < tso->stack+tso->stack_size;) {
+ *s++ = *r++;
+ }
+ }
+ return;
}
}
case TREC_HEADER:
- return copy(q,sizeofW(StgTRecHeader),stp);
+ copy(p,info,q,sizeofW(StgTRecHeader),stp);
+ return;
case TVAR_WATCH_QUEUE:
- return copy(q,sizeofW(StgTVarWatchQueue),stp);
+ copy(p,info,q,sizeofW(StgTVarWatchQueue),stp);
+ return;
case TVAR:
- return copy(q,sizeofW(StgTVar),stp);
+ copy(p,info,q,sizeofW(StgTVar),stp);
+ return;
case TREC_CHUNK:
- return copy(q,sizeofW(StgTRecChunk),stp);
+ copy(p,info,q,sizeofW(StgTRecChunk),stp);
+ return;
case ATOMIC_INVARIANT:
- return copy(q,sizeofW(StgAtomicInvariant),stp);
+ copy(p,info,q,sizeofW(StgAtomicInvariant),stp);
+ return;
case INVARIANT_CHECK_QUEUE:
- return copy(q,sizeofW(StgInvariantCheckQueue),stp);
+ copy(p,info,q,sizeofW(StgInvariantCheckQueue),stp);
+ return;
default:
- barf("evacuate: strange closure type %d", (int)(info->type));
+ barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
}
barf("evacuate");
}
+/* -----------------------------------------------------------------------------
+ Evaluate a THUNK_SELECTOR if possible.
+
+ p points to a THUNK_SELECTOR that we want to evaluate. The
+ result of "evaluating" it will be evacuated and a pointer to the
+ to-space closure will be returned.
+
+ If the THUNK_SELECTOR could not be evaluated (its selectee is still
+ a THUNK, for example), then the THUNK_SELECTOR itself will be
+ evacuated.
+ -------------------------------------------------------------------------- */
static void
unchain_thunk_selectors(StgSelector *p, StgClosure *val)
{
prev = NULL;
while (p)
{
+#ifdef THREADED_RTS
+ ASSERT(p->header.info == &stg_WHITEHOLE_info);
+#else
ASSERT(p->header.info == &stg_BLACKHOLE_info);
+#endif
+ // val must be in to-space. Not always: when we recursively
+ // invoke eval_thunk_selector(), the recursive calls will not
+ // evacuate the value (because we want to select on the value,
+ // not evacuate it), so in this case val is in from-space.
+ // ASSERT(!HEAP_ALLOCED_GC(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));
+
prev = (StgSelector*)((StgClosure *)p)->payload[0];
// Update the THUNK_SELECTOR with an indirection to the
- // EVACUATED closure now at p. Why do this rather than
- // upd_evacuee(q,p)? Because we have an invariant that an
- // EVACUATED closure always points to an object in the
- // same or an older generation (required by the short-cut
- // test in the EVACUATED case, below).
- SET_INFO(p, &stg_IND_info);
- ((StgInd *)p)->indirectee = val;
+ // value. The value is still in from-space at this stage.
+ //
+	// (Old note: why not do upd_evacuee(q,p)?  Because we had an
+	// invariant that an EVACUATED closure always points to an
+	// object in the same or an older generation, required by
+	// the short-cut test in the EVACUATED case, below.)
+ if ((StgClosure *)p == val) {
+ // must be a loop; just leave a BLACKHOLE in place. This
+ // can happen when we have a chain of selectors that
+ // eventually loops back on itself. We can't leave an
+ // indirection pointing to itself, and we want the program
+ // to deadlock if it ever enters this closure, so
+ // BLACKHOLE is correct.
+ SET_INFO(p, &stg_BLACKHOLE_info);
+ } else {
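+            // Fill in the indirectee before publishing the IND info
+            // pointer, so that no other thread can see an IND with an
+            // uninitialised indirectee.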
+ ((StgInd *)p)->indirectee = val;
+ write_barrier();
+ SET_INFO(p, &stg_IND_info);
+ }
// For the purposes of LDV profiling, we have created an
// indirection.
}
}
-/* -----------------------------------------------------------------------------
- Evaluate a THUNK_SELECTOR if possible.
-
- p points to a THUNK_SELECTOR that we want to evaluate. The
- result of "evaluating" it will be evacuated and a pointer to the
- to-space closure will be returned.
-
- If the THUNK_SELECTOR could not be evaluated (its selectee is still
- a THUNK, for example), then the THUNK_SELECTOR itself will be
- evacuated.
- -------------------------------------------------------------------------- */
-
-static StgClosure *
-eval_thunk_selector (StgSelector * p, rtsBool evac)
+static void
+eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
+ // NB. for legacy reasons, p & q are swapped around :(
{
nat field;
StgInfoTable *info;
- const StgInfoTable *info_ptr;
+ StgWord info_ptr;
StgClosure *selectee;
StgSelector *prev_thunk_selector;
bdescr *bd;
selector_chain:
- // The selectee might be a constructor closure,
- // so we untag the pointer.
- selectee = UNTAG_CLOSURE(p->selectee);
-
- // Save the real info pointer (NOTE: not the same as get_itbl()).
- info_ptr = p->header.info;
- field = get_itbl(p)->layout.selector_offset;
-
- // If the THUNK_SELECTOR is in to-space or in a generation that we
- // are not collecting, then bail out early. We won't be able to
- // save any space in any case, and updating with an indirection is
- // trickier in an old gen.
bd = Bdescr((StgPtr)p);
- if (HEAP_ALLOCED(p) &&
- ((bd->gen_no > N)
- || (bd->flags & BF_EVACUATED)
- || ((bd->flags & BF_COMPACTED) &&
- is_marked((P_)p,bd)))) {
- unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
- return (StgClosure *)p;
+ if (HEAP_ALLOCED_GC(p)) {
+ // If the THUNK_SELECTOR is in to-space or in a generation that we
+ // are not collecting, then bale out early. We won't be able to
+ // save any space in any case, and updating with an indirection is
+ // trickier in a non-collected gen: we would have to update the
+ // mutable list.
+ if (bd->flags & BF_EVACUATED) {
+ unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
+ *q = (StgClosure *)p;
+ // shortcut, behave as for: if (evac) evacuate(q);
+ if (evac && bd->step < gct->evac_step) {
+ gct->failed_to_evac = rtsTrue;
+ TICK_GC_FAILED_PROMOTION();
+ }
+ return;
+ }
+	// We don't update THUNK_SELECTORs in the compacted
+	// generation, because compaction does not remove the INDs
+	// that result, and those INDs cause confusion later
+	// (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
+	// bit is very tricky to get right.  If you make changes
+	// around here, test by compiling stage 3 with +RTS -c -RTS.
+ if (bd->flags & BF_MARKED) {
+ // must call evacuate() to mark this closure if evac==rtsTrue
+ *q = (StgClosure *)p;
+ if (evac) evacuate(q);
+ unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
+ return;
+ }
}
+
// BLACKHOLE the selector thunk, since it is now under evaluation.
// This is important to stop us going into an infinite loop if
// this selector thunk eventually refers to itself.
+#if defined(THREADED_RTS)
+ // In threaded mode, we'll use WHITEHOLE to lock the selector
+ // thunk while we evaluate it.
+ {
+ do {
+ info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
+ } while (info_ptr == (W_)&stg_WHITEHOLE_info);
+
+ // make sure someone else didn't get here first...
+        if (IS_FORWARDING_PTR(info_ptr) ||
+ INFO_PTR_TO_STRUCT(info_ptr)->type != THUNK_SELECTOR) {
+ // v. tricky now. The THUNK_SELECTOR has been evacuated
+ // by another thread, and is now either a forwarding ptr or IND.
+ // We need to extract ourselves from the current situation
+ // as cleanly as possible.
+ // - unlock the closure
+ // - update *q, we may have done *some* evaluation
+ // - if evac, we need to call evacuate(), because we
+ // need the write-barrier stuff.
+ // - undo the chain we've built to point to p.
+ SET_INFO(p, (const StgInfoTable *)info_ptr);
+ *q = (StgClosure *)p;
+ if (evac) evacuate(q);
+ unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
+ return;
+ }
+ }
+#else
+ // Save the real info pointer (NOTE: not the same as get_itbl()).
+ info_ptr = (StgWord)p->header.info;
SET_INFO(p,&stg_BLACKHOLE_info);
+#endif
+
+ field = INFO_PTR_TO_STRUCT(info_ptr)->layout.selector_offset;
+
+ // The selectee might be a constructor closure,
+ // so we untag the pointer.
+ selectee = UNTAG_CLOSURE(p->selectee);
selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: it can still be
    // in from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.
- info = get_itbl(selectee);
+ info = (StgInfoTable*)selectee->header.info;
+
+ if (IS_FORWARDING_PTR(info)) {
+ // We don't follow pointers into to-space; the constructor
+ // has already been evacuated, so we won't save any space
+ // leaks by evaluating this selector thunk anyhow.
+ goto bale_out;
+ }
+
+ info = INFO_PTR_TO_STRUCT(info);
switch (info->type) {
+ case WHITEHOLE:
+ goto bale_out; // about to be evacuated by another thread (or a loop).
+
case CONSTR:
case CONSTR_1_0:
case CONSTR_0_1:
#ifdef PROFILING
// For the purposes of LDV profiling, we have destroyed
// the original selector thunk, p.
- SET_INFO(p, info_ptr);
+ SET_INFO(p, (StgInfoTable *)info_ptr);
LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC((StgClosure *)p);
+#if defined(THREADED_RTS)
+ SET_INFO(p, &stg_WHITEHOLE_info);
+#else
SET_INFO(p, &stg_BLACKHOLE_info);
#endif
+#endif
// the closure in val is now the "value" of the
	// THUNK_SELECTOR in p.  However, val may itself be a
	// THUNK_SELECTOR, in which case we want to continue
	// evaluating until we find the real value, and then
// update the whole chain to point to the value.
val_loop:
- info = get_itbl(UNTAG_CLOSURE(val));
- switch (info->type) {
- case IND:
- case IND_PERM:
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
- case IND_STATIC:
- val = ((StgInd *)val)->indirectee;
- goto val_loop;
- case THUNK_SELECTOR:
- ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
- prev_thunk_selector = p;
- p = (StgSelector*)val;
- goto selector_chain;
- default:
- ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
- prev_thunk_selector = p;
-
- if (evac) val = evacuate(val);
- // evacuate() cannot recurse through
- // eval_thunk_selector(), because we know val is not
- // a THUNK_SELECTOR.
- unchain_thunk_selectors(prev_thunk_selector, val);
- return val;
+ info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
+ if (!IS_FORWARDING_PTR(info_ptr))
+ {
+ info = INFO_PTR_TO_STRUCT(info_ptr);
+ switch (info->type) {
+ case IND:
+ case IND_PERM:
+ case IND_OLDGEN:
+ case IND_OLDGEN_PERM:
+ case IND_STATIC:
+ val = ((StgInd *)val)->indirectee;
+ goto val_loop;
+ case THUNK_SELECTOR:
+ ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
+ prev_thunk_selector = p;
+ p = (StgSelector*)val;
+ goto selector_chain;
+ default:
+ break;
+ }
}
+ ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
+ prev_thunk_selector = p;
+
+ *q = val;
+
+ // update the other selectors in the chain *before*
+ // evacuating the value. This is necessary in the case
+ // where the value turns out to be one of the selectors
+ // in the chain (i.e. we have a loop), and evacuating it
+ // would corrupt the chain.
+ unchain_thunk_selectors(prev_thunk_selector, val);
+
+ // evacuate() cannot recurse through
+ // eval_thunk_selector(), because we know val is not
+ // a THUNK_SELECTOR.
+ if (evac) evacuate(q);
+ return;
}
case IND:
selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
goto selector_loop;
- case EVACUATED:
- // We don't follow pointers into to-space; the constructor
- // has already been evacuated, so we won't save any space
- // leaks by evaluating this selector thunk anyhow.
- goto bale_out;
-
case THUNK_SELECTOR:
{
StgClosure *val;
- // we don't update THUNK_SELECTORS in the compacted
- // generation, because compaction does not remove the INDs
- // that result, this causes confusion later
- // (scavenge_mark_stack doesn't deal with IND).
- if (Bdescr((P_)selectee)->flags && BF_COMPACTED) {
- goto bale_out;
- }
-
// recursively evaluate this selector. We don't want to
// recurse indefinitely, so we impose a depth bound.
- if (thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
+ if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
goto bale_out;
}
- thunk_selector_depth++;
+ gct->thunk_selector_depth++;
// rtsFalse says "don't evacuate the result". It will,
// however, update any THUNK_SELECTORs that are evaluated
// along the way.
- val = eval_thunk_selector((StgSelector *)selectee, rtsFalse);
- thunk_selector_depth--;
+ eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
+ gct->thunk_selector_depth--;
// did we actually manage to evaluate it?
if (val == selectee) goto bale_out;
case THUNK_0_2:
case THUNK_STATIC:
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
// not evaluated yet
goto bale_out;
bale_out:
// We didn't manage to evaluate this thunk; restore the old info
// pointer. But don't forget: we still need to evacuate the thunk itself.
- SET_INFO(p, info_ptr);
+ SET_INFO(p, (const StgInfoTable *)info_ptr);
+ // THREADED_RTS: we just unlocked the thunk, so another thread
+ // might get in and update it. copy() will lock it again and
+ // check whether it was updated in the meantime.
+ *q = (StgClosure *)p;
if (evac) {
- val = copy((StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
- } else {
- val = (StgClosure *)p;
+ copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
}
- unchain_thunk_selectors(prev_thunk_selector, val);
- return val;
-}
-
-/* -----------------------------------------------------------------------------
- move_TSO is called to update the TSO structure after it has been
- moved from one place to another.
- -------------------------------------------------------------------------- */
-
-void
-move_TSO (StgTSO *src, StgTSO *dest)
-{
- ptrdiff_t diff;
-
- // relocate the stack pointer...
- diff = (StgPtr)dest - (StgPtr)src; // In *words*
- dest->sp = (StgPtr)dest->sp + diff;
+ unchain_thunk_selectors(prev_thunk_selector, *q);
+ return;
}
-