*/
#define MAX_THUNK_SELECTOR_DEPTH 16
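+// recursion-depth limit for eval_thunk_selector(): selector chains
+// deeper than this are not evaluated in place (the current depth is
+// tracked in gct->thunk_selector_depth)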
-static StgClosure * eval_thunk_selector (StgSelector * p, rtsBool);
+static void eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool);
-STATIC_INLINE void
-upd_evacuee(StgClosure *p, StgClosure *dest)
+STATIC_INLINE StgPtr
+alloc_for_copy (nat size, step *stp)
{
- // not true: (ToDo: perhaps it should be)
- // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
- SET_INFO(p, &stg_EVACUATED_info);
- ((StgEvacuated *)p)->evacuee = dest;
-}
+ StgPtr to;
+ step_workspace *ws;
+ bdescr *bd;
+ /* Find out where we're going, using the handy "to" pointer in
+ * the step of the source object. If it turns out we need to
+ * evacuate to an older generation, adjust it here (see comment
+ * by evacuate()).
+ */
+ if (stp->gen_no < gct->evac_gen) {
+ if (gct->eager_promotion) {
+ stp = &generations[gct->evac_gen].steps[0];
+ } else {
+ gct->failed_to_evac = rtsTrue;
+ }
+ }
+
+ ws = &gct->steps[stp->gen_no][stp->no];
+
+ /* chain a new block onto the to-space for the destination step if
+ * necessary.
+ */
+ bd = ws->todo_bd;
+ to = bd->free;
+ if (to + size >= bd->start + BLOCK_SIZE_W) {
+ bd = gc_alloc_todo_block(ws);
+ to = bd->free;
+ }
+ bd->free = to + size;
-STATIC_INLINE StgClosure *
-copy_tag(StgClosure *src, nat size, step *stp,StgWord tag)
+ return to;
+}
+
+STATIC_INLINE StgPtr
+alloc_for_copy_noscav (nat size, step *stp)
{
- StgPtr to, from;
- nat i;
- step_workspace *ws;
- bdescr *bd;
+ StgPtr to;
+ step_workspace *ws;
+ bdescr *bd;
- TICK_GC_WORDS_COPIED(size);
- /* Find out where we're going, using the handy "to" pointer in
- * the step of the source object. If it turns out we need to
- * evacuate to an older generation, adjust it here (see comment
- * by evacuate()).
- */
- if (stp->gen_no < gct->evac_gen) {
- if (gct->eager_promotion) {
- stp = &generations[gct->evac_gen].steps[0];
- } else {
- gct->failed_to_evac = rtsTrue;
- }
- }
+ /* Find out where we're going, using the handy "to" pointer in
+ * the step of the source object. If it turns out we need to
+ * evacuate to an older generation, adjust it here (see comment
+ * by evacuate()).
+ */
+ if (stp->gen_no < gct->evac_gen) {
+ if (gct->eager_promotion) {
+ stp = &generations[gct->evac_gen].steps[0];
+ } else {
+ gct->failed_to_evac = rtsTrue;
+ }
+ }
+
+ ws = &gct->steps[stp->gen_no][stp->no];
+
+ /* chain a new block onto the to-space for the destination step if
+ * necessary.
+ */
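+    // pointer-free objects are copied into the "scavd" blocks, which
+    // the scavenger never visits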
+ bd = ws->scavd_list;
+ to = bd->free;
+ if (to + size >= bd->start + BLOCK_SIZE_W) {
+ bd = gc_alloc_scavd_block(ws);
+ to = bd->free;
+ }
+ bd->free = to + size;
- ws = &gct->steps[stp->gen_no][stp->no];
+ return to;
+}
+
+STATIC_INLINE void
+copy_tag(StgClosure **p, StgClosure *src, nat size, step *stp, StgWord tag)
+{
+ StgPtr to, tagged_to, from;
+ nat i;
+ StgWord info;
+
+#ifdef THREADED_RTS
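+    // atomically swap in stg_WHITEHOLE_info to claim exclusive access
+    // to this closure while we copy it; other GC threads that see the
+    // WHITEHOLE will spin (see the WHITEHOLE case in evacuate())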
+ do {
+ info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
+        // a WHITEHOLE means another GC thread got here first: spin
+ } while (info == (W_)&stg_WHITEHOLE_info);
+ if (info == (W_)&stg_EVACUATED_info) {
+ src->header.info = (const StgInfoTable *)info;
+        return evacuate(p); // does the failed_to_evac stuff
+ }
+#else
+ info = (W_)src->header.info;
+ src->header.info = &stg_EVACUATED_info;
+#endif
- /* chain a new block onto the to-space for the destination step if
- * necessary.
- */
- bd = ws->todo_bd;
- to = bd->free;
- if (to + size >= bd->start + BLOCK_SIZE_W) {
- bd = gc_alloc_todo_block(ws);
- to = bd->free;
- }
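+    // allocate to-space, apply the pointer tag up front, and publish
+    // the tagged forwarding address into *p before copying the payload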
+ to = alloc_for_copy(size,stp);
+ tagged_to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
+ *p = (StgClosure *)tagged_to;
+
+ TICK_GC_WORDS_COPIED(size);
- from = (StgPtr)src;
- bd->free = to + size;
- for (i = 0; i < size; i++) { // unroll for small i
- to[i] = from[i];
- }
+ from = (StgPtr)src;
+ to[0] = info;
+ for (i = 1; i < size; i++) { // unroll for small i
+ to[i] = from[i];
+ }
+
+ ((StgEvacuated*)from)->evacuee = (StgClosure *)tagged_to;
- /* retag pointer before updating EVACUATE closure and returning */
- to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
- upd_evacuee((StgClosure *)from,(StgClosure *)to);
+// if (to+size+2 < bd->start + BLOCK_SIZE_W) {
+// __builtin_prefetch(to + size + 2, 1);
+// }
+
+#ifdef THREADED_RTS
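+    // make sure the copied payload and the evacuee field are visible to
+    // other threads before we publish the EVACUATED header (this also
+    // releases the WHITEHOLE taken above)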
+ write_barrier();
+ ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
+#endif
#ifdef PROFILING
- // We store the size of the just evacuated object in the LDV word so that
- // the profiler can guess the position of the next object later.
- SET_EVACUAEE_FOR_LDV(from, size);
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ SET_EVACUAEE_FOR_LDV(from, size);
#endif
- return (StgClosure *)to;
}
+
// Same as copy() above, except the object will be allocated in memory
// that will not be scavenged. Used for objects that have no pointer
// fields.
-STATIC_INLINE StgClosure *
-copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
+STATIC_INLINE void
+copy_noscav_tag(StgClosure **p, StgClosure *src, nat size, step *stp, StgWord tag)
{
- StgPtr to, from;
- nat i;
- step_workspace *ws;
- bdescr *bd;
-
- TICK_GC_WORDS_COPIED(size);
- /* Find out where we're going, using the handy "to" pointer in
- * the step of the source object. If it turns out we need to
- * evacuate to an older generation, adjust it here (see comment
- * by evacuate()).
- */
- if (stp->gen_no < gct->evac_gen) {
- if (gct->eager_promotion) {
- stp = &generations[gct->evac_gen].steps[0];
- } else {
- gct->failed_to_evac = rtsTrue;
- }
- }
-
- ws = &gct->steps[stp->gen_no][stp->no];
-
- /* chain a new block onto the to-space for the destination step if
- * necessary.
- */
- bd = ws->scavd_list;
- to = bd->free;
- if (to + size >= bd->start + BLOCK_SIZE_W) {
- bd = gc_alloc_scavd_block(ws);
- to = bd->free;
- }
-
- from = (StgPtr)src;
- bd->free = to + size;
- for (i = 0; i < size; i++) { // unroll for small i
- to[i] = from[i];
- }
+ StgPtr to, tagged_to, from;
+ nat i;
+ StgWord info;
+
+#ifdef THREADED_RTS
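+    // claim the closure with a WHITEHOLE swap, as in copy_tag()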
+ do {
+ info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
+ } while (info == (W_)&stg_WHITEHOLE_info);
+ if (info == (W_)&stg_EVACUATED_info) {
+ src->header.info = (const StgInfoTable *)info;
+        return evacuate(p); // does the failed_to_evac stuff
+ }
+#else
+ info = (W_)src->header.info;
+ src->header.info = &stg_EVACUATED_info;
+#endif
+
+ to = alloc_for_copy_noscav(size,stp);
+ tagged_to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
+ *p = (StgClosure *)tagged_to;
- /* retag pointer before updating EVACUATE closure and returning */
- to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
+ TICK_GC_WORDS_COPIED(size);
+
+ from = (StgPtr)src;
+ to[0] = info;
+ for (i = 1; i < size; i++) { // unroll for small i
+ to[i] = from[i];
+ }
- upd_evacuee((StgClosure *)from,(StgClosure *)to);
+ ((StgEvacuated*)from)->evacuee = (StgClosure *)tagged_to;
+#ifdef THREADED_RTS
+ write_barrier();
+ ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
+#endif
+
#ifdef PROFILING
- // We store the size of the just evacuated object in the LDV word so that
- // the profiler can guess the position of the next object later.
- SET_EVACUAEE_FOR_LDV(from, size);
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ SET_EVACUAEE_FOR_LDV(from, size);
#endif
- return (StgClosure *)to;
}
* pointer of an object, but reserve some padding after it. This is
* used to optimise evacuation of BLACKHOLEs.
*/
-static StgClosure *
-copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
+static void
+copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
- StgPtr to, from;
- nat i;
- step_workspace *ws;
- bdescr *bd;
-
- TICK_GC_WORDS_COPIED(size_to_copy);
- if (stp->gen_no < gct->evac_gen) {
- if (gct->eager_promotion) {
- stp = &generations[gct->evac_gen].steps[0];
- } else {
- gct->failed_to_evac = rtsTrue;
- }
- }
-
- ws = &gct->steps[stp->gen_no][stp->no];
-
- bd = ws->todo_bd;
- to = bd->free;
- if (to + size_to_reserve >= bd->start + BLOCK_SIZE_W) {
- bd = gc_alloc_todo_block(ws);
- to = bd->free;
- }
+ StgPtr to, from;
+ nat i;
+ StgWord info;
+
+#ifdef THREADED_RTS
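+    // claim the closure with a WHITEHOLE swap, as in copy_tag()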
+ do {
+ info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
+ } while (info == (W_)&stg_WHITEHOLE_info);
+ if (info == (W_)&stg_EVACUATED_info) {
+ src->header.info = (const StgInfoTable *)info;
+        return evacuate(p); // does the failed_to_evac stuff
+ }
+#else
+ info = (W_)src->header.info;
+ src->header.info = &stg_EVACUATED_info;
+#endif
+
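+    // reserve size_to_reserve words in to-space but copy only the first
+    // size_to_copy; the gap is left as slop (filled in when profiling)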
+ to = alloc_for_copy(size_to_reserve, stp);
+ *p = (StgClosure *)to;
- from = (StgPtr)src;
- bd->free = to + size_to_reserve;
- for (i = 0; i < size_to_copy; i++) { // unroll for small i
- to[i] = from[i];
- }
-
- upd_evacuee((StgClosure *)from,(StgClosure *)to);
+ TICK_GC_WORDS_COPIED(size_to_copy);
+ from = (StgPtr)src;
+ to[0] = info;
+ for (i = 1; i < size_to_copy; i++) { // unroll for small i
+ to[i] = from[i];
+ }
+
+ ((StgEvacuated*)from)->evacuee = (StgClosure *)to;
+#ifdef THREADED_RTS
+ write_barrier();
+ ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
+#endif
+
#ifdef PROFILING
- // We store the size of the just evacuated object in the LDV word so that
- // the profiler can guess the position of the next object later.
- SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
- // fill the slop
- if (size_to_reserve - size_to_copy > 0)
- LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
+ // fill the slop
+ if (size_to_reserve - size_to_copy > 0)
+ LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
#endif
- return (StgClosure *)to;
}
/* Copy wrappers that don't tag the closure after copying */
-STATIC_INLINE StgClosure *
-copy(StgClosure *src, nat size, step *stp)
+STATIC_INLINE void
+copy(StgClosure **p, StgClosure *src, nat size, step *stp)
{
- return copy_tag(src,size,stp,0);
+ copy_tag(p,src,size,stp,0);
}
-STATIC_INLINE StgClosure *
-copy_noscav(StgClosure *src, nat size, step *stp)
+STATIC_INLINE void
+copy_noscav(StgClosure **p, StgClosure *src, nat size, step *stp)
{
- return copy_noscav_tag(src,size,stp,0);
+ copy_noscav_tag(p,src,size,stp,0);
}
/* -----------------------------------------------------------------------------
extra reads/writes than we save.
-------------------------------------------------------------------------- */
-REGPARM1 StgClosure *
-evacuate(StgClosure *q)
+REGPARM1 void
+evacuate(StgClosure **p)
{
bdescr *bd = NULL;
step *stp;
+ StgClosure *q;
const StgInfoTable *info;
StgWord tag;
+ q = *p;
+
loop:
/* The tag and the pointer are split, to be merged after evacing */
tag = GET_CLOSURE_TAG(q);
if (!HEAP_ALLOCED(q)) {
- if (!major_gc) return TAG_CLOSURE(tag,q);
+ if (!major_gc) return;
info = get_itbl(q);
switch (info->type) {
case THUNK_STATIC:
- if (info->srt_bitmap != 0 &&
+ if (info->srt_bitmap != 0 &&
*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
- *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
- static_objects = (StgClosure *)q;
+ ACQUIRE_SPIN_LOCK(&static_objects_sync);
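+        // re-test under the lock: another GC thread may have linked
+        // this object onto static_objects in the meantime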
+ if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
+ *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
+ static_objects = (StgClosure *)q;
+ }
+ RELEASE_SPIN_LOCK(&static_objects_sync);
}
- return q;
+ return;
case FUN_STATIC:
- if (info->srt_bitmap != 0 &&
+ if (info->srt_bitmap != 0 &&
*FUN_STATIC_LINK((StgClosure *)q) == NULL) {
- *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
- static_objects = (StgClosure *)q;
+ ACQUIRE_SPIN_LOCK(&static_objects_sync);
+ if (*FUN_STATIC_LINK((StgClosure *)q) == NULL) {
+ *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
+ static_objects = (StgClosure *)q;
+ }
+ RELEASE_SPIN_LOCK(&static_objects_sync);
}
- return q;
+ return;
case IND_STATIC:
/* If q->saved_info != NULL, then it's a revertible CAF - it'll be
* on the CAF list, so don't do anything with it here (we'll
* scavenge it later).
*/
- if (((StgIndStatic *)q)->saved_info == NULL
- && *IND_STATIC_LINK((StgClosure *)q) == NULL) {
- *IND_STATIC_LINK((StgClosure *)q) = static_objects;
- static_objects = (StgClosure *)q;
+ if (((StgIndStatic *)q)->saved_info == NULL) {
+ ACQUIRE_SPIN_LOCK(&static_objects_sync);
+ if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
+ *IND_STATIC_LINK((StgClosure *)q) = static_objects;
+ static_objects = (StgClosure *)q;
+ }
+ RELEASE_SPIN_LOCK(&static_objects_sync);
}
- return q;
+ return;
case CONSTR_STATIC:
if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
- *STATIC_LINK(info,(StgClosure *)q) = static_objects;
- static_objects = (StgClosure *)q;
+ ACQUIRE_SPIN_LOCK(&static_objects_sync);
+ // re-test, after acquiring lock
+ if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
+ *STATIC_LINK(info,(StgClosure *)q) = static_objects;
+ static_objects = (StgClosure *)q;
+ }
+ RELEASE_SPIN_LOCK(&static_objects_sync);
/* I am assuming that static_objects pointers are not
* written to other objects, and thus, no need to retag. */
}
- return TAG_CLOSURE(tag,q);
+ return;
case CONSTR_NOCAF_STATIC:
/* no need to put these on the static linked list, they don't need
* to be scavenged.
*/
- return TAG_CLOSURE(tag,q);
+ return;
default:
barf("evacuate(static): strange closure type %d", (int)(info->type));
gct->failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
- return TAG_CLOSURE(tag,q);
+ return;
}
if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {
gct->failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
- return TAG_CLOSURE(tag,q);
+ return;
}
/* evacuate large objects by re-linking them onto a different list.
if (info->type == TSO &&
((StgTSO *)q)->what_next == ThreadRelocated) {
q = (StgClosure *)((StgTSO *)q)->link;
+ *p = q;
goto loop;
}
evacuate_large((P_)q);
- return TAG_CLOSURE(tag,q);
+ return;
}
/* If the object is in a step that we're compacting, then we
}
push_mark_stack((P_)q);
}
- return TAG_CLOSURE(tag,q);
+ return;
}
}
switch (info->type) {
+ case WHITEHOLE:
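+      // another GC thread is copying this closure: spin until its
+      // header is replaced with EVACUATED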
+ goto loop;
+
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
case MVAR_CLEAN:
case MVAR_DIRTY:
- return copy(q,sizeW_fromITBL(info),stp);
+ copy(p,q,sizeW_fromITBL(info),stp);
+ return;
case CONSTR_0_1:
{
if (q->header.info == Czh_con_info &&
// unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
(StgChar)w <= MAX_CHARLIKE) {
- return TAG_CLOSURE(tag,
- (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
- );
+ *p = TAG_CLOSURE(tag,
+ (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
+ );
}
-    if (q->header.info == Izh_con_info &&
-        (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
-       return TAG_CLOSURE(tag,
+    // NB. "else if": a CHARLIKE hit above must not fall through into
+    // the copy in the final else branch
+    else if (q->header.info == Izh_con_info &&
+        (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
+       *p = TAG_CLOSURE(tag,
(StgClosure *)INTLIKE_CLOSURE((StgInt)w)
);
}
- // else
- return copy_noscav_tag(q,sizeofW(StgHeader)+1,stp,tag);
+ else {
+ copy_noscav_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
+ }
+ return;
}
case FUN_0_1:
case FUN_1_0:
case CONSTR_1_0:
- return copy_tag(q,sizeofW(StgHeader)+1,stp,tag);
+ copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
+ return;
case THUNK_1_0:
case THUNK_0_1:
- return copy(q,sizeofW(StgThunk)+1,stp);
+ copy(p,q,sizeofW(StgThunk)+1,stp);
+ return;
case THUNK_1_1:
case THUNK_2_0:
stp = bd->step;
}
#endif
- return copy(q,sizeofW(StgThunk)+2,stp);
+ copy(p,q,sizeofW(StgThunk)+2,stp);
+ return;
case FUN_1_1:
case FUN_2_0:
case FUN_0_2:
case CONSTR_1_1:
case CONSTR_2_0:
- return copy_tag(q,sizeofW(StgHeader)+2,stp,tag);
+ copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
+ return;
case CONSTR_0_2:
- return copy_noscav_tag(q,sizeofW(StgHeader)+2,stp,tag);
+ copy_noscav_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
+ return;
case THUNK:
- return copy(q,thunk_sizeW_fromITBL(info),stp);
+ copy(p,q,thunk_sizeW_fromITBL(info),stp);
+ return;
case FUN:
case IND_PERM:
case WEAK:
case STABLE_NAME:
case CONSTR:
- return copy_tag(q,sizeW_fromITBL(info),stp,tag);
+ copy_tag(p,q,sizeW_fromITBL(info),stp,tag);
+ return;
case BCO:
- return copy(q,bco_sizeW((StgBCO *)q),stp);
+ copy(p,q,bco_sizeW((StgBCO *)q),stp);
+ return;
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
- return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
+ copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
+ return;
case THUNK_SELECTOR:
- return eval_thunk_selector((StgSelector *)q, rtsTrue);
+ eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
+ return;
case IND:
case IND_OLDGEN:
// follow chains of indirections, don't evacuate them
q = ((StgInd*)q)->indirectee;
+ *p = q;
goto loop;
case RET_BCO:
barf("evacuate: stack frame at %p\n", q);
case PAP:
- return copy(q,pap_sizeW((StgPAP*)q),stp);
+ copy(p,q,pap_sizeW((StgPAP*)q),stp);
+ return;
case AP:
- return copy(q,ap_sizeW((StgAP*)q),stp);
+ copy(p,q,ap_sizeW((StgAP*)q),stp);
+ return;
case AP_STACK:
- return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);
+ copy(p,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
+ return;
case EVACUATED:
/* Already evacuated, just return the forwarding address.
* current object would be evacuated to, so we only do the full
* check if stp is too low.
*/
- if (gct->evac_gen > 0 && stp->gen_no < gct->evac_gen) { // optimisation
- StgClosure *p = ((StgEvacuated*)q)->evacuee;
- if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < gct->evac_gen) {
- gct->failed_to_evac = rtsTrue;
- TICK_GC_FAILED_PROMOTION();
+ {
+ StgClosure *e = ((StgEvacuated*)q)->evacuee;
+ *p = e;
+ if (gct->evac_gen > 0 && stp->gen_no < gct->evac_gen) { // optimisation
+ if (HEAP_ALLOCED(e) && Bdescr((P_)e)->gen_no < gct->evac_gen) {
+ gct->failed_to_evac = rtsTrue;
+ TICK_GC_FAILED_PROMOTION();
+ }
}
- }
- return ((StgEvacuated*)q)->evacuee;
+ return;
+ }
case ARR_WORDS:
// just copy the block
- return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);
+ copy_noscav(p,q,arr_words_sizeW((StgArrWords *)q),stp);
+ return;
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
// just copy the block
- return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
+ copy(p,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
+ return;
case TSO:
{
*/
if (tso->what_next == ThreadRelocated) {
q = (StgClosure *)tso->link;
+ *p = q;
goto loop;
}
*/
{
StgTSO *new_tso;
- StgPtr p, q;
+ StgPtr r, s;
- new_tso = (StgTSO *)copyPart((StgClosure *)tso,
- tso_sizeW(tso),
- sizeofW(StgTSO), stp);
+ copyPart(p,(StgClosure *)tso, tso_sizeW(tso), sizeofW(StgTSO), stp);
+ new_tso = (StgTSO *)*p;
move_TSO(tso, new_tso);
- for (p = tso->sp, q = new_tso->sp;
- p < tso->stack+tso->stack_size;) {
- *q++ = *p++;
+ for (r = tso->sp, s = new_tso->sp;
+ r < tso->stack+tso->stack_size;) {
+ *s++ = *r++;
}
-
- return (StgClosure *)new_tso;
+ return;
}
}
case TREC_HEADER:
- return copy(q,sizeofW(StgTRecHeader),stp);
+ copy(p,q,sizeofW(StgTRecHeader),stp);
+ return;
case TVAR_WATCH_QUEUE:
- return copy(q,sizeofW(StgTVarWatchQueue),stp);
+ copy(p,q,sizeofW(StgTVarWatchQueue),stp);
+ return;
case TVAR:
- return copy(q,sizeofW(StgTVar),stp);
+ copy(p,q,sizeofW(StgTVar),stp);
+ return;
case TREC_CHUNK:
- return copy(q,sizeofW(StgTRecChunk),stp);
+ copy(p,q,sizeofW(StgTRecChunk),stp);
+ return;
case ATOMIC_INVARIANT:
- return copy(q,sizeofW(StgAtomicInvariant),stp);
+ copy(p,q,sizeofW(StgAtomicInvariant),stp);
+ return;
case INVARIANT_CHECK_QUEUE:
- return copy(q,sizeofW(StgInvariantCheckQueue),stp);
+ copy(p,q,sizeofW(StgInvariantCheckQueue),stp);
+ return;
default:
barf("evacuate: strange closure type %d", (int)(info->type));
evacuated.
-------------------------------------------------------------------------- */
-static StgClosure *
-eval_thunk_selector (StgSelector * p, rtsBool evac)
+static void
+eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
+ // NB. for legacy reasons, p & q are swapped around :(
{
nat field;
StgInfoTable *info;
// mutable list.
if ((bd->gen_no > N) || (bd->flags & BF_EVACUATED)) {
unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
- return (StgClosure *)p;
+ *q = (StgClosure *)p;
+ return;
}
// we don't update THUNK_SELECTORS in the compacted
// generation, because compaction does not remove the INDs
// around here, test by compiling stage 3 with +RTS -c -RTS.
if (bd->flags & BF_COMPACTED) {
// must call evacuate() to mark this closure if evac==rtsTrue
- if (evac) p = (StgSelector *)evacuate((StgClosure *)p);
+ *q = (StgClosure *)p;
+ if (evac) evacuate(q);
unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
- return (StgClosure *)p;
+ return;
}
}
((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
prev_thunk_selector = p;
- if (evac) val = evacuate(val);
+ *q = val;
+ if (evac) evacuate(q);
+ val = *q;
// evacuate() cannot recurse through
// eval_thunk_selector(), because we know val is not
// a THUNK_SELECTOR.
unchain_thunk_selectors(prev_thunk_selector, val);
- return val;
+ return;
}
}
// rtsFalse says "don't evacuate the result". It will,
// however, update any THUNK_SELECTORs that are evaluated
// along the way.
- val = eval_thunk_selector((StgSelector *)selectee, rtsFalse);
+ eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
gct->thunk_selector_depth--;
// did we actually manage to evaluate it?
// pointer. But don't forget: we still need to evacuate the thunk itself.
SET_INFO(p, info_ptr);
if (evac) {
- val = copy((StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
+ copy(&val,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
} else {
val = (StgClosure *)p;
}
+ *q = val;
unchain_thunk_selectors(prev_thunk_selector, val);
- return val;
+ return;
}
/* -----------------------------------------------------------------------------