*
* ---------------------------------------------------------------------------*/
+#include "PosixSource.h"
#include "Rts.h"
-#include "Storage.h"
-#include "MBlock.h"
+
#include "Evac.h"
+#include "Storage.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "Compact.h"
+#include "MarkStack.h"
#include "Prelude.h"
+#include "Trace.h"
#include "LdvProfile.h"
#if defined(PROF_SPIN) && defined(THREADED_RTS) && defined(PARALLEL_GC)
StgWord64 whitehole_spin = 0;
#endif

#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
#define evacuate(p) evacuate1(p)
+#define HEAP_ALLOCED_GC(p) HEAP_ALLOCED(p)
#endif
#if !defined(PARALLEL_GC)
* necessary.
*/
to = ws->todo_free;
- if (to + size > ws->todo_lim) {
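+ // Bump todo_free by the request first and check against todo_lim
+ // afterwards; on overflow, todo_block_full() hands back a pointer
+ // into a fresh todo block (and resets todo_free) instead.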
+ ws->todo_free += size;
+ if (ws->todo_free > ws->todo_lim) {
to = todo_block_full(size, ws);
}
- ws->todo_free = to + size;
ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);
return to;
The evacuate() code
-------------------------------------------------------------------------- */
-STATIC_INLINE void
+STATIC_INLINE GNUC_ATTR_HOT void
copy_tag(StgClosure **p, const StgInfoTable *info,
StgClosure *src, nat size, step *stp, StgWord tag)
{
to = alloc_for_copy(size,stp);
- TICK_GC_WORDS_COPIED(size);
-
from = (StgPtr)src;
to[0] = (W_)info;
for (i = 1; i < size; i++) { // unroll for small i
    to[i] = from[i];
}
*p = TAG_CLOSURE(tag,(StgClosure*)to);
src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
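+ // src's header is now a forwarding pointer: later evacuations of
+ // src will see it (IS_FORWARDING_PTR in evacuate()) and simply
+ // return the copy.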
- TICK_GC_WORDS_COPIED(size);
-
from = (StgPtr)src;
to[0] = (W_)info;
for (i = 1; i < size; i++) { // unroll for small i
    to[i] = from[i];
}
* pointer of an object, but reserve some padding after it. This is
* used to optimise evacuation of BLACKHOLEs.
*/
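+//
+// Returns rtsTrue if this thread actually copied the object, or
+// rtsFalse if src had already been forwarded (in which case we just
+// called evacuate() to follow the forwarding pointer).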
-static void
+static rtsBool
copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
StgPtr to, from;
if (IS_FORWARDING_PTR(info)) {
src->header.info = (const StgInfoTable *)info;
evacuate(p); // does the failed_to_evac stuff
- return ;
+ return rtsFalse;
}
#else
info = (W_)src->header.info;
to = alloc_for_copy(size_to_reserve, stp);
*p = (StgClosure *)to;
- TICK_GC_WORDS_COPIED(size_to_copy);
-
from = (StgPtr)src;
to[0] = info;
for (i = 1; i < size_to_copy; i++) { // unroll for small i
    to[i] = from[i];
}
SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
// fill the slop
if (size_to_reserve - size_to_copy > 0)
- LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
+ LDV_FILL_SLOP(to + size_to_copy, (int)(size_to_reserve - size_to_copy));
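+ // (slop starts at to + size_to_copy, i.e. just past the copied
+ // words; the old to + size_to_copy - 1 overlapped the last word
+ // we copied)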
#endif
+
+ return rtsTrue;
}
/* Copy wrappers that don't tag the closure after copying */
-STATIC_INLINE void
+STATIC_INLINE GNUC_ATTR_HOT void
copy(StgClosure **p, const StgInfoTable *info,
StgClosure *src, nat size, step *stp)
{
copy_tag(p,info,src,size,stp,0);
}
+/* -----------------------------------------------------------------------------
+ Evacuate a large object
+
+ This just consists of removing the object from the (doubly-linked)
+ step->large_objects list, and linking it on to the gc thread's
+ (singly-linked) todo_large_objects list for the destination step,
+ from where it will be scavenged later.
+
+ Convention: bd->flags has BF_EVACUATED set for a large object
+ that has been evacuated, or unset otherwise.
+ -------------------------------------------------------------------------- */
+
+STATIC_INLINE void
+evacuate_large(StgPtr p)
+{
+ bdescr *bd = Bdescr(p);
+ step *stp, *new_stp;
+ step_workspace *ws;
+
+ stp = bd->step;
+ ACQUIRE_SPIN_LOCK(&stp->sync_large_objects);
+
+ // already evacuated?
+ if (bd->flags & BF_EVACUATED) {
+ /* Don't forget to set the gct->failed_to_evac flag if we didn't get
+ * the desired destination (see comments in evacuate()).
+ */
+ if (stp < gct->evac_step) {
+ gct->failed_to_evac = rtsTrue;
+ TICK_GC_FAILED_PROMOTION();
+ }
+ RELEASE_SPIN_LOCK(&stp->sync_large_objects);
+ return;
+ }
+
+ // remove from large_object list
+ if (bd->u.back) {
+ bd->u.back->link = bd->link;
+ } else { // first object in the list
+ stp->large_objects = bd->link;
+ }
+ if (bd->link) {
+ bd->link->u.back = bd->u.back;
+ }
+
+ /* link it on to the evacuated large object list of the destination step
+ */
+ new_stp = stp->to;
+ if (new_stp < gct->evac_step) {
+ if (gct->eager_promotion) {
+ new_stp = gct->evac_step;
+ } else {
+ gct->failed_to_evac = rtsTrue;
+ }
+ }
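+ // (when failed_to_evac is set, the scavenger will put the object
+ // that pointed here on the mutable list, because it now refers to
+ // a younger step than the one requested)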
+
+ ws = &gct->steps[new_stp->abs_no];
+
+ bd->flags |= BF_EVACUATED;
+ bd->step = new_stp;
+ bd->gen_no = new_stp->gen_no;
+
+ // If this is a block of pinned objects, we don't have to scan
+ // these objects, because they aren't allowed to contain any
+ // pointers. For these blocks, we skip the scavenge stage and put
+ // them straight on the scavenged_large_objects list.
+ if (bd->flags & BF_PINNED) {
+ ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS);
+ if (new_stp != stp) { ACQUIRE_SPIN_LOCK(&new_stp->sync_large_objects); }
+ dbl_link_onto(bd, &new_stp->scavenged_large_objects);
+ new_stp->n_scavenged_large_blocks += bd->blocks;
+ if (new_stp != stp) { RELEASE_SPIN_LOCK(&new_stp->sync_large_objects); }
+ } else {
+ bd->link = ws->todo_large_objects;
+ ws->todo_large_objects = bd;
+ }
+
+ RELEASE_SPIN_LOCK(&stp->sync_large_objects);
+}
+
/* ----------------------------------------------------------------------------
Evacuate
extra reads/writes than we save.
------------------------------------------------------------------------- */
-REGPARM1 void
+REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
bdescr *bd = NULL;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
- if (!HEAP_ALLOCED(q)) {
+ if (!HEAP_ALLOCED_GC(q)) {
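+ // q is a static closure; these only need evacuating (onto the
+ // static objects list) during a major GC.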
if (!major_gc) return;
bd = Bdescr((P_)q);
- if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {
+ if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {
// pointer into to-space: just return it. It might be a pointer
// into a generation that we aren't collecting (> N), or it
/* If the object is in a step that we're compacting, then we
* need to use an alternative evacuate procedure.
*/
- if (bd->flags & BF_COMPACTED) {
- if (!is_marked((P_)q,bd)) {
- mark((P_)q,bd);
- if (mark_stack_full()) {
- mark_stack_overflowed = rtsTrue;
- reset_mark_stack();
- }
- push_mark_stack((P_)q);
- }
- return;
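+ // The mark stack now grows block by block (see MarkStack.h), so
+ // the mark_stack_full()/reset_mark_stack() overflow handling
+ // above is no longer needed.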
+ if (!is_marked((P_)q,bd)) {
+ mark((P_)q,bd);
+ push_mark_stack((P_)q);
}
+ return;
}
stp = bd->step->to;
return;
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
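+ // A BLACKHOLE's payload is dead: reserve its full size but copy
+ // only the header (this is what copyPart() is for).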
copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
return;
goto loop;
}
- /* To evacuate a small TSO, we need to relocate the update frame
- * list it contains.
+ /* To evacuate a small TSO: copy the TSO structure, let move_TSO()
+ * adjust the copy's stack pointer, then copy the stack itself
*/
{
StgTSO *new_tso;
StgPtr r, s;
-
- copyPart(p,(StgClosure *)tso, tso_sizeW(tso), sizeofW(StgTSO), stp);
- new_tso = (StgTSO *)*p;
- move_TSO(tso, new_tso);
- for (r = tso->sp, s = new_tso->sp;
- r < tso->stack+tso->stack_size;) {
- *s++ = *r++;
- }
+ rtsBool mine;
+
+ mine = copyPart(p,(StgClosure *)tso, tso_sizeW(tso),
+ sizeofW(StgTSO), stp);
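+ // Only the thread that actually copied the TSO (mine == rtsTrue)
+ // may relocate and copy its stack; a thread that lost the race
+ // must not touch the new TSO.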
+ if (mine) {
+ new_tso = (StgTSO *)*p;
+ move_TSO(tso, new_tso);
+ for (r = tso->sp, s = new_tso->sp;
+ r < tso->stack+tso->stack_size;) {
+ *s++ = *r++;
+ }
+ }
return;
}
}
}
/* -----------------------------------------------------------------------------
- Evacuate a large object
-
- This just consists of removing the object from the (doubly-linked)
- step->large_objects list, and linking it on to the (singly-linked)
- step->new_large_objects list, from where it will be scavenged later.
-
- Convention: bd->flags has BF_EVACUATED set for a large object
- that has been evacuated, or unset otherwise.
- -------------------------------------------------------------------------- */
-
-STATIC_INLINE void
-evacuate_large(StgPtr p)
-{
- bdescr *bd = Bdescr(p);
- step *stp, *new_stp;
- step_workspace *ws;
-
- stp = bd->step;
- ACQUIRE_SPIN_LOCK(&stp->sync_large_objects);
-
- // object must be at the beginning of the block (or be a ByteArray)
- ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
- (((W_)p & BLOCK_MASK) == 0));
-
- // already evacuated?
- if (bd->flags & BF_EVACUATED) {
- /* Don't forget to set the gct->failed_to_evac flag if we didn't get
- * the desired destination (see comments in evacuate()).
- */
- if (stp < gct->evac_step) {
- gct->failed_to_evac = rtsTrue;
- TICK_GC_FAILED_PROMOTION();
- }
- RELEASE_SPIN_LOCK(&stp->sync_large_objects);
- return;
- }
-
- // remove from large_object list
- if (bd->u.back) {
- bd->u.back->link = bd->link;
- } else { // first object in the list
- stp->large_objects = bd->link;
- }
- if (bd->link) {
- bd->link->u.back = bd->u.back;
- }
-
- /* link it on to the evacuated large object list of the destination step
- */
- new_stp = stp->to;
- if (new_stp < gct->evac_step) {
- if (gct->eager_promotion) {
- new_stp = gct->evac_step;
- } else {
- gct->failed_to_evac = rtsTrue;
- }
- }
-
- ws = &gct->steps[new_stp->abs_no];
- bd->flags |= BF_EVACUATED;
- bd->step = new_stp;
- bd->gen_no = new_stp->gen_no;
- bd->link = ws->todo_large_objects;
- ws->todo_large_objects = bd;
-
- RELEASE_SPIN_LOCK(&stp->sync_large_objects);
-}
-
-/* -----------------------------------------------------------------------------
Evaluate a THUNK_SELECTOR if possible.
p points to a THUNK_SELECTOR that we want to evaluate. The
// invoke eval_thunk_selector(), the recursive calls will not
// evacuate the value (because we want to select on the value,
// not evacuate it), so in this case val is in from-space.
- // ASSERT(!HEAP_ALLOCED(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));
+ // ASSERT(!HEAP_ALLOCED_GC(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));
prev = (StgSelector*)((StgClosure *)p)->payload[0];
// Update the THUNK_SELECTOR with an indirection to the
- // EVACUATED closure now at p. Why do this rather than
- // upd_evacuee(q,p)? Because we have an invariant that an
- // EVACUATED closure always points to an object in the
- // same or an older generation (required by the short-cut
- // test in the EVACUATED case, below).
- ((StgInd *)p)->indirectee = val;
- write_barrier();
- SET_INFO(p, &stg_IND_info);
+ // value. The value is still in from-space at this stage.
+ //
+ // (old note: Why not do upd_evacuee(q,p)? Because we have an
+ // invariant that an EVACUATED closure always points to an
+ // object in the same or an older generation (required by
+ // the short-cut test in the EVACUATED case, below).)
+ if ((StgClosure *)p == val) {
+ // must be a loop; just leave a BLACKHOLE in place. This
+ // can happen when we have a chain of selectors that
+ // eventually loops back on itself. We can't leave an
+ // indirection pointing to itself, and we want the program
+ // to deadlock if it ever enters this closure, so
+ // BLACKHOLE is correct.
+ SET_INFO(p, &stg_BLACKHOLE_info);
+ } else {
+ ((StgInd *)p)->indirectee = val;
+ write_barrier();
+ SET_INFO(p, &stg_IND_info);
+ }
// For the purposes of LDV profiling, we have created an
// indirection.
selector_chain:
bd = Bdescr((StgPtr)p);
- if (HEAP_ALLOCED(p)) {
+ if (HEAP_ALLOCED_GC(p)) {
// If the THUNK_SELECTOR is in to-space or in a generation that we
// are not collecting, then bale out early. We won't be able to
// save any space in any case, and updating with an indirection is
if (bd->flags & BF_EVACUATED) {
unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
*q = (StgClosure *)p;
+ // shortcut, behave as for: if (evac) evacuate(q);
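+ // (p is already in to-space here, so the only effect evacuate(q)
+ // would have is the failed-promotion check below)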
+ if (evac && bd->step < gct->evac_step) {
+ gct->failed_to_evac = rtsTrue;
+ TICK_GC_FAILED_PROMOTION();
+ }
return;
}
// we don't update THUNK_SELECTORS in the compacted
// (scavenge_mark_stack doesn't deal with IND). BEWARE! This
// bit is very tricky to get right. If you make changes
// around here, test by compiling stage 3 with +RTS -c -RTS.
- if (bd->flags & BF_COMPACTED) {
+ if (bd->flags & BF_MARKED) {
// must call evacuate() to mark this closure if evac==rtsTrue
*q = (StgClosure *)p;
if (evac) evacuate(q);
// the original selector thunk, p.
SET_INFO(p, (StgInfoTable *)info_ptr);
LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC((StgClosure *)p);
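+ // The original info pointer was restored above only so the LDV
+ // accounting could see the closure's real size; now re-claim the
+ // thunk: a WHITEHOLE in the threaded RTS (where it doubles as the
+ // lock on the selector), a BLACKHOLE otherwise.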
+#if defined(THREADED_RTS)
+ SET_INFO(p, &stg_WHITEHOLE_info);
+#else
SET_INFO(p, &stg_BLACKHOLE_info);
#endif
+#endif
// the closure in val is now the "value" of the
// THUNK_SELECTOR in p. However, val may itself be a
prev_thunk_selector = p;
*q = val;
- if (evac) evacuate(q);
- val = *q;
+
+ // update the other selectors in the chain *before*
+ // evacuating the value. This is necessary in the case
+ // where the value turns out to be one of the selectors
+ // in the chain (i.e. we have a loop), and evacuating it
+ // would corrupt the chain.
+ unchain_thunk_selectors(prev_thunk_selector, val);
+
// evacuate() cannot recurse through
// eval_thunk_selector(), because we know val is not
// a THUNK_SELECTOR.
- unchain_thunk_selectors(prev_thunk_selector, val);
+ if (evac) evacuate(q);
return;
}
case THUNK_0_2:
case THUNK_STATIC:
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
// not evaluated yet
goto bale_out;