*
* ---------------------------------------------------------------------------*/
+#include "PosixSource.h"
#include "Rts.h"
-#include "RtsFlags.h"
+
#include "Storage.h"
-#include "MBlock.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "Compact.h"
+#include "MarkStack.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
-#include "LdvProfile.h"
#include "Sanity.h"
+#include "Capability.h"
+#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);
# define evacuate(a) evacuate1(a)
# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
-# define scavenge_mutable_list(g) scavenge_mutable_list1(g)
+# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
+# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
/* -----------------------------------------------------------------------------
return;
}
+ debugTrace(DEBUG_gc, "scavenging thread %d", (int)tso->id);
+
saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
if (gct->failed_to_evac) {
- tso->flags |= TSO_DIRTY;
+ tso->dirty = 1;
scavenge_TSO_link(tso);
} else {
- tso->flags &= ~TSO_DIRTY;
+ tso->dirty = 0;
scavenge_TSO_link(tso);
if (gct->failed_to_evac) {
tso->flags |= TSO_LINK_DIRTY;
return p;
}
-STATIC_INLINE StgPtr
+STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
StgPtr p;
return p;
}
-STATIC_INLINE StgPtr
+STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
evacuate(&pap->fun);
* srt field in the info table. That's ok, because we'll
* never dereference it.
*/
-STATIC_INLINE void
+STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
nat bitmap;
// If the SRT entry hasn't got bit 0 set, the SRT entry points to a
// closure that's fixed at link-time, and no extra magic is required.
if ( (unsigned long)(*srt) & 0x1 ) {
- evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
+ evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
} else {
evacuate(p);
}
}
-STATIC_INLINE void
+STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
StgThunkInfoTable *thunk_info;
scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}
-STATIC_INLINE void
+STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
StgFunInfoTable *fun_info;
idea.
-------------------------------------------------------------------------- */
-static void
+static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
StgPtr p, q;
// time around the loop.
while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
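+ // sanity: a block being scavenged here should not be linked onto any todo list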
+ ASSERT(bd->link == NULL);
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
break;
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
p += BLACKHOLE_sizeW();
break;
if (gct->failed_to_evac) {
gct->failed_to_evac = rtsFalse;
if (bd->gen_no > 0) {
- recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
+ recordMutableGen_GC((StgClosure *)q, bd->gen_no);
}
}
}
gct->evac_step = &oldest_gen->steps[0];
saved_evac_step = gct->evac_step;
-linear_scan:
- while (!mark_stack_empty()) {
- p = pop_mark_stack();
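+ // Pop and scavenge closures until the mark stack is empty;
+ // pop_mark_stack() returns NULL when there is nothing left. The mark
+ // stack is now a chain of blocks (see MarkStack.h), so it cannot
+ // overflow and the old linear-scan fallback is no longer needed.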
+ while ((p = pop_mark_stack())) {
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
q = p;
- switch (((volatile StgWord *)info)[1] & 0xffff) {
+ switch (info->type) {
case MVAR_CLEAN:
case MVAR_DIRTY:
}
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
case ARR_WORDS:
break;
if (gct->failed_to_evac) {
gct->failed_to_evac = rtsFalse;
if (gct->evac_step) {
- recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
+ recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen_no);
}
}
-
- // mark the next bit to indicate "scavenged"
- mark(q+1, Bdescr(q));
-
- } // while (!mark_stack_empty())
-
- // start a new linear scan if the mark stack overflowed at some point
- if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
- debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
- mark_stack_overflowed = rtsFalse;
- oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
- oldgen_scan = oldgen_scan_bd->start;
- }
-
- if (oldgen_scan_bd) {
- // push a new thing on the mark stack
- loop:
- // find a closure that is marked but not scavenged, and start
- // from there.
- while (oldgen_scan < oldgen_scan_bd->free
- && !is_marked(oldgen_scan,oldgen_scan_bd)) {
- oldgen_scan++;
- }
-
- if (oldgen_scan < oldgen_scan_bd->free) {
-
- // already scavenged?
- if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
- oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
- goto loop;
- }
- push_mark_stack(oldgen_scan);
- // ToDo: bump the linear scan by the actual size of the object
- oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
- goto linear_scan;
- }
-
- oldgen_scan_bd = oldgen_scan_bd->link;
- if (oldgen_scan_bd != NULL) {
- oldgen_scan = oldgen_scan_bd->start;
- goto loop;
- }
- }
+ } // while (p = pop_mark_stack())
}
/* -----------------------------------------------------------------------------
}
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
break;
break;
}
+ case IND:
+ // IND can happen, for example, when the interpreter allocates
+ // a gigantic AP closure (more than one block), which ends up
+ // on the large-object list and then gets updated. See #3424.
case IND_OLDGEN:
case IND_OLDGEN_PERM:
case IND_STATIC:
- {
- /* Careful here: a THUNK can be on the mutable list because
- * it contains pointers to young gen objects. If such a thunk
- * is updated, the IND_OLDGEN will be added to the mutable
- * list again, and we'll scavenge it twice. evacuate()
- * doesn't check whether the object has already been
- * evacuated, so we perform that check here.
- */
- StgClosure *q = ((StgInd *)p)->indirectee;
- if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
- break;
- }
evacuate(&((StgInd *)p)->indirectee);
- }
#if 0 && defined(DEBUG)
if (RtsFlags.DebugFlags.gc)
-------------------------------------------------------------------------- */
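+ // Mutable lists are now kept per Capability; the caller passes in the
+ // chain of mutable-list blocks to scavenge, together with the generation
+ // it belongs to (see scavenge_capability_mut_lists() below).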
void
-scavenge_mutable_list(generation *gen)
+scavenge_mutable_list(bdescr *bd, generation *gen)
{
- bdescr *bd;
StgPtr p, q;
- bd = gen->saved_mut_list;
-
gct->evac_step = &gen->steps[0];
for (; bd != NULL; bd = bd->link) {
for (q = bd->start; q < bd->free; q++) {
// definitely doesn't point into a young generation.
// Clean objects don't need to be scavenged. Some clean
// objects (MUT_VAR_CLEAN) are not kept on the mutable
- // list at all; others, such as MUT_ARR_PTRS_CLEAN and
- // TSO, are always on the mutable list.
+ // list at all; others, such as MUT_ARR_PTRS_CLEAN,
+ // are always on the mutable list.
//
switch (get_itbl((StgClosure *)p)->type) {
case MUT_ARR_PTRS_CLEAN:
- recordMutableGen_GC((StgClosure *)p,gen);
+ recordMutableGen_GC((StgClosure *)p,gen->no);
continue;
case TSO: {
StgTSO *tso = (StgTSO *)p;
- if ((tso->flags & TSO_DIRTY) == 0) {
+ if (tso->dirty == 0) {
// Must be on the mutable list because its link
// field is dirty.
ASSERT(tso->flags & TSO_LINK_DIRTY);
scavenge_TSO_link(tso);
if (gct->failed_to_evac) {
- recordMutableGen_GC((StgClosure *)p,gen);
+ recordMutableGen_GC((StgClosure *)p,gen->no);
gct->failed_to_evac = rtsFalse;
} else {
tso->flags &= ~TSO_LINK_DIRTY;
if (scavenge_one(p)) {
// didn't manage to promote everything, so put the
// object back on the list.
- recordMutableGen_GC((StgClosure *)p,gen);
+ recordMutableGen_GC((StgClosure *)p,gen->no);
}
}
}
+}
+
+void
+scavenge_capability_mut_lists (Capability *cap)
+{
+ nat g;
- // free the old mut_list
- freeChain_sync(gen->saved_mut_list);
- gen->saved_mut_list = NULL;
+ /* Mutable lists from each generation > N:
+ * we want to *scavenge* these roots, not evacuate them: they're not
+ * going to move in this GC.
+ * Also do them in reverse generation order, for the usual reason:
+ * to reduce the likelihood of spurious old->new pointers.
+ */
+ for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
+ scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
+ freeChain_sync(cap->saved_mut_lists[g]);
+ cap->saved_mut_lists[g] = NULL;
+ }
}
/* -----------------------------------------------------------------------------
*/
if (gct->failed_to_evac) {
gct->failed_to_evac = rtsFalse;
- recordMutableGen_GC((StgClosure *)p,oldest_gen);
+ recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
}
break;
}
// the indirection into an IND_PERM, so that evacuate will
// copy the indirection into the old generation instead of
// discarding it.
+ //
+ // Note [upd-black-hole]
+ // One slight hiccup is that the THUNK_SELECTOR machinery can
+ // overwrite the updatee with an IND. In parallel GC, this
+ // could even be happening concurrently, so we can't check for
+ // the IND. Fortunately, if we assume that blackholing is
+ // happening (either lazy or eager), then we can be sure that
+ // the updatee is never a THUNK_SELECTOR and we're ok.
+ // NB. this is a new invariant: blackholing is not optional.
{
nat type;
const StgInfoTable *i;
+ StgClosure *updatee;
- i = ((StgUpdateFrame *)p)->updatee->header.info;
+ updatee = ((StgUpdateFrame *)p)->updatee;
+ i = updatee->header.info;
if (!IS_FORWARDING_PTR(i)) {
- type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
+ type = get_itbl(updatee)->type;
if (type == IND) {
- ((StgUpdateFrame *)p)->updatee->header.info =
- (StgInfoTable *)&stg_IND_PERM_info;
+ updatee->header.info = &stg_IND_PERM_info;
} else if (type == IND_OLDGEN) {
- ((StgUpdateFrame *)p)->updatee->header.info =
- (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
+ updatee->header.info = &stg_IND_OLDGEN_PERM_info;
}
- evacuate(&((StgUpdateFrame *)p)->updatee);
- p += sizeofW(StgUpdateFrame);
- continue;
}
+ evacuate(&((StgUpdateFrame *)p)->updatee);
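+ // given the blackholing invariant above (Note [upd-black-hole]), the
+ // updatee is a black hole, so the evacuated pointer is never tagged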
+ ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
+ p += sizeofW(StgUpdateFrame);
+ continue;
}
// small bitmap (< 32 entries, or 64 on a 64-bit machine)
p = bd->start;
if (scavenge_one(p)) {
if (ws->step->gen_no > 0) {
- recordMutableGen_GC((StgClosure *)p, ws->step->gen);
+ recordMutableGen_GC((StgClosure *)p, ws->step->gen_no);
}
}
break;
}
- if ((bd = grab_todo_block(ws)) != NULL) {
+ if ((bd = grab_local_todo_block(ws)) != NULL) {
scavenge_block(bd);
did_something = rtsTrue;
break;
did_anything = rtsTrue;
goto loop;
}
+
+#if defined(THREADED_RTS)
+ if (work_stealing) {
+ // look for work to steal
+ for (s = total_steps-1; s >= 0; s--) {
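+ // skip gen 0 step 0: when there is more than one generation, objects
+ // are never copied into it, so it has no todo blocks to steal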
+ if (s == 0 && RtsFlags.GcFlags.generations > 1) {
+ continue;
+ }
+ if ((bd = steal_todo_block(s)) != NULL) {
+ scavenge_block(bd);
+ did_something = rtsTrue;
+ break;
+ }
+ }
+
+ if (did_something) {
+ did_anything = rtsTrue;
+ goto loop;
+ }
+ }
+#endif
+
// only return when there is no more work to do
return did_anything;
}
// scavenge objects in compacted generation
- if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
- (mark_stack_bdescr != NULL && !mark_stack_empty())) {
+ if (mark_stack_bd != NULL && !mark_stack_empty()) {
scavenge_mark_stack();
work_to_do = rtsTrue;
}