/* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.156 2003/06/19 12:47:08 simonmar Exp $
+ * $Id: GC.c,v 1.167 2004/05/21 13:28:59 simonmar Exp $
*
* (c) The GHC Team 1998-2003
*
// Use a register argument for evacuate, if available.
#if __GNUC__ >= 2
-static StgClosure * evacuate (StgClosure *q) __attribute__((regparm(1)));
+#define REGPARM1 __attribute__((regparm(1)))
#else
-static StgClosure * evacuate (StgClosure *q);
+#define REGPARM1
#endif
+REGPARM1 static StgClosure * evacuate (StgClosure *q);
+
static void zero_static_object_list ( StgClosure* first_static );
static void zero_mutable_list ( StgMutClosure *first );
static bdescr *oldgen_scan_bd;
static StgPtr oldgen_scan;
-static inline rtsBool
+STATIC_INLINE rtsBool
mark_stack_empty(void)
{
return mark_sp == mark_stack;
}
-static inline rtsBool
+STATIC_INLINE rtsBool
mark_stack_full(void)
{
return mark_sp >= mark_splim;
}
-static inline void
+STATIC_INLINE void
reset_mark_stack(void)
{
mark_sp = mark_stack;
}
-static inline void
+STATIC_INLINE void
push_mark_stack(StgPtr p)
{
*mark_sp++ = p;
}
-static inline StgPtr
+STATIC_INLINE StgPtr
pop_mark_stack(void)
{
return *--mark_sp;
if (RtsFlags.GcFlags.generations == 1) {
old_to_blocks = g0s0->to_blocks;
g0s0->to_blocks = NULL;
+ g0s0->n_to_blocks = 0;
}
/* Keep a count of how many new blocks we allocated during this GC
// mark the large objects as not evacuated yet
for (bd = stp->large_objects; bd; bd = bd->link) {
- bd->flags = BF_LARGE;
+ bd->flags &= ~BF_EVACUATED;
}
// for a compacted step, we need to allocate the bitmap
// don't forget to fill it with zeros!
memset(bitmap, 0, bitmap_size);
- // for each block in this step, point to its bitmap from the
+ // For each block in this step, point to its bitmap from the
// block descriptor.
for (bd=stp->blocks; bd != NULL; bd = bd->link) {
bd->u.bitmap = bitmap;
bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
+
+ // Also at this point we set the BF_COMPACTED flag
+ // for this block. The invariant is that
+ // BF_COMPACTED is always unset, except during GC
+ // when it is set on those blocks which will be
+ // compacted.
+ bd->flags |= BF_COMPACTED;
}
}
}
// for a compacted step, just shift the new to-space
// onto the front of the now-compacted existing blocks.
for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
- bd->flags &= ~BF_EVACUATED; // now from-space
+ bd->flags &= ~BF_EVACUATED; // now from-space
}
// tack the new blocks on the end of the existing blocks
if (stp->blocks == NULL) {
if (next == NULL) {
bd->link = stp->to_blocks;
}
+ // NB. this step might not be compacted next
+ // time, so reset the BF_COMPACTED flags.
+ // They are set before GC if we're going to
+ // compact. (search for BF_COMPACTED above).
+ bd->flags &= ~BF_COMPACTED;
}
}
// add the new blocks to the block tally
stp->blocks = stp->to_blocks;
stp->n_blocks = stp->n_to_blocks;
for (bd = stp->blocks; bd != NULL; bd = bd->link) {
- bd->flags &= ~BF_EVACUATED; // now from-space
+ bd->flags &= ~BF_EVACUATED; // now from-space
}
}
stp->to_blocks = NULL;
default:
barf("traverse_weak_ptr_list");
+ return rtsTrue;
}
}
}
// check the mark bit for compacted steps
- if (bd->step->is_compacted && is_marked((P_)p,bd)) {
+ if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
return p;
}
*root = evacuate(*root);
}
-static __inline__ void
+STATIC_INLINE void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
// Source object must be in from-space:
}
-static __inline__ StgClosure *
+STATIC_INLINE StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
P_ to, from, dest;
-------------------------------------------------------------------------- */
-static inline void
+STATIC_INLINE void
evacuate_large(StgPtr p)
{
bdescr *bd = Bdescr(p);
extra reads/writes than we save.
-------------------------------------------------------------------------- */
-static StgClosure *
+REGPARM1 static StgClosure *
evacuate(StgClosure *q)
{
StgClosure *to;
/* If the object is in a step that we're compacting, then we
* need to use an alternative evacuate procedure.
*/
- if (bd->step->is_compacted) {
+ if (bd->flags & BF_COMPACTED) {
if (!is_marked((P_)q,bd)) {
mark((P_)q,bd);
if (mark_stack_full()) {
thunk is unchanged.
-------------------------------------------------------------------------- */
+static inline rtsBool
+is_to_space ( StgClosure *p )
+{
+ bdescr *bd;
+
+ bd = Bdescr((StgPtr)p);
+ if (HEAP_ALLOCED(p) &&
+ ((bd->flags & BF_EVACUATED)
+ || ((bd->flags & BF_COMPACTED) &&
+ is_marked((P_)p,bd)))) {
+ return rtsTrue;
+ } else {
+ return rtsFalse;
+ }
+}
+
static StgClosure *
eval_thunk_selector( nat field, StgSelector * p )
{
// eval_thunk_selector(). There are various ways this could
// happen:
//
- // - following an IND_STATIC
+ // 1. following an IND_STATIC
//
- // - when the old generation is compacted, the mark phase updates
- // from-space pointers to be to-space pointers, and we can't
- // reliably tell which we're following (eg. from an IND_STATIC).
+ // 2. when the old generation is compacted, the mark phase updates
+ // from-space pointers to be to-space pointers, and we can't
+ // reliably tell which we're following (eg. from an IND_STATIC).
//
- // So we use the block-descriptor test to find out if we're in
- // to-space.
+ // 3. compacting GC again: if we're looking at a constructor in
+ // the compacted generation, it might point directly to objects
+ // in to-space. We must bale out here, otherwise doing the selection
+ // will result in a to-space pointer being returned.
+ //
+ // (1) is dealt with using a BF_EVACUATED test on the
+ // selectee. (2) and (3): we can tell if we're looking at an
+ // object in the compacted generation that might point to
+ // to-space objects by testing that (a) it is BF_COMPACTED, (b)
+ // the compacted generation is being collected, and (c) the
+ // object is marked. Only a marked object may have pointers that
+ // point to to-space objects, because that happens when
+ // scavenging.
//
- if (HEAP_ALLOCED(selectee) &&
- Bdescr((StgPtr)selectee)->flags & BF_EVACUATED) {
+ // The to-space test is now embodied in the is_to_space() inline
+ // function, as it is re-used below.
+ //
+ if (is_to_space(selectee)) {
goto bale_out;
}
ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
info->layout.payload.nptrs));
- // ToDo: shouldn't we test whether this pointer is in
- // to-space?
- return selectee->payload[field];
+ // Select the right field from the constructor, and check
+ // that the result isn't in to-space. It might be in
+ // to-space if, for example, this constructor contains
+ // pointers to younger-gen objects (and is on the mut-once
+ // list).
+ //
+ {
+ StgClosure *q;
+ q = selectee->payload[field];
+ if (is_to_space(q)) {
+ goto bale_out;
+ } else {
+ return q;
+ }
+ }
case IND:
case IND_PERM:
}
case AP:
+ case AP_STACK:
case THUNK:
case THUNK_1_0:
case THUNK_0_1:
b = 0;
bitmap = large_srt->l.bitmap[b];
size = (nat)large_srt->l.size;
- p = large_srt->srt;
+ p = (StgClosure **)large_srt->srt;
for (i = 0; i < size; ) {
if ((bitmap & 1) != 0) {
evacuate(*p);
* srt field in the info table. That's ok, because we'll
* never dereference it.
*/
-static inline void
+STATIC_INLINE void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
nat bitmap;
}
-static inline void
+STATIC_INLINE void
scavenge_thunk_srt(const StgInfoTable *info)
{
StgThunkInfoTable *thunk_info;
scavenge_srt((StgClosure **)thunk_info->srt, thunk_info->i.srt_bitmap);
}
-static inline void
+STATIC_INLINE void
scavenge_fun_srt(const StgInfoTable *info)
{
StgFunInfoTable *fun_info;
scavenge_srt((StgClosure **)fun_info->srt, fun_info->i.srt_bitmap);
}
-static inline void
+STATIC_INLINE void
scavenge_ret_srt(const StgInfoTable *info)
{
StgRetInfoTable *ret_info;
in PAPs.
-------------------------------------------------------------------------- */
-static inline StgPtr
+STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
StgPtr p;
return p;
}
-static inline StgPtr
+STATIC_INLINE StgPtr
scavenge_PAP (StgPAP *pap)
{
StgPtr p;
{
StgPtr next;
+ // Set the mut_link field to NULL, so that we will put this
+ // array back on the mutable list if it is subsequently thawed
+ // by unsafeThaw#.
+ ((StgMutArrPtrs*)p)->mut_link = NULL;
+
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
{
StgPtr next;
+ // Set the mut_link field to NULL, so that we will put this
+ // array on the mutable list if it is subsequently thawed
+ // by unsafeThaw#.
+ ((StgMutArrPtrs*)p)->mut_link = NULL;
+
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
// follow everything
StgPtr next;
+ // Set the mut_link field to NULL, so that we will put this
+ // array on the mutable list if it is subsequently thawed
+ // by unsafeThaw#.
+ ((StgMutArrPtrs*)p)->mut_link = NULL;
+
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
(StgClosure *)*q = evacuate((StgClosure *)*q);
}
evac_gen = 0;
+ // Set the mut_link field to NULL, so that we will put this
+ // array back on the mutable list if it is subsequently thawed
+ // by unsafeThaw#.
p->mut_link = NULL;
if (failed_to_evac) {
failed_to_evac = rtsFalse;
}
}
-static inline StgPtr
+STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
while (size > 0) {
void *gap_start, *next_gap_start, *gap_end;
nat chunk_size;
- next_gap_start = (void *)gap + sizeof(StgUpdateFrame);
+ next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
sp = next_gap_start;
while ((StgPtr)gap > tso->sp) {
// we're working in *bytes* now...
gap_start = next_gap_start;
- gap_end = gap_start - gap->gap_size * sizeof(W_);
+ gap_end = (void*) ((unsigned char*)gap_start - gap->gap_size * sizeof(W_));
gap = gap->next_gap;
- next_gap_start = (void *)gap + sizeof(StgUpdateFrame);
+ next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
- chunk_size = gap_end - next_gap_start;
- sp -= chunk_size;
+ chunk_size = (unsigned char*)gap_end - (unsigned char*)next_gap_start;
+ (unsigned char*)sp -= chunk_size;
memmove(sp, next_gap_start, chunk_size);
}
fputc('\n', stderr);
}
-static inline rtsBool
+STATIC_INLINE rtsBool
maybeLarge(StgClosure *closure)
{
StgInfoTable *info = get_itbl(closure);