/* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.115 2001/08/07 10:49:49 simonmar Exp $
+ * $Id: GC.c,v 1.143 2002/09/18 06:34:07 mthomas Exp $
*
* (c) The GHC Team 1998-1999
*
*
* ---------------------------------------------------------------------------*/
+#include "PosixSource.h"
#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Prelude.h"
#include "ParTicky.h" // ToDo: move into Rts.h
#include "GCCompact.h"
+#include "Signals.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
#include "FrontPanel.h"
#endif
+#include "RetainerProfile.h"
+#include "LdvProfile.h"
+
+#include <string.h>
+
/* STATIC OBJECT LIST.
*
* During GC:
* We build up a static object list while collecting generations 0..N,
* which is then appended to the static object list of generation N+1.
*/
-StgClosure* static_objects; // live static objects
-StgClosure* scavenged_static_objects; // static objects scavenged so far
+static StgClosure* static_objects; // live static objects
+StgClosure* scavenged_static_objects; // static objects scavenged so far
/* N is the oldest generation being collected, where the generations
* are numbered starting at 0. A major GC (indicated by the major_gc
/* Weak pointers
*/
StgWeak *old_weak_ptr_list; // also pending finaliser list
-static rtsBool weak_done; // all done for this pass
+
+/* Which stage of processing various kinds of weak pointer are we at?
+ * (see traverse_weak_ptr_list() below for discussion).
+ */
+typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
+static WeakStage weak_stage;
/* List of all threads during GC
*/
static StgTSO *old_all_threads;
-static StgTSO *resurrected_threads;
+StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
* generation.
/* Old to-space (used for two-space collector only)
*/
-bdescr *old_to_blocks;
+static bdescr *old_to_blocks;
/* Data used for allocation area sizing.
*/
-lnat new_blocks; // blocks allocated during this GC
-lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
+static lnat new_blocks; // blocks allocated during this GC
+static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
/* Used to avoid long recursion due to selector thunks
*/
-lnat thunk_selector_depth = 0;
-#define MAX_THUNK_SELECTOR_DEPTH 256
+static lnat thunk_selector_depth = 0;
+#define MAX_THUNK_SELECTOR_DEPTH 8
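+
+/* Keep this bound small: evacuate() and eval_thunk_selector() recurse
+ * on the C stack to this depth, and beyond it we fall back to copying
+ * the selector thunk unevaluated (see the THUNK_SELECTOR cases below).
+ */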
/* -----------------------------------------------------------------------------
Static function declarations
static rtsBool traverse_weak_ptr_list ( void );
static void mark_weak_ptr_list ( StgWeak **list );
+static StgClosure * eval_thunk_selector ( nat field, StgSelector * p );
+
static void scavenge ( step * );
static void scavenge_mark_stack ( void );
static void scavenge_stack ( StgPtr p, StgPtr stack_end );
static void scavenge_static ( void );
static void scavenge_mutable_list ( generation *g );
static void scavenge_mut_once_list ( generation *g );
-static void scavengeCAFs ( void );
#if 0 && defined(DEBUG)
static void gcCAFs ( void );
- free from-space in each step, and set from-space = to-space.
+ Locks held: sched_mutex
+
-------------------------------------------------------------------------- */
void
Now, Now));
#endif
+#ifndef mingw32_TARGET_OS
+ // block signals
+ blockUserSignals();
+#endif
+
// tell the stats department that we've started a GC
stat_startGC();
}
}
- scavengeCAFs();
+ /* follow roots from the CAF list (used by GHCi)
+ */
+ evac_gen = 0;
+ markCAFs(mark_root);
/* follow all the roots that the application knows about.
*/
mark_weak_ptr_list(&weak_ptr_list);
old_weak_ptr_list = weak_ptr_list;
weak_ptr_list = NULL;
- weak_done = rtsFalse;
+ weak_stage = WeakPtrs;
/* The all_threads list is like the weak_ptr_list.
* See traverse_weak_ptr_list() for the details.
if (flag) { goto loop; }
- // must be last...
+ // must be last... invariant is that everything is fully
+ // scavenged at this point.
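+      // (traverse_weak_ptr_list() judges the liveness of weak-pointer
+      // keys and threads from what has been evacuated so far, so it
+      // must only run once scavenging has reached a fixed point.)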
if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
goto loop;
}
}
+ /* Update the pointers from the "main thread" list - these are
+ * treated as weak pointers because we want to allow a main thread
+ * to get a BlockedOnDeadMVar exception in the same way as any other
+ * thread. Note that the threads should all have been retained by
+   * GC by virtue of being on the all_threads list; we're just
+ * updating pointers here.
+ */
+ {
+ StgMainThread *m;
+ StgTSO *tso;
+ for (m = main_threads; m != NULL; m = m->link) {
+ tso = (StgTSO *) isAlive((StgClosure *)m->tso);
+ if (tso == NULL) {
+ barf("main thread has been GC'd");
+ }
+ m->tso = tso;
+ }
+ }
+
#if defined(PAR)
// Reconstruct the Global Address tables used in GUM
rebuildGAtables(major_gc);
}
}
+#ifdef PROFILING
+ // We call processHeapClosureForDead() on every closure destroyed during
+ // the current garbage collection, so we invoke LdvCensusForDead().
+ if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
+ || RtsFlags.ProfFlags.bioSelector != NULL)
+ LdvCensusForDead(N);
+#endif
+
// NO MORE EVACUATION AFTER THIS POINT!
// Finally: compaction of the oldest generation.
if (major_gc && oldest_gen->steps[0].is_compacted) {
RtsFlags.GcFlags.minOldGenSize);
// minimum size for generation zero
- min_alloc = (RtsFlags.GcFlags.pcFreeHeap * max) / 200;
+ min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
+ RtsFlags.GcFlags.minAllocAreaSize);
+
+ // Auto-enable compaction when the residency reaches a
+ // certain percentage of the maximum heap size (default: 30%).
+ if (RtsFlags.GcFlags.generations > 1 &&
+ (RtsFlags.GcFlags.compact ||
+ (max > 0 &&
+ oldest_gen->steps[0].n_blocks >
+ (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
+ oldest_gen->steps[0].is_compacted = 1;
+//	  fprintf(stderr,"compaction: on\n");
+ } else {
+ oldest_gen->steps[0].is_compacted = 0;
+//	  fprintf(stderr,"compaction: off\n");
+ }
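+
+  // For example: with a 1000-block maximum heap and the default
+  // compactThreshold of 30, compaction is switched on once the
+  // oldest generation grows beyond 300 blocks, and off again if a
+  // later GC brings it back under that threshold.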
// if we're going to go over the maximum heap size, reduce the
// size of the generations accordingly. The calculation is
// different if compaction is turned on, because we don't need
// to double the space required to collect the old generation.
if (max != 0) {
- if (RtsFlags.GcFlags.compact) {
+
+ // this test is necessary to ensure that the calculations
+ // below don't have any negative results - we're working
+ // with unsigned values here.
+ if (max < min_alloc) {
+ heapOverflow();
+ }
+
+ if (oldest_gen->steps[0].is_compacted) {
if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
size = (max - min_alloc) / ((gens - 1) * 2 - 1);
}
for (g = 0; g < gens; g++) {
generations[g].max_blocks = size;
}
-
- // Auto-enable compaction when the residency reaches a
- // certain percentage of the maximum heap size (default: 30%).
- if (RtsFlags.GcFlags.compact &&
- oldest_gen->steps[0].n_blocks >
- (RtsFlags.GcFlags.compactThreshold * max) / 100) {
- oldest_gen->steps[0].is_compacted = 1;
-// fprintf(stderr,"compaction: on\n", live);
- } else {
- oldest_gen->steps[0].is_compacted = 0;
-// fprintf(stderr,"compaction: off\n", live);
- }
}
// Guess the amount of live data for stats.
alloc_HpLim = NULL;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
+ // Start a new pinned_object_block
+ pinned_object_block = NULL;
+
/* Free the mark stack.
*/
if (mark_stack_bdescr != NULL) {
*/
blocks = g0s0->n_to_blocks;
- if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
- RtsFlags.GcFlags.maxHeapSize ) {
+ if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
+ blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
+ RtsFlags.GcFlags.maxHeapSize ) {
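+    // For instance, with the default old-generation factor of 2
+    // (the -F RTS flag), this branch is taken once the copied live
+    // data exceeds a quarter of the maximum heap (blocks*2*2 > max).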
long adjusted_blocks; // signed on purpose
int pc_free;
}
resizeNursery((nat)blocks);
+
+ } else {
+ // we might have added extra large blocks to the nursery, so
+ // resize back to minAllocAreaSize again.
+ resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
}
}
if (major_gc) { gcCAFs(); }
#endif
+#ifdef PROFILING
+ // resetStaticObjectForRetainerProfiling() must be called before
+ // zeroing below.
+ resetStaticObjectForRetainerProfiling();
+#endif
+
// zero the scavenged static object list
if (major_gc) {
zero_static_object_list(scavenged_static_objects);
// Reset the nursery
resetNurseries();
+ RELEASE_LOCK(&sched_mutex);
+
// start any pending finalizers
scheduleFinalizers(old_weak_ptr_list);
// send exceptions to any threads which were about to die
resurrectThreads(resurrected_threads);
+
+ ACQUIRE_LOCK(&sched_mutex);
// Update the stable pointer hash table.
updateStablePtrTable(major_gc);
// restore enclosing cost centre
#ifdef PROFILING
- heapCensus();
CCCS = prev_CCS;
#endif
// ok, GC over: tell the stats department what happened.
stat_endGC(allocated, collected, live, copied, N);
+#ifndef mingw32_TARGET_OS
+ // unblock signals again
+ unblockUserSignals();
+#endif
+
//PAR_TICKY_TP();
}
older generations than the one we're collecting. This could
probably be optimised by keeping per-generation lists of weak
pointers, but for a few weak pointers this scheme will work.
+
+ There are three distinct stages to processing weak pointers:
+
+ - weak_stage == WeakPtrs
+
+     We process all the weak pointers whose keys are alive (evacuate
+ their values and finalizers), and repeat until we can find no new
+ live keys. If no live keys are found in this pass, then we
+ evacuate the finalizers of all the dead weak pointers in order to
+ run them.
+
+ - weak_stage == WeakThreads
+
+ Now, we discover which *threads* are still alive. Pointers to
+ threads from the all_threads and main thread lists are the
+     weakest of all: a pointer from the finalizer of a dead weak
+ pointer can keep a thread alive. Any threads found to be unreachable
+ are evacuated and placed on the resurrected_threads list so we
+ can send them a signal later.
+
+ - weak_stage == WeakDone
+
+ No more evacuation is done.
+
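+   As a sketch of the driving loop: GarbageCollect() scavenges to a
+   fixed point, calls traverse_weak_ptr_list(), and scavenges again
+   each time it returns rtsTrue, until the WeakDone stage reports
+   rtsFalse and no further evacuation happens.
+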
-------------------------------------------------------------------------- */
static rtsBool
StgClosure *new;
rtsBool flag = rtsFalse;
- if (weak_done) { return rtsFalse; }
+ switch (weak_stage) {
- /* doesn't matter where we evacuate values/finalizers to, since
- * these pointers are treated as roots (iff the keys are alive).
- */
- evac_gen = 0;
+ case WeakDone:
+ return rtsFalse;
- last_w = &old_weak_ptr_list;
- for (w = old_weak_ptr_list; w != NULL; w = next_w) {
-
- /* There might be a DEAD_WEAK on the list if finalizeWeak# was
- * called on a live weak pointer object. Just remove it.
- */
- if (w->header.info == &stg_DEAD_WEAK_info) {
- next_w = ((StgDeadWeak *)w)->link;
- *last_w = next_w;
- continue;
- }
-
- ASSERT(get_itbl(w)->type == WEAK);
-
- /* Now, check whether the key is reachable.
- */
- new = isAlive(w->key);
- if (new != NULL) {
- w->key = new;
- // evacuate the value and finalizer
- w->value = evacuate(w->value);
- w->finalizer = evacuate(w->finalizer);
- // remove this weak ptr from the old_weak_ptr list
- *last_w = w->link;
- // and put it on the new weak ptr list
- next_w = w->link;
- w->link = weak_ptr_list;
- weak_ptr_list = w;
- flag = rtsTrue;
- IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
- continue;
- }
- else {
- last_w = &(w->link);
- next_w = w->link;
- continue;
- }
- }
-
- /* Now deal with the all_threads list, which behaves somewhat like
- * the weak ptr list. If we discover any threads that are about to
- * become garbage, we wake them up and administer an exception.
- */
- {
- StgTSO *t, *tmp, *next, **prev;
+ case WeakPtrs:
+ /* doesn't matter where we evacuate values/finalizers to, since
+ * these pointers are treated as roots (iff the keys are alive).
+ */
+ evac_gen = 0;
+
+ last_w = &old_weak_ptr_list;
+ for (w = old_weak_ptr_list; w != NULL; w = next_w) {
+
+ /* There might be a DEAD_WEAK on the list if finalizeWeak# was
+ * called on a live weak pointer object. Just remove it.
+ */
+ if (w->header.info == &stg_DEAD_WEAK_info) {
+ next_w = ((StgDeadWeak *)w)->link;
+ *last_w = next_w;
+ continue;
+ }
+
+ switch (get_itbl(w)->type) {
- prev = &old_all_threads;
- for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+ case EVACUATED:
+ next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
+ *last_w = next_w;
+ continue;
- (StgClosure *)tmp = isAlive((StgClosure *)t);
-
- if (tmp != NULL) {
- t = tmp;
- }
+ case WEAK:
+ /* Now, check whether the key is reachable.
+ */
+ new = isAlive(w->key);
+ if (new != NULL) {
+ w->key = new;
+ // evacuate the value and finalizer
+ w->value = evacuate(w->value);
+ w->finalizer = evacuate(w->finalizer);
+ // remove this weak ptr from the old_weak_ptr list
+ *last_w = w->link;
+ // and put it on the new weak ptr list
+ next_w = w->link;
+ w->link = weak_ptr_list;
+ weak_ptr_list = w;
+ flag = rtsTrue;
+ IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p",
+ w, w->key));
+ continue;
+ }
+ else {
+ last_w = &(w->link);
+ next_w = w->link;
+ continue;
+ }
- ASSERT(get_itbl(t)->type == TSO);
- switch (t->what_next) {
- case ThreadRelocated:
- next = t->link;
- *prev = next;
- continue;
- case ThreadKilled:
- case ThreadComplete:
- // finshed or died. The thread might still be alive, but we
- // don't keep it on the all_threads list. Don't forget to
- // stub out its global_link field.
- next = t->global_link;
- t->global_link = END_TSO_QUEUE;
- *prev = next;
- continue;
- default:
- ;
+ default:
+ barf("traverse_weak_ptr_list: not WEAK");
+ }
}
+
+ /* If we didn't make any changes, then we can go round and kill all
+ * the dead weak pointers. The old_weak_ptr list is used as a list
+ * of pending finalizers later on.
+ */
+ if (flag == rtsFalse) {
+ for (w = old_weak_ptr_list; w; w = w->link) {
+ w->finalizer = evacuate(w->finalizer);
+ }
- if (tmp == NULL) {
- // not alive (yet): leave this thread on the old_all_threads list.
- prev = &(t->global_link);
- next = t->global_link;
- }
- else {
- // alive: move this thread onto the all_threads list.
- next = t->global_link;
- t->global_link = all_threads;
- all_threads = t;
- *prev = next;
+ // Next, move to the WeakThreads stage after fully
+ // scavenging the finalizers we've just evacuated.
+ weak_stage = WeakThreads;
}
- }
- }
- /* If we didn't make any changes, then we can go round and kill all
- * the dead weak pointers. The old_weak_ptr list is used as a list
- * of pending finalizers later on.
- */
- if (flag == rtsFalse) {
- for (w = old_weak_ptr_list; w; w = w->link) {
- w->finalizer = evacuate(w->finalizer);
- }
+ return rtsTrue;
- /* And resurrect any threads which were about to become garbage.
- */
- {
- StgTSO *t, *tmp, *next;
- for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
- next = t->global_link;
- (StgClosure *)tmp = evacuate((StgClosure *)t);
- tmp->global_link = resurrected_threads;
- resurrected_threads = tmp;
+ case WeakThreads:
+ /* Now deal with the all_threads list, which behaves somewhat like
+ * the weak ptr list. If we discover any threads that are about to
+ * become garbage, we wake them up and administer an exception.
+ */
+ {
+ StgTSO *t, *tmp, *next, **prev;
+
+ prev = &old_all_threads;
+ for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+
+ (StgClosure *)tmp = isAlive((StgClosure *)t);
+
+ if (tmp != NULL) {
+ t = tmp;
+ }
+
+ ASSERT(get_itbl(t)->type == TSO);
+ switch (t->what_next) {
+ case ThreadRelocated:
+ next = t->link;
+ *prev = next;
+ continue;
+ case ThreadKilled:
+ case ThreadComplete:
+	      // finished or died.  The thread might still be alive, but we
+ // don't keep it on the all_threads list. Don't forget to
+ // stub out its global_link field.
+ next = t->global_link;
+ t->global_link = END_TSO_QUEUE;
+ *prev = next;
+ continue;
+ default:
+ ;
+ }
+
+ if (tmp == NULL) {
+ // not alive (yet): leave this thread on the
+ // old_all_threads list.
+ prev = &(t->global_link);
+ next = t->global_link;
+ }
+ else {
+ // alive: move this thread onto the all_threads list.
+ next = t->global_link;
+ t->global_link = all_threads;
+ all_threads = t;
+ *prev = next;
+ }
+ }
}
- }
+
+ /* And resurrect any threads which were about to become garbage.
+ */
+ {
+ StgTSO *t, *tmp, *next;
+ for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->global_link;
+ (StgClosure *)tmp = evacuate((StgClosure *)t);
+ tmp->global_link = resurrected_threads;
+ resurrected_threads = tmp;
+ }
+ }
+
+ weak_stage = WeakDone; // *now* we're done,
+ return rtsTrue; // but one more round of scavenging, please
- weak_done = rtsTrue;
+ default:
+ barf("traverse_weak_ptr_list");
}
- return rtsTrue;
}
/* -----------------------------------------------------------------------------
last_w = list;
for (w = *list; w; w = w->link) {
+ // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
+ ASSERT(w->header.info == &stg_DEAD_WEAK_info
+ || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
(StgClosure *)w = evacuate((StgClosure *)w);
*last_w = w;
last_w = &(w->link);
loop:
bd = Bdescr((P_)p);
+
// ignore closures in generations that we're not collecting.
if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
return p;
copy(StgClosure *src, nat size, step *stp)
{
P_ to, from, dest;
+#ifdef PROFILING
+ // @LDV profiling
+ nat size_org = size;
+#endif
TICK_GC_WORDS_COPIED(size);
/* Find out where we're going, using the handy "to" pointer in
dest = stp->hp;
stp->hp = to;
upd_evacuee(src,(StgClosure *)dest);
+#ifdef PROFILING
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ SET_EVACUAEE_FOR_LDV(src, size_org);
+#endif
return (StgClosure *)dest;
}
*/
-static __inline__ StgClosure *
+static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
P_ dest, to, from;
+#ifdef PROFILING
+ // @LDV profiling
+ nat size_to_copy_org = size_to_copy;
+#endif
TICK_GC_WORDS_COPIED(size_to_copy);
if (stp->gen_no < evac_gen) {
dest = stp->hp;
stp->hp += size_to_reserve;
upd_evacuee(src,(StgClosure *)dest);
+#ifdef PROFILING
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+  // (Recording size_to_copy_org here would be wrong, because the
+  // closure actually occupies size_to_reserve words after the copy.)
+ SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
+ // fill the slop
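+  // (LDV profiling walks the heap linearly, so the reserved but
+  // uncopied words must be marked as slop for the census to skip.)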
+ if (size_to_reserve - size_to_copy_org > 0)
+ FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
+#endif
return (StgClosure *)dest;
}
Evacuate a large object
This just consists of removing the object from the (doubly-linked)
- large_alloc_list, and linking it on to the (singly-linked)
- new_large_objects list, from where it will be scavenged later.
+ step->large_objects list, and linking it on to the (singly-linked)
+ step->new_large_objects list, from where it will be scavenged later.
Convention: bd->flags has BF_EVACUATED set for a large object
that has been evacuated, or unset otherwise.
bdescr *bd = Bdescr(p);
step *stp;
- // should point to the beginning of the block
- ASSERT(((W_)p & BLOCK_MASK) == 0);
-
+ // object must be at the beginning of the block (or be a ByteArray)
+ ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
+ (((W_)p & BLOCK_MASK) == 0));
+
// already evacuated?
if (bd->flags & BF_EVACUATED) {
/* Don't forget to set the failed_to_evac flag if we didn't get
case THUNK_SELECTOR:
{
- const StgInfoTable* selectee_info;
- StgClosure* selectee = ((StgSelector*)q)->selectee;
-
- selector_loop:
- selectee_info = get_itbl(selectee);
- switch (selectee_info->type) {
- case CONSTR:
- case CONSTR_1_0:
- case CONSTR_0_1:
- case CONSTR_2_0:
- case CONSTR_1_1:
- case CONSTR_0_2:
- case CONSTR_STATIC:
- {
- StgWord offset = info->layout.selector_offset;
-
- // check that the size is in range
- ASSERT(offset <
- (StgWord32)(selectee_info->layout.payload.ptrs +
- selectee_info->layout.payload.nptrs));
-
- // perform the selection!
- q = selectee->payload[offset];
-
- /* if we're already in to-space, there's no need to continue
- * with the evacuation, just update the source address with
- * a pointer to the (evacuated) constructor field.
- */
- if (HEAP_ALLOCED(q)) {
- bdescr *bd = Bdescr((P_)q);
- if (bd->flags & BF_EVACUATED) {
- if (bd->gen_no < evac_gen) {
- failed_to_evac = rtsTrue;
- TICK_GC_FAILED_PROMOTION();
- }
- return q;
- }
- }
+ StgClosure *p;
- /* otherwise, carry on and evacuate this constructor field,
- * (but not the constructor itself)
- */
- goto loop;
+ if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
+ return copy(q,THUNK_SELECTOR_sizeW(),stp);
}
- case IND:
- case IND_STATIC:
- case IND_PERM:
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
- selectee = ((StgInd *)selectee)->indirectee;
- goto selector_loop;
-
- case EVACUATED:
- selectee = ((StgEvacuated *)selectee)->evacuee;
- goto selector_loop;
+ p = eval_thunk_selector(info->layout.selector_offset,
+ (StgSelector *)q);
- case THUNK_SELECTOR:
-# if 0
- /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
- something) to go into an infinite loop when the nightly
- stage2 compiles PrelTup.lhs. */
-
- /* we can't recurse indefinitely in evacuate(), so set a
- * limit on the number of times we can go around this
- * loop.
- */
- if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
- bdescr *bd;
- bd = Bdescr((P_)selectee);
- if (!bd->flags & BF_EVACUATED) {
- thunk_selector_depth++;
- selectee = evacuate(selectee);
- thunk_selector_depth--;
- goto selector_loop;
- }
- }
- // otherwise, fall through...
-# endif
-
- case AP_UPD:
- case THUNK:
- case THUNK_1_0:
- case THUNK_0_1:
- case THUNK_2_0:
- case THUNK_1_1:
- case THUNK_0_2:
- case THUNK_STATIC:
- case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
- case BLACKHOLE:
- case BLACKHOLE_BQ:
- // not evaluated yet
- break;
-
-#if defined(PAR)
- // a copy of the top-level cases below
- case RBH: // cf. BLACKHOLE_BQ
- {
- //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
- to = copy(q,BLACKHOLE_sizeW(),stp);
- //ToDo: derive size etc from reverted IP
- //to = copy(q,size,stp);
- // recordMutable((StgMutClosure *)to);
- return to;
- }
-
- case BLOCKED_FETCH:
- ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
- to = copy(q,sizeofW(StgBlockedFetch),stp);
- return to;
-
-# ifdef DIST
- case REMOTE_REF:
-# endif
- case FETCH_ME:
- ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
- to = copy(q,sizeofW(StgFetchMe),stp);
- return to;
-
- case FETCH_ME_BQ:
- ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
- to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
- return to;
-#endif
-
- default:
- barf("evacuate: THUNK_SELECTOR: strange selectee %d",
- (int)(selectee_info->type));
- }
+ if (p == NULL) {
+ return copy(q,THUNK_SELECTOR_sizeW(),stp);
+ } else {
+ // q is still BLACKHOLE'd.
+ thunk_selector_depth++;
+ p = evacuate(p);
+ thunk_selector_depth--;
+ upd_evacuee(q,p);
+ return p;
+ }
}
- return copy(q,THUNK_SELECTOR_sizeW(),stp);
case IND:
case IND_OLDGEN:
*/
if (evac_gen > 0) { // optimisation
StgClosure *p = ((StgEvacuated*)q)->evacuee;
- if (Bdescr((P_)p)->gen_no < evac_gen) {
+ if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
}
/* -----------------------------------------------------------------------------
+ Evaluate a THUNK_SELECTOR if possible.
+
+ returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or
+ a closure pointer if we evaluated it and this is the result. Note
+ that "evaluating" the THUNK_SELECTOR doesn't necessarily mean
+ reducing it to HNF, just that we have eliminated the selection.
+ The result might be another thunk, or even another THUNK_SELECTOR.
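+   (For example, a selector thunk for (fst p), where p is already the
+   pair constructor (a,b), "evaluates" here to the closure for a,
+   which may itself be an arbitrary unevaluated thunk.)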
+
+ If the return value is non-NULL, the original selector thunk has
+ been BLACKHOLE'd, and should be updated with an indirection or a
+ forwarding pointer. If the return value is NULL, then the selector
+ thunk is unchanged.
+ -------------------------------------------------------------------------- */
+
+static StgClosure *
+eval_thunk_selector( nat field, StgSelector * p )
+{
+ StgInfoTable *info;
+ const StgInfoTable *info_ptr;
+ StgClosure *selectee;
+
+ selectee = p->selectee;
+
+ // Save the real info pointer (NOTE: not the same as get_itbl()).
+ info_ptr = p->header.info;
+
+ // If the THUNK_SELECTOR is in a generation that we are not
+ // collecting, then bail out early. We won't be able to save any
+ // space in any case, and updating with an indirection is trickier
+ // in an old gen.
+ if (Bdescr((StgPtr)p)->gen_no > N) {
+ return NULL;
+ }
+
+ // BLACKHOLE the selector thunk, since it is now under evaluation.
+ // This is important to stop us going into an infinite loop if
+ // this selector thunk eventually refers to itself.
+ SET_INFO(p,&stg_BLACKHOLE_info);
+
+selector_loop:
+
+ info = get_itbl(selectee);
+ switch (info->type) {
+ case CONSTR:
+ case CONSTR_1_0:
+ case CONSTR_0_1:
+ case CONSTR_2_0:
+ case CONSTR_1_1:
+ case CONSTR_0_2:
+ case CONSTR_STATIC:
+ case CONSTR_NOCAF_STATIC:
+      // check that the field offset is within the constructor's payload
+ ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
+ info->layout.payload.nptrs));
+
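+      // perform the selection: pull the field straight out of the
+      // constructor's payload.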
+ return selectee->payload[field];
+
+ case IND:
+ case IND_STATIC:
+ case IND_PERM:
+ case IND_OLDGEN:
+ case IND_OLDGEN_PERM:
+ selectee = ((StgInd *)selectee)->indirectee;
+ goto selector_loop;
+
+ case EVACUATED:
+ // We don't follow pointers into to-space; the constructor
+      // has already been evacuated, so evaluating this selector
+      // thunk wouldn't plug any space leak anyhow.
+ break;
+
+ case THUNK_SELECTOR:
+ {
+ StgClosure *val;
+
+ // check that we don't recurse too much, re-using the
+ // depth bound also used in evacuate().
+ thunk_selector_depth++;
+ if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
+ break;
+ }
+
+ val = eval_thunk_selector(info->layout.selector_offset,
+ (StgSelector *)selectee);
+
+ thunk_selector_depth--;
+
+ if (val == NULL) {
+ break;
+ } else {
+ // We evaluated this selector thunk, so update it with
+ // an indirection. NOTE: we don't use UPD_IND here,
+ // because we are guaranteed that p is in a generation
+ // that we are collecting, and we never want to put the
+ // indirection on a mutable list.
+ ((StgInd *)selectee)->indirectee = val;
+ SET_INFO(selectee,&stg_IND_info);
+ selectee = val;
+ goto selector_loop;
+ }
+ }
+
+ case AP_UPD:
+ case THUNK:
+ case THUNK_1_0:
+ case THUNK_0_1:
+ case THUNK_2_0:
+ case THUNK_1_1:
+ case THUNK_0_2:
+ case THUNK_STATIC:
+ case CAF_BLACKHOLE:
+ case SE_CAF_BLACKHOLE:
+ case SE_BLACKHOLE:
+ case BLACKHOLE:
+ case BLACKHOLE_BQ:
+#if defined(PAR)
+ case RBH:
+ case BLOCKED_FETCH:
+# ifdef DIST
+ case REMOTE_REF:
+# endif
+ case FETCH_ME:
+ case FETCH_ME_BQ:
+#endif
+ // not evaluated yet
+ break;
+
+ default:
+ barf("eval_thunk_selector: strange selectee %d",
+ (int)(info->type));
+ }
+
+ // We didn't manage to evaluate this thunk; restore the old info pointer
+ SET_INFO(p, info_ptr);
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
move_TSO is called to update the TSO structure after it has been
moved from one place to another.
-------------------------------------------------------------------------- */
info = get_itbl((StgClosure *)p);
ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
+ ASSERT(thunk_selector_depth == 0);
+
q = p;
switch (info->type) {
}
case IND_PERM:
- if (stp->gen_no != 0) {
- SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
- }
+ if (stp->gen->no != 0) {
+#ifdef PROFILING
+ // @LDV profiling
+ // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
+ // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
+ LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
+#endif
+ //
+ // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
+ //
+ SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that p has just been created.
+ LDV_recordCreate((StgClosure *)p);
+#endif
+ }
// fall through
case IND_OLDGEN_PERM:
((StgIndOldGen *)p)->indirectee =
}
void
-scavengeCAFs( void )
+markCAFs( evac_fn evac )
{
StgIndStatic *c;
- evac_gen = 0;
for (c = (StgIndStatic *)caf_list; c != NULL;
c = (StgIndStatic *)c->static_link)
{
- c->indirectee = evacuate(c->indirectee);
+ evac(&c->indirectee);
}
}
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh is now dead.
+ LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
+#endif
SET_INFO(bh,&stg_BLACKHOLE_info);
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh has just been created.
+ LDV_recordCreate(bh);
+#endif
}
update_frame = update_frame->link;
}
}
#endif
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh is now dead.
+ LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
+#endif
+ //
+ // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
+ //
SET_INFO(bh,&stg_BLACKHOLE_info);
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh has just been created.
+ LDV_recordCreate(bh);
+#endif
}
}