/* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.72 2000/01/22 18:00:03 simonmar Exp $
+ * $Id: GC.c,v 1.86 2000/11/01 11:41:47 simonmar Exp $
*
* (c) The GHC Team 1998-1999
*
#include "SchedAPI.h"
#include "Weak.h"
#include "StablePriv.h"
+#include "Prelude.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
# endif
#endif
-
-StgCAF* enteredCAFs;
+#if defined(GHCI)
+# include "HsFFI.h"
+# include "Linker.h"
+#endif
+#if defined(RTS_GTK_FRONTPANEL)
+#include "FrontPanel.h"
+#endif
//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST
*/
static nat evac_gen;
-/* WEAK POINTERS
+/* Weak pointers
*/
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done; /* all done for this pass */
+/* List of all threads during GC
+ */
+static StgTSO *old_all_threads;
+static StgTSO *resurrected_threads;
+
/* Flag indicating failure to evacuate an object to the desired
* generation.
*/
static StgClosure * evacuate ( StgClosure *q );
static void zero_static_object_list ( StgClosure* first_static );
static void zero_mutable_list ( StgMutClosure *first );
-static void revert_dead_CAFs ( void );
static rtsBool traverse_weak_ptr_list ( void );
static void cleanup_weak_ptr_list ( StgWeak **list );
-------------------------------------------------------------------------- */
//@cindex GarbageCollect
-void GarbageCollect(void (*get_roots)(void))
+void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
{
bdescr *bd;
step *step;
#if defined(DEBUG) && defined(GRAN)
IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
- Now, Now))
+ Now, Now));
#endif
/* tell the stats department that we've started a GC */
CCCS = CCS_GC;
#endif
- /* Approximate how much we allocated */
+ /* Approximate how much we allocated.
+ * Todo: only when generating stats?
+ */
allocated = calcAllocated();
/* Figure out which generation to collect
*/
- N = 0;
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
- N = g;
+ if (force_major_gc) {
+ N = RtsFlags.GcFlags.generations - 1;
+ major_gc = rtsTrue;
+ } else {
+ N = 0;
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
+ N = g;
+ }
}
+ major_gc = (N == RtsFlags.GcFlags.generations-1);
+ }
+
+#ifdef RTS_GTK_FRONTPANEL
+ if (RtsFlags.GcFlags.frontpanel) {
+ updateFrontPanelBeforeGC(N);
}
- major_gc = (N == RtsFlags.GcFlags.generations-1);
+#endif
/* check stack sanity *before* GC (ToDo: check all threads) */
#if defined(GRAN)
// ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
#endif
- IF_DEBUG(sanity, checkFreeListSanity());
+ IF_DEBUG(sanity, checkFreeListSanity());
/* Initialise the static object lists
*/
weak_ptr_list = NULL;
weak_done = rtsFalse;
+ /* The all_threads list is like the weak_ptr_list.
+ * See traverse_weak_ptr_list() for the details.
+ */
+ old_all_threads = all_threads;
+ all_threads = END_TSO_QUEUE;
+ resurrected_threads = END_TSO_QUEUE;
+
/* Mark the stable pointer table.
*/
markStablePtrTable(major_gc);
/* scavenge static objects */
if (major_gc && static_objects != END_OF_STATIC_LIST) {
+ IF_DEBUG(sanity,
+ checkStaticObjects());
scavenge_static();
}
*/
gcStablePtrTable(major_gc);
- /* revert dead CAFs and update enteredCAFs list */
- revert_dead_CAFs();
-
+#if defined(PAR)
+ /* Reconstruct the Global Address tables used in GUM */
+ rebuildGAtables(major_gc);
+ IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
+ IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
+#endif
+
/* Set the maximum blocks for the oldest generation, based on twice
* the amount of live data now, adjusted to fit the maximum heap
* size if necessary.
*/
resetNurseries();
-#if defined(PAR)
- /* Reconstruct the Global Address tables used in GUM */
- RebuildGAtables(major_gc);
-#endif
-
/* start any pending finalizers */
scheduleFinalizers(old_weak_ptr_list);
+ /* send exceptions to any threads which were about to die */
+ resurrectThreads(resurrected_threads);
+
/* check sanity after GC */
IF_DEBUG(sanity, checkSanity(N));
/* check for memory leaks if sanity checking is on */
IF_DEBUG(sanity, memInventory());
+#ifdef RTS_GTK_FRONTPANEL
+  /* Mirror of the before-GC notification above: both front-panel hooks
+   * are guarded by RTS_GTK_FRONTPANEL (see the FrontPanel.h include) and
+   * keyed off RtsFlags.GcFlags.frontpanel.  The previous RTS_GTK_VISUALS /
+   * .visuals spelling matched nothing and left this code dead.
+   */
+  if (RtsFlags.GcFlags.frontpanel) {
+    updateFrontPanelAfterGC( N, live );
+  }
+#endif
+
/* ok, GC over: tell the stats department what happened. */
stat_endGC(allocated, collected, live, copied, N);
}
continue;
}
}
-
+
+ /* Now deal with the all_threads list, which behaves somewhat like
+ * the weak ptr list. If we discover any threads that are about to
+ * become garbage, we wake them up and administer an exception.
+ */
+ {
+ StgTSO *t, *tmp, *next, **prev;
+
+ prev = &old_all_threads;
+ for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+
+ /* Threads which have finished or died get dropped from
+ * the list.
+ */
+      switch (t->what_next) {
+      case ThreadRelocated:
+	  /* TSO was moved (stack enlarged); unlink via t->link. */
+	  next = t->link;
+	  *prev = next;
+	  continue;
+      case ThreadKilled:
+      case ThreadComplete:
+	  /* Finished or died: drop it from the all_threads list. */
+	  next = t->global_link;
+	  *prev = next;
+	  continue;
+      default:
+	  /* Still running/blocked: fall through to the liveness check
+	   * below.  ISO C requires a statement after a label, so the
+	   * bare `default:` immediately before `}` was a compile error.
+	   */
+	  break;
+      }
+
+ /* Threads which have already been determined to be alive are
+ * moved onto the all_threads list.
+ */
+ (StgClosure *)tmp = isAlive((StgClosure *)t);
+ if (tmp != NULL) {
+ next = tmp->global_link;
+ tmp->global_link = all_threads;
+ all_threads = tmp;
+ *prev = next;
+ } else {
+ prev = &(t->global_link);
+ next = t->global_link;
+ }
+ }
+ }
+
/* If we didn't make any changes, then we can go round and kill all
* the dead weak pointers. The old_weak_ptr list is used as a list
* of pending finalizers later on.
for (w = old_weak_ptr_list; w; w = w->link) {
w->finalizer = evacuate(w->finalizer);
}
+
+ /* And resurrect any threads which were about to become garbage.
+ */
+ {
+ StgTSO *t, *tmp, *next;
+ for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->global_link;
+ (StgClosure *)tmp = evacuate((StgClosure *)t);
+ tmp->global_link = resurrected_threads;
+ resurrected_threads = tmp;
+ }
+ }
+
weak_done = rtsTrue;
}
isAlive(StgClosure *p)
{
const StgInfoTable *info;
+ nat size;
while (1) {
* for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
*/
-#if 1 || !defined(PAR)
/* ignore closures in generations that we're not collecting. */
- /* In GUM we use this routine when rebuilding GA tables; for some
- reason it has problems with the LOOKS_LIKE_STATIC macro -- HWL */
if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
return p;
}
-#endif
switch (info->type) {
/* alive! */
return ((StgEvacuated *)p)->evacuee;
+ case BCO:
+ size = bco_sizeW((StgBCO*)p);
+ goto large;
+
+ case ARR_WORDS:
+ size = arr_words_sizeW((StgArrWords *)p);
+ goto large;
+
+ case MUT_ARR_PTRS:
+ case MUT_ARR_PTRS_FROZEN:
+ size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
+ goto large;
+
+ case TSO:
+ if (((StgTSO *)p)->what_next == ThreadRelocated) {
+ p = (StgClosure *)((StgTSO *)p)->link;
+ continue;
+ }
+
+ size = tso_sizeW((StgTSO *)p);
+ large:
+ if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
+ && Bdescr((P_)p)->evacuated)
+ return p;
+ else
+ return NULL;
+
default:
/* dead. */
return NULL;
StgClosure *
MarkRoot(StgClosure *root)
{
+# if 0 && defined(PAR) && defined(DEBUG)
+ StgClosure *foo = evacuate(root);
+ // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
+ ASSERT(isAlive(foo)); // must be in to-space
+ return foo;
+# else
return evacuate(root);
+# endif
}
//@cindex addBlock
case IND_PERM:
case IND_OLDGEN:
case IND_OLDGEN_PERM:
- selectee = stgCast(StgInd *,selectee)->indirectee;
+ selectee = ((StgInd *)selectee)->indirectee;
goto selector_loop;
case CAF_ENTERED:
- selectee = stgCast(StgCAF *,selectee)->value;
+ selectee = ((StgCAF *)selectee)->value;
goto selector_loop;
case EVACUATED:
- selectee = stgCast(StgEvacuated*,selectee)->evacuee;
+ selectee = ((StgEvacuated *)selectee)->evacuee;
goto selector_loop;
+ case AP_UPD:
case THUNK:
case THUNK_1_0:
case THUNK_0_1:
case AP_UPD:
case PAP:
- /* these are special - the payload is a copy of a chunk of stack,
- tagging and all. */
- return copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);
+ /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
+ * of stack, tagging and all.
+ *
+ * They can be larger than a block in size. Both are only
+ * allocated via allocate(), so they should be chained on to the
+ * large_object list.
+ */
+ {
+ nat size = pap_sizeW((StgPAP*)q);
+ if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
+ evacuate_large((P_)q, rtsFalse);
+ return q;
+ } else {
+ return copy(q,size,step);
+ }
+ }
case EVACUATED:
/* Already evacuated, just return the forwarding address.
case ARR_WORDS:
{
- nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
+ nat size = arr_words_sizeW((StgArrWords *)q);
if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
evacuate_large((P_)q, rtsFalse);
case MUT_ARR_PTRS:
case MUT_ARR_PTRS_FROZEN:
{
- nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
+ nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);
if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
/* Deal with redirected TSOs (a TSO that's had its stack enlarged).
*/
- if (tso->whatNext == ThreadRelocated) {
+ if (tso->what_next == ThreadRelocated) {
q = (StgClosure *)tso->link;
goto loop;
}
/* relocate the stack pointers... */
new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
new_tso->sp = (StgPtr)new_tso->sp + diff;
- new_tso->splim = (StgPtr)new_tso->splim + diff;
relocate_TSO(tso, new_tso);
* srt field in the info table. That's ok, because we'll
* never dereference it.
*/
- srt = stgCast(StgClosure **,info->srt);
+ srt = (StgClosure **)(info->srt);
srt_end = srt + info->srt_len;
for (; srt < srt_end; srt++) {
/* Special-case to handle references to closures hiding out in DLLs, since
closure that's fixed at link-time, and no extra magic is required.
*/
#ifdef ENABLE_WIN32_DLL_SUPPORT
- if ( stgCast(unsigned long,*srt) & 0x1 ) {
+ if ( (unsigned long)(*srt) & 0x1 ) {
evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
} else {
evacuate(*srt);
(StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnBlackHole
- || tso->why_blocked == BlockedOnException) {
+ || tso->why_blocked == BlockedOnException
+#if defined(PAR)
+ || tso->why_blocked == BlockedOnGA
+ || tso->why_blocked == BlockedOnGA_NoSend
+#endif
+ ) {
tso->block_info.closure = evacuate(tso->block_info.closure);
}
if ( tso->blocked_exceptions != NULL ) {
case BCO:
{
- StgBCO* bco = stgCast(StgBCO*,p);
+ StgBCO* bco = (StgBCO *)p;
nat i;
for (i = 0; i < bco->n_ptrs; i++) {
bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
* evacuate the function pointer too...
*/
{
- StgPAP* pap = stgCast(StgPAP*,p);
+ StgPAP* pap = (StgPAP *)p;
pap->fun = evacuate(pap->fun);
scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
case ARR_WORDS:
/* nothing to follow */
- p += arr_words_sizeW(stgCast(StgArrWords*,p));
+ p += arr_words_sizeW((StgArrWords *)p);
break;
case MUT_ARR_PTRS:
#endif
case EVACUATED:
- barf("scavenge: unimplemented/strange closure type\n");
+ barf("scavenge: unimplemented/strange closure type %d @ %p",
+ info->type, p);
default:
- barf("scavenge");
+ barf("scavenge: unimplemented/strange closure type %d @ %p",
+ info->type, p);
}
/* If we didn't manage to promote all the objects pointed to by
break;
default:
- barf("scavenge_one: strange object");
+ barf("scavenge_one: strange object %d", (int)(info->type));
}
no_luck = failed_to_evac;
{
StgPtr end, q;
- IF_DEBUG(gc,
- belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS_FROZEN %p; size: %#x ; next: %p",
- p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));
-
end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
evac_gen = gen->no;
for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
{
StgPtr end, q;
- IF_DEBUG(gc,
- belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS %p; size: %#x ; next: %p",
- p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));
-
end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
(StgClosure *)*q = evacuate((StgClosure *)*q);
* it from the mutable list if possible by promoting whatever it
* points to.
*/
- IF_DEBUG(gc,
- belch("@@ scavenge_mut_list: scavenging MUT_VAR %p; var: %p ; next: %p",
- p, ((StgMutVar *)p)->var, p->mut_link));
-
ASSERT(p->header.info != &MUT_CONS_info);
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
p->mut_link = gen->mut_list;
case MVAR:
{
StgMVar *mvar = (StgMVar *)p;
-
- IF_DEBUG(gc,
- belch("@@ scavenge_mut_list: scavenging MAVR %p; head: %p; tail: %p; value: %p ; next: %p",
- mvar, mvar->head, mvar->tail, mvar->value, p->mut_link));
-
(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
case BLACKHOLE_BQ:
{
StgBlockingQueue *bh = (StgBlockingQueue *)p;
-
- IF_DEBUG(gc,
- belch("@@ scavenge_mut_list: scavenging BLACKHOLE_BQ (%p); next: %p",
- p, p->mut_link));
-
(StgClosure *)bh->blocking_queue =
evacuate((StgClosure *)bh->blocking_queue);
p->mut_link = gen->mut_list;
}
continue;
- // HWL: old PAR code deleted here
+#if defined(PAR)
+ // HWL: check whether all of these are necessary
+
+ case RBH: // cf. BLACKHOLE_BQ
+ {
+ // nat size, ptrs, nonptrs, vhs;
+ // char str[80];
+ // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
+ StgRBH *rbh = (StgRBH *)p;
+ (StgClosure *)rbh->blocking_queue =
+ evacuate((StgClosure *)rbh->blocking_queue);
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)rbh);
+ }
+ // ToDo: use size of reverted closure here!
+ p += BLACKHOLE_sizeW();
+ break;
+ }
+
+ case BLOCKED_FETCH:
+ {
+ StgBlockedFetch *bf = (StgBlockedFetch *)p;
+ /* follow the pointer to the node which is being demanded */
+ (StgClosure *)bf->node =
+ evacuate((StgClosure *)bf->node);
+ /* follow the link to the rest of the blocking queue */
+ (StgClosure *)bf->link =
+ evacuate((StgClosure *)bf->link);
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)bf);
+ }
+ p += sizeofW(StgBlockedFetch);
+ break;
+ }
+
+ case FETCH_ME:
+ p += sizeofW(StgFetchMe);
+ break; // nothing to do in this case
+
+ case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
+ {
+ StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
+ (StgClosure *)fmbq->blocking_queue =
+ evacuate((StgClosure *)fmbq->blocking_queue);
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)fmbq);
+ }
+ p += sizeofW(StgFetchMeBlockingQueue);
+ break;
+ }
+#endif
default:
/* shouldn't have anything else on the mutables list */
}
default:
- barf("scavenge_static");
+ barf("scavenge_static: strange closure %d", (int)(info->type));
}
ASSERT(failed_to_evac == rtsFalse);
- /* get the next static object from the list. Remeber, there might
+ /* get the next static object from the list. Remember, there might
* be more stuff on this list now that we've done some evacuating!
* (static_objects is a global)
*/
const StgInfoTable* info;
StgWord32 bitmap;
- IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
+ //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
/*
* Each time around this loop, we are looking at a chunk of stack
if (! LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
- ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
+ ASSERT(closure_STATIC((StgClosure *)q));
}
/* otherwise, must be a pointer into the allocation space. */
#endif
case STOP_FRAME:
case CATCH_FRAME:
case SEQ_FRAME:
- {
- // StgPtr old_p = p; // debugging only -- HWL
- /* stack frames like these are ordinary closures and therefore may
- contain setup-specific fixed-header words (as in GranSim!);
- therefore, these cases should not use p++ but &(p->payload) -- HWL */
- // IF_DEBUG(gran, IF_DEBUG(sanity, printObj(p)));
- bitmap = info->layout.bitmap;
-
- p = (StgPtr)&(((StgClosure *)p)->payload);
- // IF_DEBUG(sanity, belch("HWL: scavenge_stack: (STOP|CATCH|SEQ)_FRAME adjusting p from %p to %p (instead of %p)", old_p, p, old_p+1));
- goto small_bitmap;
- }
case RET_BCO:
case RET_SMALL:
case RET_VEC_SMALL:
}
default:
- barf("scavenge_stack: weird activation record found on stack.\n");
+ barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
}
}
}
dbl_link_onto(bd, &step->scavenged_large_objects);
p = bd->start;
- info = get_itbl(stgCast(StgClosure*,p));
+ info = get_itbl((StgClosure *)p);
switch (info->type) {
case BCO:
{
- StgBCO* bco = stgCast(StgBCO*,p);
+ StgBCO* bco = (StgBCO *)p;
nat i;
evac_gen = saved_evac_gen;
for (i = 0; i < bco->n_ptrs; i++) {
case TSO:
scavengeTSO((StgTSO *)p);
- // HWL: old PAR code deleted here
continue;
+ case AP_UPD:
+ case PAP:
+ {
+ StgPAP* pap = (StgPAP *)p;
+
+ evac_gen = saved_evac_gen; /* not really mutable */
+ pap->fun = evacuate(pap->fun);
+ scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
+ evac_gen = 0;
+ continue;
+ }
+
default:
- barf("scavenge_large: unknown/strange object");
+ barf("scavenge_large: unknown/strange object %d", (int)(info->type));
}
}
}
void RevertCAFs(void)
{
- while (enteredCAFs != END_CAF_LIST) {
- StgCAF* caf = enteredCAFs;
-
- enteredCAFs = caf->link;
- ASSERT(get_itbl(caf)->type == CAF_ENTERED);
- SET_INFO(caf,&CAF_UNENTERED_info);
- caf->value = stgCast(StgClosure*,0xdeadbeef);
- caf->link = stgCast(StgCAF*,0xdeadbeef);
- }
- enteredCAFs = END_CAF_LIST;
-}
-
-//@cindex revert_dead_CAFs
-
-void revert_dead_CAFs(void)
-{
- StgCAF* caf = enteredCAFs;
- enteredCAFs = END_CAF_LIST;
- while (caf != END_CAF_LIST) {
- StgCAF *next, *new;
- next = caf->link;
- new = (StgCAF*)isAlive((StgClosure*)caf);
- if (new) {
- new->link = enteredCAFs;
- enteredCAFs = new;
- } else {
- /* ASSERT(0); */
- SET_INFO(caf,&CAF_UNENTERED_info);
- caf->value = (StgClosure*)0xdeadbeef;
- caf->link = (StgCAF*)0xdeadbeef;
- }
- caf = next;
- }
+#ifdef INTERPRETER
+ StgInt i;
+
+ /* Deal with CAFs created by compiled code. */
+ for (i = 0; i < usedECafTable; i++) {
+ SET_INFO( (StgInd*)(ecafTable[i].closure), ecafTable[i].origItbl );
+ ((StgInd*)(ecafTable[i].closure))->indirectee = 0;
+ }
+
+ /* Deal with CAFs created by the interpreter. */
+ while (ecafList != END_ECAF_LIST) {
+ StgCAF* caf = ecafList;
+ ecafList = caf->link;
+ ASSERT(get_itbl(caf)->type == CAF_ENTERED);
+ SET_INFO(caf,&CAF_UNENTERED_info);
+ caf->value = (StgClosure *)0xdeadbeef;
+ caf->link = (StgCAF *)0xdeadbeef;
+ }
+
+ /* Empty out both the table and the list. */
+ clearECafTable();
+ ecafList = END_ECAF_LIST;
+#endif
}
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
switch (get_itbl(update_frame)->type) {
case CATCH_FRAME:
- update_frame = stgCast(StgCatchFrame*,update_frame)->link;
+ update_frame = ((StgCatchFrame *)update_frame)->link;
break;
case UPDATE_FRAME:
break;
case SEQ_FRAME:
- update_frame = stgCast(StgSeqFrame*,update_frame)->link;
+ update_frame = ((StgSeqFrame *)update_frame)->link;
break;
case STOP_FRAME:
* turned on.
* -------------------------------------------------------------------------- */
//@cindex threadPaused
-
void
threadPaused(StgTSO *tso)
{
{
StgMutClosure *p, *next;
- p = gen->saved_mut_list;
+ p = gen->mut_list;
next = p->mut_link;
- fprintf(stderr, "@@ Mutable list %p: ", gen->saved_mut_list);
+ fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
fprintf(stderr, "%p (%s), ",
p, info_type((StgClosure *)p));
}
fputc('\n', stderr);
}
+
+//@cindex maybeLarge
+static inline rtsBool
+maybeLarge(StgClosure *closure)
+{
+ StgInfoTable *info = get_itbl(closure);
+
+ /* closure types that may be found on the new_large_objects list;
+ see scavenge_large */
+ return (info->type == MUT_ARR_PTRS ||
+ info->type == MUT_ARR_PTRS_FROZEN ||
+ info->type == TSO ||
+ info->type == ARR_WORDS ||
+ info->type == BCO);
+}
+
+
#endif /* DEBUG */
//@node Index, , Pausing a thread
//* evacuate_large:: @cindex\s-+evacuate_large
//* gcCAFs:: @cindex\s-+gcCAFs
//* isAlive:: @cindex\s-+isAlive
+//* maybeLarge:: @cindex\s-+maybeLarge
//* mkMutCons:: @cindex\s-+mkMutCons
+//* printMutOnceList:: @cindex\s-+printMutOnceList
+//* printMutableList:: @cindex\s-+printMutableList
//* relocate_TSO:: @cindex\s-+relocate_TSO
-//* revert_dead_CAFs:: @cindex\s-+revert_dead_CAFs
//* scavenge:: @cindex\s-+scavenge
//* scavenge_large:: @cindex\s-+scavenge_large
//* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list