#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
+#include "RetainerProfile.h"
/* -----------------------------------------------------------------------------
Forward decls.
-------------------------------------------------------------------------- */
static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );
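+// TSO stacks are now separate STACK objects; checkSTACK sanity-checks one stack chunk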
+static void checkSTACK (StgStack *stack);
/* -----------------------------------------------------------------------------
Check stack sanity
-------------------------------------------------------------------------- */
case CATCH_STM_FRAME:
case CATCH_FRAME:
// small bitmap cases (<= 32 entries)
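+ // (an UNDERFLOW_FRAME ends a stack chunk and links to the next chunk)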
+ case UNDERFLOW_FRAME:
case STOP_FRAME:
case RET_SMALL:
size = BITMAP_SIZE(info->i.layout.bitmap);
case CONSTR_0_2:
case CONSTR_2_0:
case IND_PERM:
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
case BLACKHOLE:
- case CAF_BLACKHOLE:
- case STABLE_NAME:
+ case PRIM:
+ case MUT_PRIM:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
case CONSTR_STATIC:
return sizeW_fromITBL(info);
}
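+ // a BLOCKING_QUEUE holds the threads blocked on one black hole;
+ // it is owned by a single TSO (bq->owner)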
+ case BLOCKING_QUEUE:
+ {
+ StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+ // NO: the BH might have been updated now
+ // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));
+
+ ASSERT(get_itbl(bq->owner)->type == TSO);
+ ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
+ || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
+ ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
+ get_itbl(bq->link)->type == IND ||
+ get_itbl(bq->link)->type == BLOCKING_QUEUE);
+
+ return sizeofW(StgBlockingQueue);
+ }
+
case BCO: {
StgBCO *bco = (StgBCO *)p;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
case RET_BIG:
case RET_DYN:
case UPDATE_FRAME:
+ case UNDERFLOW_FRAME:
case STOP_FRAME:
case CATCH_FRAME:
case ATOMICALLY_FRAME:
case TSO:
checkTSO((StgTSO *)p);
- return tso_sizeW((StgTSO *)p);
+ return sizeofW(StgTSO);
+
-
- case TVAR_WATCH_QUEUE:
- {
- StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
- return sizeofW(StgTVarWatchQueue);
- }
-
- case INVARIANT_CHECK_QUEUE:
- {
- StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
- return sizeofW(StgInvariantCheckQueue);
- }
- case ATOMIC_INVARIANT:
- {
- StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
- return sizeofW(StgAtomicInvariant);
- }
-
- case TVAR:
- {
- StgTVar *tv = (StgTVar *)p;
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
- return sizeofW(StgTVar);
- }
+ case STACK:
+ checkSTACK((StgStack*)p);
+ return stack_sizeW((StgStack*)p);
case TREC_CHUNK:
{
return sizeofW(StgTRecChunk);
}
-
- case TREC_HEADER:
- {
- StgTRecHeader *trec = (StgTRecHeader *)p;
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
- return sizeofW(StgTRecHeader);
- }
default:
barf("checkClosure (closure type %d)", info->type);
#endif
for (; bd != NULL; bd = bd->link) {
- p = bd->start;
- while (p < bd->free) {
- nat size = checkClosure((StgClosure *)p);
- /* This is the smallest size of closure that can live in the heap */
- ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
- p += size;
+ if(!(bd->flags & BF_SWEPT)) {
+ p = bd->start;
+ while (p < bd->free) {
+ nat size = checkClosure((StgClosure *)p);
+ /* This is the smallest size of closure that can live in the heap */
+ ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
+ p += size;
- /* skip over slop */
- while (p < bd->free &&
- (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
+ /* skip over slop */
+ while (p < bd->free &&
+ (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
+ }
+ }
}
}
}
}
}
-void
-checkTSO(StgTSO *tso)
+static void
+checkSTACK (StgStack *stack)
{
- StgPtr sp = tso->sp;
- StgPtr stack = tso->stack;
- StgOffset stack_size = tso->stack_size;
- StgPtr stack_end = stack + stack_size;
+ StgPtr sp = stack->sp;
+ StgOffset stack_size = stack->stack_size;
+ StgPtr stack_end = stack->stack + stack_size;
- if (tso->what_next == ThreadRelocated) {
- checkTSO(tso->_link);
- return;
- }
+ ASSERT(stack->stack <= sp && sp <= stack_end);
+
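+ // walk the frames between sp and the end of this chunk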
+ checkStackChunk(sp, stack_end);
+}
+
+void
+checkTSO(StgTSO *tso)
+{
if (tso->what_next == ThreadKilled) {
/* The garbage collector doesn't bother following any pointers
* from dead threads, so don't check sanity here.
*/
return;
}
- ASSERT(stack <= sp && sp < stack_end);
+ ASSERT(tso->_link == END_TSO_QUEUE ||
+ tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
+ tso->_link->header.info == &stg_TSO_info);
- checkStackChunk(sp, stack_end);
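+ // in these states block_info.closure must look like a valid closure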
+ if ( tso->why_blocked == BlockedOnMVar
+ || tso->why_blocked == BlockedOnBlackHole
+ || tso->why_blocked == BlockedOnMsgThrowTo
+ || tso->why_blocked == NotBlocked
+ ) {
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
+ }
+
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));
+
+ // XXX are we checking the stack twice?
+ checkSTACK(tso->stackobj);
}
/*
Check that all TSOs have been evacuated.
Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
StgTSO *tso;
- nat s;
+ nat g;
- for (s = 0; s < total_steps; s++) {
- for (tso=all_steps[s].threads; tso != END_TSO_QUEUE;
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (tso=generations[g].threads; tso != END_TSO_QUEUE;
tso = tso->global_link) {
ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
ASSERT(get_itbl(tso)->type == TSO);
if (checkTSOs)
checkTSO(tso);
- while (tso->what_next == ThreadRelocated) {
- tso = tso->_link;
- }
-
// If this TSO is dirty and in an old generation, it better
// be on the mutable list.
- if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
+ if (tso->dirty) {
ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
tso->flags &= ~TSO_MARKED;
}
/* Nursery sanity check */
void
-checkNurserySanity( step *stp )
+checkNurserySanity (nursery *nursery)
{
bdescr *bd, *prev;
nat blocks = 0;
prev = NULL;
- for (bd = stp->blocks; bd != NULL; bd = bd->link) {
+ for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
ASSERT(bd->u.back == prev);
prev = bd;
blocks += bd->blocks;
}
- ASSERT(blocks == stp->n_blocks);
- ASSERT(countBlocks(stp->large_objects) == stp->n_large_blocks);
+ ASSERT(blocks == nursery->n_blocks);
}
void
checkSanity( rtsBool check_heap )
{
- nat g, s;
+ nat g, n;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
- continue;
- }
- ASSERT(countBlocks(generations[g].steps[s].blocks)
- == generations[g].steps[s].n_blocks);
- ASSERT(countBlocks(generations[g].steps[s].large_objects)
- == generations[g].steps[s].n_large_blocks);
- if (check_heap) {
- checkHeap(generations[g].steps[s].blocks);
- }
- checkLargeObjects(generations[g].steps[s].large_objects);
+ ASSERT(countBlocks(generations[g].blocks)
+ == generations[g].n_blocks);
+ ASSERT(countBlocks(generations[g].large_objects)
+ == generations[g].n_large_blocks);
+ if (check_heap) {
+ checkHeap(generations[g].blocks);
}
+ checkLargeObjects(generations[g].large_objects);
}
- for (s = 0; s < n_capabilities; s++) {
- checkNurserySanity(&nurseries[s]);
+ for (n = 0; n < n_capabilities; n++) {
+ checkNurserySanity(&nurseries[n]);
}
checkFreeListSanity();
static void
findMemoryLeak (void)
{
- nat g, s, i;
+ nat g, i;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (i = 0; i < n_capabilities; i++) {
markBlocks(capabilities[i].mut_lists[g]);
}
markBlocks(generations[g].mut_list);
- for (s = 0; s < generations[g].n_steps; s++) {
- markBlocks(generations[g].steps[s].blocks);
- markBlocks(generations[g].steps[s].large_objects);
- }
+ markBlocks(generations[g].blocks);
+ markBlocks(generations[g].large_objects);
}
for (i = 0; i < n_capabilities; i++) {
markBlocks(nurseries[i].blocks);
- markBlocks(nurseries[i].large_objects);
}
#ifdef PROFILING
reportUnmarkedBlocks();
}
+void
+checkRunQueue(Capability *cap)
+{
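+ // the run queue is doubly linked: _link points forwards,
+ // block_info.prev points backwards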
+ StgTSO *prev, *tso;
+ prev = END_TSO_QUEUE;
+ for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
+ prev = tso, tso = tso->_link) {
+ ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
+ ASSERT(tso->block_info.prev == prev);
+ }
+ ASSERT(cap->run_queue_tl == prev);
+}
/* -----------------------------------------------------------------------------
Memory leak detection
-------------------------------------------------------------------------- */
static lnat
-stepBlocks (step *stp)
+genBlocks (generation *gen)
{
- ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
- ASSERT(countBlocks(stp->large_objects) == stp->n_large_blocks);
- return stp->n_blocks + stp->n_old_blocks +
- countAllocdBlocks(stp->large_objects);
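+ // a generation's total: live blocks, plus old (pre-GC) blocks, plus large objects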
+ ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
+ ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
+ return gen->n_blocks + gen->n_old_blocks +
+ countAllocdBlocks(gen->large_objects);
}
void
memInventory (rtsBool show)
{
- nat g, s, i;
- step *stp;
+ nat g, i;
lnat gen_blocks[RtsFlags.GcFlags.generations];
lnat nursery_blocks, retainer_blocks,
arena_blocks, exec_blocks;
gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
}
gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
- for (s = 0; s < generations[g].n_steps; s++) {
- stp = &generations[g].steps[s];
- gen_blocks[g] += stepBlocks(stp);
- }
+ gen_blocks[g] += genBlocks(&generations[g]);
}
nursery_blocks = 0;
for (i = 0; i < n_capabilities; i++) {
- nursery_blocks += stepBlocks(&nurseries[i]);
+ ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
+ nursery_blocks += nurseries[i].n_blocks;
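+ // nurseries no longer hold large objects, so n_blocks is the whole count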
}
retainer_blocks = 0;