#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
+#include "RetainerProfile.h"
/* -----------------------------------------------------------------------------
Forward decls.
static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );
+static void checkSTACK (StgStack *stack);
/* -----------------------------------------------------------------------------
Check stack sanity
case CATCH_STM_FRAME:
case CATCH_FRAME:
// small bitmap cases (<= 32 entries)
+ case UNDERFLOW_FRAME:
case STOP_FRAME:
case RET_SMALL:
size = BITMAP_SIZE(info->i.layout.bitmap);
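// (BITMAP_SIZE and BITMAP_BITS unpack the frame size and the
// pointerhood bits that a small bitmap packs into one word; see
// InfoTables.h.)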
case CONSTR_0_2:
case CONSTR_2_0:
case IND_PERM:
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
case BLACKHOLE:
- case CAF_BLACKHOLE:
case PRIM:
case MUT_PRIM:
case MUT_VAR_CLEAN:
return sizeW_fromITBL(info);
}
+ case BLOCKING_QUEUE:
+ {
+ StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+ // NO: the BH might have been updated now
+ // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));
+
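+ // The invariants below: a blocking queue's owner must be the TSO
+ // that owns the blackhole, its queue a chain of MSG_BLACKHOLE
+ // messages, and link the next queue on the owner's tso->bq list --
+ // where, presumably, an already-woken entry can have been
+ // overwritten with an IND.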
+ ASSERT(get_itbl(bq->owner)->type == TSO);
+ ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
+ || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
+ ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
+ get_itbl(bq->link)->type == IND ||
+ get_itbl(bq->link)->type == BLOCKING_QUEUE);
+
+ return sizeofW(StgBlockingQueue);
+ }
+
case BCO: {
StgBCO *bco = (StgBCO *)p;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
case RET_BIG:
case RET_DYN:
case UPDATE_FRAME:
+ case UNDERFLOW_FRAME:
case STOP_FRAME:
case CATCH_FRAME:
case ATOMICALLY_FRAME:
case TSO:
checkTSO((StgTSO *)p);
- return tso_sizeW((StgTSO *)p);
+ return sizeofW(StgTSO);
+
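+ // The stack is now a heap object in its own right, checked and
+ // sized separately from the fixed-size TSO above.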
+ case STACK:
+ checkSTACK((StgStack*)p);
+ return stack_sizeW((StgStack*)p);
case TREC_CHUNK:
{
#endif
for (; bd != NULL; bd = bd->link) {
- p = bd->start;
- while (p < bd->free) {
- nat size = checkClosure((StgClosure *)p);
- /* This is the smallest size of closure that can live in the heap */
- ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
- p += size;
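+ // Blocks swept in place (BF_SWEPT) can presumably contain gaps of
+ // free memory that do not parse as closures, so only unswept
+ // blocks are walked linearly here.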
+ if(!(bd->flags & BF_SWEPT)) {
+ p = bd->start;
+ while (p < bd->free) {
+ nat size = checkClosure((StgClosure *)p);
+ /* This is the smallest size of closure that can live in the heap */
+ ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
+ p += size;
- /* skip over slop */
- while (p < bd->free &&
- (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
+ /* skip over slop */
+ while (p < bd->free &&
+ (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
+ }
+ }
}
}
}
}
}
-void
-checkTSO(StgTSO *tso)
+static void
+checkSTACK (StgStack *stack)
{
- StgPtr sp = tso->sp;
- StgPtr stack = tso->stack;
- StgOffset stack_size = tso->stack_size;
- StgPtr stack_end = stack + stack_size;
+ StgPtr sp = stack->sp;
+ StgOffset stack_size = stack->stack_size;
+ StgPtr stack_end = stack->stack + stack_size;
- if (tso->what_next == ThreadRelocated) {
- checkTSO(tso->_link);
- return;
- }
+ ASSERT(stack->stack <= sp && sp <= stack_end);
+
+ checkStackChunk(sp, stack_end);
+}
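+
+ // A thread's stack now lives outside the TSO in one or more STACK
+ // objects (chained by UNDERFLOW_FRAMEs once a stack has grown), so
+ // a TSO no longer moves when its stack is enlarged -- which is why
+ // the ThreadRelocated case above is gone.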
+void
+checkTSO(StgTSO *tso)
+{
if (tso->what_next == ThreadKilled) {
/* The garbage collector doesn't bother following any pointers
 * from dead threads, so don't check sanity here.
 */
return;
}
- ASSERT(stack <= sp && sp < stack_end);
+ ASSERT(tso->_link == END_TSO_QUEUE ||
+ tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
+ tso->_link->header.info == &stg_TSO_info);
- checkStackChunk(sp, stack_end);
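+ // block_info is a union (see TSO.h); it holds a closure pointer
+ // only in the states tested below -- for BlockedOnRead, say, it is
+ // a file descriptor -- so it is sanity-checked only when it should
+ // look like a closure.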
+ if ( tso->why_blocked == BlockedOnMVar
+ || tso->why_blocked == BlockedOnBlackHole
+ || tso->why_blocked == BlockedOnMsgThrowTo
+ || tso->why_blocked == NotBlocked
+ ) {
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
+ }
+
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));
+
+ // XXX are we checking the stack twice?
+ checkSTACK(tso->stackobj);
}
/*
Check that all TSOs have been evacuated.
Optionally also check the sanity of the TSOs.
*/
if (checkTSOs)
checkTSO(tso);
- while (tso->what_next == ThreadRelocated) {
- tso = tso->_link;
- }
-
// If this TSO is dirty and in an old generation, it better
// be on the mutable list.
- if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
+ if (tso->dirty) {
ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
tso->flags &= ~TSO_MARKED;
}
reportUnmarkedBlocks();
}
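+
+ // The run queue is doubly linked: tso->_link points forwards and
+ // tso->block_info.prev points backwards, so one walk can check both
+ // directions and that cap->run_queue_tl is really the last TSO.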
+void
+checkRunQueue(Capability *cap)
+{
+ StgTSO *prev, *tso;
+ prev = END_TSO_QUEUE;
+ for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
+ prev = tso, tso = tso->_link) {
+ ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
+ ASSERT(tso->block_info.prev == prev);
+ }
+ ASSERT(cap->run_queue_tl == prev);
+}
/* -----------------------------------------------------------------------------
Memory leak detection