/* -----------------------------------------------------------------------------
- * $Id: Sanity.c,v 1.6 1999/01/19 16:56:50 simonm Exp $
+ * $Id: Sanity.c,v 1.20 2000/04/12 09:34:46 sewardj Exp $
+ *
+ * (c) The GHC Team, 1998-1999
*
* Sanity checking code for the heap and stack.
*
*
* ---------------------------------------------------------------------------*/
+//@menu
+//* Includes::
+//* Macros::
+//* Stack sanity::
+//* Heap Sanity::
+//* TSO Sanity::
+//* Thread Queue Sanity::
+//* Blackhole Sanity::
+//* GALA table sanity::
+//* Index::
+//@end menu
+
+//@node Includes, Macros
+//@subsection Includes
+
#include "Rts.h"
-#ifdef DEBUG
+#ifdef DEBUG /* whole file */
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "BlockAlloc.h"
#include "Sanity.h"
+#include "StoragePriv.h" // for END_OF_STATIC_LIST
+
+//@node Macros, Stack sanity, Includes
+//@subsection Macros
+
+#define LOOKS_LIKE_PTR(r) ((LOOKS_LIKE_STATIC_CLOSURE(r) || \
+ ((HEAP_ALLOCED(r) && Bdescr((P_)r)->free != (void *)-1))) && \
+ ((StgWord)(*(StgPtr)r)!=0xaaaaaaaa))
-#define LOOKS_LIKE_PTR(r) \
- (IS_DATA_PTR(r) || ((IS_USER_PTR(r) && Bdescr((P_)r)->free != (void *)-1)))
+//@node Stack sanity, Heap Sanity, Macros
+//@subsection Stack sanity
/* -----------------------------------------------------------------------------
Check stack sanity
void checkStackChunk( StgPtr sp, StgPtr stack_end );
-static StgOffset checkSmallBitmap( StgPtr payload, StgNat32 bitmap );
+static StgOffset checkSmallBitmap( StgPtr payload, StgWord32 bitmap );
static StgOffset checkLargeBitmap( StgPtr payload,
StgLargeBitmap* large_bitmap );
void checkClosureShallow( StgClosure* p );
+//@cindex checkSmallBitmap
static StgOffset
-checkSmallBitmap( StgPtr payload, StgNat32 bitmap )
+checkSmallBitmap( StgPtr payload, StgWord32 bitmap )
{
StgOffset i;
return i;
}
-
+//@cindex checkLargeBitmap
static StgOffset
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap )
{
- StgNat32 bmp;
+ StgWord32 bmp;
StgOffset i;
i = 0;
for (bmp=0; bmp<large_bitmap->size; bmp++) {
- StgNat32 bitmap = large_bitmap->bitmap[bmp];
+ StgWord32 bitmap = large_bitmap->bitmap[bmp];
for(; bitmap != 0; ++i, bitmap >>= 1 ) {
if ((bitmap & 1) == 0) {
checkClosure(stgCast(StgClosure*,payload[i]));
return i;
}
+//@cindex checkStackClosure
StgOffset
checkStackClosure( StgClosure* c )
{
switch (info->type) {
case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
{
- StgRetDyn* r = stgCast(StgRetDyn*,c);
+ StgRetDyn* r = (StgRetDyn *)c;
return sizeofW(StgRetDyn) +
checkSmallBitmap(r->payload,r->liveness);
}
case RET_BCO: /* small bitmap (<= 32 entries) */
case RET_SMALL:
case RET_VEC_SMALL:
+ return 1 + checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
+
case UPDATE_FRAME:
+ ASSERT(LOOKS_LIKE_PTR(((StgUpdateFrame*)c)->updatee));
case CATCH_FRAME:
- case STOP_FRAME:
case SEQ_FRAME:
- return sizeofW(StgClosure) +
- checkSmallBitmap((StgPtr)c->payload,info->layout.bitmap);
+ /* check that the link field points to another stack frame */
+ ASSERT(get_itbl(((StgFrame*)c)->link)->type == UPDATE_FRAME ||
+ get_itbl(((StgFrame*)c)->link)->type == CATCH_FRAME ||
+ get_itbl(((StgFrame*)c)->link)->type == STOP_FRAME ||
+ get_itbl(((StgFrame*)c)->link)->type == SEQ_FRAME);
+ /* fall through */
+ case STOP_FRAME:
+#if defined(GRAN)
+ return 2 +
+#else
+ return 1 +
+#endif
+ checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
case RET_BIG: /* large bitmap (> 32 entries) */
case RET_VEC_BIG:
- return sizeofW(StgClosure) +
- checkLargeBitmap((StgPtr)c->payload,
- info->layout.large_bitmap);
+ return 1 + checkLargeBitmap((StgPtr)c + 1,info->layout.large_bitmap);
case FUN:
case FUN_STATIC: /* probably a slow-entry point return address: */
- return 1;
+#if 0 && defined(GRAN)
+ return 2;
+#else
+ return 1;
+#endif
default:
/* if none of the above, maybe it's a closure which looks a
* little like an infotable
*/
- checkClosureShallow(*stgCast(StgClosure**,c));
+ checkClosureShallow(*(StgClosure **)c);
return 1;
/* barf("checkStackClosure: weird activation record found on stack (%p).",c); */
}
* chunks.
*/
+//@cindex checkClosureShallow
void
checkClosureShallow( StgClosure* p )
{
- ASSERT(LOOKS_LIKE_GHC_INFO(p->header.info));
+ ASSERT(p);
+ ASSERT(LOOKS_LIKE_GHC_INFO(p->header.info)
+ || IS_HUGS_CONSTR_INFO(GET_INFO(p)));
/* Is it a static closure (i.e. in the data segment)? */
if (LOOKS_LIKE_STATIC(p)) {
}
/* check an individual stack object */
+//@cindex checkStackObject
StgOffset
checkStackObject( StgPtr sp )
{
}
/* check sections of stack between update frames */
+//@cindex checkStackChunk
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
while (p < stack_end) {
p += checkStackObject( p );
}
- ASSERT( p == stack_end );
+ // ASSERT( p == stack_end ); -- HWL
}
+//@cindex checkStackChunk
StgOffset
checkClosure( StgClosure* p )
{
ASSERT(LOOKS_LIKE_PTR(mvar->head));
ASSERT(LOOKS_LIKE_PTR(mvar->tail));
ASSERT(LOOKS_LIKE_PTR(mvar->value));
+#if 0
+#if defined(PAR)
+ checkBQ((StgBlockingQueueElement *)mvar->head, p);
+#else
+ checkBQ(mvar->head, p);
+#endif
+#endif
return sizeofW(StgMVar);
}
- case FUN:
case THUNK:
+ case THUNK_1_0:
+ case THUNK_0_1:
+ case THUNK_1_1:
+ case THUNK_0_2:
+ case THUNK_2_0:
+ {
+ nat i;
+ for (i = 0; i < info->layout.payload.ptrs; i++) {
+ ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
+ }
+ return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
+ }
+
+ case BLACKHOLE_BQ:
+ checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
+ /* fall through to basic ptr check */
+ case FUN:
+ case FUN_1_0:
+ case FUN_0_1:
+ case FUN_1_1:
+ case FUN_0_2:
+ case FUN_2_0:
case CONSTR:
+ case CONSTR_1_0:
+ case CONSTR_0_1:
+ case CONSTR_1_1:
+ case CONSTR_0_2:
+ case CONSTR_2_0:
case IND_PERM:
case IND_OLDGEN:
case IND_OLDGEN_PERM:
case CAF_UNENTERED:
case CAF_ENTERED:
case CAF_BLACKHOLE:
+#ifdef TICKY_TICKY
+ case SE_CAF_BLACKHOLE:
+ case SE_BLACKHOLE:
+#endif
case BLACKHOLE:
- case BLACKHOLE_BQ:
case FOREIGN:
+ case STABLE_NAME:
case MUT_VAR:
case CONSTR_INTLIKE:
case CONSTR_CHARLIKE:
case CONSTR_NOCAF_STATIC:
case THUNK_STATIC:
case FUN_STATIC:
- case IND_STATIC:
{
nat i;
for (i = 0; i < info->layout.payload.ptrs; i++) {
- ASSERT(LOOKS_LIKE_PTR(payloadPtr(p,i)));
+ ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
}
return sizeW_fromITBL(info);
}
+ case IND_STATIC: /* (1, 0) closure */
+ ASSERT(LOOKS_LIKE_PTR(((StgIndStatic*)p)->indirectee));
+ return sizeW_fromITBL(info);
+
case WEAK:
/* deal with these specially - the info table isn't
* representative of the actual layout.
{ StgWeak *w = (StgWeak *)p;
ASSERT(LOOKS_LIKE_PTR(w->key));
ASSERT(LOOKS_LIKE_PTR(w->value));
- ASSERT(LOOKS_LIKE_PTR(w->finaliser));
+ ASSERT(LOOKS_LIKE_PTR(w->finalizer));
if (w->link) {
ASSERT(LOOKS_LIKE_PTR(w->link));
}
}
case ARR_WORDS:
- case MUT_ARR_WORDS:
return arr_words_sizeW(stgCast(StgArrWords*,p));
case MUT_ARR_PTRS:
checkTSO((StgTSO *)p);
return tso_sizeW((StgTSO *)p);
+#if defined(PAR)
+
case BLOCKED_FETCH:
+ ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
+ ASSERT(LOOKS_LIKE_PTR((((StgBlockedFetch *)p)->node)));
+ return sizeofW(StgBlockedFetch); // see size used in evacuate()
+
case FETCH_ME:
+ ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
+ return sizeofW(StgFetchMe); // see size used in evacuate()
+
+ case FETCH_ME_BQ:
+ checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
+ return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
+
+ case RBH:
+ /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
+ ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
+ if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
+ checkBQ(((StgRBH *)p)->blocking_queue, p);
+ ASSERT(LOOKS_LIKE_GHC_INFO(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
+ return BLACKHOLE_sizeW(); // see size used in evacuate()
+ // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
+
+#endif
+
case EVACUATED:
- barf("checkClosure: unimplemented/strange closure type");
+ barf("checkClosure: found EVACUATED closure %d",
+ info->type);
default:
- barf("checkClosure");
+ barf("checkClosure (closure type %d)", info->type);
}
-#undef LOOKS_LIKE_PTR
}
+#if defined(PAR)
+
+#define PVM_PE_MASK 0xfffc0000
+#define MAX_PVM_PES MAX_PES
+#define MAX_PVM_TIDS MAX_PES
+#define MAX_SLOTS 100000
+
+/* Heuristic validity check for a PVM task id: the high bits (above
+   bit 18, selected by PVM_PE_MASK) encode the PE and must be non-zero;
+   both the PE part and the low (local) part must be below
+   MAX_PVM_TIDS. */
+rtsBool
+looks_like_tid(StgInt tid)
+{
+  StgInt hi = (tid & PVM_PE_MASK) >> 18;
+  StgInt lo = (tid & ~PVM_PE_MASK);
+  rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
+  return ok;
+}
+
+/* Coarse validity check for a GA slot number when the owning tid is
+   not known: only requires slot < MAX_SLOTS.  With a known tid,
+   looks_like_ga gives a tighter bound. */
+rtsBool
+looks_like_slot(StgInt slot)
+{
+  /* if tid is known better use looks_like_ga!! */
+  rtsBool ok = slot<MAX_SLOTS;
+  // This refers only to the no. of slots on the current PE
+  // rtsBool ok = slot<=highest_slot();
+  return ok;
+}
+
+/* Validity check for a global address: its gtid must look like a PVM
+   task id, and its slot must be in range — the exact bound
+   (highest_slot()) when the GA belongs to this PE (gtid==mytid),
+   otherwise only the loose MAX_SLOTS bound. */
+rtsBool
+looks_like_ga(globalAddr *ga)
+{
+  rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
+  rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
+ 	     (ga)->payload.gc.slot<=highest_slot() :
+ 	     (ga)->payload.gc.slot<MAX_SLOTS;
+  rtsBool ok = is_tid && is_slot;
+  return ok;
+}
+
+#endif
+
+//@node Heap Sanity, TSO Sanity, Stack sanity
+//@subsection Heap Sanity
+
/* -----------------------------------------------------------------------------
Check Heap Sanity
all the objects in the remainder of the chain.
-------------------------------------------------------------------------- */
+//@cindex checkHeap
extern void
checkHeap(bdescr *bd, StgPtr start)
{
StgPtr p;
+ nat xxx = 0; // tmp -- HWL
if (start == NULL) {
p = bd->start;
nat size = checkClosure(stgCast(StgClosure*,p));
/* This is the smallest size of closure that can live in the heap. */
ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
+ if (get_itbl(stgCast(StgClosure*,p))->type == IND_STATIC)
+ xxx++;
p += size;
/* skip over slop */
while (p < bd->free &&
- (*p == 0 || !LOOKS_LIKE_GHC_INFO(*p))) { p++; }
+ (*p == 0 || !LOOKS_LIKE_GHC_INFO((void*)*p))) { p++; }
}
bd = bd->link;
if (bd != NULL) {
p = bd->start;
}
}
+ fprintf(stderr,"@@@@ checkHeap: Heap ok; %d IND_STATIC closures checked\n",
+ xxx);
+}
+
+/*
+  Check heap between start and end. Used after unpacking graphs.
+  Walks the region closure by closure: each word at p must look like a
+  GHC info pointer, each closure must pass checkClosure, and its size
+  must be at least the minimum heap-closure size.
+*/
+extern void
+checkHeapChunk(StgPtr start, StgPtr end)
+{
+  StgPtr p;
+  nat size;
+
+  for (p=start; p<end; p+=size) {
+    ASSERT(LOOKS_LIKE_GHC_INFO((void*)*p));
+    size = checkClosure(stgCast(StgClosure*,p));
+    /* This is the smallest size of closure that can live in the heap. */
+    ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
+  }
}
+//@cindex checkChain
extern void
checkChain(bdescr *bd)
{
}
/* check stack - making sure that update frames are linked correctly */
+//@cindex checkStack
void
checkStack(StgPtr sp, StgPtr stack_end, StgUpdateFrame* su )
{
ASSERT(stgCast(StgPtr,su) == stack_end);
}
+//@node TSO Sanity, Thread Queue Sanity, Heap Sanity
+//@subsection TSO Sanity
+
+//@cindex checkTSO
extern void
checkTSO(StgTSO *tso)
{
StgOffset stack_size = tso->stack_size;
StgPtr stack_end = stack + stack_size;
- if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
+ if (tso->what_next == ThreadRelocated) {
+ checkTSO(tso->link);
+ return;
+ }
+
+ if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
/* The garbage collector doesn't bother following any pointers
* from dead threads, so don't check sanity here.
*/
ASSERT(stack <= sp && sp < stack_end);
ASSERT(sp <= stgCast(StgPtr,su));
+#if defined(PAR)
+ ASSERT(tso->par.magic==TSO_MAGIC);
+
+ switch (tso->why_blocked) {
+ case BlockedOnGA:
+ checkClosureShallow(tso->block_info.closure);
+ ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
+ get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
+ break;
+ case BlockedOnGA_NoSend:
+ checkClosureShallow(tso->block_info.closure);
+ ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
+ break;
+ case BlockedOnBlackHole:
+ checkClosureShallow(tso->block_info.closure);
+ ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
+ get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
+ get_itbl(tso->block_info.closure)->type==RBH);
+ break;
+ case BlockedOnRead:
+ case BlockedOnWrite:
+ case BlockedOnDelay:
+ /* isOnBQ(blocked_queue) */
+ break;
+ case BlockedOnException:
+ /* isOnSomeBQ(tso) */
+ ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
+ break;
+ case BlockedOnMVar:
+ ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
+ break;
+ default:
+ /*
+ Could check other values of why_blocked but I am more
+ lazy than paranoid (bad combination) -- HWL
+ */
+ }
+
+  /* if the link field is non-nil it must point to one of these
+     three closure types */
+ ASSERT(tso->link == END_TSO_QUEUE ||
+ get_itbl(tso->link)->type == TSO ||
+ get_itbl(tso->link)->type == BLOCKED_FETCH ||
+ get_itbl(tso->link)->type == CONSTR);
+#endif
+
checkStack(sp, stack_end, su);
}
+#if defined(GRAN)
+//@cindex checkTSOsSanity
+/* GRAN only: run checkTSO over every TSO on every processor's run
+   queue, reporting progress on stderr and a final count via belch. */
+extern void
+checkTSOsSanity(void) {
+  nat i, tsos;
+  StgTSO *tso;
+
+  belch("Checking sanity of all runnable TSOs:");
+
+  for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
+    for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
+      fprintf(stderr, "TSO %p on PE %d ...", tso, i);
+      checkTSO(tso);
+      fprintf(stderr, "OK, ");
+      tsos++;
+    }
+  }
+
+  belch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
+}
+
+//@node Thread Queue Sanity, Blackhole Sanity, TSO Sanity
+//@subsection Thread Queue Sanity
+
+// still GRAN only
+
+//@cindex checkThreadQSanity
+/* Check the sanity of processor proc's run queue: head and tail must
+   be non-NULL (END_TSO_QUEUE is the nil value), head and tail must be
+   nil together, and each link field must agree with the traversal
+   order.  Optionally runs checkTSO on every queued thread.
+   Returns rtsTrue on success (failures trip an ASSERT). */
+extern rtsBool
+checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
+{
+  StgTSO *tso, *prev;
+
+  /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
+  ASSERT(run_queue_hds[proc]!=NULL);
+  ASSERT(run_queue_tls[proc]!=NULL);
+  /* if either head or tail is NIL then the other one must be NIL, too */
+  ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
+  ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
+  for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
+       tso!=END_TSO_QUEUE;
+       prev=tso, tso=tso->link) {
+    ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
+	   (prev==END_TSO_QUEUE || prev->link==tso));
+    if (check_TSO_too)
+      checkTSO(tso);
+  }
+  ASSERT(prev==run_queue_tls[proc]);
+  /* fix: function is declared rtsBool but had no return statement */
+  return rtsTrue;
+}
+
+//@cindex checkThreadQsSanity
+/* Run checkThreadQSanity over every processor's run queue.
+   Returns rtsTrue (fix: declared rtsBool but had no return). */
+extern rtsBool
+checkThreadQsSanity (rtsBool check_TSO_too)
+{
+  PEs p;
+
+  for (p=0; p<RtsFlags.GranFlags.proc; p++)
+    checkThreadQSanity(p, check_TSO_too);
+  return rtsTrue;
+}
+#endif /* GRAN */
+
+/*
+  Check that all TSOs have been evacuated.
+  Optionally also check the sanity of the TSOs.
+*/
+void
+checkGlobalTSOList (rtsBool checkTSOs)
+{
+  extern StgTSO *all_threads;
+  StgTSO *tso;
+  for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
+    /* every thread on the global list must live in an evacuated block */
+    ASSERT(Bdescr((P_)tso)->evacuated == 1);
+    if (checkTSOs)
+      checkTSO(tso);
+  }
+}
+
+//@node Blackhole Sanity, GALA table sanity, Thread Queue Sanity
+//@subsection Blackhole Sanity
+
/* -----------------------------------------------------------------------------
Check Blackhole Sanity
the update frame list.
-------------------------------------------------------------------------- */
-rtsBool isBlackhole( StgTSO* tso, StgClosure* p )
+//@cindex isBlackhole
+rtsBool
+isBlackhole( StgTSO* tso, StgClosure* p )
{
StgUpdateFrame* su = tso->su;
do {
} while (1);
}
+/*
+  Check the static objects list: run checkClosure on every object and
+  follow the per-closure-type static link field until
+  END_OF_STATIC_LIST is reached.
+*/
+extern void
+checkStaticObjects ( void ) {
+  extern StgClosure* static_objects;
+  StgClosure *p = static_objects;
+  StgInfoTable *info;
+
+  while (p != END_OF_STATIC_LIST) {
+    checkClosure(p);
+    info = get_itbl(p);
+    switch (info->type) {
+    case IND_STATIC:
+      {
+	StgClosure *indirectee = stgCast(StgIndStatic*,p)->indirectee;
+
+	/* the indirectee must itself look like a sane closure */
+	ASSERT(LOOKS_LIKE_PTR(indirectee));
+	ASSERT(LOOKS_LIKE_GHC_INFO(indirectee->header.info));
+	p = IND_STATIC_LINK((StgClosure *)p);
+	break;
+      }
+
+    case THUNK_STATIC:
+      p = THUNK_STATIC_LINK((StgClosure *)p);
+      break;
+
+    case FUN_STATIC:
+      p = FUN_STATIC_LINK((StgClosure *)p);
+      break;
+
+    case CONSTR_STATIC:
+      p = STATIC_LINK(info,(StgClosure *)p);
+      break;
+
+    default:
+      /* fix: corrected typo in error message ("checkStaticObjetcs") */
+      barf("checkStaticObjects: strange closure %p (%s)",
+	   p, info_type(p));
+    }
+  }
+}
+
+/*
+ Check the sanity of a blocking queue starting at bqe with closure being
+ the closure holding the blocking queue.
+ Note that in GUM we can have several different closure types in a
+ blocking queue
+*/
+//@cindex checkBQ
+#if defined(PAR)
+/* PAR version: the holding closure must be a BLACKHOLE_BQ, MVAR,
+   FETCH_ME_BQ or RBH.  Queue elements may be BLOCKED_FETCH, TSO or
+   CONSTR; a CONSTR terminates the queue immediately, otherwise we
+   follow link fields until END_BQ_QUEUE. */
+void
+checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
+{
+  rtsBool end = rtsFalse;
+  StgInfoTable *info = get_itbl(closure);
+
+  ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
+	 || info->type == FETCH_ME_BQ || info->type == RBH);
+
+  do {
+    switch (get_itbl(bqe)->type) {
+    case BLOCKED_FETCH:
+    case TSO:
+      checkClosure((StgClosure *)bqe);
+      bqe = bqe->link;
+      end = (bqe==END_BQ_QUEUE);
+      break;
+
+    case CONSTR:
+      /* a CONSTR on the queue ends the traversal; its link is not followed */
+      checkClosure((StgClosure *)bqe);
+      end = rtsTrue;
+      break;
+
+    default:
+      barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
+	   get_itbl(bqe)->type, closure, info_type(closure));
+    }
+  } while (!end);
+}
+#elif defined(GRAN)
+/* GRAN version: the holding closure must be a BLACKHOLE_BQ or MVAR.
+   Queue elements may be BLOCKED_FETCH or TSO closures, linked via
+   their link fields and terminated by END_BQ_QUEUE. */
+void
+checkBQ (StgTSO *bqe, StgClosure *closure)
+{
+  rtsBool end = rtsFalse;
+  StgInfoTable *info = get_itbl(closure);
+
+  ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
+
+  do {
+    switch (get_itbl(bqe)->type) {
+    case BLOCKED_FETCH:
+    case TSO:
+      checkClosure((StgClosure *)bqe);
+      bqe = bqe->link;
+      end = (bqe==END_BQ_QUEUE);
+      break;
+
+    default:
+      barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
+	   get_itbl(bqe)->type, closure, info_type(closure));
+    }
+  } while (!end);
+}
+#else
+/* Sequential version: a blocking queue holds only TSOs, linked via
+   tso->link and terminated by END_TSO_QUEUE.  The holding closure
+   must be a BLACKHOLE_BQ or an MVAR. */
+void
+checkBQ (StgTSO *bqe, StgClosure *closure)
+{
+  rtsBool end = rtsFalse;
+  StgInfoTable *info = get_itbl(closure);
+
+  ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
+
+  do {
+    switch (get_itbl(bqe)->type) {
+    case TSO:
+      checkClosure((StgClosure *)bqe);
+      bqe = bqe->link;
+      end = (bqe==END_TSO_QUEUE);
+      break;
+
+    default:
+      /* fix: the third argument (info->type) had no matching
+	 conversion specifier in the format string */
+      barf("checkBQ: strange closure %d in blocking queue for closure %p (closure type %d)\n",
+	   get_itbl(bqe)->type, closure, info->type);
+    }
+  } while (!end);
+}
+
+#endif
+
+
+//@node GALA table sanity, Index, Blackhole Sanity
+//@subsection GALA table sanity
+
+/*
+ This routine checks the sanity of the LAGA and GALA tables. They are
+ implemented as lists through one hash table, LAtoGALAtable, because entries
+ in both tables have the same structure:
+ - the LAGA table maps local addresses to global addresses; it starts
+ with liveIndirections
+ - the GALA table maps global addresses to local addresses; it starts
+ with liveRemoteGAs
+*/
+
+#if defined(PAR)
+#include "Hash.h"
+
+/* hidden in parallel/Global.c; only accessed for testing here */
+extern GALA *liveIndirections;
+extern GALA *liveRemoteGAs;
+extern HashTable *LAtoGALAtable;
+
+//@cindex checkLAGAtable
+/* Check the LAGA and GALA tables.  Both are lists hanging off the
+   shared LAtoGALAtable hash table: liveIndirections (local address ->
+   global address) and liveRemoteGAs (global address -> local
+   address).  For each entry: a preferred GA must be the one found in
+   the hash table for its la, the la must carry a GHC info pointer,
+   and the list must not loop directly on itself. */
+void
+checkLAGAtable(rtsBool check_closures)
+{
+  GALA *gala, *gala0;
+  nat n=0, m=0; // debugging
+
+  for (gala = liveIndirections; gala != NULL; gala = gala->next) {
+    n++;
+    gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
+    ASSERT(!gala->preferred || gala == gala0);
+    ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
+    ASSERT(gala->next!=gala); // detect direct loops
+    /*
+    if ( check_closures ) {
+      checkClosure(stgCast(StgClosure*,gala->la));
+    }
+    */
+  }
+
+  for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
+    m++;
+    gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
+    ASSERT(!gala->preferred || gala == gala0);
+    ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
+    ASSERT(gala->next!=gala); // detect direct loops
+    /*
+    if ( check_closures ) {
+      checkClosure(stgCast(StgClosure*,gala->la));
+    }
+    */
+  }
+}
+#endif
+
+//@node Index, , GALA table sanity
+//@subsection Index
+
#endif /* DEBUG */
+
+//@index
+//* checkBQ:: @cindex\s-+checkBQ
+//* checkChain:: @cindex\s-+checkChain
+//* checkClosureShallow:: @cindex\s-+checkClosureShallow
+//* checkHeap:: @cindex\s-+checkHeap
+//* checkLargeBitmap:: @cindex\s-+checkLargeBitmap
+//* checkSmallBitmap:: @cindex\s-+checkSmallBitmap
+//* checkStack:: @cindex\s-+checkStack
+//* checkStackChunk:: @cindex\s-+checkStackChunk
+//* checkStackChunk:: @cindex\s-+checkStackChunk
+//* checkStackClosure:: @cindex\s-+checkStackClosure
+//* checkStackObject:: @cindex\s-+checkStackObject
+//* checkTSO:: @cindex\s-+checkTSO
+//* checkTSOsSanity:: @cindex\s-+checkTSOsSanity
+//* checkThreadQSanity:: @cindex\s-+checkThreadQSanity
+//* checkThreadQsSanity:: @cindex\s-+checkThreadQsSanity
+//* isBlackhole:: @cindex\s-+isBlackhole
+//@end index