1 /* -----------------------------------------------------------------------------
2 * $Id: Sanity.c,v 1.21 2000/04/14 15:18:06 sewardj Exp $
4 * (c) The GHC Team, 1998-1999
6 * Sanity checking code for the heap and stack.
8 * Used when debugging: check that the stack looks reasonable.
10 * - All things that are supposed to be pointers look like pointers.
12 * - Objects in text space are marked as static closures, those
13 * in the heap are dynamic.
15 * ---------------------------------------------------------------------------*/
23 //* Thread Queue Sanity::
24 //* Blackhole Sanity::
27 //@node Includes, Macros
28 //@subsection Includes
32 #ifdef DEBUG /* whole file */
36 #include "BlockAlloc.h"
38 #include "StoragePriv.h" // for END_OF_STATIC_LIST
40 //@node Macros, Stack sanity, Includes
/* A word "looks like" a valid closure pointer if it is either a known
 * static closure, or a heap address whose block descriptor is in use
 * (free != (void*)-1) -- and, in both cases, the first word it points
 * at is not 0xaaaaaaaa, the fill pattern used to scribble over freed
 * memory in the debug RTS. */
43 #define LOOKS_LIKE_PTR(r) ((LOOKS_LIKE_STATIC_CLOSURE(r) || \
44 ((HEAP_ALLOCED(r) && Bdescr((P_)r)->free != (void *)-1))) && \
45 ((StgWord)(*(StgPtr)r)!=0xaaaaaaaa))
47 //@node Stack sanity, Heap Sanity, Macros
48 //@subsection Stack sanity
50 /* -----------------------------------------------------------------------------
52 -------------------------------------------------------------------------- */
/* Forward declarations for the stack-sanity helpers defined below.
 * The StgOffset-returning checkers return the size (in words) of the
 * object they checked, so callers can step over it. */
54 StgOffset checkStackClosure( StgClosure* c );
56 StgOffset checkStackObject( StgPtr sp );
58 void checkStackChunk( StgPtr sp, StgPtr stack_end );
60 static StgOffset checkSmallBitmap( StgPtr payload, StgWord32 bitmap );
62 static StgOffset checkLargeBitmap( StgPtr payload,
63 StgLargeBitmap* large_bitmap );
65 void checkClosureShallow( StgClosure* p );
67 //@cindex checkSmallBitmap
/* Walk a <=32-entry liveness bitmap over 'payload', sanity-checking
 * every word whose bitmap bit is CLEAR (a clear bit marks a live
 * pointer; set bits are non-pointers/dead slots -- NOTE(review):
 * confirm against InfoTables.h bitmap convention).
 * Returns the number of payload words scanned. */
69 checkSmallBitmap( StgPtr payload, StgWord32 bitmap )
74 for(; bitmap != 0; ++i, bitmap >>= 1 ) {
75 if ((bitmap & 1) == 0) {
76 checkClosure(stgCast(StgClosure*,payload[i]));
82 //@cindex checkLargeBitmap
/* Same as checkSmallBitmap, but for bitmaps with more than 32 entries:
 * iterate over each 32-bit word of the StgLargeBitmap, checking each
 * payload word whose bit is clear. Returns words scanned. */
84 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap )
90 for (bmp=0; bmp<large_bitmap->size; bmp++) {
91 StgWord32 bitmap = large_bitmap->bitmap[bmp];
92 for(; bitmap != 0; ++i, bitmap >>= 1 ) {
93 if ((bitmap & 1) == 0) {
94 checkClosure(stgCast(StgClosure*,payload[i]));
101 //@cindex checkStackClosure
/* Sanity-check one activation record (stack frame) at 'c' and return
 * its size in words. Dispatches on the frame's info-table type:
 * bitmap-style layouts are delegated to checkSmallBitmap /
 * checkLargeBitmap; update/catch/seq/stop frames get their link and
 * updatee fields validated. */
103 checkStackClosure( StgClosure* c )
105 const StgInfoTable* info = get_itbl(c);
107 /* All activation records have 'bitmap' style layout info. */
108 switch (info->type) {
109 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
111 StgRetDyn* r = (StgRetDyn *)c;
112 return sizeofW(StgRetDyn) +
113 checkSmallBitmap(r->payload,r->liveness);
115 case RET_BCO: /* small bitmap (<= 32 entries) */
118 return 1 + checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
/* update frame: the updatee must itself look like a valid pointer */
121 ASSERT(LOOKS_LIKE_PTR(((StgUpdateFrame*)c)->updatee));
124 /* check that the link field points to another stack frame */
125 ASSERT(get_itbl(((StgFrame*)c)->link)->type == UPDATE_FRAME ||
126 get_itbl(((StgFrame*)c)->link)->type == CATCH_FRAME ||
127 get_itbl(((StgFrame*)c)->link)->type == STOP_FRAME ||
128 get_itbl(((StgFrame*)c)->link)->type == SEQ_FRAME);
136 checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
137 case RET_BIG: /* large bitmap (> 32 entries) */
139 return 1 + checkLargeBitmap((StgPtr)c + 1,info->layout.large_bitmap);
141 case FUN_STATIC: /* probably a slow-entry point return address: */
142 #if 0 && defined(GRAN)
148 /* if none of the above, maybe it's a closure which looks a
149 * little like an infotable
151 checkClosureShallow(*(StgClosure **)c);
153 /* barf("checkStackClosure: weird activation record found on stack (%p).",c); */
158 * check that it looks like a valid closure - without checking its payload
159 * used to avoid recursion between checking PAPs and checking stack
163 //@cindex checkClosureShallow
/* Check that 'p' looks like a valid closure WITHOUT descending into
 * its payload -- used to break the recursion between checking PAPs
 * and checking stack chunks. Static closures must be marked static;
 * dynamic ones must additionally satisfy LOOKS_LIKE_PTR. */
165 checkClosureShallow( StgClosure* p )
168 ASSERT(LOOKS_LIKE_GHC_INFO(p->header.info)
169 || IS_HUGS_CONSTR_INFO(GET_INFO(p)));
171 /* Is it a static closure (i.e. in the data segment)? */
172 if (LOOKS_LIKE_STATIC(p)) {
173 ASSERT(closure_STATIC(p));
175 ASSERT(!closure_STATIC(p));
176 ASSERT(LOOKS_LIKE_PTR(p));
180 /* check an individual stack object */
181 //@cindex checkStackObject
/* Check a single stack slot at 'sp' and return the number of words it
 * occupies: a tagged argument section, an activation record (info
 * pointer on the stack), or an untagged closure pointer. */
183 checkStackObject( StgPtr sp )
185 if (IS_ARG_TAG(*sp)) {
186 /* Tagged words might be "stubbed" pointers, so there's no
187 * point checking to see whether they look like pointers or
188 * not (some of them will).
190 return ARG_SIZE(*sp) + 1;
191 } else if (LOOKS_LIKE_GHC_INFO(*stgCast(StgPtr*,sp))) {
192 return checkStackClosure(stgCast(StgClosure*,sp));
193 } else { /* must be an untagged closure pointer in the stack */
194 checkClosureShallow(*stgCast(StgClosure**,sp));
199 /* check sections of stack between update frames */
200 //@cindex checkStackChunk
/* Walk the stack words in [sp, stack_end), checking each object and
 * stepping by its reported size. Used on the sections of stack
 * between update frames. */
202 checkStackChunk( StgPtr sp, StgPtr stack_end )
207 while (p < stack_end) {
208 p += checkStackObject( p );
210 // ASSERT( p == stack_end ); -- HWL
213 //@cindex checkClosure
/* Deep sanity check of a single heap closure 'p'. Validates the info
 * pointer, static/dynamic placement, and every pointer field reachable
 * via the closure's layout, dispatching on info->type. Returns the
 * closure's size in words so heap walkers can advance past it. */
215 checkClosure( StgClosure* p )
217 const StgInfoTable *info;
220 ASSERT(LOOKS_LIKE_GHC_INFO(p->header.info));
223 /* Is it a static closure (i.e. in the data segment)? */
224 if (LOOKS_LIKE_STATIC(p)) {
225 ASSERT(closure_STATIC(p));
227 ASSERT(!closure_STATIC(p));
228 ASSERT(LOOKS_LIKE_PTR(p));
232 switch (info->type) {
/* byte-code object: every constant pointer must look valid */
235 StgBCO* bco = stgCast(StgBCO*,p);
237 for(i=0; i < bco->n_ptrs; ++i) {
238 ASSERT(LOOKS_LIKE_PTR(bcoConstPtr(bco,i)));
240 return bco_sizeW(bco);
/* MVar: head/tail of the blocked-TSO queue plus the value */
245 StgMVar *mvar = (StgMVar *)p;
246 ASSERT(LOOKS_LIKE_PTR(mvar->head));
247 ASSERT(LOOKS_LIKE_PTR(mvar->tail));
248 ASSERT(LOOKS_LIKE_PTR(mvar->value));
251 checkBQ((StgBlockingQueueElement *)mvar->head, p);
253 checkBQ(mvar->head, p);
256 return sizeofW(StgMVar);
267 for (i = 0; i < info->layout.payload.ptrs; i++) {
268 ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
270 return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
274 checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
275 /* fall through to basic ptr check */
290 case IND_OLDGEN_PERM:
295 case SE_CAF_BLACKHOLE:
303 case CONSTR_CHARLIKE:
305 case CONSTR_NOCAF_STATIC:
/* generic case: check the pointer portion of the payload */
310 for (i = 0; i < info->layout.payload.ptrs; i++) {
311 ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
313 return sizeW_fromITBL(info);
316 case IND_STATIC: /* (1, 0) closure */
317 ASSERT(LOOKS_LIKE_PTR(((StgIndStatic*)p)->indirectee));
318 return sizeW_fromITBL(info);
321 /* deal with these specially - the info table isn't
322 * representative of the actual layout.
324 { StgWeak *w = (StgWeak *)p;
325 ASSERT(LOOKS_LIKE_PTR(w->key));
326 ASSERT(LOOKS_LIKE_PTR(w->value));
327 ASSERT(LOOKS_LIKE_PTR(w->finalizer));
329 ASSERT(LOOKS_LIKE_PTR(w->link));
331 return sizeW_fromITBL(info);
335 ASSERT(LOOKS_LIKE_PTR(stgCast(StgSelector*,p)->selectee));
336 return sizeofW(StgHeader) + MIN_UPD_SIZE;
340 /* we don't expect to see any of these after GC
341 * but they might appear during execution
344 StgInd *ind = stgCast(StgInd*,p);
345 ASSERT(LOOKS_LIKE_PTR(ind->indirectee));
346 q = (P_)p + sizeofW(StgInd);
347 while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
361 barf("checkClosure: stack frame");
363 case AP_UPD: /* we can treat this as being the same as a PAP */
366 StgPAP *pap = stgCast(StgPAP*,p);
367 ASSERT(LOOKS_LIKE_PTR(pap->fun));
/* a PAP's payload is a saved stack chunk -- reuse the stack checker */
368 checkStackChunk((StgPtr)pap->payload,
369 (StgPtr)pap->payload + pap->n_args
371 return pap_sizeW(pap);
375 return arr_words_sizeW(stgCast(StgArrWords*,p));
378 case MUT_ARR_PTRS_FROZEN:
380 StgMutArrPtrs* a = stgCast(StgMutArrPtrs*,p);
382 for (i = 0; i < a->ptrs; i++) {
383 ASSERT(LOOKS_LIKE_PTR(a->payload[i]));
385 return mut_arr_ptrs_sizeW(a);
389 checkTSO((StgTSO *)p);
390 return tso_sizeW((StgTSO *)p);
/* parallel-RTS (GUM) closure types follow */
395 ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
396 ASSERT(LOOKS_LIKE_PTR((((StgBlockedFetch *)p)->node)));
397 return sizeofW(StgBlockedFetch); // see size used in evacuate()
400 ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
401 return sizeofW(StgFetchMe); // see size used in evacuate()
404 checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
405 return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
408 /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
409 ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
410 if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
411 checkBQ(((StgRBH *)p)->blocking_queue, p);
412 ASSERT(LOOKS_LIKE_GHC_INFO(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
413 return BLACKHOLE_sizeW(); // see size used in evacuate()
414 // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
419 barf("checkClosure: found EVACUATED closure %d",
422 barf("checkClosure (closure type %d)", info->type);
/* PVM task-id encoding used by the GUM sanity checks: the top bits
 * selected by PVM_PE_MASK (>> 18) hold the PE number, the low bits a
 * per-PE task index -- see looks_like_tid() below. MAX_SLOTS bounds
 * the GA slot numbers considered plausible. */
428 #define PVM_PE_MASK 0xfffc0000
429 #define MAX_PVM_PES MAX_PES
430 #define MAX_PVM_TIDS MAX_PES
431 #define MAX_SLOTS 100000
/* Heuristic: does 'tid' look like a plausible PVM task id?
 * Splits it into PE number (hi) and per-PE index (lo) and range-checks
 * both; hi must be non-zero. */
434 looks_like_tid(StgInt tid)
436 StgInt hi = (tid & PVM_PE_MASK) >> 18;
437 StgInt lo = (tid & ~PVM_PE_MASK);
438 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Heuristic: does 'slot' look like a plausible GA slot number?
 * Only a coarse global bound -- with a known tid, looks_like_ga()
 * gives a tighter per-PE check. */
443 looks_like_slot(StgInt slot)
445 /* if tid is known better use looks_like_ga!! */
446 rtsBool ok = slot<MAX_SLOTS;
447 // This refers only to the no. of slots on the current PE
448 // rtsBool ok = slot<=highest_slot();
/* Heuristic: does 'ga' look like a valid global address?
 * The tid must look valid, and the slot is checked against the exact
 * local bound (highest_slot()) when the GA is ours, or the coarse
 * MAX_SLOTS bound when it belongs to another PE. */
453 looks_like_ga(globalAddr *ga)
455 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
456 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
457 (ga)->payload.gc.slot<=highest_slot() :
458 (ga)->payload.gc.slot<MAX_SLOTS;
459 rtsBool ok = is_tid && is_slot;
465 //@node Heap Sanity, TSO Sanity, Stack sanity
466 //@subsection Heap Sanity
468 /* -----------------------------------------------------------------------------
471 After garbage collection, the live heap is in a state where we can
472 run through and check that all the pointers point to the right
473 place. This function starts at a given position and sanity-checks
474 all the objects in the remainder of the chain.
475 -------------------------------------------------------------------------- */
/* Walk a chain of heap blocks starting at 'bd' (from 'start'),
 * checking every closure and counting IND_STATICs. After each closure
 * it skips zero/garbage padding words until the next info pointer.
 * Intended for use right after GC, when the live heap is compact. */
479 checkHeap(bdescr *bd, StgPtr start)
482 nat xxx = 0; // tmp -- HWL
485 if (bd != NULL) p = bd->start;
491 while (p < bd->free) {
492 nat size = checkClosure(stgCast(StgClosure*,p));
493 /* This is the smallest size of closure that can live in the heap. */
494 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
495 if (get_itbl(stgCast(StgClosure*,p))->type == IND_STATIC)
/* skip over slop: words that are zero or don't look like info ptrs */
500 while (p < bd->free &&
501 (*p == 0 || !LOOKS_LIKE_GHC_INFO((void*)*p))) { p++; }
508 fprintf(stderr,"@@@@ checkHeap: Heap ok; %d IND_STATIC closures checked\n",
513 Check heap between start and end. Used after unpacking graphs.
/* Check every closure in the contiguous region [start, end).
 * Used after unpacking graphs received from other PEs. */
516 checkHeapChunk(StgPtr start, StgPtr end)
521 for (p=start; p<end; p+=size) {
522 ASSERT(LOOKS_LIKE_GHC_INFO((void*)*p));
523 size = checkClosure(stgCast(StgClosure*,p));
524 /* This is the smallest size of closure that can live in the heap. */
525 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Check the first closure of each block in a block chain. */
531 checkChain(bdescr *bd)
534 checkClosure((StgClosure *)bd->start);
539 /* check stack - making sure that update frames are linked correctly */
/* Check a whole stack: verify each chunk between update frames with
 * checkStackChunk, then follow the update-frame list (update / seq /
 * catch frames) down to the bottom, which must coincide with
 * stack_end. */
542 checkStack(StgPtr sp, StgPtr stack_end, StgUpdateFrame* su )
544 /* check everything down to the first update frame */
545 checkStackChunk( sp, stgCast(StgPtr,su) );
546 while ( stgCast(StgPtr,su) < stack_end) {
547 sp = stgCast(StgPtr,su);
548 switch (get_itbl(su)->type) {
553 su = stgCast(StgSeqFrame*,su)->link;
556 su = stgCast(StgCatchFrame*,su)->link;
559 /* not quite: ASSERT(stgCast(StgPtr,su) == stack_end); */
562 barf("checkStack: weird record found on update frame list.");
564 checkStackChunk( sp, stgCast(StgPtr,su) );
566 ASSERT(stgCast(StgPtr,su) == stack_end);
569 //@node TSO Sanity, Thread Queue Sanity, Heap Sanity
570 //@subsection TSO Sanity
/* Sanity-check one thread state object: stack pointers in range,
 * blocking reason consistent with the closure/TSO it is blocked on,
 * link field pointing at a legal queue element, and finally the whole
 * stack via checkStack(). Relocated and dead threads are skipped. */
574 checkTSO(StgTSO *tso)
577 StgPtr stack = tso->stack;
578 StgUpdateFrame* su = tso->su;
579 StgOffset stack_size = tso->stack_size;
580 StgPtr stack_end = stack + stack_size;
582 if (tso->what_next == ThreadRelocated) {
587 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
588 /* The garbage collector doesn't bother following any pointers
589 * from dead threads, so don't check sanity here.
594 ASSERT(stack <= sp && sp < stack_end);
595 ASSERT(sp <= stgCast(StgPtr,su));
598 ASSERT(tso->par.magic==TSO_MAGIC);
600 switch (tso->why_blocked) {
602 checkClosureShallow(tso->block_info.closure);
603 ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
604 get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
606 case BlockedOnGA_NoSend:
607 checkClosureShallow(tso->block_info.closure);
608 ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
610 case BlockedOnBlackHole:
611 checkClosureShallow(tso->block_info.closure);
612 ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
613 get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
614 get_itbl(tso->block_info.closure)->type==RBH);
619 /* isOnBQ(blocked_queue) */
621 case BlockedOnException:
622 /* isOnSomeBQ(tso) */
623 ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
626 ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
630 Could check other values of why_blocked but I am more
631 lazy than paranoid (bad combination) -- HWL
635 /* if the link field is non-nil it must point to one of these
636 three closure types */
637 ASSERT(tso->link == END_TSO_QUEUE ||
638 get_itbl(tso->link)->type == TSO ||
639 get_itbl(tso->link)->type == BLOCKED_FETCH ||
640 get_itbl(tso->link)->type == CONSTR);
643 checkStack(sp, stack_end, su);
647 //@cindex checkTSOsSanity
/* GranSim: run checkTSO over every TSO on every PE's run queue,
 * reporting progress to stderr. */
649 checkTSOsSanity(void) {
653 belch("Checking sanity of all runnable TSOs:");
655 for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
656 for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
657 fprintf(stderr, "TSO %p on PE %d ...", tso, i);
659 fprintf(stderr, "OK, ");
664 belch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
667 //@node Thread Queue Sanity, Blackhole Sanity, TSO Sanity
668 //@subsection Thread Queue Sanity
672 //@cindex checkThreadQSanity
/* Check the run queue of one PE: head/tail are non-NULL (the NIL
 * sentinel is END_TSO_QUEUE), head and tail are NIL together, the
 * list is properly linked, and the tail really is the last element.
 * Optionally checkTSO each element too (check_TSO_too). */
674 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
678 /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
679 ASSERT(run_queue_hds[proc]!=NULL);
680 ASSERT(run_queue_tls[proc]!=NULL);
681 /* if either head or tail is NIL then the other one must be NIL, too */
682 ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
683 ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
684 for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
686 prev=tso, tso=tso->link) {
687 ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
688 (prev==END_TSO_QUEUE || prev->link==tso));
692 ASSERT(prev==run_queue_tls[proc]);
695 //@cindex checkThreadQsSanity
/* Check the run queues of all PEs. */
697 checkThreadQsSanity (rtsBool check_TSO_too)
701 for (p=0; p<RtsFlags.GranFlags.proc; p++)
702 checkThreadQSanity(p, check_TSO_too);
707 Check that all TSOs have been evacuated.
708 Optionally also check the sanity of the TSOs.
/* Walk the global all_threads list (via global_link) and assert each
 * TSO sits in an evacuated block; optionally checkTSO each one. */
711 checkGlobalTSOList (rtsBool checkTSOs)
713 extern StgTSO *all_threads;
715 for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
716 ASSERT(Bdescr((P_)tso)->evacuated == 1);
722 //@node Blackhole Sanity, GALA table sanity, Thread Queue Sanity
723 //@subsection Blackhole Sanity
725 /* -----------------------------------------------------------------------------
726 Check Blackhole Sanity
728 Test whether an object is already on the update list.
729 It isn't necessarily an rts error if it is - it might be a programming
732 Future versions might be able to test for a blackhole without traversing
733 the update frame list.
735 -------------------------------------------------------------------------- */
736 //@cindex isBlackhole
/* Is closure 'p' the updatee of some frame on tso's update-frame
 * list?  Traverses the su chain (update / seq / catch frames).
 * Not necessarily an error if true -- see header comment above. */
738 isBlackhole( StgTSO* tso, StgClosure* p )
740 StgUpdateFrame* su = tso->su;
742 switch (get_itbl(su)->type) {
744 if (su->updatee == p) {
751 su = stgCast(StgSeqFrame*,su)->link;
754 su = stgCast(StgCatchFrame*,su)->link;
759 barf("isBlackhole: weird record found on update frame list.");
765 Check the static objects list.
/* Walk the static_objects list (linked through the per-type
 * STATIC_LINK fields), checking each entry until END_OF_STATIC_LIST.
 * IND_STATICs additionally get their indirectee validated.
 * Fix: the barf() message misspelled the function name
 * ("checkStaticObjetcs"), which would mislead anyone grepping for
 * the source of the failure. */
768 checkStaticObjects ( void ) {
769 extern StgClosure* static_objects;
770 StgClosure *p = static_objects;
773 while (p != END_OF_STATIC_LIST) {
776 switch (info->type) {
779 StgClosure *indirectee = stgCast(StgIndStatic*,p)->indirectee;
781 ASSERT(LOOKS_LIKE_PTR(indirectee));
782 ASSERT(LOOKS_LIKE_GHC_INFO(indirectee->header.info));
783 p = IND_STATIC_LINK((StgClosure *)p);
788 p = THUNK_STATIC_LINK((StgClosure *)p);
792 p = FUN_STATIC_LINK((StgClosure *)p);
796 p = STATIC_LINK(info,(StgClosure *)p);
800 barf("checkStaticObjects: strange closure %p (%s)",
807 Check the sanity of a blocking queue starting at bqe with closure being
808 the closure holding the blocking queue.
809 Note that in GUM we can have several different closure types in a
/* GUM variant: check a blocking queue hanging off 'closure' (which
 * must be a BLACKHOLE_BQ, MVAR, FETCH_ME_BQ or RBH). In GUM a BQ may
 * contain TSOs, BLOCKED_FETCHes and CONSTRs, hence the element-type
 * dispatch; anything else is fatal. */
815 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
817 rtsBool end = rtsFalse;
818 StgInfoTable *info = get_itbl(closure);
820 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
821 || info->type == FETCH_ME_BQ || info->type == RBH);
824 switch (get_itbl(bqe)->type) {
827 checkClosure((StgClosure *)bqe);
829 end = (bqe==END_BQ_QUEUE);
833 checkClosure((StgClosure *)bqe);
838 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
839 get_itbl(bqe)->type, closure, info_type(closure));
/* GranSim variant: blocking queues contain only TSOs, and the owning
 * closure must be a BLACKHOLE_BQ or MVAR. */
845 checkBQ (StgTSO *bqe, StgClosure *closure)
847 rtsBool end = rtsFalse;
848 StgInfoTable *info = get_itbl(closure);
850 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
853 switch (get_itbl(bqe)->type) {
856 checkClosure((StgClosure *)bqe);
858 end = (bqe==END_BQ_QUEUE);
862 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
863 get_itbl(bqe)->type, closure, info_type(closure));
/* Sequential/SMP variant: same as the GranSim one, but the queue is
 * terminated by END_TSO_QUEUE rather than END_BQ_QUEUE. */
869 checkBQ (StgTSO *bqe, StgClosure *closure)
871 rtsBool end = rtsFalse;
872 StgInfoTable *info = get_itbl(closure);
874 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
877 switch (get_itbl(bqe)->type) {
879 checkClosure((StgClosure *)bqe);
881 end = (bqe==END_TSO_QUEUE);
885 barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
886 get_itbl(bqe)->type, closure, info->type);
894 //@node GALA table sanity, Index, Blackhole Sanity
895 //@subsection GALA table sanity
898 This routine checks the sanity of the LAGA and GALA tables. They are
899 implemented as lists through one hash table, LAtoGALAtable, because entries
900 in both tables have the same structure:
901 - the LAGA table maps local addresses to global addresses; it starts
902 with liveIndirections
903 - the GALA table maps global addresses to local addresses; it starts
910 /* hidden in parallel/Global.c; only accessed for testing here */
911 extern GALA *liveIndirections;
912 extern GALA *liveRemoteGAs;
913 extern HashTable *LAtoGALAtable;
915 //@cindex checkLAGAtable
/* Check the LAGA and GALA tables: every entry's local address must
 * resolve through LAtoGALAtable back to itself when 'preferred', its
 * closure header must look valid, and the list must not contain a
 * trivial self-loop. With check_closures, deep-check each closure. */
917 checkLAGAtable(rtsBool check_closures)
920 nat n=0, m=0; // debugging
/* first the LAGA table (local -> global), rooted at liveIndirections */
922 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
924 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
925 ASSERT(!gala->preferred || gala == gala0);
926 ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
927 ASSERT(gala->next!=gala); // detect direct loops
929 if ( check_closures ) {
930 checkClosure(stgCast(StgClosure*,gala->la));
/* then the GALA table (global -> local), rooted at liveRemoteGAs */
935 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
937 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
938 ASSERT(!gala->preferred || gala == gala0);
939 ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
940 ASSERT(gala->next!=gala); // detect direct loops
942 if ( check_closures ) {
943 checkClosure(stgCast(StgClosure*,gala->la));
950 //@node Index, , GALA table sanity
956 //* checkBQ:: @cindex\s-+checkBQ
957 //* checkChain:: @cindex\s-+checkChain
958 //* checkClosureShallow:: @cindex\s-+checkClosureShallow
959 //* checkHeap:: @cindex\s-+checkHeap
960 //* checkLargeBitmap:: @cindex\s-+checkLargeBitmap
961 //* checkSmallBitmap:: @cindex\s-+checkSmallBitmap
962 //* checkStack:: @cindex\s-+checkStack
963 //* checkStackChunk:: @cindex\s-+checkStackChunk
964 //* checkClosure:: @cindex\s-+checkClosure
965 //* checkStackClosure:: @cindex\s-+checkStackClosure
966 //* checkStackObject:: @cindex\s-+checkStackObject
967 //* checkTSO:: @cindex\s-+checkTSO
968 //* checkTSOsSanity:: @cindex\s-+checkTSOsSanity
969 //* checkThreadQSanity:: @cindex\s-+checkThreadQSanity
970 //* checkThreadQsSanity:: @cindex\s-+checkThreadQsSanity
971 //* isBlackhole:: @cindex\s-+isBlackhole