1 /* -----------------------------------------------------------------------------
2 * $Id: Sanity.c,v 1.19 2000/03/31 03:09:36 hwloidl Exp $
4 * (c) The GHC Team, 1998-1999
6 * Sanity checking code for the heap and stack.
8 * Used when debugging: check that the stack looks reasonable.
10 * - All things that are supposed to be pointers look like pointers.
12 * - Objects in text space are marked as static closures, those
13 * in the heap are dynamic.
15 * ---------------------------------------------------------------------------*/
23 //* Thread Queue Sanity::
24 //* Blackhole Sanity::
27 //@node Includes, Macros
28 //@subsection Includes
32 #ifdef DEBUG /* whole file */
36 #include "BlockAlloc.h"
38 #include "StoragePriv.h" // for END_OF_STATIC_LIST
40 //@node Macros, Stack sanity, Includes
43 #define LOOKS_LIKE_PTR(r) ((LOOKS_LIKE_STATIC_CLOSURE(r) || \
44 ((HEAP_ALLOCED(r) && Bdescr((P_)r)->free != (void *)-1))) && \
45 ((StgWord)(*(StgPtr)r)!=0xaaaaaaaa))
47 //@node Stack sanity, Heap Sanity, Macros
48 //@subsection Stack sanity
50 /* -----------------------------------------------------------------------------
52 -------------------------------------------------------------------------- */
54 StgOffset checkStackClosure( StgClosure* c );
56 StgOffset checkStackObject( StgPtr sp );
58 void checkStackChunk( StgPtr sp, StgPtr stack_end );
60 static StgOffset checkSmallBitmap( StgPtr payload, StgWord32 bitmap );
62 static StgOffset checkLargeBitmap( StgPtr payload,
63 StgLargeBitmap* large_bitmap );
65 void checkClosureShallow( StgClosure* p );
67 //@cindex checkSmallBitmap
/* Walk a small (word-sized) liveness bitmap over `payload` and sanity-check
 * every word marked as a pointer.  In this bitmap convention a CLEAR bit
 * means "pointer" (set bits are non-pointer words skipped over).
 * NOTE(review): this listing is elided — the declaration of `i`, the braces
 * and the return of the consumed size are missing from this excerpt. */
69 checkSmallBitmap( StgPtr payload, StgWord32 bitmap )
74   for(; bitmap != 0; ++i, bitmap >>= 1 ) {
75     if ((bitmap & 1) == 0) {
76       checkClosure(stgCast(StgClosure*,payload[i]));
82 //@cindex checkLargeBitmap
/* Like checkSmallBitmap, but for a large bitmap (> 32 entries) stored as an
 * array of StgWord32 chunks: iterate over each chunk, then over each bit,
 * checking the closure behind every pointer word (clear bit = pointer).
 * NOTE(review): elided listing — declarations of `i`/`bmp` and closing
 * braces are missing from this excerpt. */
84 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap )
90   for (bmp=0; bmp<large_bitmap->size; bmp++) {
91     StgWord32 bitmap = large_bitmap->bitmap[bmp];
92     for(; bitmap != 0; ++i, bitmap >>= 1 ) {
93       if ((bitmap & 1) == 0) {
94 	checkClosure(stgCast(StgClosure*,payload[i]));
101 //@cindex checkStackClosure
/* Sanity-check one activation record (stack frame) and return its size in
 * words, dispatching on the frame's info-table type.  All activation
 * records carry 'bitmap' style layout info describing which payload words
 * are pointers.
 * NOTE(review): elided listing — several case labels (UPDATE_FRAME,
 * CATCH_FRAME, STOP_FRAME, SEQ_FRAME, RET_SMALL, ...), braces and returns
 * between the visible lines are missing from this excerpt. */
103 checkStackClosure( StgClosure* c )
105   const StgInfoTable* info = get_itbl(c);
107   /* All activation records have 'bitmap' style layout info. */
108   switch (info->type) {
109   case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
111       StgRetDyn* r = (StgRetDyn *)c;
112       return sizeofW(StgRetDyn) +
113 	           checkSmallBitmap(r->payload,r->liveness);
115   case RET_BCO: /* small bitmap (<= 32 entries) */
118     return 1 + checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
/* presumably the UPDATE_FRAME case — check the updatee looks like a pointer */
121       ASSERT(LOOKS_LIKE_PTR(((StgUpdateFrame*)c)->updatee));
124       /* check that the link field points to another stack frame */
125       ASSERT(get_itbl(((StgFrame*)c)->link)->type == UPDATE_FRAME ||
126 	     get_itbl(((StgFrame*)c)->link)->type == CATCH_FRAME ||
127 	     get_itbl(((StgFrame*)c)->link)->type == STOP_FRAME ||
128 	     get_itbl(((StgFrame*)c)->link)->type == SEQ_FRAME);
136     checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
137   case RET_BIG: /* large bitmap (> 32 entries) */
139     return 1 + checkLargeBitmap((StgPtr)c + 1,info->layout.large_bitmap);
141   case FUN_STATIC: /* probably a slow-entry point return address: */
142 #if 0 && defined(GRAN)
148     /* if none of the above, maybe it's a closure which looks a
149      * little like an infotable
151     checkClosureShallow(*(StgClosure **)c);
153     /* barf("checkStackClosure: weird activation record found on stack (%p).",c); */
158 * check that it looks like a valid closure - without checking its payload
159 * used to avoid recursion between checking PAPs and checking stack
163 //@cindex checkClosureShallow
/* Check that `p` looks like a valid closure WITHOUT inspecting its payload.
 * Used to break the recursion between checking PAPs and checking stacks.
 * Static closures (in the data segment) must be marked static; dynamic
 * ones must not be, and must additionally look like heap pointers.
 * NOTE(review): elided listing — the `else` keyword and braces between the
 * two ASSERT groups are missing from this excerpt. */
165 checkClosureShallow( StgClosure* p )
167   ASSERT(LOOKS_LIKE_GHC_INFO(p->header.info));
169   /* Is it a static closure (i.e. in the data segment)? */
170   if (LOOKS_LIKE_STATIC(p)) {
171     ASSERT(closure_STATIC(p));
173     ASSERT(!closure_STATIC(p));
174     ASSERT(LOOKS_LIKE_PTR(p));
178 /* check an individual stack object */
179 //@cindex checkStackObject
/* Check a single object on the stack at `sp` and return its size in words.
 * Three possibilities: a tagged argument word (skip it — stubbed pointers
 * make pointer checks meaningless), an info pointer (an activation record,
 * handed to checkStackClosure), or an untagged closure pointer (shallow
 * check only).
 * NOTE(review): elided listing — the final `return 1` of the pointer branch
 * and closing braces are missing from this excerpt. */
181 checkStackObject( StgPtr sp )
183   if (IS_ARG_TAG(*sp)) {
184     /* Tagged words might be "stubbed" pointers, so there's no
185      * point checking to see whether they look like pointers or
186      * not (some of them will).
188     return ARG_SIZE(*sp) + 1;
189   } else if (LOOKS_LIKE_GHC_INFO(*stgCast(StgPtr*,sp))) {
190     return checkStackClosure(stgCast(StgClosure*,sp));
191   } else { /* must be an untagged closure pointer in the stack */
192     checkClosureShallow(*stgCast(StgClosure**,sp));
197 /* check sections of stack between update frames */
198 //@cindex checkStackChunk
/* Check a section of stack between `sp` and `stack_end`, advancing by the
 * size of each object.  The final equality assertion was commented out
 * (by HWL), so overshoot past `stack_end` is tolerated here.
 * NOTE(review): elided listing — the declaration/initialisation of `p`
 * (presumably `StgPtr p = sp;`) is missing from this excerpt. */
200 checkStackChunk( StgPtr sp, StgPtr stack_end )
205   while (p < stack_end) {
206     p += checkStackObject( p );
208   // ASSERT( p == stack_end ); -- HWL
211 //@cindex checkStackChunk
/* The central heap-object sanity check: validate the closure `p` (header,
 * static/dynamic placement, payload pointers) and return its size in words.
 * Dispatches on the info-table type; parallel-only cases (PAR/GRAN:
 * BLOCKED_FETCH, FETCH_ME, FETCH_ME_BQ, RBH) are conditionally compiled.
 * NOTE(review): elided listing — many case labels, braces, `#if` lines and
 * local declarations (`i`, `q`) are missing between the visible lines of
 * this excerpt; the visible fragments are grouped by their original cases. */
213 checkClosure( StgClosure* p )
215   const StgInfoTable *info;
218   ASSERT(LOOKS_LIKE_GHC_INFO(p->header.info));
221   /* Is it a static closure (i.e. in the data segment)? */
222   if (LOOKS_LIKE_STATIC(p)) {
223     ASSERT(closure_STATIC(p));
225     ASSERT(!closure_STATIC(p));
226     ASSERT(LOOKS_LIKE_PTR(p));
230   switch (info->type) {
/* BCO: check every constant pointer in the byte-code object */
233       StgBCO* bco = stgCast(StgBCO*,p);
235       for(i=0; i < bco->n_ptrs; ++i) {
236 	ASSERT(LOOKS_LIKE_PTR(bcoConstPtr(bco,i)));
238       return bco_sizeW(bco);
/* MVAR: head/tail/value must all look like pointers; the blocking queue is
 * checked with the variant of checkBQ matching the build (PAR vs other) */
243       StgMVar *mvar = (StgMVar *)p;
244       ASSERT(LOOKS_LIKE_PTR(mvar->head));
245       ASSERT(LOOKS_LIKE_PTR(mvar->tail));
246       ASSERT(LOOKS_LIKE_PTR(mvar->value));
249       checkBQ((StgBlockingQueueElement *)mvar->head, p);
251       checkBQ(mvar->head, p);
254     return sizeofW(StgMVar);
/* THUNK-like closures: check payload pointers; size is at least the
 * minimum updatable closure size */
265       for (i = 0; i < info->layout.payload.ptrs; i++) {
266 	ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
268       return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
/* BLACKHOLE_BQ (presumably): check its blocking queue first */
272     checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
273     /* fall through to basic ptr check */
288   case IND_OLDGEN_PERM:
293   case SE_CAF_BLACKHOLE:
301   case CONSTR_CHARLIKE:
303   case CONSTR_NOCAF_STATIC:
/* generic case: check each payload pointer, size from the info table */
308       for (i = 0; i < info->layout.payload.ptrs; i++) {
309 	ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
311       return sizeW_fromITBL(info);
314   case IND_STATIC: /* (1, 0) closure */
315     ASSERT(LOOKS_LIKE_PTR(((StgIndStatic*)p)->indirectee));
316     return sizeW_fromITBL(info);
/* WEAK (presumably): layout differs from what the info table says */
319     /* deal with these specially - the info table isn't
320      * representative of the actual layout.
322     { StgWeak *w = (StgWeak *)p;
323       ASSERT(LOOKS_LIKE_PTR(w->key));
324       ASSERT(LOOKS_LIKE_PTR(w->value));
325       ASSERT(LOOKS_LIKE_PTR(w->finalizer));
327 	ASSERT(LOOKS_LIKE_PTR(w->link));
329       return sizeW_fromITBL(info);
/* THUNK_SELECTOR (presumably): only the selectee is a pointer */
333     ASSERT(LOOKS_LIKE_PTR(stgCast(StgSelector*,p)->selectee));
334     return sizeofW(StgHeader) + MIN_UPD_SIZE;
/* IND: may carry padding words zeroed by the GC; skip them to find the size */
338     /* we don't expect to see any of these after GC
339      * but they might appear during execution
342       StgInd *ind = stgCast(StgInd*,p);
343       ASSERT(LOOKS_LIKE_PTR(ind->indirectee));
344       q = (P_)p + sizeofW(StgInd);
345       while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
/* stack-frame types must never appear in the heap */
359     barf("checkClosure: stack frame");
361   case AP_UPD: /* we can treat this as being the same as a PAP */
364       StgPAP *pap = stgCast(StgPAP*,p);
365       ASSERT(LOOKS_LIKE_PTR(pap->fun));
366       checkStackChunk((StgPtr)pap->payload,
367 		      (StgPtr)pap->payload + pap->n_args
369       return pap_sizeW(pap);
/* ARR_WORDS (presumably): no pointers, size only */
373     return arr_words_sizeW(stgCast(StgArrWords*,p));
376   case MUT_ARR_PTRS_FROZEN:
378       StgMutArrPtrs* a = stgCast(StgMutArrPtrs*,p);
380       for (i = 0; i < a->ptrs; i++) {
381 	ASSERT(LOOKS_LIKE_PTR(a->payload[i]));
383       return mut_arr_ptrs_sizeW(a);
387     checkTSO((StgTSO *)p);
388     return tso_sizeW((StgTSO *)p);
/* PAR-only closure types below */
393     ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
394     ASSERT(LOOKS_LIKE_PTR((((StgBlockedFetch *)p)->node)));
395     return sizeofW(StgBlockedFetch);  // see size used in evacuate()
398     ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
399     return sizeofW(StgFetchMe);  // see size used in evacuate()
402     checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
403     return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
406     /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
407     ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
408     if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
409       checkBQ(((StgRBH *)p)->blocking_queue, p);
410     ASSERT(LOOKS_LIKE_GHC_INFO(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
411     return BLACKHOLE_sizeW();  // see size used in evacuate()
412     // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
/* EVACUATED closures and unknown types are errors at sanity-check time */
417     barf("checkClosure: found EVACUATED closure %d",
420     barf("checkClosure (closure type %d)", info->type);
426 #define PVM_PE_MASK 0xfffc0000
427 #define MAX_PVM_PES MAX_PES
428 #define MAX_PVM_TIDS MAX_PES
429 #define MAX_SLOTS 100000
/* Heuristic validity check on a PVM task id: the PE number lives in the
 * top bits (PVM_PE_MASK, shifted by 18) and must be non-zero; both halves
 * must be below MAX_PVM_TIDS.
 * NOTE(review): elided listing — the `return ok;` (or ASSERT) after the
 * computation is missing from this excerpt. */
432 looks_like_tid(StgInt tid)
434   StgInt hi = (tid & PVM_PE_MASK) >> 18;
435   StgInt lo = (tid & ~PVM_PE_MASK);
436   rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Heuristic validity check on a GALA slot number: only an upper bound
 * (MAX_SLOTS) is applied, since without the tid we cannot compare against
 * the per-PE highest_slot().
 * NOTE(review): elided listing — the trailing return is missing from this
 * excerpt. */
441 looks_like_slot(StgInt slot)
443   /* if tid is known better use looks_like_ga!! */
444   rtsBool ok = slot<MAX_SLOTS;
445   // This refers only to the no. of slots on the current PE
446   // rtsBool ok = slot<=highest_slot();
/* Heuristic validity check on a global address: the tid must look valid,
 * and the slot bound is tighter when the GA belongs to this PE (compare
 * against highest_slot()) than for a remote PE (MAX_SLOTS).
 * NOTE(review): elided listing — the trailing return of `ok` is missing
 * from this excerpt. */
451 looks_like_ga(globalAddr *ga)
453   rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
454   rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
455 		    (ga)->payload.gc.slot<=highest_slot() :
456 		    (ga)->payload.gc.slot<MAX_SLOTS;
457   rtsBool ok = is_tid && is_slot;
463 //@node Heap Sanity, TSO Sanity, Stack sanity
464 //@subsection Heap Sanity
466 /* -----------------------------------------------------------------------------
469 After garbage collection, the live heap is in a state where we can
470 run through and check that all the pointers point to the right
471 place. This function starts at a given position and sanity-checks
472 all the objects in the remainder of the chain.
473 -------------------------------------------------------------------------- */
/* After GC, walk the live heap from `start` through the block chain `bd`,
 * sanity-checking every closure and counting IND_STATIC closures (xxx).
 * Between closures, zero words and non-info words are skipped — these are
 * slop left by the collector.
 * NOTE(review): elided listing — the outer loop over blocks, `p`'s
 * declaration/advance and the IND_STATIC counter increment are missing
 * from this excerpt. */
477 checkHeap(bdescr *bd, StgPtr start)
480   nat xxx = 0; // tmp -- HWL
489     while (p < bd->free) {
490       nat size = checkClosure(stgCast(StgClosure*,p));
491       /* This is the smallest size of closure that can live in the heap. */
492       ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
493       if (get_itbl(stgCast(StgClosure*,p))->type == IND_STATIC)
498       while (p < bd->free &&
499 	     (*p == 0 || !LOOKS_LIKE_GHC_INFO((void*)*p))) { p++; }
506   fprintf(stderr,"@@@@ checkHeap: Heap ok; %d IND_STATIC closures checked\n",
511 Check heap between start and end. Used after unpacking graphs.
/* Check a contiguous heap region between `start` and `end`.  Used after
 * unpacking graphs (parallel builds): every word position must carry a
 * valid info pointer, and each closure must meet the minimum heap size.
 * NOTE(review): elided listing — declarations of `p` and `size` are
 * missing from this excerpt. */
514 checkHeapChunk(StgPtr start, StgPtr end)
519   for (p=start; p<end; p+=size) {
520     ASSERT(LOOKS_LIKE_GHC_INFO((void*)*p));
521     size = checkClosure(stgCast(StgClosure*,p));
522     /* This is the smallest size of closure that can live in the heap. */
523     ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Check a chain of (large-object) blocks: the closure at the start of each
 * block is sanity-checked.
 * NOTE(review): elided listing — the loop advancing `bd = bd->link` (and
 * braces) is missing from this excerpt. */
529 checkChain(bdescr *bd)
532     checkClosure((StgClosure *)bd->start);
537 /* check stack - making sure that update frames are linked correctly */
/* Check a whole stack: verify the chunk down to the first update frame,
 * then follow the update-frame list (`su`), checking each inter-frame
 * chunk, until the frame pointer reaches the stack end.  Frames other
 * than SEQ/CATCH (and presumably UPDATE/STOP in the elided cases) are an
 * error.
 * NOTE(review): elided listing — the UPDATE_FRAME and STOP_FRAME cases,
 * the `sp` advance past each frame, `break`s and braces are missing from
 * this excerpt. */
540 checkStack(StgPtr sp, StgPtr stack_end, StgUpdateFrame* su )
542   /* check everything down to the first update frame */
543   checkStackChunk( sp, stgCast(StgPtr,su) );
544   while ( stgCast(StgPtr,su) < stack_end) {
545     sp = stgCast(StgPtr,su);
546     switch (get_itbl(su)->type) {
551       su = stgCast(StgSeqFrame*,su)->link;
554       su = stgCast(StgCatchFrame*,su)->link;
557       /* not quite: ASSERT(stgCast(StgPtr,su) == stack_end); */
560       barf("checkStack: weird record found on update frame list.");
562     checkStackChunk( sp, stgCast(StgPtr,su) );
564   ASSERT(stgCast(StgPtr,su) == stack_end);
567 //@node TSO Sanity, Thread Queue Sanity, Heap Sanity
568 //@subsection TSO Sanity
/* Sanity-check a TSO (thread state object): skip relocated and dead
 * threads, verify the stack/su pointers are in range, then (in parallel
 * builds) verify the why_blocked reason is consistent with the closure or
 * TSO the thread is blocked on, check the run-queue link field, and
 * finally check the thread's stack.
 * NOTE(review): elided listing — `sp`'s declaration, `return`s for the
 * early-exit branches, `#if defined(PAR)/(GRAN)` guards, case labels and
 * `break`s are missing from this excerpt. */
572 checkTSO(StgTSO *tso)
575   StgPtr stack = tso->stack;
576   StgUpdateFrame* su = tso->su;
577   StgOffset stack_size = tso->stack_size;
578   StgPtr stack_end = stack + stack_size;
580   if (tso->what_next == ThreadRelocated) {
585   if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
586     /* The garbage collector doesn't bother following any pointers
587      * from dead threads, so don't check sanity here.
592   ASSERT(stack <= sp && sp < stack_end);
593   ASSERT(sp <= stgCast(StgPtr,su));
596   ASSERT(tso->par.magic==TSO_MAGIC);
598   switch (tso->why_blocked) {
/* BlockedOnGA (presumably): must be blocked on a FETCH_ME_BQ */
600     checkClosureShallow(tso->block_info.closure);
601     ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
602 	   get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
604   case BlockedOnGA_NoSend:
605     checkClosureShallow(tso->block_info.closure);
606     ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
608   case BlockedOnBlackHole:
609     checkClosureShallow(tso->block_info.closure);
610     ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
611 	   get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
612 	   get_itbl(tso->block_info.closure)->type==RBH);
617     /* isOnBQ(blocked_queue) */
619   case BlockedOnException:
620     /* isOnSomeBQ(tso) */
621     ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
/* BlockedOnMVar (presumably) */
624     ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
628     Could check other values of why_blocked but I am more
629     lazy than paranoid (bad combination) -- HWL
633   /* if the link field is non-nil it most point to one of these
634      three closure types */
635   ASSERT(tso->link == END_TSO_QUEUE ||
636 	 get_itbl(tso->link)->type == TSO ||
637 	 get_itbl(tso->link)->type == BLOCKED_FETCH ||
638 	 get_itbl(tso->link)->type == CONSTR);
641   checkStack(sp, stack_end, su);
645 //@cindex checkTSOsSanity
/* GranSim-only: run checkTSO over every TSO on every processor's run
 * queue, reporting progress on stderr and a summary count at the end.
 * NOTE(review): elided listing — declarations of `i`/`tsos`/`tso`, the
 * checkTSO call inside the loop and closing braces are missing from this
 * excerpt. */
647 checkTSOsSanity(void) {
651   belch("Checking sanity of all runnable TSOs:");
653   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
654     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
655       fprintf(stderr, "TSO %p on PE %d ...", tso, i);
657       fprintf(stderr, "OK, ");
662   belch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
665 //@node Thread Queue Sanity, Blackhole Sanity, TSO Sanity
666 //@subsection Thread Queue Sanity
670 //@cindex checkThreadQSanity
/* Check one processor's run queue: head/tail must be non-NULL (the TSO nil
 * is END_TSO_QUEUE, so a real NULL is corruption), head and tail must be
 * END_TSO_QUEUE together or not at all, each link must be consistent with
 * its predecessor, and the last element must be the recorded tail.
 * Optionally run checkTSO on each element (check_TSO_too).
 * NOTE(review): elided listing — declarations of `tso`/`prev`, the loop
 * termination condition, the conditional checkTSO call and braces are
 * missing from this excerpt. */
672 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
676   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
677   ASSERT(run_queue_hds[proc]!=NULL);
678   ASSERT(run_queue_tls[proc]!=NULL);
679   /* if either head or tail is NIL then the other one must be NIL, too */
680   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
681   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
682   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
684        prev=tso, tso=tso->link) {
685     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
686 	   (prev==END_TSO_QUEUE || prev->link==tso));
690   ASSERT(prev==run_queue_tls[proc]);
693 //@cindex checkThreadQsSanity
/* Check every processor's run queue by delegating to checkThreadQSanity.
 * NOTE(review): elided listing — the declaration of `p` (a PEs counter)
 * is missing from this excerpt. */
695 checkThreadQsSanity (rtsBool check_TSO_too)
699   for (p=0; p<RtsFlags.GranFlags.proc; p++)
700     checkThreadQSanity(p, check_TSO_too);
705 Check that all TSOs have been evacuated.
706 Optionally also check the sanity of the TSOs.
/* Walk the global all_threads list and assert every TSO lives in an
 * evacuated block (i.e. all threads survived GC).  Optionally also run
 * the full checkTSO on each (checkTSOs flag).
 * NOTE(review): elided listing — `tso`'s declaration and the conditional
 * checkTSO call are missing from this excerpt. */
709 checkGlobalTSOList (rtsBool checkTSOs)
711   extern  StgTSO *all_threads;
713   for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
714     ASSERT(Bdescr((P_)tso)->evacuated == 1);
720 //@node Blackhole Sanity, GALA table sanity, Thread Queue Sanity
721 //@subsection Blackhole Sanity
723 /* -----------------------------------------------------------------------------
724 Check Blackhole Sanity
726 Test whether an object is already on the update list.
727 It isn't necessarily an rts error if it is - it might be a programming
730 Future versions might be able to test for a blackhole without traversing
731 the update frame list.
733 -------------------------------------------------------------------------- */
734 //@cindex isBlackhole
/* Test whether closure `p` is the updatee of some frame on `tso`'s update
 * frame list (i.e. is "on the update list").  Not necessarily an RTS
 * error if so — may just be interesting to the caller.
 * NOTE(review): elided listing — the UPDATE_FRAME case label, the
 * rtsTrue/rtsFalse returns, the link advance in the update case, the
 * STOP_FRAME termination and braces are missing from this excerpt. */
736 isBlackhole( StgTSO* tso, StgClosure* p )
738   StgUpdateFrame* su = tso->su;
740     switch (get_itbl(su)->type) {
742       if (su->updatee == p) {
749       su = stgCast(StgSeqFrame*,su)->link;
752       su = stgCast(StgCatchFrame*,su)->link;
757       barf("isBlackhole: weird record found on update frame list.");
763 Check the static objects list.
/* Walk the static_objects list (linked through the per-type STATIC_LINK
 * fields) until END_OF_STATIC_LIST, sanity-checking each entry by its
 * closure type; IND_STATIC indirectees must be valid closures.  Unknown
 * types abort via barf.
 * NOTE(review): elided listing — `info`'s declaration, the case labels
 * (IND_STATIC / THUNK_STATIC / FUN_STATIC / CONSTR_STATIC, presumably),
 * `break`s, the barf arguments and braces are missing from this excerpt.
 * Fix applied: the barf message read "checkStaticObjetcs" (typo) — now
 * spelled "checkStaticObjects" so grep/diagnostics match the function. */
766 checkStaticObjects ( void ) {
767   extern StgClosure* static_objects;
768   StgClosure *p = static_objects;
771   while (p != END_OF_STATIC_LIST) {
774     switch (info->type) {
777 	StgClosure *indirectee = stgCast(StgIndStatic*,p)->indirectee;
779 	ASSERT(LOOKS_LIKE_PTR(indirectee));
780 	ASSERT(LOOKS_LIKE_GHC_INFO(indirectee->header.info));
781 	p = IND_STATIC_LINK((StgClosure *)p);
786       p = THUNK_STATIC_LINK((StgClosure *)p);
790       p = FUN_STATIC_LINK((StgClosure *)p);
794       p = STATIC_LINK(info,(StgClosure *)p);
798       barf("checkStaticObjects: strange closure %p (%s)",
805 Check the sanity of a blocking queue starting at bqe with closure being
806 the closure holding the blocking queue.
807 Note that in GUM we can have several different closure types in a
/* PAR variant: check a blocking queue hanging off `closure`, which must be
 * one of the queue-carrying closure types.  In GUM the queue may contain
 * TSOs, BLOCKED_FETCHes and CONSTRs (presumably the elided case labels);
 * each element is fully checked and the walk ends at END_BQ_QUEUE.
 * NOTE(review): elided listing — the loop header advancing `bqe`, the case
 * labels, `break`s and braces are missing from this excerpt. */
813 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
815   rtsBool end = rtsFalse;
816   StgInfoTable *info = get_itbl(closure);
818   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
819 	 || info->type == FETCH_ME_BQ || info->type == RBH);
822     switch (get_itbl(bqe)->type) {
825       checkClosure((StgClosure *)bqe);
827       end = (bqe==END_BQ_QUEUE);
831       checkClosure((StgClosure *)bqe);
836       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
837 	   get_itbl(bqe)->type, closure, info_type(closure));
/* GRAN variant: blocking queues hold only TSOs here, and the owning
 * closure must be a BLACKHOLE_BQ or MVAR.  Walks to END_BQ_QUEUE,
 * checking each element.
 * NOTE(review): elided listing — the loop header, case label(s), `break`s
 * and braces are missing from this excerpt; this variant sits in an
 * (elided) #elif branch alongside the PAR and sequential versions. */
843 checkBQ (StgTSO *bqe, StgClosure *closure)
845   rtsBool end = rtsFalse;
846   StgInfoTable *info = get_itbl(closure);
848   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
851     switch (get_itbl(bqe)->type) {
854       checkClosure((StgClosure *)bqe);
856       end = (bqe==END_BQ_QUEUE);
860       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
861 	   get_itbl(bqe)->type, closure, info_type(closure));
/* Sequential variant: same contract as the GRAN version but the queue
 * terminator is END_TSO_QUEUE and the error message prints the raw
 * info->type instead of info_type().
 * NOTE(review): elided listing — the loop header, case label(s), `break`s
 * and braces are missing from this excerpt; this is the (elided) #else
 * branch of the checkBQ conditional compilation. */
867 checkBQ (StgTSO *bqe, StgClosure *closure)
869   rtsBool end = rtsFalse;
870   StgInfoTable *info = get_itbl(closure);
872   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
875     switch (get_itbl(bqe)->type) {
877       checkClosure((StgClosure *)bqe);
879       end = (bqe==END_TSO_QUEUE);
883       barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
884 	   get_itbl(bqe)->type, closure, info->type);
892 //@node GALA table sanity, Index, Blackhole Sanity
893 //@subsection GALA table sanity
896 This routine checks the sanity of the LAGA and GALA tables. They are
897 implemented as lists through one hash table, LAtoGALAtable, because entries
898 in both tables have the same structure:
899 - the LAGA table maps local addresses to global addresses; it starts
900 with liveIndirections
901 - the GALA table maps global addresses to local addresses; it starts
908 /* hidden in parallel/Global.c; only accessed for testing here */
909 extern GALA *liveIndirections;
910 extern GALA *liveRemoteGAs;
911 extern HashTable *LAtoGALAtable;
913 //@cindex checkLAGAtable
/* Check the LAGA (local→global) and GALA (global→local) address tables,
 * which share the LAtoGALAtable hash table.  For each entry in both
 * liveIndirections and liveRemoteGAs: a preferred entry must be the one
 * the hash table resolves to, the local address must carry a valid info
 * pointer, and the list must not contain a trivial self-loop.  With
 * check_closures set, fully check each local closure too.
 * NOTE(review): elided listing — declarations of `gala`/`gala0`, counter
 * increments for `n`/`m` and closing braces are missing from this
 * excerpt. */
915 checkLAGAtable(rtsBool check_closures)
918   nat n=0, m=0; // debugging
920   for (gala = liveIndirections; gala != NULL; gala = gala->next) {
922     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
923     ASSERT(!gala->preferred || gala == gala0);
924     ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
925     ASSERT(gala->next!=gala); // detect direct loops
927     if ( check_closures ) {
928       checkClosure(stgCast(StgClosure*,gala->la));
933   for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
935     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
936     ASSERT(!gala->preferred || gala == gala0);
937     ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
938     ASSERT(gala->next!=gala); // detect direct loops
940     if ( check_closures ) {
941       checkClosure(stgCast(StgClosure*,gala->la));
948 //@node Index, , GALA table sanity
954 //* checkBQ:: @cindex\s-+checkBQ
955 //* checkChain:: @cindex\s-+checkChain
956 //* checkClosureShallow:: @cindex\s-+checkClosureShallow
957 //* checkHeap:: @cindex\s-+checkHeap
958 //* checkLargeBitmap:: @cindex\s-+checkLargeBitmap
959 //* checkSmallBitmap:: @cindex\s-+checkSmallBitmap
960 //* checkStack:: @cindex\s-+checkStack
961 //* checkStackChunk:: @cindex\s-+checkStackChunk
962 //* checkStackChunk:: @cindex\s-+checkStackChunk
963 //* checkStackClosure:: @cindex\s-+checkStackClosure
964 //* checkStackObject:: @cindex\s-+checkStackObject
965 //* checkTSO:: @cindex\s-+checkTSO
966 //* checkTSOsSanity:: @cindex\s-+checkTSOsSanity
967 //* checkThreadQSanity:: @cindex\s-+checkThreadQSanity
968 //* checkThreadQsSanity:: @cindex\s-+checkThreadQsSanity
969 //* isBlackhole:: @cindex\s-+isBlackhole