1 /* -----------------------------------------------------------------------------
2 * $Id: Sanity.c,v 1.24 2000/12/11 12:37:00 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
6 * Sanity checking code for the heap and stack.
8 * Used when debugging: check that the stack looks reasonable.
10 * - All things that are supposed to be pointers look like pointers.
12 * - Objects in text space are marked as static closures, those
13 * in the heap are dynamic.
15 * ---------------------------------------------------------------------------*/
23 //* Thread Queue Sanity::
24 //* Blackhole Sanity::
27 //@node Includes, Macros
28 //@subsection Includes
32 #ifdef DEBUG /* whole file */
36 #include "BlockAlloc.h"
41 #include "StoragePriv.h" // for END_OF_STATIC_LIST
43 //@node Macros, Stack sanity, Includes
46 #define LOOKS_LIKE_PTR(r) ((LOOKS_LIKE_STATIC_CLOSURE(r) || \
47 ((HEAP_ALLOCED(r) && Bdescr((P_)r)->free != (void *)-1))) && \
48 ((StgWord)(*(StgPtr)r)!=0xaaaaaaaa))
50 //@node Stack sanity, Heap Sanity, Macros
51 //@subsection Stack sanity
53 /* -----------------------------------------------------------------------------
55 -------------------------------------------------------------------------- */
57 StgOffset checkStackClosure( StgClosure* c );
59 StgOffset checkStackObject( StgPtr sp );
61 void checkStackChunk( StgPtr sp, StgPtr stack_end );
63 static StgOffset checkSmallBitmap( StgPtr payload, StgWord32 bitmap );
65 static StgOffset checkLargeBitmap( StgPtr payload,
66 StgLargeBitmap* large_bitmap );
68 void checkClosureShallow( StgClosure* p );
70 //@cindex checkSmallBitmap
/* Walk a stack-frame payload described by a small (word-sized) bitmap,
 * sanity-checking each pointer word.  A *clear* bit marks a pointer word
 * (checked via checkClosure); a set bit marks a non-pointer, which is
 * skipped.
 * NOTE(review): this excerpt is truncated — the return type, braces,
 * the declaration of `i`, and the final return are not visible here.
 */
checkSmallBitmap( StgPtr payload, StgWord32 bitmap )
    /* scan one payload word per bit, LSB first, until no bits remain */
    for(; bitmap != 0; ++i, bitmap >>= 1 ) {
	if ((bitmap & 1) == 0) {
	    /* bit clear => pointer word: check the closure it points to */
	    checkClosure(stgCast(StgClosure*,payload[i]));
85 //@cindex checkLargeBitmap
/* Like checkSmallBitmap, but for frames whose liveness map is a
 * StgLargeBitmap: an array of `size` 32-bit bitmap words covering more
 * than 32 payload slots.  A clear bit marks a pointer word to check.
 * NOTE(review): truncated excerpt — return type, braces, and the
 * declarations of `i`/`bmp` are not visible here.
 */
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap )
    /* outer loop: one 32-bit chunk of the large bitmap at a time */
    for (bmp=0; bmp<large_bitmap->size; bmp++) {
	StgWord32 bitmap = large_bitmap->bitmap[bmp];
	/* inner loop: same LSB-first scan as the small-bitmap case */
	for(; bitmap != 0; ++i, bitmap >>= 1 ) {
	    if ((bitmap & 1) == 0) {
		checkClosure(stgCast(StgClosure*,payload[i]));
104 //@cindex checkStackClosure
/* Sanity-check a single activation record (stack frame) and return its
 * size in words.  Dispatches on the frame's info-table type; bitmap-style
 * frames delegate to checkSmallBitmap/checkLargeBitmap.
 * NOTE(review): truncated excerpt — several case labels (e.g. for the
 * UPDATE_FRAME/CATCH_FRAME assertions below) and closing braces are
 * missing from this view.
 */
checkStackClosure( StgClosure* c )
  const StgInfoTable* info = get_itbl(c);
  /* All activation records have 'bitmap' style layout info. */
  switch (info->type) {
  case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
      StgRetDyn* r = (StgRetDyn *)c;
      /* frame header plus however many live payload words the mask covers */
      return sizeofW(StgRetDyn) +
	     checkSmallBitmap(r->payload,r->liveness);
  case RET_BCO: /* small bitmap (<= 32 entries) */
      return 1 + checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
      /* update frame: its updatee must look like a valid pointer */
      ASSERT(LOOKS_LIKE_PTR(((StgUpdateFrame*)c)->updatee));
      /* check that the link field points to another stack frame */
      ASSERT(get_itbl(((StgFrame*)c)->link)->type == UPDATE_FRAME ||
	     get_itbl(((StgFrame*)c)->link)->type == CATCH_FRAME ||
	     get_itbl(((StgFrame*)c)->link)->type == STOP_FRAME ||
	     get_itbl(((StgFrame*)c)->link)->type == SEQ_FRAME);
      checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
  case RET_BIG: /* large bitmap (> 32 entries) */
      return 1 + checkLargeBitmap((StgPtr)c + 1,info->layout.large_bitmap);
  case FUN_STATIC: /* probably a slow-entry point return address: */
#if 0 && defined(GRAN)
    /* if none of the above, maybe it's a closure which looks a
     * little like an infotable
    checkClosureShallow(*(StgClosure **)c);
    /* barf("checkStackClosure: weird activation record found on stack (%p).",c); */
161 * check that it looks like a valid closure - without checking its payload
162 * used to avoid recursion between checking PAPs and checking stack
166 //@cindex checkClosureShallow
/* Check that `p` looks like a valid closure WITHOUT descending into its
 * payload — used to break the recursion between checking PAPs and
 * checking stacks.  Static closures must be flagged static; heap
 * closures must not be, and must look like pointers.
 * NOTE(review): truncated excerpt — the else-branch structure between
 * the two closure_STATIC assertions is missing from this view.
 */
checkClosureShallow( StgClosure* p )
  /* the info pointer must look like a GHC (or Hugs constructor) itbl */
  ASSERT(LOOKS_LIKE_GHC_INFO(p->header.info)
	 || IS_HUGS_CONSTR_INFO(GET_INFO(p)));
  /* Is it a static closure (i.e. in the data segment)? */
  if (LOOKS_LIKE_STATIC(p)) {
    ASSERT(closure_STATIC(p));
    /* dynamic (heap) closure: must not be flagged static */
    ASSERT(!closure_STATIC(p));
    ASSERT(LOOKS_LIKE_PTR(p));
183 /* check an individual stack object */
184 //@cindex checkStackObject
/* Check one object on the stack at `sp` and return how many words it
 * occupies: a tagged run of argument words, an activation record, or an
 * untagged closure pointer.
 * NOTE(review): truncated excerpt — return type, braces, and the final
 * return of the else-branch are not visible here.
 */
checkStackObject( StgPtr sp )
  if (IS_ARG_TAG(*sp)) {
    /* Tagged words might be "stubbed" pointers, so there's no
     * point checking to see whether they look like pointers or
     * not (some of them will).
    return ARG_SIZE(*sp) + 1;
  } else if (LOOKS_LIKE_GHC_INFO(*stgCast(StgPtr*,sp))) {
    /* an info pointer on the stack => an activation record */
    return checkStackClosure(stgCast(StgClosure*,sp));
  } else { /* must be an untagged closure pointer in the stack */
    checkClosureShallow(*stgCast(StgClosure**,sp));
202 /* check sections of stack between update frames */
203 //@cindex checkStackChunk
/* Check a contiguous section of stack between two update frames by
 * stepping object-by-object from `sp` up to `stack_end`.
 * NOTE(review): truncated excerpt — the declaration/initialization of
 * `p` and the closing braces are not visible here.
 */
checkStackChunk( StgPtr sp, StgPtr stack_end )
    while (p < stack_end) {
	/* advance by the size of each stack object in turn */
	p += checkStackObject( p );
    // ASSERT( p == stack_end ); -- HWL
216 //@cindex checkStackChunk
/* The central closure sanity check: verify that `p` looks like a valid
 * closure of whatever type its info table claims, check its pointer
 * fields with LOOKS_LIKE_PTR, and return the closure's size in words.
 * NOTE(review): heavily truncated excerpt — many case labels, braces,
 * and the declarations of `i`/`q` are missing from this view; the
 * visible bodies below belong to cases whose labels were elided.
 */
checkClosure( StgClosure* p )
  const StgInfoTable *info;
  ASSERT(LOOKS_LIKE_GHC_INFO(p->header.info));
  /* Is it a static closure (i.e. in the data segment)? */
  if (LOOKS_LIKE_STATIC(p)) {
    ASSERT(closure_STATIC(p));
    /* dynamic closure: must not be flagged static, must look like a ptr */
    ASSERT(!closure_STATIC(p));
    ASSERT(LOOKS_LIKE_PTR(p));
  /* dispatch on closure type from the info table */
  switch (info->type) {
      /* MVAR: head, tail and value must all look like pointers */
      StgMVar *mvar = (StgMVar *)p;
      ASSERT(LOOKS_LIKE_PTR(mvar->head));
      ASSERT(LOOKS_LIKE_PTR(mvar->tail));
      ASSERT(LOOKS_LIKE_PTR(mvar->value));
      /* the MVar's blocked-thread queue is checked too (parallel RTS
	 uses the StgBlockingQueueElement variant of checkBQ) */
      checkBQ((StgBlockingQueueElement *)mvar->head, p);
      checkBQ(mvar->head, p);
      return sizeofW(StgMVar);
      /* generic pointers-first layout: check each pointer field */
      for (i = 0; i < info->layout.payload.ptrs; i++) {
	ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
      /* thunks must be at least MIN_UPD_SIZE so they can be updated */
      return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
    checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
    /* fall through to basic ptr check */
  case IND_OLDGEN_PERM:
  case SE_CAF_BLACKHOLE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
      /* plain pointers-first closures: check pointer payload, size
	 comes straight from the info table */
      for (i = 0; i < info->layout.payload.ptrs; i++) {
	ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
      return sizeW_fromITBL(info);
  case IND_STATIC: /* (1, 0) closure */
    ASSERT(LOOKS_LIKE_PTR(((StgIndStatic*)p)->indirectee));
    return sizeW_fromITBL(info);
    /* deal with these specially - the info table isn't
     * representative of the actual layout.
    { StgWeak *w = (StgWeak *)p;
      ASSERT(LOOKS_LIKE_PTR(w->key));
      ASSERT(LOOKS_LIKE_PTR(w->value));
      ASSERT(LOOKS_LIKE_PTR(w->finalizer));
      ASSERT(LOOKS_LIKE_PTR(w->link));
      return sizeW_fromITBL(info);
    /* THUNK_SELECTOR: only the selectee is a pointer */
    ASSERT(LOOKS_LIKE_PTR(stgCast(StgSelector*,p)->selectee));
    return sizeofW(StgHeader) + MIN_UPD_SIZE;
    /* we don't expect to see any of these after GC
     * but they might appear during execution
      StgInd *ind = stgCast(StgInd*,p);
      ASSERT(LOOKS_LIKE_PTR(ind->indirectee));
      q = (P_)p + sizeofW(StgInd);
      while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
    /* stack-frame types must never appear in the heap */
    barf("checkClosure: stack frame");
  case AP_UPD: /* we can treat this as being the same as a PAP */
      /* PAP/AP_UPD: payload is a saved stack chunk of n_args words */
      StgPAP *pap = stgCast(StgPAP*,p);
      ASSERT(LOOKS_LIKE_PTR(pap->fun));
      checkStackChunk((StgPtr)pap->payload,
		      (StgPtr)pap->payload + pap->n_args
      return pap_sizeW(pap);
    /* ARR_WORDS: no pointers, nothing to check but the size */
    return arr_words_sizeW(stgCast(StgArrWords*,p));
  case MUT_ARR_PTRS_FROZEN:
      /* mutable pointer arrays: every element must look like a pointer */
      StgMutArrPtrs* a = stgCast(StgMutArrPtrs*,p);
      for (i = 0; i < a->ptrs; i++) {
	ASSERT(LOOKS_LIKE_PTR(a->payload[i]));
      return mut_arr_ptrs_sizeW(a);
    /* TSO: delegate to the dedicated thread checker */
    checkTSO((StgTSO *)p);
    return tso_sizeW((StgTSO *)p);
    /* parallel-RTS closures: check global addresses and node pointers */
    ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
    ASSERT(LOOKS_LIKE_PTR((((StgBlockedFetch *)p)->node)));
    return sizeofW(StgBlockedFetch);  // see size used in evacuate()
    ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
    return sizeofW(StgFetchMe);  // see size used in evacuate()
    checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
    return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
    /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
    ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
    if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
      checkBQ(((StgRBH *)p)->blocking_queue, p);
    /* reverting the RBH must yield a plausible info pointer */
    ASSERT(LOOKS_LIKE_GHC_INFO(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
    return BLACKHOLE_sizeW();   // see size used in evacuate()
    // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
    /* EVACUATED closures should never survive into a sanity check */
    barf("checkClosure: found EVACUATED closure %d",
    barf("checkClosure (closure type %d)", info->type);
423 #define PVM_PE_MASK 0xfffc0000
424 #define MAX_PVM_PES MAX_PES
425 #define MAX_PVM_TIDS MAX_PES
426 #define MAX_SLOTS 100000
/* Heuristic validity check for a PVM task id: the high bits (shifted by
 * 18, per PVM_PE_MASK) encode the PE and must be non-zero; both parts
 * must fall below MAX_PVM_TIDS.
 * NOTE(review): truncated excerpt — return type, braces and the final
 * return of `ok` are not visible here.
 */
looks_like_tid(StgInt tid)
  StgInt hi = (tid & PVM_PE_MASK) >> 18;
  StgInt lo = (tid & ~PVM_PE_MASK);
  rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Heuristic validity check for a GALA slot number when the owning tid is
 * unknown: only bounds it by the global MAX_SLOTS.
 * NOTE(review): truncated excerpt — return type, braces and the return
 * of `ok` are not visible here.
 */
looks_like_slot(StgInt slot)
  /* if tid is known better use looks_like_ga!! */
  rtsBool ok = slot<MAX_SLOTS;
  // This refers only to the no. of slots on the current PE
  // rtsBool ok = slot<=highest_slot();
/* Heuristic validity check for a global address: its tid must look
 * valid, and its slot must be within the tight local bound
 * (highest_slot) when the GA is owned by this PE (mytid), else within
 * the loose global bound MAX_SLOTS.
 * NOTE(review): truncated excerpt — return type, braces and the return
 * of `ok` are not visible here.
 */
looks_like_ga(globalAddr *ga)
  rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
  rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
		    (ga)->payload.gc.slot<=highest_slot() :
		    (ga)->payload.gc.slot<MAX_SLOTS;
  rtsBool ok = is_tid && is_slot;
460 //@node Heap Sanity, TSO Sanity, Stack sanity
461 //@subsection Heap Sanity
463 /* -----------------------------------------------------------------------------
466 After garbage collection, the live heap is in a state where we can
467 run through and check that all the pointers point to the right
468 place. This function starts at a given position and sanity-checks
469 all the objects in the remainder of the chain.
470 -------------------------------------------------------------------------- */
/* Walk the live heap, block by block, sanity-checking every closure and
 * counting IND_STATIC closures (reported on stderr at the end).  The
 * inner scan skips words that cannot be info pointers, to resynchronise
 * past slop between closures.
 * NOTE(review): truncated excerpt — the block-chain loop, `p`'s
 * declaration, and several braces are missing from this view.
 */
checkHeap(bdescr *bd, StgPtr start)
  nat xxx = 0; // tmp -- HWL
  /* start from the first block's start unless a chain was not given */
  if (bd != NULL) p = bd->start;
      while (p < bd->free) {
        nat size = checkClosure(stgCast(StgClosure*,p));
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
	if (get_itbl(stgCast(StgClosure*,p))->type == IND_STATIC)
      /* skip over slop: words too small to be pointers, or that don't
	 look like info tables */
      while (p < bd->free &&
	     (*p < 0x1000 || !LOOKS_LIKE_GHC_INFO((void*)*p))) { p++; }
  fprintf(stderr,"@@@@ checkHeap: Heap ok; %d IND_STATIC closures checked\n",
508 Check heap between start and end. Used after unpacking graphs.
/* Check a contiguous region of heap between `start` and `end`, closure
 * by closure.  Used after unpacking graphs (parallel RTS).
 * NOTE(review): truncated excerpt — return type, braces and the
 * declarations of `p`/`size` are not visible here.
 */
checkHeapChunk(StgPtr start, StgPtr end)
  for (p=start; p<end; p+=size) {
    /* each object must begin with a plausible info pointer */
    ASSERT(LOOKS_LIKE_GHC_INFO((void*)*p));
    size = checkClosure(stgCast(StgClosure*,p));
    /* This is the smallest size of closure that can live in the heap. */
    ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Check the first closure of each block in a block-descriptor chain.
 * NOTE(review): truncated excerpt — return type, braces and the loop
 * over bd->link are not visible here.
 */
checkChain(bdescr *bd)
    checkClosure((StgClosure *)bd->start);
534 /* check stack - making sure that update frames are linked correctly */
/* Check a whole stack: verify each chunk between update frames with
 * checkStackChunk, following the update-frame chain `su` down to
 * stack_end, which the last frame must reach exactly.
 * NOTE(review): truncated excerpt — the case labels for the switch
 * (seq/catch/update/stop frames) and several braces are missing.
 */
checkStack(StgPtr sp, StgPtr stack_end, StgUpdateFrame* su )
  /* check everything down to the first update frame */
  checkStackChunk( sp, stgCast(StgPtr,su) );
  while ( stgCast(StgPtr,su) < stack_end) {
    sp = stgCast(StgPtr,su);
    switch (get_itbl(su)->type) {
      su = stgCast(StgSeqFrame*,su)->link;
      su = stgCast(StgCatchFrame*,su)->link;
      /* not quite: ASSERT(stgCast(StgPtr,su) == stack_end); */
      barf("checkStack: weird record found on update frame list.");
    /* check the chunk between this frame and the next one down */
    checkStackChunk( sp, stgCast(StgPtr,su) );
  /* the frame chain must terminate exactly at the stack end */
  ASSERT(stgCast(StgPtr,su) == stack_end);
564 //@node TSO Sanity, Thread Queue Sanity, Heap Sanity
565 //@subsection TSO Sanity
/* Sanity-check one thread (TSO): its stack pointers must be in range,
 * its blocking state must be consistent with the closure it is blocked
 * on, its link field must point at a plausible queue element, and its
 * stack contents are checked via checkStack.
 * NOTE(review): truncated excerpt — `sp`'s declaration, early-return
 * bodies, several case labels of the why_blocked switch, and closing
 * braces are missing from this view.
 */
checkTSO(StgTSO *tso)
  StgPtr stack = tso->stack;
  StgUpdateFrame* su = tso->su;
  StgOffset stack_size = tso->stack_size;
  StgPtr stack_end = stack + stack_size;
  /* a relocated TSO is just a forwarding pointer — nothing to check */
  if (tso->what_next == ThreadRelocated) {
  if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
    /* The garbage collector doesn't bother following any pointers
     * from dead threads, so don't check sanity here.
  /* stack pointer and update-frame pointer must lie inside the stack */
  ASSERT(stack <= sp && sp < stack_end);
  ASSERT(sp <= stgCast(StgPtr,su));
  /* parallel RTS: the TSO's magic word guards against corruption */
  ASSERT(tso->par.magic==TSO_MAGIC);
  switch (tso->why_blocked) {
    checkClosureShallow(tso->block_info.closure);
    ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
	   get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
  case BlockedOnGA_NoSend:
    checkClosureShallow(tso->block_info.closure);
    ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
  case BlockedOnBlackHole:
    checkClosureShallow(tso->block_info.closure);
    ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
	   get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
	   get_itbl(tso->block_info.closure)->type==RBH);
    /* isOnBQ(blocked_queue) */
  case BlockedOnException:
    /* isOnSomeBQ(tso) */
    ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
    ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
    Could check other values of why_blocked but I am more
    lazy than paranoid (bad combination) -- HWL
  /* if the link field is non-nil it must point to one of these
     three closure types */
  ASSERT(tso->link == END_TSO_QUEUE ||
	 get_itbl(tso->link)->type == TSO ||
	 get_itbl(tso->link)->type == BLOCKED_FETCH ||
	 get_itbl(tso->link)->type == CONSTR);
  checkStack(sp, stack_end, su);
642 //@cindex checkTSOsSanity
/* GranSim: check every runnable TSO on every PE's run queue, reporting
 * progress on stderr and a summary via belch().
 * NOTE(review): truncated excerpt — the declarations of `i`/`tsos`/`tso`,
 * the call that actually checks each TSO, and closing braces are missing.
 */
checkTSOsSanity(void) {
  belch("Checking sanity of all runnable TSOs:");
  /* iterate over all processors' run queues */
  for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
    for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
      fprintf(stderr, "TSO %p on PE %d ...", tso, i);
      fprintf(stderr, "OK, ");
  belch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
662 //@node Thread Queue Sanity, Blackhole Sanity, TSO Sanity
663 //@subsection Thread Queue Sanity
667 //@cindex checkThreadQSanity
/* Check the structural invariants of one PE's run queue: head/tail are
 * never NULL (the nil value is END_TSO_QUEUE), head and tail are nil
 * together, each node's link agrees with its successor, and the last
 * node is the recorded tail.  check_TSO_too presumably enables per-TSO
 * checks in the elided loop body — TODO confirm against full source.
 * NOTE(review): truncated excerpt — the loop's termination condition,
 * the declarations of `tso`/`prev`, and closing braces are missing.
 */
checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
  /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
  ASSERT(run_queue_hds[proc]!=NULL);
  ASSERT(run_queue_tls[proc]!=NULL);
  /* if either head or tail is NIL then the other one must be NIL, too */
  ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
  ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
  for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
       prev=tso, tso=tso->link) {
    /* each node must be reachable from its predecessor's link */
    ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
	   (prev==END_TSO_QUEUE || prev->link==tso));
  /* the last node we saw must be the recorded queue tail */
  ASSERT(prev==run_queue_tls[proc]);
690 //@cindex checkThreadQsSanity
/* Check the run queues of all PEs by calling checkThreadQSanity on each.
 * NOTE(review): truncated excerpt — return type, braces and `p`'s
 * declaration are not visible here.
 */
checkThreadQsSanity (rtsBool check_TSO_too)
  for (p=0; p<RtsFlags.GranFlags.proc; p++)
    checkThreadQSanity(p, check_TSO_too);
702 Check that all TSOs have been evacuated.
703 Optionally also check the sanity of the TSOs.
/* Walk the global all_threads list and assert every TSO lives in an
 * evacuated block (i.e. all threads were evacuated by GC).  checkTSOs
 * presumably also triggers per-TSO sanity checks in the elided body —
 * TODO confirm against full source.
 * NOTE(review): truncated excerpt — return type, `tso`'s declaration
 * and closing braces are not visible here.
 */
checkGlobalTSOList (rtsBool checkTSOs)
  extern  StgTSO *all_threads;
  for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
    ASSERT(Bdescr((P_)tso)->evacuated == 1);
717 //@node Blackhole Sanity, GALA table sanity, Thread Queue Sanity
718 //@subsection Blackhole Sanity
720 /* -----------------------------------------------------------------------------
721 Check Blackhole Sanity
723 Test whether an object is already on the update list.
724 It isn't necessarily an rts error if it is - it might be a programming
727 Future versions might be able to test for a blackhole without traversing
728 the update frame list.
730 -------------------------------------------------------------------------- */
731 //@cindex isBlackhole
/* Test whether closure `p` is the updatee of some frame on `tso`'s
 * update-frame list, i.e. is already "blackholed" by a pending update.
 * NOTE(review): truncated excerpt — the enclosing loop, the case labels
 * for the switch, the returns, and closing braces are missing.
 */
isBlackhole( StgTSO* tso, StgClosure* p )
  StgUpdateFrame* su = tso->su;
  switch (get_itbl(su)->type) {
    /* update frame: is this the closure it will update? */
    if (su->updatee == p) {
      /* seq/catch frames: just follow the link to the next frame */
      su = stgCast(StgSeqFrame*,su)->link;
      su = stgCast(StgCatchFrame*,su)->link;
      barf("isBlackhole: weird record found on update frame list.");
760 Check the static objects list.
763 checkStaticObjects ( void ) {
764 extern StgClosure* static_objects;
765 StgClosure *p = static_objects;
768 while (p != END_OF_STATIC_LIST) {
771 switch (info->type) {
774 StgClosure *indirectee = stgCast(StgIndStatic*,p)->indirectee;
776 ASSERT(LOOKS_LIKE_PTR(indirectee));
777 ASSERT(LOOKS_LIKE_GHC_INFO(indirectee->header.info));
778 p = IND_STATIC_LINK((StgClosure *)p);
783 p = THUNK_STATIC_LINK((StgClosure *)p);
787 p = FUN_STATIC_LINK((StgClosure *)p);
791 p = STATIC_LINK(info,(StgClosure *)p);
795 barf("checkStaticObjetcs: strange closure %p (%s)",
802 Check the sanity of a blocking queue starting at bqe with closure being
803 the closure holding the blocking queue.
804 Note that in GUM we can have several different closure types in a
/* Parallel-RTS variant: check a blocking queue starting at `bqe`, where
 * `closure` is the closure holding the queue (must be one of the four
 * queue-bearing types asserted below).  In GUM the queue may contain
 * several different closure types.
 * NOTE(review): truncated excerpt — the enclosing loop, the case labels
 * of the switch, and closing braces are missing from this view.
 */
checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
  rtsBool end = rtsFalse;
  StgInfoTable *info = get_itbl(closure);
  /* only these closure types carry a blocking queue */
  ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
	 || info->type == FETCH_ME_BQ || info->type == RBH);
    switch (get_itbl(bqe)->type) {
      checkClosure((StgClosure *)bqe);
      /* END_BQ_QUEUE terminates the queue */
      end = (bqe==END_BQ_QUEUE);
      checkClosure((StgClosure *)bqe);
      barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
	   get_itbl(bqe)->type, closure, info_type(closure));
/* Second checkBQ variant (TSO-typed queue elements): here the owner may
 * only be a BLACKHOLE_BQ or MVAR, and the terminator is END_BQ_QUEUE.
 * NOTE(review): truncated excerpt — the enclosing loop, case labels and
 * closing braces are missing from this view.
 */
checkBQ (StgTSO *bqe, StgClosure *closure)
  rtsBool end = rtsFalse;
  StgInfoTable *info = get_itbl(closure);
  ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
    switch (get_itbl(bqe)->type) {
      checkClosure((StgClosure *)bqe);
      end = (bqe==END_BQ_QUEUE);
      barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
	   get_itbl(bqe)->type, closure, info_type(closure));
/* Third checkBQ variant (sequential build): queue elements are TSOs and
 * the terminator is END_TSO_QUEUE.
 * NOTE(review): truncated excerpt — the enclosing loop, case labels and
 * closing braces are missing from this view.
 * NOTE(review): the barf() format string has two conversions (%d, %p)
 * but three arguments are passed — the trailing info->type is unused.
 */
checkBQ (StgTSO *bqe, StgClosure *closure)
  rtsBool end = rtsFalse;
  StgInfoTable *info = get_itbl(closure);
  ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
    switch (get_itbl(bqe)->type) {
      checkClosure((StgClosure *)bqe);
      end = (bqe==END_TSO_QUEUE);
      barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
	   get_itbl(bqe)->type, closure, info->type);
889 //@node GALA table sanity, Index, Blackhole Sanity
890 //@subsection GALA table sanity
893 This routine checks the sanity of the LAGA and GALA tables. They are
894 implemented as lists through one hash table, LAtoGALAtable, because entries
895 in both tables have the same structure:
896 - the LAGA table maps local addresses to global addresses; it starts
897 with liveIndirections
898 - the GALA table maps global addresses to local addresses; it starts
905 /* hidden in parallel/Global.c; only accessed for testing here */
906 extern GALA *liveIndirections;
907 extern GALA *liveRemoteGAs;
908 extern HashTable *LAtoGALAtable;
910 //@cindex checkLAGAtable
/* Check the LAGA (local->global) and GALA (global->local) address
 * tables, both implemented as lists hashed through LAtoGALAtable: each
 * preferred entry must be the one found in the hash table, each local
 * address must carry a plausible info pointer, and direct self-loops in
 * the lists are rejected.  If check_closures is set, every referenced
 * closure is fully checked as well.
 * NOTE(review): truncated excerpt — return type, the declarations of
 * `gala`/`gala0`, and closing braces are missing from this view.
 */
checkLAGAtable(rtsBool check_closures)
  nat n=0, m=0; // debugging
  /* first list: liveIndirections (the LAGA table) */
  for (gala = liveIndirections; gala != NULL; gala = gala->next) {
    gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
    ASSERT(!gala->preferred || gala == gala0);
    ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
    ASSERT(gala->next!=gala);  // detect direct loops
    if ( check_closures ) {
      checkClosure(stgCast(StgClosure*,gala->la));
  /* second list: liveRemoteGAs (the GALA table) */
  for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
    gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
    ASSERT(!gala->preferred || gala == gala0);
    ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
    ASSERT(gala->next!=gala);  // detect direct loops
    if ( check_closures ) {
      checkClosure(stgCast(StgClosure*,gala->la));
945 //@node Index, , GALA table sanity
951 //* checkBQ:: @cindex\s-+checkBQ
952 //* checkChain:: @cindex\s-+checkChain
953 //* checkClosureShallow:: @cindex\s-+checkClosureShallow
954 //* checkHeap:: @cindex\s-+checkHeap
955 //* checkLargeBitmap:: @cindex\s-+checkLargeBitmap
956 //* checkSmallBitmap:: @cindex\s-+checkSmallBitmap
957 //* checkStack:: @cindex\s-+checkStack
958 //* checkStackChunk:: @cindex\s-+checkStackChunk
959 //* checkStackChunk:: @cindex\s-+checkStackChunk
960 //* checkStackClosure:: @cindex\s-+checkStackClosure
961 //* checkStackObject:: @cindex\s-+checkStackObject
962 //* checkTSO:: @cindex\s-+checkTSO
963 //* checkTSOsSanity:: @cindex\s-+checkTSOsSanity
964 //* checkThreadQSanity:: @cindex\s-+checkThreadQSanity
965 //* checkThreadQsSanity:: @cindex\s-+checkThreadQsSanity
966 //* isBlackhole:: @cindex\s-+isBlackhole