1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
/* Forward declarations for the static helpers used by the stack/PAP
 * checkers below; checkClosureShallow breaks the mutual recursion
 * between PAP checking and stack checking. */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
/* Walk a one-word stack bitmap over `size` payload words.
 * Convention (visible from the clear-bit test below): a 0 bit marks a
 * pointer word, which is shallow-checked; a 1 bit marks a non-pointer.
 * NOTE(review): this view of the file is elided (original line numbers
 * have gaps), so parts of the body are not visible here. */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49     for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 	if ((bitmap & 1) == 0) {
           /* clear bit => pointer word: validate it without recursing */
51 	    checkClosureShallow((StgClosure *)payload[i]);
/* Walk a multi-word (large) bitmap over `size` payload words.
 * Outer loop steps through the bitmap words; inner loop consumes up to
 * BITS_IN(W_) bits per word. As with checkSmallBitmap, a 0 bit marks a
 * pointer word. NOTE(review): body partially elided in this view. */
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63     for (bmp=0; i < size; bmp++) {
64 	StgWord bitmap = large_bitmap->bitmap[bmp];
66 	for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 	    if ((bitmap & 1) == 0) {
68 		checkClosureShallow((StgClosure *)payload[i]);
75  * check that it looks like a valid closure - without checking its payload
76  * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
   /* q is presumably the untagged form of p (untagging elided from this
    * view) — TODO confirm against the full source. */
86     ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
88     /* Is it a static closure? Static closures live outside the heap
        * and must carry the static flag; heap closures must not. */
89     if (!HEAP_ALLOCED(q)) {
90 	ASSERT(closure_STATIC(q));
92 	ASSERT(!closure_STATIC(q));
96 // check an individual stack object: sanity-checks the frame's pointer
//    fields according to its layout, and returns the frame size in words
//    so the caller (checkStackChunk) can advance to the next frame.
//    NOTE(review): several case labels and braces are elided in this view.
98 checkStackFrame( StgPtr c )
101     const StgRetInfoTable* info;
103     info = get_ret_itbl((StgClosure *)c);
105     /* All activation records have 'bitmap' style layout info. */
106     switch (info->i.type) {
107     case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
        /* First, the fixed registers described by the liveness mask... */
116 	p = (P_)(r->payload);
117 	checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
118 	p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
120 	// skip over the non-pointers
121 	p += RET_DYN_NONPTRS(dyn);
123 	// follow the ptr words
124 	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
125 	    checkClosureShallow((StgClosure *)*p);
        /* Total size: header + bitmap + saved regs + non-ptrs + ptrs. */
129 	return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
130 	    RET_DYN_NONPTR_REGS_SIZE +
131 	    RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
135 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
136     case ATOMICALLY_FRAME:
137     case CATCH_RETRY_FRAME:
138     case CATCH_STM_FRAME:
140 	// small bitmap cases (<= 32 entries)
143 	size = BITMAP_SIZE(info->i.layout.bitmap);
144 	checkSmallBitmap((StgPtr)c + 1,
145 			 BITMAP_BITS(info->i.layout.bitmap), size);
        /* RET_BCO (presumably — label elided): the BCO pointer sits in the
         * word after the header, and its bitmap describes the payload. */
151 	bco = (StgBCO *)*(c+1);
152 	size = BCO_BITMAP_SIZE(bco);
153 	checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
157     case RET_BIG: // large bitmap (> 32 entries)
158 	size = GET_LARGE_BITMAP(&info->i)->size;
159 	checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        /* RET_FUN (presumably — label elided): layout is determined by the
         * function's own info table, not the frame's. */
164 	StgFunInfoTable *fun_info;
167 	ret_fun = (StgRetFun *)c;
168 	fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
169 	size = ret_fun->size;
170 	switch (fun_info->f.fun_type) {
172 	    checkSmallBitmap((StgPtr)ret_fun->payload,
173 			     BITMAP_BITS(fun_info->f.b.bitmap), size);
176 	    checkLargeBitmap((StgPtr)ret_fun->payload,
177 			     GET_FUN_LARGE_BITMAP(fun_info), size);
        /* default: standard argument pattern, bitmap from the static table */
180 	    checkSmallBitmap((StgPtr)ret_fun->payload,
181 			     BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
185 	return sizeofW(StgRetFun) + size;
        /* Anything else on the stack is corruption. */
189 	barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
193 // check sections of stack between update frames
//     Walks frames from sp up to stack_end, using each frame's size
//     (returned by checkStackFrame) to advance.
195 checkStackChunk( StgPtr sp, StgPtr stack_end )
200     while (p < stack_end) {
201 	p += checkStackFrame( p );
    // The final-position assertion was deliberately disabled:
203     // ASSERT( p == stack_end ); -- HWL
/* Sanity-check the payload of a PAP/AP closure: `n_args` argument words
 * laid out according to the function's bitmap. Also checks that the
 * closure's pointer tag is consistent with the function's arity.
 * NOTE(review): case labels in the switch are elided in this view. */
207 checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
211     StgFunInfoTable *fun_info;
213     fun = UNTAG_CLOSURE(tagged_fun);
214     ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
215     fun_info = get_fun_itbl(fun);
217     p = (StgClosure *)payload;
218     switch (fun_info->f.fun_type) {
220 	checkSmallBitmap( (StgPtr)payload,
221 			  BITMAP_BITS(fun_info->f.b.bitmap), n_args );
224 	checkLargeBitmap( (StgPtr)payload,
225 			  GET_FUN_LARGE_BITMAP(fun_info),
229 	checkLargeBitmap( (StgPtr)payload,
234 	checkSmallBitmap( (StgPtr)payload,
235 			  BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
    /* Tag check: arities beyond TAG_MASK all share tag 1; otherwise the
     * tag must equal the arity. */
240     ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 1
241            : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
/* Sanity-check a single closure in depth: validates its info pointer,
 * its static/dynamic placement, and every pointer field appropriate to
 * its closure type. Returns the closure's size in words so callers can
 * walk a heap block linearly.
 * NOTE(review): this view of the function is elided (many case labels,
 * braces, and `break`s are missing); comments describe visible lines only. */
246 checkClosure( StgClosure* p )
248     const StgInfoTable *info;
250     ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
252     p = UNTAG_CLOSURE(p);
253     /* Is it a static closure (i.e. in the data segment)? */
254     if (!HEAP_ALLOCED(p)) {
255 	ASSERT(closure_STATIC(p));
257 	ASSERT(!closure_STATIC(p));
260     info = p->header.info;
    /* Forwarding pointers must never survive into a sanity-checked heap. */
262     if (IS_FORWARDING_PTR(info)) {
263         barf("checkClosure: found EVACUATED closure %d", info->type);
265     info = INFO_PTR_TO_STRUCT(info);
267     switch (info->type) {
        /* MVAR: head/tail/value must all look like closures; the blocking
         * queue is checked with the build-appropriate checkBQ variant. */
272 	StgMVar *mvar = (StgMVar *)p;
273 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
274 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
275 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
278 	checkBQ((StgBlockingQueueElement *)mvar->head, p);
280 	checkBQ(mvar->head, p);
283 	return sizeofW(StgMVar);
        /* THUNK-like closures: check each pointer in the payload. */
294 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
295 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
297 	    return thunk_sizeW_fromITBL(info);
        /* Generic pointers-first layouts (FUN/CONSTR/etc. — labels elided). */
314     case IND_OLDGEN_PERM:
317     case SE_CAF_BLACKHOLE:
325     case CONSTR_NOCAF_STATIC:
330 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
331 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
333 	    return sizeW_fromITBL(info);
        /* BCO: three pointer fields, then size from its own macro. */
337 	StgBCO *bco = (StgBCO *)p;
338 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
339 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
340 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
341 	return bco_sizeW(bco);
344     case IND_STATIC: /* (1, 0) closure */
345 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
346 	return sizeW_fromITBL(info);
349 	/* deal with these specially - the info table isn't
350 	 * representative of the actual layout.
352 	{ StgWeak *w = (StgWeak *)p;
353 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
354 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
355 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
357 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
359 	  return sizeW_fromITBL(info);
        /* THUNK_SELECTOR (presumably — label elided): one selectee field. */
363 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
364 	return THUNK_SELECTOR_sizeW();
368 	/* we don't expect to see any of these after GC
369 	 * but they might appear during execution
371 	    StgInd *ind = (StgInd *)p;
372 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
373 	    return sizeofW(StgInd);
        /* Stack frames should never appear in the heap proper. */
383     case ATOMICALLY_FRAME:
384     case CATCH_RETRY_FRAME:
385     case CATCH_STM_FRAME:
386 	    barf("checkClosure: stack frame");
        /* AP: function applied to n_args payload words — delegate to checkPAP. */
390 	StgAP* ap = (StgAP *)p;
391 	checkPAP (ap->fun, ap->payload, ap->n_args);
        /* PAP: same layout check as AP. */
397 	StgPAP* pap = (StgPAP *)p;
398 	checkPAP (pap->fun, pap->payload, pap->n_args);
399 	return pap_sizeW(pap);
        /* AP_STACK: payload is a chunk of stack; check it frame by frame. */
404 	StgAP_STACK *ap = (StgAP_STACK *)p;
405 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
406 	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
407 	return ap_stack_sizeW(ap);
        /* ARR_WORDS: no pointers, just report the size. */
411 	return arr_words_sizeW((StgArrWords *)p);
413     case MUT_ARR_PTRS_CLEAN:
414     case MUT_ARR_PTRS_DIRTY:
415     case MUT_ARR_PTRS_FROZEN:
416     case MUT_ARR_PTRS_FROZEN0:
418 	    StgMutArrPtrs* a = (StgMutArrPtrs *)p;
420 	    for (i = 0; i < a->ptrs; i++) {
421 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
423 	    return mut_arr_ptrs_sizeW(a);
427 	checkTSO((StgTSO *)p);
428 	return tso_sizeW((StgTSO *)p);
        /* Parallel (GUM) closure types follow — BLOCKED_FETCH, FETCH_ME,
         * FETCH_ME_BQ, RBH (labels elided in this view). */
433 	ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
434 	ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
435 	return sizeofW(StgBlockedFetch);  // see size used in evacuate()
439 	return sizeofW(StgFetchMe);
443 	ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
444 	return sizeofW(StgFetchMe);  // see size used in evacuate()
447 	checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
448 	return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
451 	/* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
452 	ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
453 	if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
454 	  checkBQ(((StgRBH *)p)->blocking_queue, p);
455 	ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
456 	return BLACKHOLE_sizeW();   // see size used in evacuate()
457 	// sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
        /* STM closure types: watch queues, invariants, TVars, TRecs. */
461     case TVAR_WATCH_QUEUE:
463         StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
464         ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
465         ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
466         return sizeofW(StgTVarWatchQueue);
469     case INVARIANT_CHECK_QUEUE:
471         StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
472         ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
473         ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
474         ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
475         return sizeofW(StgInvariantCheckQueue);
478     case ATOMIC_INVARIANT:
480         StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
481         ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
482         ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
483         return sizeofW(StgAtomicInvariant);
488         StgTVar *tv = (StgTVar *)p;
489         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
490         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
491         return sizeofW(StgTVar);
497         StgTRecChunk *tc = (StgTRecChunk *)p;
498         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        /* Only entries up to next_entry_idx are initialized. */
499         for (i = 0; i < tc -> next_entry_idx; i ++) {
500           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
501           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
502           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
504         return sizeofW(StgTRecChunk);
509         StgTRecHeader *trec = (StgTRecHeader *)p;
510         ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
511         ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
512         return sizeofW(StgTRecHeader);
        /* Unknown closure type: fatal. */
516 	    barf("checkClosure (closure type %d)", info->type);
/* PVM (parallel Haskell) task-id sanity limits.
 * NOTE(review): the >>18 shift in looks_like_tid suggests the top bits of
 * a tid encode the PE and the low 18 bits the local id — confirm against
 * the PVM documentation. */
522 #define PVM_PE_MASK    0xfffc0000
523 #define MAX_PVM_PES    MAX_PES
524 #define MAX_PVM_TIDS   MAX_PES
525 #define MAX_SLOTS      100000
/* Heuristic check that `tid` looks like a plausible PVM task id:
 * splits it into PE part (high bits) and local part, and bounds both. */
528 looks_like_tid(StgInt tid)
530   StgInt hi = (tid & PVM_PE_MASK) >> 18;
531   StgInt lo = (tid & ~PVM_PE_MASK);
532   rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Weak heuristic check on a GA slot number: only bounds it by the global
 * maximum, since the owning PE is unknown here. */
537 looks_like_slot(StgInt slot)
539   /* if tid is known better use looks_like_ga!! */
540   rtsBool ok = slot<MAX_SLOTS;
541   // This refers only to the no. of slots on the current PE
542   // rtsBool ok = slot<=highest_slot();
/* Heuristic check that a global address is plausible: valid tid, and a
 * slot bounded tightly (<=highest_slot) when the GA is local to this PE,
 * loosely (<MAX_SLOTS) otherwise. */
547 looks_like_ga(globalAddr *ga)
549   rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
550   rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
551                      (ga)->payload.gc.slot<=highest_slot() :
552                      (ga)->payload.gc.slot<MAX_SLOTS;
553   rtsBool ok = is_tid && is_slot;
560 /* -----------------------------------------------------------------------------
563 After garbage collection, the live heap is in a state where we can
564 run through and check that all the pointers point to the right
565 place. This function starts at a given position and sanity-checks
566 all the objects in the remainder of the chain.
567 -------------------------------------------------------------------------- */
/* Walk a chain of heap blocks, checking every closure in each block.
 * Disabled under THREADED_RTS (see comment below). After each closure,
 * skips any slop words that do not look like info pointers.
 * NOTE(review): body partially elided in this view. */
570 checkHeap(bdescr *bd)
574 #if defined(THREADED_RTS)
575     // heap sanity checking doesn't work with SMP, because we can't
576     // zero the slop (see Updates.h).
580     for (; bd != NULL; bd = bd->link) {
582 	while (p < bd->free) {
583 	    nat size = checkClosure((StgClosure *)p);
584 	    /* This is the smallest size of closure that can live in the heap */
585 	    ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            /* skip over slop: words that cannot be info pointers */
589 	    while (p < bd->free &&
590 		   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
597 Check heap between start and end. Used after unpacking graphs.
/* PAR variant: check the heap span [start,end) closure by closure.
 * Has special handling for FETCH_ME closures created during graph
 * unpacking (marked with the 0x0000eeee sentinel) and for IND slop. */
600 checkHeapChunk(StgPtr start, StgPtr end)
602   extern globalAddr *LAGAlookup(StgClosure *addr);
606   for (p=start; p<end; p+=size) {
607     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
608     if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
609 	*(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
610       /* if it's a FM created during unpack and commoned up, it's not global */
611       ASSERT(LAGAlookup((StgClosure*)p)==NULL);
612       size = sizeofW(StgFetchMe);
613     } else if (get_itbl((StgClosure*)p)->type == IND) {
614       *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
615       size = sizeofW(StgInd);
        /* otherwise: a normal closure — full check, which returns its size */
617       size = checkClosure((StgClosure *)p);
618       /* This is the smallest size of closure that can live in the heap. */
619       ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
/* Non-PAR variant: check every closure in the heap span [start,end). */
625 checkHeapChunk(StgPtr start, StgPtr end)
630   for (p=start; p<end; p+=size) {
631     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
632     size = checkClosure((StgClosure *)p);
633     /* This is the smallest size of closure that can live in the heap. */
634     ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
/* Check the closure at the start of each block in a block chain
 * (used for large objects: one closure per block). */
640 checkChain(bdescr *bd)
643     checkClosure((StgClosure *)bd->start);
/* Sanity-check a TSO (thread state object): its stack bounds, its
 * blocking state (block_info must match why_blocked), and finally its
 * stack contents. Relocated threads are followed to their new TSO;
 * killed threads are skipped entirely.
 * NOTE(review): parts of the body (case labels, #if/#endif pairs) are
 * elided in this view. */
649 checkTSO(StgTSO *tso)
652     StgPtr stack = tso->stack;
653     StgOffset stack_size = tso->stack_size;
654     StgPtr stack_end = stack + stack_size;
656     if (tso->what_next == ThreadRelocated) {
        /* the TSO has moved; check the one it forwards to instead */
657       checkTSO(tso->_link);
661     if (tso->what_next == ThreadKilled) {
662       /* The garbage collector doesn't bother following any pointers
663        * from dead threads, so don't check sanity here.
    /* sp must lie within the stack (presumably sp = tso->sp; its
     * assignment is elided from this view). */
668     ASSERT(stack <= sp && sp < stack_end);
671     ASSERT(tso->par.magic==TSO_MAGIC);
    /* block_info must be consistent with the blocking reason */
673     switch (tso->why_blocked) {
675       checkClosureShallow(tso->block_info.closure);
676       ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
677 	     get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
679     case BlockedOnGA_NoSend:
680       checkClosureShallow(tso->block_info.closure);
681       ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
683     case BlockedOnBlackHole:
684       checkClosureShallow(tso->block_info.closure);
685       ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
686              get_itbl(tso->block_info.closure)->type==RBH);
691 #if defined(mingw32_HOST_OS)
692     case BlockedOnDoProc:
694       /* isOnBQ(blocked_queue) */
696     case BlockedOnException:
697       /* isOnSomeBQ(tso) */
698       ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
701       ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
704       ASSERT(tso->block_info.closure == END_TSO_QUEUE);
708       Could check other values of why_blocked but I am more
709       lazy than paranoid (bad combination) -- HWL
713   /* if the link field is non-nil it must point to one of these
714      three closure types */
715   ASSERT(tso->link == END_TSO_QUEUE ||
716          get_itbl(tso->link)->type == TSO ||
717          get_itbl(tso->link)->type == BLOCKED_FETCH ||
718          get_itbl(tso->link)->type == CONSTR);
721     checkStackChunk(sp, stack_end);
/* GranSim only: check every runnable TSO on every PE's run queue,
 * reporting progress via debugBelch. */
726 checkTSOsSanity(void) {
730   debugBelch("Checking sanity of all runnable TSOs:");
732   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
733     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
734       debugBelch("TSO %p on PE %d ...", tso, i);
741   debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* GranSim only: check the structural integrity of one PE's run queue —
 * head/tail consistency and correct back-links along the chain.
 * `check_TSO_too` presumably triggers a per-TSO checkTSO as well
 * (that call is elided from this view — confirm). */
748 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
752   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
753   ASSERT(run_queue_hds[proc]!=NULL);
754   ASSERT(run_queue_tls[proc]!=NULL);
755   /* if either head or tail is NIL then the other one must be NIL, too */
756   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
757   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
758   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
760        prev=tso, tso=tso->link) {
761     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
762 	   (prev==END_TSO_QUEUE || prev->link==tso));
    /* after the walk, the last node must be the recorded tail */
766   ASSERT(prev==run_queue_tls[proc]);
/* GranSim only: run checkThreadQSanity over every PE's run queue. */
770 checkThreadQsSanity (rtsBool check_TSO_too)
774   for (p=0; p<RtsFlags.GranFlags.proc; p++)
775     checkThreadQSanity(p, check_TSO_too);
780 Check that all TSOs have been evacuated.
781 Optionally also check the sanity of the TSOs.
/* Walk the global thread list of every step and check each entry is a
 * valid, evacuated TSO. The `checkTSOs` flag presumably gates a deeper
 * per-TSO check (elided from this view — confirm). */
784 checkGlobalTSOList (rtsBool checkTSOs)
789   for (s = 0; s < total_steps; s++) {
790       for (tso=all_steps[s].threads; tso != END_TSO_QUEUE;
791            tso = tso->global_link) {
792           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
793           ASSERT(get_itbl(tso)->type == TSO);
800 /* -----------------------------------------------------------------------------
801 Check mutable list sanity.
802 -------------------------------------------------------------------------- */
/* Check a generation's mutable list: every heap-allocated entry must
 * actually live in generation `gen`. */
805 checkMutableList( bdescr *mut_bd, nat gen )
811     for (bd = mut_bd; bd != NULL; bd = bd->link) {
812 	for (q = bd->start; q < bd->free; q++) {
813 	    p = (StgClosure *)*q;
814 	    ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
820 Check the static objects list.
/* Walk the static objects list, following the appropriate static-link
 * field for each closure type until END_OF_STATIC_LIST.
 * NOTE(review): the barf message at the end of this function (elided
 * here, original line ~855) misspells the function name as
 * "checkStaticObjetcs" — message-only typo worth fixing in a full edit. */
823 checkStaticObjects ( StgClosure* static_objects )
825   StgClosure *p = static_objects;
828   while (p != END_OF_STATIC_LIST) {
831     switch (info->type) {
        /* IND_STATIC (presumably — label elided): check the indirectee
         * before following the static link */
834 	StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);
836 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
837 	ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
838 	p = *IND_STATIC_LINK((StgClosure *)p);
843       p = *THUNK_STATIC_LINK((StgClosure *)p);
847       p = *FUN_STATIC_LINK((StgClosure *)p);
851       p = *STATIC_LINK(info,(StgClosure *)p);
855       barf("checkStaticObjetcs: strange closure %p (%s)",
862 Check the sanity of a blocking queue starting at bqe with closure being
863 the closure holding the blocking queue.
864 Note that in GUM we can have several different closure types in a
/* GRAN/PAR variant: check a blocking queue hanging off `closure`.
 * In GUM several element types can appear on a BQ (see header comment),
 * hence the switch on the element's closure type. */
869 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
871   rtsBool end = rtsFalse;
872   StgInfoTable *info = get_itbl(closure);
874   ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
877     switch (get_itbl(bqe)->type) {
880       checkClosure((StgClosure *)bqe);
882       end = (bqe==END_BQ_QUEUE);
886       checkClosure((StgClosure *)bqe);
        /* unexpected element type: fatal */
891       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
892 	   get_itbl(bqe)->type, closure, info_type(closure));
/* Non-PAR variant: blocking queues only hang off MVARs here, and only
 * TSOs may appear on them. */
898 checkBQ (StgTSO *bqe, StgClosure *closure)
900   rtsBool end = rtsFalse;
901   StgInfoTable *info = get_itbl(closure);
903   ASSERT(info->type == MVAR);
906     switch (get_itbl(bqe)->type) {
909       checkClosure((StgClosure *)bqe);
911       end = (bqe==END_BQ_QUEUE);
915       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
916 	   get_itbl(bqe)->type, closure, info_type(closure));
925 This routine checks the sanity of the LAGA and GALA tables. They are
926 implemented as lists through one hash table, LAtoGALAtable, because entries
927 in both tables have the same structure:
928 - the LAGA table maps local addresses to global addresses; it starts
929 with liveIndirections
930 - the GALA table maps global addresses to local addresses; it starts
937 /* hidden in parallel/Global.c; only accessed for testing here */
938 extern GALA *liveIndirections;
939 extern GALA *liveRemoteGAs;
940 extern HashTable *LAtoGALAtable;
/* Check the LAGA/GALA tables (see banner comment above): every entry in
 * both lists must agree with the LAtoGALAtable hash lookup for its local
 * address, have a valid-looking info pointer, and not form a trivial
 * self-loop. Optionally deep-checks each referenced closure. */
943 checkLAGAtable(rtsBool check_closures)
946   nat n=0, m=0; // debugging
948   for (gala = liveIndirections; gala != NULL; gala = gala->next) {
    /* a preferred GALA must be the one the hash table maps to */
950     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
951     ASSERT(!gala->preferred || gala == gala0);
952     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
953     ASSERT(gala->next!=gala); // detect direct loops
954     if ( check_closures ) {
955       checkClosure((StgClosure *)gala->la);
959   for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
961     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
962     ASSERT(!gala->preferred || gala == gala0);
963     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
964     ASSERT(gala->next!=gala); // detect direct loops
966     if ( check_closures ) {
967       checkClosure((StgClosure *)gala->la);