1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
/* Forward declarations for the bitmap walkers and the shallow closure
 * check defined below; they are mutually used by the stack checkers. */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
/* Walk 'size' words of 'payload' under a single-word bitmap: a 0 bit
 * marks a pointer word, which gets a shallow closure check (1 bits are
 * non-pointers and are skipped).
 * NOTE(review): gaps in the fused line numbers (43, 49, 50, 51) show the
 * return-type line and closing braces are missing from this excerpt. */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49 for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 if ((bitmap & 1) == 0) {
51 checkClosureShallow((StgClosure *)payload[i]);
/* As checkSmallBitmap, but driven by a multi-word StgLargeBitmap:
 * iterate over the words of large_bitmap->bitmap[], shallow-checking
 * payload[i] wherever the corresponding bit is 0 (pointer).
 * NOTE(review): the declarations/initialisation of 'i' and 'j' fall in
 * lines missing from this excerpt — confirm against the full source. */
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63 for (bmp=0; i < size; bmp++) {
64 StgWord bitmap = large_bitmap->bitmap[bmp];
66 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 if ((bitmap & 1) == 0) {
68 checkClosureShallow((StgClosure *)payload[i]);
75 * check that it looks like a valid closure - without checking its payload
76 * used to avoid recursion between checking PAPs and checking stack
/* Validate that p looks like a closure without inspecting its payload;
 * used to break the recursion between PAP checking and stack checking.
 * NOTE(review): 'q' is presumably the untagged form of 'p' — the
 * untagging statement lies in lines missing from this excerpt. */
81 checkClosureShallow( StgClosure* p )
86 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
88 /* Is it a static closure? Static closures live outside the heap,
 * dynamic ones inside it; the flag must agree with the address. */
89 if (!HEAP_ALLOCED(q)) {
90 ASSERT(closure_STATIC(q));
92 ASSERT(!closure_STATIC(q));
96 // check an individual stack object
// Sanity-check one activation record at 'c' and return its size in
// words, so the caller (checkStackChunk) can step to the next frame.
// NOTE(review): many case labels and braces are missing from this
// excerpt (fused numbering jumps) — layout below is partial.
98 checkStackFrame( StgPtr c )
101 const StgRetInfoTable* info;
103 info = get_ret_itbl((StgClosure *)c);
105 /* All activation records have 'bitmap' style layout info. */
106 switch (info->i.type) {
107 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
116 p = (P_)(r->payload);
// First check the pointers covered by the fixed-size liveness mask.
117 checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
118 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
120 // skip over the non-pointers
121 p += RET_DYN_NONPTRS(dyn);
123 // follow the ptr words
124 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
125 checkClosureShallow((StgClosure *)*p);
// Frame size = fixed header + bitmap/register area + dynamic words.
129 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
130 RET_DYN_NONPTR_REGS_SIZE +
131 RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
// Update frame: only the updatee pointer needs validating here.
135 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
136 case ATOMICALLY_FRAME:
137 case CATCH_RETRY_FRAME:
138 case CATCH_STM_FRAME:
140 // small bitmap cases (<= 32 entries)
143 size = BITMAP_SIZE(info->i.layout.bitmap);
144 checkSmallBitmap((StgPtr)c + 1,
145 BITMAP_BITS(info->i.layout.bitmap), size);
// BCO return frame: the bitmap comes from the BCO itself, which sits
// in the word after the return address.
151 bco = (StgBCO *)*(c+1);
152 size = BCO_BITMAP_SIZE(bco);
153 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
157 case RET_BIG: // large bitmap (> 32 entries)
158 size = GET_LARGE_BITMAP(&info->i)->size;
159 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
// RET_FUN frame: layout is determined by the function's info table
// (note: the function pointer may be tagged, hence UNTAG_CLOSURE).
164 StgFunInfoTable *fun_info;
167 ret_fun = (StgRetFun *)c;
168 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
169 size = ret_fun->size;
170 switch (fun_info->f.fun_type) {
172 checkSmallBitmap((StgPtr)ret_fun->payload,
173 BITMAP_BITS(fun_info->f.b.bitmap), size);
176 checkLargeBitmap((StgPtr)ret_fun->payload,
177 GET_FUN_LARGE_BITMAP(fun_info), size);
// Standard argument pattern: bitmap comes from the static table.
180 checkSmallBitmap((StgPtr)ret_fun->payload,
181 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
185 return sizeofW(StgRetFun) + size;
189 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
193 // check sections of stack between update frames
// Check every stack frame between sp and stack_end; each
// checkStackFrame call returns the frame size, advancing p.
// NOTE(review): the initialisation of 'p' (presumably p = sp) is on a
// line missing from this excerpt.
195 checkStackChunk( StgPtr sp, StgPtr stack_end )
200 while (p < stack_end) {
201 p += checkStackFrame( p );
203 // ASSERT( p == stack_end ); -- HWL
// Check the n_args argument words of a PAP/AP against the function's
// argument bitmap, then assert that the closure-pointer tag on
// tagged_fun is consistent with the function's arity.
207 checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
211 StgFunInfoTable *fun_info;
213 fun = UNTAG_CLOSURE(tagged_fun);
214 ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
215 fun_info = get_fun_itbl(fun);
217 p = (StgClosure *)payload;
218 switch (fun_info->f.fun_type) {
220 checkSmallBitmap( (StgPtr)payload,
221 BITMAP_BITS(fun_info->f.b.bitmap), n_args );
224 checkLargeBitmap( (StgPtr)payload,
225 GET_FUN_LARGE_BITMAP(fun_info),
229 checkLargeBitmap( (StgPtr)payload,
// Standard argument pattern: static bitmap table indexed by fun_type.
234 checkSmallBitmap( (StgPtr)payload,
235 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
// A function with arity > TAG_MASK cannot encode its arity in the tag,
// so it is tagged 1; otherwise the tag must equal the arity.
240 ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 1
241 : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
// Sanity-check a single heap closure and return its size in words.
// Dispatches on the info table's closure type; for each type, every
// pointer field is checked with LOOKS_LIKE_CLOSURE_PTR.
// NOTE(review): many case labels and braces are missing from this
// excerpt (fused numbering jumps) — the dispatch below is partial.
246 checkClosure( StgClosure* p )
248 const StgInfoTable *info;
250 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
252 p = UNTAG_CLOSURE(p);
253 /* Is it a static closure (i.e. in the data segment)? */
254 if (!HEAP_ALLOCED(p)) {
255 ASSERT(closure_STATIC(p));
257 ASSERT(!closure_STATIC(p));
260 info = p->header.info;
// A forwarding pointer means GC left an evacuated closure behind —
// that should never be visible to the sanity checker.
262 if (IS_FORWARDING_PTR(info)) {
263 barf("checkClosure: found EVACUATED closure %d", info->type);
265 info = INFO_PTR_TO_STRUCT(info);
267 switch (info->type) {
272 StgMVar *mvar = (StgMVar *)p;
273 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
274 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
275 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
278 checkBQ((StgBlockingQueueElement *)mvar->head, p);
280 checkBQ(mvar->head, p);
283 return sizeofW(StgMVar);
// Thunks: check each pointer in the payload per the info table layout.
294 for (i = 0; i < info->layout.payload.ptrs; i++) {
295 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
297 return thunk_sizeW_fromITBL(info);
314 case IND_OLDGEN_PERM:
321 case CONSTR_NOCAF_STATIC:
// Generic case: constructors, functions, etc. — payload pointers
// first, then non-pointers, as described by the info table.
326 for (i = 0; i < info->layout.payload.ptrs; i++) {
327 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
329 return sizeW_fromITBL(info);
333 StgBCO *bco = (StgBCO *)p;
334 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
335 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
336 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
337 return bco_sizeW(bco);
340 case IND_STATIC: /* (1, 0) closure */
341 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
342 return sizeW_fromITBL(info);
345 /* deal with these specially - the info table isn't
346 * representative of the actual layout.
348 { StgWeak *w = (StgWeak *)p;
349 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
350 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
351 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
353 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
355 return sizeW_fromITBL(info);
359 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
360 return THUNK_SELECTOR_sizeW();
364 /* we don't expect to see any of these after GC
365 * but they might appear during execution
367 StgInd *ind = (StgInd *)p;
368 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
369 return sizeofW(StgInd);
// Stack frames must never appear as free-standing heap closures.
379 case ATOMICALLY_FRAME:
380 case CATCH_RETRY_FRAME:
381 case CATCH_STM_FRAME:
382 barf("checkClosure: stack frame");
386 StgAP* ap = (StgAP *)p;
387 checkPAP (ap->fun, ap->payload, ap->n_args);
393 StgPAP* pap = (StgPAP *)p;
394 checkPAP (pap->fun, pap->payload, pap->n_args);
395 return pap_sizeW(pap);
// AP_STACK: its payload is a saved stack chunk, checked as such.
400 StgAP_STACK *ap = (StgAP_STACK *)p;
401 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
402 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
403 return ap_stack_sizeW(ap);
// ARR_WORDS carries no pointers — nothing to check but the size.
407 return arr_words_sizeW((StgArrWords *)p);
409 case MUT_ARR_PTRS_CLEAN:
410 case MUT_ARR_PTRS_DIRTY:
411 case MUT_ARR_PTRS_FROZEN:
412 case MUT_ARR_PTRS_FROZEN0:
414 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
416 for (i = 0; i < a->ptrs; i++) {
417 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
419 return mut_arr_ptrs_sizeW(a);
423 checkTSO((StgTSO *)p);
424 return tso_sizeW((StgTSO *)p);
// Parallel-RTS (GUM) closure types follow.
429 ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
430 ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
431 return sizeofW(StgBlockedFetch); // see size used in evacuate()
435 return sizeofW(StgFetchMe);
439 ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
440 return sizeofW(StgFetchMe); // see size used in evacuate()
443 checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
444 return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
447 /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
448 ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
449 if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
450 checkBQ(((StgRBH *)p)->blocking_queue, p);
451 ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
452 return BLACKHOLE_sizeW(); // see size used in evacuate()
453 // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
// STM closure types: watch queues, invariants, TVars, TRecs.
457 case TVAR_WATCH_QUEUE:
459 StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
460 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
461 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
462 return sizeofW(StgTVarWatchQueue);
465 case INVARIANT_CHECK_QUEUE:
467 StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
468 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
469 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
470 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
471 return sizeofW(StgInvariantCheckQueue);
474 case ATOMIC_INVARIANT:
476 StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
477 ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
478 ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
479 return sizeofW(StgAtomicInvariant);
484 StgTVar *tv = (StgTVar *)p;
485 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
486 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
487 return sizeofW(StgTVar);
493 StgTRecChunk *tc = (StgTRecChunk *)p;
494 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
495 for (i = 0; i < tc -> next_entry_idx; i ++) {
496 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
497 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
498 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
500 return sizeofW(StgTRecChunk);
505 StgTRecHeader *trec = (StgTRecHeader *)p;
506 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
507 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
508 return sizeofW(StgTRecHeader);
512 barf("checkClosure (closure type %d)", info->type);
/* PVM task-id sanity limits — presumably only meaningful in the
 * parallel (GUM) build; used by looks_like_tid/looks_like_slot below. */
518 #define PVM_PE_MASK 0xfffc0000
519 #define MAX_PVM_PES MAX_PES
520 #define MAX_PVM_TIDS MAX_PES
521 #define MAX_SLOTS 100000
// Heuristic: does 'tid' look like a plausible PVM task id?  The PE
// number lives in the high bits (masked by PVM_PE_MASK, shifted by 18),
// the per-PE task number in the low bits; both must be in range.
524 looks_like_tid(StgInt tid)
526 StgInt hi = (tid & PVM_PE_MASK) >> 18;
527 StgInt lo = (tid & ~PVM_PE_MASK);
528 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
// Heuristic: does 'slot' look like a plausible GA slot number?
// Only an upper-bound check; see looks_like_ga for the precise test.
533 looks_like_slot(StgInt slot)
535 /* if tid is known better use looks_like_ga!! */
536 rtsBool ok = slot<MAX_SLOTS;
537 // This refers only to the no. of slots on the current PE
538 // rtsBool ok = slot<=highest_slot();
// Heuristic: does 'ga' look like a valid global address?  The tid must
// look valid, and the slot bound is tighter (highest_slot()) when the
// GA belongs to this PE (gtid==mytid) than for remote PEs.
543 looks_like_ga(globalAddr *ga)
545 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
546 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
547 (ga)->payload.gc.slot<=highest_slot() :
548 (ga)->payload.gc.slot<MAX_SLOTS;
549 rtsBool ok = is_tid && is_slot;
556 /* -----------------------------------------------------------------------------
559 After garbage collection, the live heap is in a state where we can
560 run through and check that all the pointers point to the right
561 place. This function starts at a given position and sanity-checks
562 all the objects in the remainder of the chain.
563 -------------------------------------------------------------------------- */
// Walk a chain of block descriptors, checking every closure in each
// block and skipping slop words between closures.
// NOTE(review): disabled under THREADED_RTS (slop can't be zeroed);
// the declaration/initialisation of 'p' is on a missing line.
566 checkHeap(bdescr *bd)
570 #if defined(THREADED_RTS)
571 // heap sanity checking doesn't work with SMP, because we can't
572 // zero the slop (see Updates.h).
576 for (; bd != NULL; bd = bd->link) {
578 while (p < bd->free) {
579 nat size = checkClosure((StgClosure *)p);
580 /* This is the smallest size of closure that can live in the heap */
581 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
// Skip slop: words that are too small or don't look like info pointers.
585 while (p < bd->free &&
586 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
593 Check heap between start and end. Used after unpacking graphs.
// Parallel (GUM) variant: check heap between start and end, handling
// FETCH_MEs commoned-up during graph unpacking and slop inside INDs.
596 checkHeapChunk(StgPtr start, StgPtr end)
598 extern globalAddr *LAGAlookup(StgClosure *addr);
602 for (p=start; p<end; p+=size) {
603 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
604 if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
605 *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
606 /* if it's a FM created during unpack and commoned up, it's not global */
607 ASSERT(LAGAlookup((StgClosure*)p)==NULL);
608 size = sizeofW(StgFetchMe);
609 } else if (get_itbl((StgClosure*)p)->type == IND) {
610 *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
611 size = sizeofW(StgInd);
613 size = checkClosure((StgClosure *)p);
614 /* This is the smallest size of closure that can live in the heap. */
615 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
// Sequential variant: check all closures between start and end,
// advancing by each closure's reported size.
621 checkHeapChunk(StgPtr start, StgPtr end)
626 for (p=start; p<end; p+=size) {
627 ASSERT(LOOKS_LIKE_INFO_PTR(*p));
628 size = checkClosure((StgClosure *)p);
629 /* This is the smallest size of closure that can live in the heap. */
630 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
// Check each large object on the block list; pinned blocks hold raw
// data of unknown layout, so they are skipped.
636 checkLargeObjects(bdescr *bd)
639 if (!(bd->flags & BF_PINNED)) {
640 checkClosure((StgClosure *)bd->start);
// Sanity-check a thread state object: stack bounds, blocking state
// consistent with block_info, link field, and the stack contents.
// NOTE(review): several case labels/returns are missing (numbering gaps).
647 checkTSO(StgTSO *tso)
650 StgPtr stack = tso->stack;
651 StgOffset stack_size = tso->stack_size;
652 StgPtr stack_end = stack + stack_size;
// A relocated TSO is just a forwarding stub; check its target instead.
654 if (tso->what_next == ThreadRelocated) {
655 checkTSO(tso->_link);
659 if (tso->what_next == ThreadKilled) {
660 /* The garbage collector doesn't bother following any pointers
661 * from dead threads, so don't check sanity here.
666 ASSERT(stack <= sp && sp < stack_end);
669 ASSERT(tso->par.magic==TSO_MAGIC);
// Each blocking reason implies a particular kind of block_info.
671 switch (tso->why_blocked) {
673 checkClosureShallow(tso->block_info.closure);
674 ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
675 get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
677 case BlockedOnGA_NoSend:
678 checkClosureShallow(tso->block_info.closure);
679 ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
681 case BlockedOnBlackHole:
682 checkClosureShallow(tso->block_info.closure);
683 ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
684 get_itbl(tso->block_info.closure)->type==RBH);
689 #if defined(mingw32_HOST_OS)
690 case BlockedOnDoProc:
692 /* isOnBQ(blocked_queue) */
694 case BlockedOnException:
695 /* isOnSomeBQ(tso) */
696 ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
699 ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
702 ASSERT(tso->block_info.closure == END_TSO_QUEUE);
706 Could check other values of why_blocked but I am more
707 lazy than paranoid (bad combination) -- HWL
711 /* if the link field is non-nil it must point to one of these
712 three closure types */
713 ASSERT(tso->link == END_TSO_QUEUE ||
714 get_itbl(tso->link)->type == TSO ||
715 get_itbl(tso->link)->type == BLOCKED_FETCH ||
716 get_itbl(tso->link)->type == CONSTR);
// Finally, validate every frame on the thread's stack.
719 checkStackChunk(sp, stack_end);
// GranSim: check every TSO on every processor's run queue, reporting
// progress via debugBelch.
724 checkTSOsSanity(void) {
728 debugBelch("Checking sanity of all runnable TSOs:");
730 for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
731 for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
732 debugBelch("TSO %p on PE %d ...", tso, i);
739 debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
// Check the run-queue invariants for one PE: head/tail are non-NULL,
// head and tail are END_TSO_QUEUE together or not at all, and the
// links form a consistent chain ending at the tail.
746 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
750 /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
751 ASSERT(run_queue_hds[proc]!=NULL);
752 ASSERT(run_queue_tls[proc]!=NULL);
753 /* if either head or tail is NIL then the other one must be NIL, too */
754 ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
755 ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
756 for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
758 prev=tso, tso=tso->link) {
759 ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
760 (prev==END_TSO_QUEUE || prev->link==tso));
// After the walk, the last node seen must be the recorded tail.
764 ASSERT(prev==run_queue_tls[proc]);
// Run checkThreadQSanity over every PE's run queue.
768 checkThreadQsSanity (rtsBool check_TSO_too)
772 for (p=0; p<RtsFlags.GranFlags.proc; p++)
773 checkThreadQSanity(p, check_TSO_too);
778 Check that all TSOs have been evacuated.
779 Optionally also check the sanity of the TSOs.
// Walk the global thread list of every step: each entry must look like
// a TSO, and a dirty TSO in an old generation must carry TSO_MARKED
// (set by checkMutableList), proving it is on a mutable list.
782 checkGlobalTSOList (rtsBool checkTSOs)
787 for (s = 0; s < total_steps; s++) {
788 for (tso=all_steps[s].threads; tso != END_TSO_QUEUE;
789 tso = tso->global_link) {
790 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
791 ASSERT(get_itbl(tso)->type == TSO);
795 // If this TSO is dirty and in an old generation, it better
796 // be on the mutable list.
797 if (tso->what_next == ThreadRelocated) continue;
798 if (tso->flags & (TSO_DIRTY|TSO_LINK_DIRTY)) {
799 ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
// Clear the marker so the next sanity pass starts clean.
800 tso->flags &= ~TSO_MARKED;
806 /* -----------------------------------------------------------------------------
807 Check mutable list sanity.
808 -------------------------------------------------------------------------- */
// Check one generation's mutable list: every heap entry must live in
// generation 'gen', and TSOs get TSO_MARKED so checkGlobalTSOList can
// verify dirty old-gen threads really are on a mutable list.
811 checkMutableList( bdescr *mut_bd, nat gen )
817 for (bd = mut_bd; bd != NULL; bd = bd->link) {
818 for (q = bd->start; q < bd->free; q++) {
819 p = (StgClosure *)*q;
820 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
821 if (get_itbl(p)->type == TSO) {
822 ((StgTSO *)p)->flags |= TSO_MARKED;
// Check the global and per-capability mutable lists of every
// generation, then cross-check the global TSO list (which consumes the
// TSO_MARKED flags set above).
829 checkMutableLists (rtsBool checkTSOs)
833 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
834 checkMutableList(generations[g].mut_list, g);
835 for (i = 0; i < n_capabilities; i++) {
836 checkMutableList(capabilities[i].mut_lists[g], g);
839 checkGlobalTSOList(checkTSOs);
843 Check the static objects list.
// Walk the static-objects list, following the per-type static link
// field of each entry; barf on any closure type that should not appear.
// Fix: the barf() diagnostic previously misspelled the function name
// as "checkStaticObjetcs".
846 checkStaticObjects ( StgClosure* static_objects )
848 StgClosure *p = static_objects;
851 while (p != END_OF_STATIC_LIST) {
854 switch (info->type) {
// IND_STATIC: the indirectee must itself look like a closure.
857 StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);
859 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
860 ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
861 p = *IND_STATIC_LINK((StgClosure *)p);
866 p = *THUNK_STATIC_LINK((StgClosure *)p);
870 p = *FUN_STATIC_LINK((StgClosure *)p);
874 p = *STATIC_LINK(info,(StgClosure *)p);
878 barf("checkStaticObjects: strange closure %p (%s)",
885 Check the sanity of a blocking queue starting at bqe with closure being
886 the closure holding the blocking queue.
887 Note that in GUM we can have several different closure types in a
// GUM variant: check a blocking queue hanging off 'closure' (which
// must be an MVAR, FETCH_ME_BQ, or RBH).  In GUM several closure types
// can appear on a blocking queue, hence the type dispatch per element.
892 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
894 rtsBool end = rtsFalse;
895 StgInfoTable *info = get_itbl(closure);
897 ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
900 switch (get_itbl(bqe)->type) {
903 checkClosure((StgClosure *)bqe);
905 end = (bqe==END_BQ_QUEUE);
909 checkClosure((StgClosure *)bqe);
914 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
915 get_itbl(bqe)->type, closure, info_type(closure));
// Sequential variant: only MVARs have blocking queues, and every
// element must be a TSO; barf on anything else.
921 checkBQ (StgTSO *bqe, StgClosure *closure)
923 rtsBool end = rtsFalse;
924 StgInfoTable *info = get_itbl(closure);
926 ASSERT(info->type == MVAR);
929 switch (get_itbl(bqe)->type) {
932 checkClosure((StgClosure *)bqe);
934 end = (bqe==END_BQ_QUEUE);
938 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
939 get_itbl(bqe)->type, closure, info_type(closure));
948 This routine checks the sanity of the LAGA and GALA tables. They are
949 implemented as lists through one hash table, LAtoGALAtable, because entries
950 in both tables have the same structure:
951 - the LAGA table maps local addresses to global addresses; it starts
952 with liveIndirections
953 - the GALA table maps global addresses to local addresses; it starts
960 /* hidden in parallel/Global.c; only accessed for testing here */
/* liveIndirections heads the LAGA list, liveRemoteGAs the GALA list;
 * both share the LAtoGALAtable hash table (see comment block above). */
961 extern GALA *liveIndirections;
962 extern GALA *liveRemoteGAs;
963 extern HashTable *LAtoGALAtable;
966 checkLAGAtable(rtsBool check_closures)
969 nat n=0, m=0; // debugging
971 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
973 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
974 ASSERT(!gala->preferred || gala == gala0);
975 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
976 ASSERT(gala->next!=gala); // detect direct loops
977 if ( check_closures ) {
978 checkClosure((StgClosure *)gala->la);
982 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
984 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
985 ASSERT(!gala->preferred || gala == gala0);
986 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
987 ASSERT(gala->next!=gala); // detect direct loops
989 if ( check_closures ) {
990 checkClosure((StgClosure *)gala->la);