1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Sanity checking code for the heap and stack.
7  * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
/* Forward declarations for the helpers that walk pointer bitmaps and do
 * a shallow (non-recursive) closure check; defined later in this file. */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
/* Walk a single-word bitmap over 'size' payload words.  A clear bit marks
 * a pointer word: that word must look like a valid closure pointer (checked
 * shallowly).  Set bits are non-pointer words and are skipped. */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49     for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 	if ((bitmap & 1) == 0) {
51 	    checkClosureShallow((StgClosure *)payload[i]);
/* Walk a multi-word bitmap over 'size' payload words, consuming one StgWord
 * of bitmap bits at a time from large_bitmap->bitmap[].  As with the small
 * case, a clear bit marks a word that must be a valid closure pointer. */
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63     for (bmp=0; i < size; bmp++) {
64 	StgWord bitmap = large_bitmap->bitmap[bmp];
	/* j counts bits within the current bitmap word; i indexes payload.
	 * NOTE(review): j is presumably reset for each bitmap word on a line
	 * not visible here — confirm against the full source. */
66 	for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 	    if ((bitmap & 1) == 0) {
68 		checkClosureShallow((StgClosure *)payload[i]);
75  * check that it looks like a valid closure - without checking its payload
76  * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
    /* NOTE(review): 'q' is presumably the untagged form of 'p'; the
     * untagging statement is not visible in this view — confirm. */
86     ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
88     /* Is it a static closure? */
89     if (!HEAP_ALLOCED(q)) {
	/* Statically allocated closures must carry the static flag... */
90 	ASSERT(closure_STATIC(q));
	/* ...and dynamically (heap) allocated ones must not. */
92 	ASSERT(!closure_STATIC(q));
96 // check an individual stack object
/* Sanity-check one activation record at 'c' and return its size in words,
 * so the caller (checkStackChunk) can advance to the next frame.  The
 * frame's layout info is taken from its return-address info table. */
98 checkStackFrame( StgPtr c )
101     const StgRetInfoTable* info;
103     info = get_ret_itbl((StgClosure *)c);
105     /* All activation records have 'bitmap' style layout info. */
106     switch (info->i.type) {
107     case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
	/* First check the fixed-size register-save area via its liveness
	 * mask, then the variable-size non-pointer/pointer sections. */
116 	p = (P_)(r->payload);
117 	checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
118 	p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
120 	// skip over the non-pointers
121 	p += RET_DYN_NONPTRS(dyn);
123 	// follow the ptr words
124 	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
125 	    checkClosureShallow((StgClosure *)*p);
	/* Total frame size: header + register bitmap + saved regs +
	 * non-pointer words + pointer words. */
129 	return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
130 	    RET_DYN_NONPTR_REGS_SIZE +
131 	    RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
	/* Update frame: the updatee field must look like a closure. */
135 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
136     case ATOMICALLY_FRAME:
137     case CATCH_RETRY_FRAME:
138     case CATCH_STM_FRAME:
140 	// small bitmap cases (<= 32 entries)
143 	size = BITMAP_SIZE(info->i.layout.bitmap);
144 	checkSmallBitmap((StgPtr)c + 1,
145 			 BITMAP_BITS(info->i.layout.bitmap), size);
	/* RET_BCO: the word after the header is the BCO, which carries
	 * its own bitmap describing the rest of the frame. */
151 	bco = (StgBCO *)*(c+1);
152 	size = BCO_BITMAP_SIZE(bco);
153 	checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
157     case RET_BIG: // large bitmap (> 32 entries)
158 	size = GET_LARGE_BITMAP(&info->i)->size;
159 	checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
	/* RET_FUN: layout of the arguments is described by the function's
	 * own info table (small/large bitmap or a standard arg pattern). */
164 	StgFunInfoTable *fun_info;
167 	ret_fun = (StgRetFun *)c;
168 	fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
169 	size = ret_fun->size;
170 	switch (fun_info->f.fun_type) {
172 	    checkSmallBitmap((StgPtr)ret_fun->payload,
173 			     BITMAP_BITS(fun_info->f.b.bitmap), size);
176 	    checkLargeBitmap((StgPtr)ret_fun->payload,
177 			     GET_FUN_LARGE_BITMAP(fun_info), size);
	    /* Standard argument pattern: use the precomputed bitmap table. */
180 	    checkSmallBitmap((StgPtr)ret_fun->payload,
181 			     BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
185 	return sizeofW(StgRetFun) + size;
	/* Anything else on the stack is a sanity failure. */
189 	barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
193 // check sections of stack between update frames
/* Walk the stack from sp to stack_end, checking each frame in turn;
 * checkStackFrame returns the frame's size in words, which advances p. */
195 checkStackChunk( StgPtr sp, StgPtr stack_end )
200     while (p < stack_end) {
201 	p += checkStackFrame( p );
203     // ASSERT( p == stack_end ); -- HWL
/* Sanity-check a PAP/AP: verify the (possibly tagged) function closure,
 * then check the n_args payload words against the function's argument
 * bitmap.  Finally verify that the pointer tag on tagged_fun is consistent
 * with the function's arity (arities beyond TAG_MASK use tag 1). */
207 checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
211 StgFunInfoTable *fun_info;
213     fun = UNTAG_CLOSURE(tagged_fun);
214     ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
215     fun_info = get_fun_itbl(fun);
217     p = (StgClosure *)payload;
218     switch (fun_info->f.fun_type) {
220 	checkSmallBitmap( (StgPtr)payload,
221 			  BITMAP_BITS(fun_info->f.b.bitmap), n_args );
224 	checkLargeBitmap( (StgPtr)payload,
225 			  GET_FUN_LARGE_BITMAP(fun_info),
229 	checkLargeBitmap( (StgPtr)payload,
	/* Standard argument pattern: look the bitmap up by fun_type. */
234 	checkSmallBitmap( (StgPtr)payload,
235 			  BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
    /* Tag/arity consistency check on the original (tagged) pointer. */
240     ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 1
241 	   : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
/* The main per-closure sanity check: verify the info pointer, the
 * static/dynamic placement flag, and — per closure type — that every
 * pointer field looks like a valid closure pointer.  Returns the closure's
 * size in words so callers (checkHeap/checkHeapChunk) can step through a
 * heap region. */
246 checkClosure( StgClosure* p )
248     const StgInfoTable *info;
250     ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
252     p = UNTAG_CLOSURE(p);
253     /* Is it a static closure (i.e. in the data segment)? */
254     if (!HEAP_ALLOCED(p)) {
255 	ASSERT(closure_STATIC(p));
257 	ASSERT(!closure_STATIC(p));
261     switch (info->type) {
	/* MVar: head, tail and value must all be closure pointers. */
266 	StgMVar *mvar = (StgMVar *)p;
267 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
268 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
269 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
272 	checkBQ((StgBlockingQueueElement *)mvar->head, p);
274 	checkBQ(mvar->head, p);
277 	return sizeofW(StgMVar);
	/* Thunk-like closures: check each pointer in the payload. */
288 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
289 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
291 	    return thunk_sizeW_fromITBL(info);
308     case IND_OLDGEN_PERM:
311     case SE_CAF_BLACKHOLE:
319     case CONSTR_NOCAF_STATIC:
	    /* Generic case: check the pointers-first payload layout. */
324 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
325 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
327 	    return sizeW_fromITBL(info);
	/* BCO: instruction array, literals and pointer table. */
331 	StgBCO *bco = (StgBCO *)p;
332 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
333 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
334 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
335 	return bco_sizeW(bco);
338     case IND_STATIC: /* (1, 0) closure */
339 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
340 	return sizeW_fromITBL(info);
343 	/* deal with these specially - the info table isn't
344 	 * representative of the actual layout.
346 	{ StgWeak *w = (StgWeak *)p;
347 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
348 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
349 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
351 	      ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
353 	  return sizeW_fromITBL(info);
357 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
358 	    return THUNK_SELECTOR_sizeW();
362 	    /* we don't expect to see any of these after GC
363 	     * but they might appear during execution
365 		StgInd *ind = (StgInd *)p;
366 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
367 		return sizeofW(StgInd);
	/* Stack frames must never appear as free-standing heap objects. */
377     case ATOMICALLY_FRAME:
378     case CATCH_RETRY_FRAME:
379     case CATCH_STM_FRAME:
380 	    barf("checkClosure: stack frame");
	/* AP/PAP: delegate argument checking to checkPAP. */
384 	StgAP* ap = (StgAP *)p;
385 	checkPAP (ap->fun, ap->payload, ap->n_args);
391 	StgPAP* pap = (StgPAP *)p;
392 	checkPAP (pap->fun, pap->payload, pap->n_args);
393 	return pap_sizeW(pap);
	/* AP_STACK: the payload is a stack chunk; check it as such. */
398 	StgAP_STACK *ap = (StgAP_STACK *)p;
399 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
400 	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
401 	return ap_stack_sizeW(ap);
405 	return arr_words_sizeW((StgArrWords *)p);
407     case MUT_ARR_PTRS_CLEAN:
408     case MUT_ARR_PTRS_DIRTY:
409     case MUT_ARR_PTRS_FROZEN:
410     case MUT_ARR_PTRS_FROZEN0:
412 	StgMutArrPtrs* a = (StgMutArrPtrs *)p;
414 	for (i = 0; i < a->ptrs; i++) {
415 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
417 	return mut_arr_ptrs_sizeW(a);
421 	checkTSO((StgTSO *)p);
422 	return tso_sizeW((StgTSO *)p);
	/* Parallel-RTS (GUM) closure types below. */
427 	ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
428 	ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
429 	return sizeofW(StgBlockedFetch);  // see size used in evacuate()
433 	return sizeofW(StgFetchMe);
437 	ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
438 	return sizeofW(StgFetchMe);  // see size used in evacuate()
441 	checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
442 	return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
445 	/* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
446 	ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
447 	if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
448 	  checkBQ(((StgRBH *)p)->blocking_queue, p);
449 	ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
450 	return BLACKHOLE_sizeW();   // see size used in evacuate()
451 	// sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
	/* STM closure types: check each link/field pointer. */
455     case TVAR_WATCH_QUEUE:
457 	StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
458 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
459 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
460 	return sizeofW(StgTVarWatchQueue);
463     case INVARIANT_CHECK_QUEUE:
465 	StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
466 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
467 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
468 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
469 	return sizeofW(StgInvariantCheckQueue);
472     case ATOMIC_INVARIANT:
474 	StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
475 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
476 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
477 	return sizeofW(StgAtomicInvariant);
482 	StgTVar *tv = (StgTVar *)p;
483 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
484 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
485 	return sizeofW(StgTVar);
491 	StgTRecChunk *tc = (StgTRecChunk *)p;
492 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
	/* Only entries up to next_entry_idx are live. */
493 	for (i = 0; i < tc -> next_entry_idx; i ++) {
494 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
495 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
496 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
498 	return sizeofW(StgTRecChunk);
503 	StgTRecHeader *trec = (StgTRecHeader *)p;
504 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
505 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
506 	return sizeofW(StgTRecHeader);
	/* EVACUATED closures should never survive to a sanity check. */
511 	barf("checkClosure: found EVACUATED closure %d",
514 	barf("checkClosure (closure type %d)", info->type);
/* Limits used by the GUM/PVM plausibility checks below.  PVM_PE_MASK
 * selects the PE-number field of a PVM task id (looks_like_tid shifts it
 * down by 18 bits). */
520 #define PVM_PE_MASK    0xfffc0000
521 #define MAX_PVM_PES        MAX_PES
522 #define MAX_PVM_TIDS       MAX_PES
523 #define MAX_SLOTS          100000
/* Plausibility check for a PVM task id: split it into the PE-number field
 * (high bits) and the per-PE id (low bits) and range-check both. */
526 looks_like_tid(StgInt tid)
528   StgInt hi = (tid & PVM_PE_MASK) >> 18;
529   StgInt lo = (tid & ~PVM_PE_MASK);
530   rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Plausibility check for a GA slot number when the owning tid is unknown:
 * only an upper bound can be enforced. */
535 looks_like_slot(StgInt slot)
537   /* if tid is known better use looks_like_ga!! */
538   rtsBool ok = slot<MAX_SLOTS;
539   // This refers only to the no. of slots on the current PE
540   // rtsBool ok = slot<=highest_slot();
/* Plausibility check for a global address: the tid must look valid, and
 * the slot is bounded tightly (highest_slot()) when the GA is local to
 * this PE (gtid==mytid), loosely (MAX_SLOTS) otherwise. */
545 looks_like_ga(globalAddr *ga)
547   rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
548   rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
549                     (ga)->payload.gc.slot<=highest_slot() :
550                     (ga)->payload.gc.slot<MAX_SLOTS;
551   rtsBool ok = is_tid && is_slot;
558 /* -----------------------------------------------------------------------------
561 After garbage collection, the live heap is in a state where we can
562 run through and check that all the pointers point to the right
563 place. This function starts at a given position and sanity-checks
564 all the objects in the remainder of the chain.
565 -------------------------------------------------------------------------- */
/* Sanity-check a chain of heap blocks: step through each block closure by
 * closure (checkClosure returns the size in words), then skip any slop
 * words that do not look like info pointers. */
568 checkHeap(bdescr *bd)
572 #if defined(THREADED_RTS)
573     // heap sanity checking doesn't work with SMP, because we can't
574     // zero the slop (see Updates.h).
578     for (; bd != NULL; bd = bd->link) {
580 	while (p < bd->free) {
581 	    nat size = checkClosure((StgClosure *)p);
582 	    /* This is the smallest size of closure that can live in the heap */
583 	    ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
	    /* skip over slop: words that are too small to be info pointers
	     * or that don't look like one */
587 	    while (p < bd->free &&
588 		   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
595 Check heap between start and end. Used after unpacking graphs.
/* (Parallel-RTS variant) Check the heap between start and end, with special
 * cases for FETCH_ME closures created during graph unpacking and for IND
 * slop, neither of which checkClosure handles directly. */
598 checkHeapChunk(StgPtr start, StgPtr end)
600   extern globalAddr *LAGAlookup(StgClosure *addr);
604   for (p=start; p<end; p+=size) {
605     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
606     if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
607 	*(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
608       /* if it's a FM created during unpack and commoned up, it's not global */
609       ASSERT(LAGAlookup((StgClosure*)p)==NULL);
610       size = sizeofW(StgFetchMe);
611     } else if (get_itbl((StgClosure*)p)->type == IND) {
612       *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
613       size = sizeofW(StgInd);
615       size = checkClosure((StgClosure *)p);
616       /* This is the smallest size of closure that can live in the heap. */
617       ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
/* Check the heap between start and end: every object must start with a
 * valid info pointer and pass checkClosure, which also yields the step. */
623 checkHeapChunk(StgPtr start, StgPtr end)
628   for (p=start; p<end; p+=size) {
629     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
630     size = checkClosure((StgClosure *)p);
631     /* This is the smallest size of closure that can live in the heap. */
632     ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
/* Check a chain of blocks each holding a single closure at its start
 * (e.g. large objects): run checkClosure on the first object per block. */
638 checkChain(bdescr *bd)
641     checkClosure((StgClosure *)bd->start);
/* Sanity-check a thread state object: stack-pointer bounds, blocking state
 * consistent with block_info, link-field plausibility, and finally the
 * thread's stack contents. */
647 checkTSO(StgTSO *tso)
650     StgPtr stack = tso->stack;
651     StgOffset stack_size = tso->stack_size;
652     StgPtr stack_end = stack + stack_size;
    /* Relocated and killed threads are not checked: the GC does not
     * follow pointers from them. */
654     if (tso->what_next == ThreadRelocated) {
659     if (tso->what_next == ThreadKilled) {
660       /* The garbage collector doesn't bother following any pointers
661        * from dead threads, so don't check sanity here.  
666     ASSERT(stack <= sp && sp < stack_end);
669     ASSERT(tso->par.magic==TSO_MAGIC);
    /* The closure (or TSO) recorded in block_info must match the reason
     * the thread is blocked. */
671     switch (tso->why_blocked) {
673       checkClosureShallow(tso->block_info.closure);
674       ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
675 	     get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
677     case BlockedOnGA_NoSend:
678       checkClosureShallow(tso->block_info.closure);
679       ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
681     case BlockedOnBlackHole:
682       checkClosureShallow(tso->block_info.closure);
683       ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
684 	     get_itbl(tso->block_info.closure)->type==RBH);
689 #if defined(mingw32_HOST_OS)
690     case BlockedOnDoProc:
692     /* isOnBQ(blocked_queue) */
694     case BlockedOnException:
695     /* isOnSomeBQ(tso) */
696       ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
699       ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
702       ASSERT(tso->block_info.closure == END_TSO_QUEUE);
706       Could check other values of why_blocked but I am more
707       lazy than paranoid (bad combination) -- HWL
711     /* if the link field is non-nil it must point to one of these
712        three closure types */
713     ASSERT(tso->link == END_TSO_QUEUE ||
714 	   get_itbl(tso->link)->type == TSO ||
715 	   get_itbl(tso->link)->type == BLOCKED_FETCH ||
716 	   get_itbl(tso->link)->type == CONSTR);
719     checkStackChunk(sp, stack_end);
/* (GranSim) Check every runnable TSO on every PE's run queue, reporting
 * progress and totals via debugBelch. */
724 checkTSOsSanity(void) {
728   debugBelch("Checking sanity of all runnable TSOs:");
730   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
731     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
732       debugBelch("TSO %p on PE %d ...", tso, i);
739   debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* (GranSim) Check the doubly-consistent structure of one PE's run queue:
 * head/tail are non-NULL, NIL-ness of head and tail agree, every link
 * chains correctly, and the last element is the recorded tail.  If
 * check_TSO_too is set, each TSO is also sanity-checked. */
746 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
750   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
751   ASSERT(run_queue_hds[proc]!=NULL);
752   ASSERT(run_queue_tls[proc]!=NULL);
753   /* if either head or tail is NIL then the other one must be NIL, too */
754   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
755   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
756   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
758        prev=tso, tso=tso->link) {
759     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
760 	   (prev==END_TSO_QUEUE || prev->link==tso));
764     ASSERT(prev==run_queue_tls[proc]);
/* (GranSim) Run checkThreadQSanity over every PE's run queue. */
768 checkThreadQsSanity (rtsBool check_TSO_too)
772   for (p=0; p<RtsFlags.GranFlags.proc; p++)
773     checkThreadQSanity(p, check_TSO_too);
778 Check that all TSOs have been evacuated.
779 Optionally also check the sanity of the TSOs.
/* Walk the global list of all threads, verifying each entry is a valid
 * TSO closure; optionally (checkTSOs) sanity-check each one in depth. */
782 checkGlobalTSOList (rtsBool checkTSOs)
784   extern  StgTSO *all_threads;
786   for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
787       ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
788       ASSERT(get_itbl(tso)->type == TSO);
794 /* -----------------------------------------------------------------------------
795 Check mutable list sanity.
796 -------------------------------------------------------------------------- */
/* Check a generation's mutable list: every recorded closure must either be
 * static or live in the generation ('gen') that owns this list. */
799 checkMutableList( bdescr *mut_bd, nat gen )
805     for (bd = mut_bd; bd != NULL; bd = bd->link) {
806 	for (q = bd->start; q < bd->free; q++) {
807 	    p = (StgClosure *)*q;
808 	    ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
814 Check the static objects list.
/* Walk the static-objects list, following the per-type static link field
 * of each entry until END_OF_STATIC_LIST.  For IND_STATIC, also verify the
 * indirectee looks like a valid closure.
 * Fix: the barf() message previously misspelled the function name
 * ("checkStaticObjetcs"). */
817 checkStaticObjects ( StgClosure* static_objects )
819   StgClosure *p = static_objects;
822   while (p != END_OF_STATIC_LIST) {
825     switch (info->type) {
828         StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);
830         ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
831         ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
832         p = *IND_STATIC_LINK((StgClosure *)p);
837       p = *THUNK_STATIC_LINK((StgClosure *)p);
841       p = *FUN_STATIC_LINK((StgClosure *)p);
845       p = *STATIC_LINK(info,(StgClosure *)p);
849       barf("checkStaticObjects: strange closure %p (%s)",
856 Check the sanity of a blocking queue starting at bqe with closure being
857 the closure holding the blocking queue.
858 Note that in GUM we can have several different closure types in a
/* (GUM/GranSim) Check a blocking queue hanging off 'closure' (which must
 * be an MVAR, FETCH_ME_BQ or RBH): each queue element is itself checked
 * as a closure, until END_BQ_QUEUE. */
863 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
865   rtsBool end = rtsFalse;
866   StgInfoTable *info = get_itbl(closure);
868   ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
871     switch (get_itbl(bqe)->type) {
874       checkClosure((StgClosure *)bqe);
876       end = (bqe==END_BQ_QUEUE);
880       checkClosure((StgClosure *)bqe);
	/* Anything else in a blocking queue is a sanity failure. */
885       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
886 	   get_itbl(bqe)->type, closure, info_type(closure));
/* (Sequential RTS) Check a TSO blocking queue hanging off an MVAR: each
 * element must be a valid closure, until END_BQ_QUEUE. */
892 checkBQ (StgTSO *bqe, StgClosure *closure)
894   rtsBool end = rtsFalse;
895   StgInfoTable *info = get_itbl(closure);
897   ASSERT(info->type == MVAR);
900     switch (get_itbl(bqe)->type) {
903       checkClosure((StgClosure *)bqe);
905       end = (bqe==END_BQ_QUEUE);
	/* Anything else in a blocking queue is a sanity failure. */
909       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
910 	   get_itbl(bqe)->type, closure, info_type(closure));
919 This routine checks the sanity of the LAGA and GALA tables. They are
920 implemented as lists through one hash table, LAtoGALAtable, because entries
921 in both tables have the same structure:
922 - the LAGA table maps local addresses to global addresses; it starts
923 with liveIndirections
924 - the GALA table maps global addresses to local addresses; it starts
931 /* hidden in parallel/Global.c; only accessed for testing here */
932 extern GALA *liveIndirections;
933 extern GALA *liveRemoteGAs;
934 extern HashTable *LAtoGALAtable;
937 checkLAGAtable(rtsBool check_closures)
940 nat n=0, m=0; // debugging
942 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
944 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
945 ASSERT(!gala->preferred || gala == gala0);
946 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
947 ASSERT(gala->next!=gala); // detect direct loops
948 if ( check_closures ) {
949 checkClosure((StgClosure *)gala->la);
953 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
955 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
956 ASSERT(!gala->preferred || gala == gala0);
957 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
958 ASSERT(gala->next!=gala); // detect direct loops
960 if ( check_closures ) {
961 checkClosure((StgClosure *)gala->la);