1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2001
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
// Scan 'size' words of 'payload' against a small (single-word) bitmap.
// The bitmap is consumed LSB-first; a CLEAR bit marks a pointer slot,
// whose word is shallow-checked as a closure pointer.  Slots with the
// bit set are not checked here.
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49 for(i = 0; i < size; i++, bitmap >>= 1 ) {
// bit clear => this payload word is expected to be a closure pointer
50 if ((bitmap & 1) == 0) {
51 checkClosureShallow((StgClosure *)payload[i]);
// Scan 'size' words of 'payload' against a multi-word (large) bitmap.
// Outer loop walks the bitmap words; inner loop consumes up to
// BITS_IN(W_) bits per word, LSB-first.  As with the small-bitmap case,
// a CLEAR bit marks a pointer slot, which is shallow-checked.
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63 for (bmp=0; i < size; bmp++) {
64 StgWord bitmap = large_bitmap->bitmap[bmp];
// note: 'j' presumably resets to 0 per bitmap word -- line elided here
66 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 if ((bitmap & 1) == 0) {
68 checkClosureShallow((StgClosure *)payload[i]);
75 * check that it looks like a valid closure - without checking its payload
76 * used to avoid recursion between checking PAPs and checking stack
// Shallow validity check: the pointer must look like a closure pointer,
// and its static/dynamic flag must agree with where it lives
// (non-heap-allocated closures must be static; heap ones must not be).
81 checkClosureShallow( StgClosure* p )
83 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
85 /* Is it a static closure? */
86 if (!HEAP_ALLOCED(p)) {
87 ASSERT(closure_STATIC(p));
// else branch (heap-allocated): must NOT be marked static
89 ASSERT(!closure_STATIC(p));
93 // check an individual stack object
// Sanity-check one stack frame at 'c' and return its size in words, so
// the caller (checkStackChunk) can advance to the next frame.  Dispatches
// on the frame's return-info-table type.
95 checkStackFrame( StgPtr c )
98 const StgRetInfoTable* info;
100 info = get_ret_itbl((StgClosure *)c);
102 /* All activation records have 'bitmap' style layout info. */
103 switch (info->i.type) {
104 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
// check the register-save area against the liveness mask ...
113 p = (P_)(r->payload);
114 checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
115 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
117 // skip over the non-pointers
118 p += RET_DYN_NONPTRS(dyn);
120 // follow the ptr words
121 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
122 checkClosureShallow((StgClosure *)*p);
// total frame size: fixed header + bitmap + saved regs + dynamic parts
126 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
127 RET_DYN_NONPTR_REGS_SIZE +
128 RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
// update frame: its updatee must look like a closure pointer
132 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
133 case ATOMICALLY_FRAME:
134 case CATCH_RETRY_FRAME:
135 case CATCH_STM_FRAME:
137 // small bitmap cases (<= 32 entries)
141 size = BITMAP_SIZE(info->i.layout.bitmap);
142 checkSmallBitmap((StgPtr)c + 1,
143 BITMAP_BITS(info->i.layout.bitmap), size);
// RET_BCO-style frame: the word after the header is the BCO, whose own
// bitmap describes the following words
149 bco = (StgBCO *)*(c+1);
150 size = BCO_BITMAP_SIZE(bco);
151 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
155 case RET_BIG: // large bitmap (> 32 entries)
157 size = GET_LARGE_BITMAP(&info->i)->size;
158 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
// RET_FUN frame: layout is determined by the function's info table
163 StgFunInfoTable *fun_info;
166 ret_fun = (StgRetFun *)c;
167 fun_info = get_fun_itbl(ret_fun->fun);
168 size = ret_fun->size;
169 switch (fun_info->f.fun_type) {
// ARG_GEN-style: small bitmap stored in the fun info table
171 checkSmallBitmap((StgPtr)ret_fun->payload,
172 BITMAP_BITS(fun_info->f.bitmap), size);
// ARG_GEN_BIG-style: large bitmap via the fun info table
175 checkLargeBitmap((StgPtr)ret_fun->payload,
176 GET_FUN_LARGE_BITMAP(fun_info), size);
// default: standard argument pattern, bitmap from the static table
179 checkSmallBitmap((StgPtr)ret_fun->payload,
180 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
184 return sizeofW(StgRetFun) + size;
// unknown frame type: die loudly
188 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
192 // check sections of stack between update frames
// Walk the stack from 'sp' to 'stack_end', checking each frame and
// advancing by the size checkStackFrame reports.
194 checkStackChunk( StgPtr sp, StgPtr stack_end )
199 while (p < stack_end) {
200 p += checkStackFrame( p );
202 // ASSERT( p == stack_end ); -- HWL
// Full sanity check of one closure; returns its size in words so callers
// (checkHeap / checkHeapChunk / checkChain) can step to the next object.
// Dispatches on the closure's info-table type.
206 checkClosure( StgClosure* p )
208 const StgInfoTable *info;
210 ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
212 /* Is it a static closure (i.e. in the data segment)? */
213 if (!HEAP_ALLOCED(p)) {
214 ASSERT(closure_STATIC(p));
// else: heap-allocated, must not be flagged static
216 ASSERT(!closure_STATIC(p));
220 switch (info->type) {
// MVar: head/tail/value must all look like closure pointers; the
// blocking queue is checked too (parallel vs. sequential variants).
224 StgMVar *mvar = (StgMVar *)p;
225 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
226 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
227 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
230 checkBQ((StgBlockingQueueElement *)mvar->head, p);
232 checkBQ(mvar->head, p);
235 return sizeofW(StgMVar);
// thunk-like closures: check each pointer field; size is at least big
// enough to be updated in place (MIN_UPD_SIZE)
246 for (i = 0; i < info->layout.payload.ptrs; i++) {
247 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
249 return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
// blocking-queue-carrying closure: check the queue, then fall through
253 checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
254 /* fall through to basic ptr check */
269 case IND_OLDGEN_PERM:
272 case SE_CAF_BLACKHOLE:
281 case CONSTR_CHARLIKE:
283 case CONSTR_NOCAF_STATIC:
// generic case: check every pointer in the payload per the info table
288 for (i = 0; i < info->layout.payload.ptrs; i++) {
289 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
291 return sizeW_fromITBL(info);
// BCO: all four component pointers must look like closures
295 StgBCO *bco = (StgBCO *)p;
296 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
297 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
298 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
299 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
300 return bco_sizeW(bco);
303 case IND_STATIC: /* (1, 0) closure */
304 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
305 return sizeW_fromITBL(info);
308 /* deal with these specially - the info table isn't
309 * representative of the actual layout.
311 { StgWeak *w = (StgWeak *)p;
312 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
313 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
314 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
316 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
318 return sizeW_fromITBL(info);
// thunk selector: just check the selectee
322 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
323 return sizeofW(StgHeader) + MIN_UPD_SIZE;
327 /* we don't expect to see any of these after GC
328 * but they might appear during execution
331 StgInd *ind = (StgInd *)p;
332 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
333 q = (P_)p + sizeofW(StgInd);
334 while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
// stack frame types should never appear as heap closures
347 case ATOMICALLY_FRAME:
348 case CATCH_RETRY_FRAME:
349 case CATCH_STM_FRAME:
350 barf("checkClosure: stack frame");
352 case AP: /* we can treat this as being the same as a PAP */
// PAP/AP: check the function, then check the argument payload using the
// function's argument bitmap (same dispatch pattern as RET_FUN above)
355 StgFunInfoTable *fun_info;
356 StgPAP* pap = (StgPAP *)p;
358 ASSERT(LOOKS_LIKE_CLOSURE_PTR(pap->fun));
359 fun_info = get_fun_itbl(pap->fun);
361 p = (StgClosure *)pap->payload;
362 switch (fun_info->f.fun_type) {
364 checkSmallBitmap( (StgPtr)pap->payload,
365 BITMAP_BITS(fun_info->f.bitmap), pap->n_args );
368 checkLargeBitmap( (StgPtr)pap->payload,
369 GET_FUN_LARGE_BITMAP(fun_info),
// BCO function: use the BCO's own bitmap
373 checkLargeBitmap( (StgPtr)pap->payload,
374 BCO_BITMAP(pap->fun),
378 checkSmallBitmap( (StgPtr)pap->payload,
379 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
383 return pap_sizeW(pap);
// AP_STACK: its payload is a stack chunk; check it as such
388 StgAP_STACK *ap = (StgAP_STACK *)p;
389 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
390 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
391 return ap_stack_sizeW(ap);
// ARR_WORDS: no pointers to check, just report the size
395 return arr_words_sizeW((StgArrWords *)p);
398 case MUT_ARR_PTRS_FROZEN:
400 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
402 for (i = 0; i < a->ptrs; i++) {
403 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
405 return mut_arr_ptrs_sizeW(a);
// TSO: delegate to the dedicated thread checker
409 checkTSO((StgTSO *)p);
410 return tso_sizeW((StgTSO *)p);
// parallel (GUM) closure types follow
415 ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
416 ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
417 return sizeofW(StgBlockedFetch); // see size used in evacuate()
421 return sizeofW(StgFetchMe);
425 ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
426 return sizeofW(StgFetchMe); // see size used in evacuate()
429 checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
430 return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
433 /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
434 ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
435 if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
436 checkBQ(((StgRBH *)p)->blocking_queue, p);
437 ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
438 return BLACKHOLE_sizeW(); // see size used in evacuate()
439 // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
// STM closure types
443 case TVAR_WAIT_QUEUE:
445 StgTVarWaitQueue *wq = (StgTVarWaitQueue *)p;
446 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
447 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
448 return sizeofW(StgTVarWaitQueue);
453 StgTVar *tv = (StgTVar *)p;
454 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
455 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_wait_queue_entry));
456 return sizeofW(StgTVar);
462 StgTRecChunk *tc = (StgTRecChunk *)p;
463 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
464 for (i = 0; i < tc -> next_entry_idx; i ++) {
465 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
466 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
467 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
469 return sizeofW(StgTRecChunk);
474 StgTRecHeader *trec = (StgTRecHeader *)p;
475 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
476 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
477 return sizeofW(StgTRecHeader);
// EVACUATED closures and unknown types are both fatal errors here
482 barf("checkClosure: found EVACUATED closure %d",
485 barf("checkClosure (closure type %d)", info->type);
491 #define PVM_PE_MASK 0xfffc0000
492 #define MAX_PVM_PES MAX_PES
493 #define MAX_PVM_TIDS MAX_PES
494 #define MAX_SLOTS 100000
// Heuristic check that 'tid' is a plausible PVM task id: the top bits
// (above bit 18, per PVM_PE_MASK) encode the PE and must be non-zero;
// both halves must be below MAX_PVM_TIDS.
497 looks_like_tid(StgInt tid)
499 StgInt hi = (tid & PVM_PE_MASK) >> 18;
500 StgInt lo = (tid & ~PVM_PE_MASK);
501 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
// Heuristic check that 'slot' is a plausible GA slot number: merely
// bounded by MAX_SLOTS (a global cap, not the per-PE high-water mark).
506 looks_like_slot(StgInt slot)
508 /* if tid is known better use looks_like_ga!! */
509 rtsBool ok = slot<MAX_SLOTS;
510 // This refers only to the no. of slots on the current PE
511 // rtsBool ok = slot<=highest_slot();
// Heuristic check that 'ga' is a plausible global address: its tid must
// look valid, and its slot must be within bounds -- the tighter
// highest_slot() bound applies only when the GA belongs to this PE.
516 looks_like_ga(globalAddr *ga)
518 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
519 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
520 (ga)->payload.gc.slot<=highest_slot() :
521 (ga)->payload.gc.slot<MAX_SLOTS;
522 rtsBool ok = is_tid && is_slot;
529 /* -----------------------------------------------------------------------------
532 After garbage collection, the live heap is in a state where we can
533 run through and check that all the pointers point to the right
534 place. This function starts at a given position and sanity-checks
535 all the objects in the remainder of the chain.
536 -------------------------------------------------------------------------- */
// Check every closure in the chain of blocks starting at 'bd': step
// through each block up to its 'free' pointer, checking closures and
// advancing by their reported size.  After each object, any slop words
// that don't look like info pointers are skipped.
539 checkHeap(bdescr *bd)
543 for (; bd != NULL; bd = bd->link) {
545 while (p < bd->free) {
546 nat size = checkClosure((StgClosure *)p);
547 /* This is the smallest size of closure that can live in the heap */
548 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
// skip over slop: words that are small or don't look like info ptrs
552 while (p < bd->free &&
553 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
560 Check heap between start and end. Used after unpacking graphs.
// Parallel (GUM) variant: check the heap between 'start' and 'end',
// used after unpacking graphs.  FETCH_MEs created and commoned-up during
// unpacking (marked 0x0000eeee) must not have a LAGA entry; slop in INDs
// is stamped 0x0000ee11 so later scans recognise it as garbage.
563 checkHeapChunk(StgPtr start, StgPtr end)
565 extern globalAddr *LAGAlookup(StgClosure *addr);
569 for (p=start; p<end; p+=size) {
570 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
571 if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
572 *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
573 /* if it's a FM created during unpack and commoned up, it's not global */
574 ASSERT(LAGAlookup((StgClosure*)p)==NULL);
575 size = sizeofW(StgFetchMe);
576 } else if (get_itbl((StgClosure*)p)->type == IND) {
577 *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
// otherwise: full closure check, and enforce the minimum heap size
580 size = checkClosure((StgClosure *)p);
581 /* This is the smallest size of closure that can live in the heap. */
582 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
// Non-parallel variant: simply check every closure between 'start' and
// 'end', advancing by each closure's reported size.
588 checkHeapChunk(StgPtr start, StgPtr end)
593 for (p=start; p<end; p+=size) {
594 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
595 size = checkClosure((StgClosure *)p);
596 /* This is the smallest size of closure that can live in the heap. */
597 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
// Check the single closure at the start of each block in the chain 'bd'
// (used for large objects, which occupy a block by themselves).
603 checkChain(bdescr *bd)
606 checkClosure((StgClosure *)bd->start);
// Sanity-check one thread: its stack bounds, its why_blocked state
// (the block_info field must match the blocking reason), its link field,
// and finally its live stack contents.
612 checkTSO(StgTSO *tso)
615 StgPtr stack = tso->stack;
616 StgOffset stack_size = tso->stack_size;
617 StgPtr stack_end = stack + stack_size;
// relocated threads are forwarding stubs -- nothing to check here
619 if (tso->what_next == ThreadRelocated) {
624 if (tso->what_next == ThreadKilled) {
625 /* The garbage collector doesn't bother following any pointers
626 * from dead threads, so don't check sanity here.
// sp must lie within the stack
631 ASSERT(stack <= sp && sp < stack_end);
// parallel build: TSO carries a magic number for corruption detection
634 ASSERT(tso->par.magic==TSO_MAGIC);
636 switch (tso->why_blocked) {
// blocked on a global address: must be waiting on a FETCH_ME_BQ
638 checkClosureShallow(tso->block_info.closure);
639 ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
640 get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
642 case BlockedOnGA_NoSend:
643 checkClosureShallow(tso->block_info.closure);
644 ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
646 case BlockedOnBlackHole:
647 checkClosureShallow(tso->block_info.closure);
648 ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
649 get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
650 get_itbl(tso->block_info.closure)->type==RBH);
655 #if defined(mingw32_TARGET_OS)
656 case BlockedOnDoProc:
658 /* isOnBQ(blocked_queue) */
660 case BlockedOnException:
661 /* isOnSomeBQ(tso) */
662 ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
// blocked on an MVar: block_info must point at an MVAR
665 ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
// blocked on a delay: block_info.closure is unused, must be END_TSO_QUEUE
668 ASSERT(tso->block_info.closure == END_TSO_QUEUE);
672 Could check other values of why_blocked but I am more
673 lazy than paranoid (bad combination) -- HWL
677 /* if the link field is non-nil it must point to one of these
678 three closure types */
679 ASSERT(tso->link == END_TSO_QUEUE ||
680 get_itbl(tso->link)->type == TSO ||
681 get_itbl(tso->link)->type == BLOCKED_FETCH ||
682 get_itbl(tso->link)->type == CONSTR);
// finally, walk the live portion of the stack
685 checkStackChunk(sp, stack_end);
// GranSim: check every TSO on every PE's run queue, reporting progress
// via debugBelch and a final count of TSOs/PEs checked.
690 checkTSOsSanity(void) {
694 debugBelch("Checking sanity of all runnable TSOs:");
696 for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
697 for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
698 debugBelch("TSO %p on PE %d ...", tso, i);
705 debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
// GranSim: check the run queue of one PE.  Head/tail must never be NULL
// (empty is END_TSO_QUEUE), head and tail emptiness must agree, the list
// must be properly back-linked, and the last element must be the tail.
// 'check_TSO_too' presumably triggers a per-TSO checkTSO -- that call is
// in an elided line; confirm against the full source.
712 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
716 /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
717 ASSERT(run_queue_hds[proc]!=NULL);
718 ASSERT(run_queue_tls[proc]!=NULL);
719 /* if either head or tail is NIL then the other one must be NIL, too */
720 ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
721 ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
722 for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
724 prev=tso, tso=tso->link) {
725 ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
726 (prev==END_TSO_QUEUE || prev->link==tso));
// after the loop, 'prev' is the last element and must equal the tail
730 ASSERT(prev==run_queue_tls[proc]);
// GranSim: check the run queues of all PEs.
734 checkThreadQsSanity (rtsBool check_TSO_too)
738 for (p=0; p<RtsFlags.GranFlags.proc; p++)
739 checkThreadQSanity(p, check_TSO_too);
744 Check that all TSOs have been evacuated.
745 Optionally also check the sanity of the TSOs.
// Walk the global all_threads list: every entry must look like a closure
// pointer and be a TSO.  'checkTSOs' presumably enables a deeper per-TSO
// check in an elided line -- confirm against the full source.
748 checkGlobalTSOList (rtsBool checkTSOs)
750 extern StgTSO *all_threads;
752 for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
753 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
754 ASSERT(get_itbl(tso)->type == TSO);
760 /* -----------------------------------------------------------------------------
761 Check mutable list sanity.
762 -------------------------------------------------------------------------- */
// Check a generation's mutable list: every element must be flagged
// mutable, live in generation 'gen', and have a plausible mut_link.
765 checkMutableList( StgMutClosure *p, nat gen )
769 for (; p != END_MUT_LIST; p = p->mut_link) {
771 ASSERT(closure_MUTABLE(p));
772 ASSERT(bd->gen_no == gen);
773 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
// Check a generation's mutate-once list: elements must NOT be flagged
// mutable, must be static or belong to generation 'gen', and must be one
// of the expected closure types (others are fatal).
778 checkMutOnceList( StgMutClosure *p, nat gen )
783 for (; p != END_MUT_LIST; p = p->mut_link) {
787 ASSERT(!closure_MUTABLE(p));
788 ASSERT(ip_STATIC(info) || bd->gen_no == gen);
789 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
791 switch (info->type) {
794 case IND_OLDGEN_PERM:
// any other closure type on this list is an error
798 barf("checkMutOnceList: strange closure %p (%s)",
799 p, info_type((StgClosure *)p));
805 Check the static objects list.
// Walk the static objects list, following the appropriate per-type
// static-link field; IND_STATIC additionally has its indirectee checked.
808 checkStaticObjects ( StgClosure* static_objects )
810 StgClosure *p = static_objects;
813 while (p != END_OF_STATIC_LIST) {
816 switch (info->type) {
819 StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
821 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
822 ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
823 p = IND_STATIC_LINK((StgClosure *)p);
828 p = THUNK_STATIC_LINK((StgClosure *)p);
832 p = FUN_STATIC_LINK((StgClosure *)p);
836 p = STATIC_LINK(info,(StgClosure *)p);
// NOTE(review): this barf message misspells "Objects" ("Objetcs");
// left unchanged here since it is a runtime string.
840 barf("checkStaticObjetcs: strange closure %p (%s)",
847 Check the sanity of a blocking queue starting at bqe with closure being
848 the closure holding the blocking queue.
849 Note that in GUM we can have several different closure types in a
// GUM/GranSim variant: check a blocking queue hanging off 'closure',
// which must itself be a queue-carrying type (BLACKHOLE_BQ, MVAR,
// FETCH_ME_BQ, or RBH).  Multiple element types may appear on the queue.
854 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
856 rtsBool end = rtsFalse;
857 StgInfoTable *info = get_itbl(closure);
859 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
860 || info->type == FETCH_ME_BQ || info->type == RBH);
863 switch (get_itbl(bqe)->type) {
866 checkClosure((StgClosure *)bqe);
868 end = (bqe==END_BQ_QUEUE);
872 checkClosure((StgClosure *)bqe);
// unexpected element type: fatal
877 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
878 get_itbl(bqe)->type, closure, info_type(closure));
// Second variant (different build configuration): queue elements are
// TSOs; the owning closure must be a BLACKHOLE_BQ or MVAR.
884 checkBQ (StgTSO *bqe, StgClosure *closure)
886 rtsBool end = rtsFalse;
887 StgInfoTable *info = get_itbl(closure);
889 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
892 switch (get_itbl(bqe)->type) {
895 checkClosure((StgClosure *)bqe);
897 end = (bqe==END_BQ_QUEUE);
901 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
902 get_itbl(bqe)->type, closure, info_type(closure));
// Third variant (sequential build): as above, but the queue terminator
// is END_TSO_QUEUE and the error message omits info_type.
908 checkBQ (StgTSO *bqe, StgClosure *closure)
910 rtsBool end = rtsFalse;
911 StgInfoTable *info = get_itbl(closure);
913 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
916 switch (get_itbl(bqe)->type) {
918 checkClosure((StgClosure *)bqe);
920 end = (bqe==END_TSO_QUEUE);
924 barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
925 get_itbl(bqe)->type, closure, info->type);
935 This routine checks the sanity of the LAGA and GALA tables. They are
936 implemented as lists through one hash table, LAtoGALAtable, because entries
937 in both tables have the same structure:
938 - the LAGA table maps local addresses to global addresses; it starts
939 with liveIndirections
940 - the GALA table maps global addresses to local addresses; it starts
947 /* hidden in parallel/Global.c; only accessed for testing here */
948 extern GALA *liveIndirections;
949 extern GALA *liveRemoteGAs;
950 extern HashTable *LAtoGALAtable;
// Check the LAGA and GALA tables (both threaded through LAtoGALAtable):
// for each entry, the preferred GALA must be the one in the hash table,
// the local address must carry a plausible info pointer, and the list
// must not contain a direct self-loop.  With 'check_closures' set, each
// local closure is fully checked as well.
953 checkLAGAtable(rtsBool check_closures)
956 nat n=0, m=0; // debugging
// pass 1: liveIndirections (the LAGA table)
958 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
960 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
961 ASSERT(!gala->preferred || gala == gala0);
962 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
963 ASSERT(gala->next!=gala); // detect direct loops
964 if ( check_closures ) {
965 checkClosure((StgClosure *)gala->la);
// pass 2: liveRemoteGAs (the GALA table)
969 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
971 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
972 ASSERT(!gala->preferred || gala == gala0);
973 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
974 ASSERT(gala->next!=gala); // detect direct loops
976 if ( check_closures ) {
977 checkClosure((StgClosure *)gala->la);