1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Sanity checking code for the heap and stack.
7  * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
/* Forward declarations for the bitmap walkers and the shallow closure check.
 * checkClosureShallow is forward-declared so the bitmap walkers can use it
 * without recursing into full closure checking (see comment further down). */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
/* Walk a single-word bitmap over 'size' payload words; a 0 bit marks a
 * pointer word, which is shallow-checked.  (Fragmentary in this view:
 * variable declarations and closing braces are not visible.) */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49     for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 	if ((bitmap & 1) == 0) {
51 	    checkClosureShallow((StgClosure *)payload[i]);
/* Walk a multi-word (large) bitmap over 'size' payload words.  The outer
 * loop advances one bitmap word ('bmp') at a time; the inner loop consumes
 * up to BITS_IN(W_) bits of it.  A 0 bit marks a pointer word. */
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63     for (bmp=0; i < size; bmp++) {
64 	StgWord bitmap = large_bitmap->bitmap[bmp];
66 	for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 	    if ((bitmap & 1) == 0) {
68 		checkClosureShallow((StgClosure *)payload[i]);
75  * check that it looks like a valid closure - without checking its payload
76  * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
86     ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
88     /* Is it a static closure? */
89     if (!HEAP_ALLOCED(q)) {
90 	ASSERT(closure_STATIC(q));
    /* else branch (heap-allocated): the closure must NOT be marked static */
92 	ASSERT(!closure_STATIC(q));
96 // check an individual stack object
/* Sanity-checks one activation record at 'c' and returns its size in words,
 * so the caller (checkStackChunk) can step to the next frame.
 * NOTE(review): this view of the function is fragmentary — several case
 * labels, declarations and braces are missing between the visible lines. */
98 checkStackFrame( StgPtr c )
101     const StgRetInfoTable* info;
103     info = get_ret_itbl((StgClosure *)c);
105     /* All activation records have 'bitmap' style layout info. */
106     switch (info->i.type) {
107     case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
116 	p = (P_)(r->payload);
117 	checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
118 	p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
120 	// skip over the non-pointers
121 	p += RET_DYN_NONPTRS(dyn);
123 	// follow the ptr words
124 	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
125 	    checkClosureShallow((StgClosure *)*p);
        /* total frame size: header + bitmap + saved regs + nonptrs + ptrs */
129 	return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
130 	    RET_DYN_NONPTR_REGS_SIZE +
131 	    RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
135 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
136     case ATOMICALLY_FRAME:
137     case CATCH_RETRY_FRAME:
138     case CATCH_STM_FRAME:
140 	// small bitmap cases (<= 32 entries)
143 	size = BITMAP_SIZE(info->i.layout.bitmap);
144 	checkSmallBitmap((StgPtr)c + 1,
145 			 BITMAP_BITS(info->i.layout.bitmap), size);
        /* BCO frame: the BCO itself carries the bitmap for its stack slots */
151 	bco = (StgBCO *)*(c+1);
152 	size = BCO_BITMAP_SIZE(bco);
153 	checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
157     case RET_BIG: // large bitmap (> 32 entries)
158 	size = GET_LARGE_BITMAP(&info->i)->size;
159 	checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        /* RET_FUN frame: layout is determined by the function's info table */
164 	StgFunInfoTable *fun_info;
167 	ret_fun = (StgRetFun *)c;
168 	fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
169 	size = ret_fun->size;
170 	switch (fun_info->f.fun_type) {
172 	    checkSmallBitmap((StgPtr)ret_fun->payload,
173 			     BITMAP_BITS(fun_info->f.b.bitmap), size);
176 	    checkLargeBitmap((StgPtr)ret_fun->payload,
177 			     GET_FUN_LARGE_BITMAP(fun_info), size);
            /* generic arg kinds: use the standard argument bitmaps */
180 	    checkSmallBitmap((StgPtr)ret_fun->payload,
181 			     BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
185 	return sizeofW(StgRetFun) + size;
189 	barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
193 // check sections of stack between update frames
/* Walks frames from sp towards stack_end, using each frame's returned size
 * to advance.  The final equality assertion is commented out upstream. */
195 checkStackChunk( StgPtr sp, StgPtr stack_end )
200     while (p < stack_end) {
201 	p += checkStackFrame( p );
203     // ASSERT( p == stack_end ); -- HWL
/* Sanity-check the payload of a PAP/AP: untag the function, then walk the
 * n_args payload words using whichever bitmap the function's info table
 * prescribes.  (Fragmentary view: some case labels/braces are missing.) */
207 checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
210     StgFunInfoTable *fun_info;
212     fun = UNTAG_CLOSURE(fun);
213     ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
214     fun_info = get_fun_itbl(fun);
216     p = (StgClosure *)payload;
217     switch (fun_info->f.fun_type) {
219 	checkSmallBitmap( (StgPtr)payload,
220 			  BITMAP_BITS(fun_info->f.b.bitmap), n_args );
223 	checkLargeBitmap( (StgPtr)payload,
224 			  GET_FUN_LARGE_BITMAP(fun_info),
228 	checkLargeBitmap( (StgPtr)payload,
        /* default: generic argument kinds use the standard arg bitmaps */
233 	checkSmallBitmap( (StgPtr)payload,
234 			  BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
/* Sanity-check a single closure and return its size in words, so callers
 * (checkHeap / checkHeapChunk / checkChain) can step to the next object.
 * Dispatches on the closure type from the info table.
 * NOTE(review): this view is fragmentary — many case labels, declarations
 * and closing braces fall in the gaps between the visible lines. */
242 checkClosure( StgClosure* p )
244     const StgInfoTable *info;
246     ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
248     p = UNTAG_CLOSURE(p);
249     /* Is it a static closure (i.e. in the data segment)? */
250     if (!HEAP_ALLOCED(p)) {
251 	ASSERT(closure_STATIC(p));
        /* heap-allocated closures must not carry the static flag */
253 	ASSERT(!closure_STATIC(p));
257     switch (info->type) {
261 	StgMVar *mvar = (StgMVar *)p;
262 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
263 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
264 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        /* parallel builds check the MVar's blocking queue too */
267 	checkBQ((StgBlockingQueueElement *)mvar->head, p);
269 	checkBQ(mvar->head, p);
272 	return sizeofW(StgMVar);
        /* thunk cases: check each pointer word of the payload */
283 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
284 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
286 	    return thunk_sizeW_fromITBL(info);
303     case IND_OLDGEN_PERM:
306     case SE_CAF_BLACKHOLE:
314     case CONSTR_NOCAF_STATIC:
        /* generic pointers-first layout: check the pointer words */
319 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
320 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
322 	    return sizeW_fromITBL(info);
326 	    StgBCO *bco = (StgBCO *)p;
327 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
328 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
329 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
330 	    return bco_sizeW(bco);
333     case IND_STATIC: /* (1, 0) closure */
334 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
335 	return sizeW_fromITBL(info);
338 	/* deal with these specially - the info table isn't
339 	 * representative of the actual layout.
341 	{ StgWeak *w = (StgWeak *)p;
342 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
343 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
344 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
346 	      ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
348 	  return sizeW_fromITBL(info);
352 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
353 	return THUNK_SELECTOR_sizeW();
357 	/* we don't expect to see any of these after GC
358 	 * but they might appear during execution
360 	    StgInd *ind = (StgInd *)p;
361 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
362 	    return sizeofW(StgInd);
        /* stack frames must never appear as heap closures */
372     case ATOMICALLY_FRAME:
373     case CATCH_RETRY_FRAME:
374     case CATCH_STM_FRAME:
375 	    barf("checkClosure: stack frame");
379 	StgAP* ap = (StgAP *)p;
380 	checkPAP (ap->fun, ap->payload, ap->n_args);
386 	StgPAP* pap = (StgPAP *)p;
387 	checkPAP (pap->fun, pap->payload, pap->n_args);
388 	return pap_sizeW(pap);
        /* AP_STACK: the payload is itself a stack chunk */
393 	StgAP_STACK *ap = (StgAP_STACK *)p;
394 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
395 	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
396 	return ap_stack_sizeW(ap);
400 	return arr_words_sizeW((StgArrWords *)p);
402     case MUT_ARR_PTRS_CLEAN:
403     case MUT_ARR_PTRS_DIRTY:
404     case MUT_ARR_PTRS_FROZEN:
405     case MUT_ARR_PTRS_FROZEN0:
407 	    StgMutArrPtrs* a = (StgMutArrPtrs *)p;
409 	    for (i = 0; i < a->ptrs; i++) {
410 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
412 	    return mut_arr_ptrs_sizeW(a);
416 	checkTSO((StgTSO *)p);
417 	return tso_sizeW((StgTSO *)p);
        /* the following cases look parallel-RTS (GUM) specific */
422 	ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
423 	ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
424 	return sizeofW(StgBlockedFetch);  // see size used in evacuate()
428 	return sizeofW(StgFetchMe);
432 	ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
433 	return sizeofW(StgFetchMe);  // see size used in evacuate()
436 	checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
437 	return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
440 	/* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
441 	ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
442 	if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
443 	  checkBQ(((StgRBH *)p)->blocking_queue, p);
444 	ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
445 	return BLACKHOLE_sizeW();   // see size used in evacuate()
446 	// sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
        /* STM closure types */
450     case TVAR_WATCH_QUEUE:
452         StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
453         ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
454         ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
455         return sizeofW(StgTVarWatchQueue);
458     case INVARIANT_CHECK_QUEUE:
460         StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
461         ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
462         ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
463         ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
464         return sizeofW(StgInvariantCheckQueue);
467     case ATOMIC_INVARIANT:
469         StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
470         ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
471         ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
472         return sizeofW(StgAtomicInvariant);
477         StgTVar *tv = (StgTVar *)p;
478         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
479         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
480         return sizeofW(StgTVar);
486         StgTRecChunk *tc = (StgTRecChunk *)p;
487         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
488         for (i = 0; i < tc -> next_entry_idx; i ++) {
489           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
490           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
491           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
493         return sizeofW(StgTRecChunk);
498         StgTRecHeader *trec = (StgTRecHeader *)p;
499         ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
500         ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
501         return sizeofW(StgTRecHeader);
        /* EVACUATED closures should never survive to a sanity check */
506 	barf("checkClosure: found EVACUATED closure %d",
509 	    barf("checkClosure (closure type %d)", info->type);
/* PVM task-id layout constants used by the looks_like_* validators below:
 * the high bits (above bit 18) encode the PE, the low bits the per-PE tid. */
515 #define PVM_PE_MASK    0xfffc0000
516 #define MAX_PVM_PES    MAX_PES
517 #define MAX_PVM_TIDS   MAX_PES
518 #define MAX_SLOTS      100000
/* Heuristic validity check for a PVM task id: split into PE part (hi)
 * and per-PE tid part (lo) and range-check both. */
521 looks_like_tid(StgInt tid)
523   StgInt hi = (tid & PVM_PE_MASK) >> 18;
524   StgInt lo = (tid & ~PVM_PE_MASK);
525   rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Heuristic validity check for a GALA slot number (upper bound only). */
530 looks_like_slot(StgInt slot)
532   /* if tid is known better use looks_like_ga!! */
533   rtsBool ok = slot<MAX_SLOTS;
534   // This refers only to the no. of slots on the current PE
535   // rtsBool ok = slot<=highest_slot();
/* Heuristic validity check for a global address: tid must look valid, and
 * the slot bound is tighter (highest_slot()) when the GA is local to us. */
540 looks_like_ga(globalAddr *ga)
542   rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
543   rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
544 		     (ga)->payload.gc.slot<=highest_slot() :
545 		     (ga)->payload.gc.slot<MAX_SLOTS;
546   rtsBool ok = is_tid && is_slot;
553 /* -----------------------------------------------------------------------------
556 After garbage collection, the live heap is in a state where we can
557 run through and check that all the pointers point to the right
558 place. This function starts at a given position and sanity-checks
559 all the objects in the remainder of the chain.
560 -------------------------------------------------------------------------- */
/* Walk a chain of blocks and sanity-check every closure in each block,
 * skipping slop words that don't look like info pointers.
 * Disabled under THREADED_RTS (see comment below). */
563 checkHeap(bdescr *bd)
567 #if defined(THREADED_RTS)
568     // heap sanity checking doesn't work with SMP, because we can't
569     // zero the slop (see Updates.h).
573     for (; bd != NULL; bd = bd->link) {
575 	while (p < bd->free) {
576 	    nat size = checkClosure((StgClosure *)p);
577 	    /* This is the smallest size of closure that can live in the heap */
578 	    ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            /* skip over slop: words that can't be valid info pointers */
582 	    while (p < bd->free &&
583 		   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
590 Check heap between start and end. Used after unpacking graphs.
/* Parallel-RTS variant: check heap between start and end (used after
 * unpacking graphs), with special handling for FETCH_ME closures created
 * during unpacking and for IND slop words. */
593 checkHeapChunk(StgPtr start, StgPtr end)
595   extern globalAddr *LAGAlookup(StgClosure *addr);
599   for (p=start; p<end; p+=size) {
600     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
601     if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
602 	*(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
603       /* if it's a FM created during unpack and commoned up, it's not global */
604       ASSERT(LAGAlookup((StgClosure*)p)==NULL);
605       size = sizeofW(StgFetchMe);
606     } else if (get_itbl((StgClosure*)p)->type == IND) {
607       *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
608       size = sizeofW(StgInd);
        /* otherwise: full closure check, as in the plain variant */
610       size = checkClosure((StgClosure *)p);
611       /* This is the smallest size of closure that can live in the heap. */
612       ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
/* Plain variant: check every closure between start and end. */
618 checkHeapChunk(StgPtr start, StgPtr end)
623   for (p=start; p<end; p+=size) {
624     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
625     size = checkClosure((StgClosure *)p);
626     /* This is the smallest size of closure that can live in the heap. */
627     ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
/* Check the first closure of each block in a chain of blocks. */
633 checkChain(bdescr *bd)
636     checkClosure((StgClosure *)bd->start);
/* Sanity-check a TSO: its stack bounds, blocking state, and (in parallel
 * builds) its link field and magic number.  Relocated and killed threads
 * are skipped.  (Fragmentary view: some branches/braces are missing.) */
642 checkTSO(StgTSO *tso)
645     StgPtr stack = tso->stack;
646     StgOffset stack_size = tso->stack_size;
647     StgPtr stack_end = stack + stack_size;
649     if (tso->what_next == ThreadRelocated) {
654     if (tso->what_next == ThreadKilled) {
655       /* The garbage collector doesn't bother following any pointers
656        * from dead threads, so don't check sanity here.
        /* sp must lie within the stack */
661     ASSERT(stack <= sp && sp < stack_end);
664     ASSERT(tso->par.magic==TSO_MAGIC);
666     switch (tso->why_blocked) {
668       checkClosureShallow(tso->block_info.closure);
669       ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
670 	     get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
672     case BlockedOnGA_NoSend:
673       checkClosureShallow(tso->block_info.closure);
674       ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
676     case BlockedOnBlackHole:
677       checkClosureShallow(tso->block_info.closure);
678       ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
679 	     get_itbl(tso->block_info.closure)->type==RBH);
684 #if defined(mingw32_HOST_OS)
685     case BlockedOnDoProc:
687       /* isOnBQ(blocked_queue) */
689     case BlockedOnException:
690       /* isOnSomeBQ(tso) */
691       ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
694       ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
697       ASSERT(tso->block_info.closure == END_TSO_QUEUE);
701       Could check other values of why_blocked but I am more
702       lazy than paranoid (bad combination) -- HWL
706   /* if the link field is non-nil it must point to one of these
707      three closure types */
708   ASSERT(tso->link == END_TSO_QUEUE ||
709 	 get_itbl(tso->link)->type == TSO ||
710 	 get_itbl(tso->link)->type == BLOCKED_FETCH ||
711 	 get_itbl(tso->link)->type == CONSTR);
714     checkStackChunk(sp, stack_end);
/* GranSim: check every TSO on every processor's run queue. */
719 checkTSOsSanity(void) {
723   debugBelch("Checking sanity of all runnable TSOs:");
725   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
726     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
727       debugBelch("TSO %p on PE %d ...", tso, i);
734   debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* GranSim: check the integrity of one processor's run queue — head/tail
 * consistency and correct back-links along the chain. */
741 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
745   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
746   ASSERT(run_queue_hds[proc]!=NULL);
747   ASSERT(run_queue_tls[proc]!=NULL);
748   /* if either head or tail is NIL then the other one must be NIL, too */
749   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
750   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
751   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
753        prev=tso, tso=tso->link) {
754     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
755 	   (prev==END_TSO_QUEUE || prev->link==tso));
    /* after the loop, 'prev' must be the recorded tail of the queue */
759   ASSERT(prev==run_queue_tls[proc]);
/* GranSim: run checkThreadQSanity over every processor. */
763 checkThreadQsSanity (rtsBool check_TSO_too)
767   for (p=0; p<RtsFlags.GranFlags.proc; p++)
768     checkThreadQSanity(p, check_TSO_too);
773    Check that all TSOs have been evacuated.
774    Optionally also check the sanity of the TSOs.
777 checkGlobalTSOList (rtsBool checkTSOs)
779   extern  StgTSO *all_threads;
781   for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
782       ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
783       ASSERT(get_itbl(tso)->type == TSO);
789 /* -----------------------------------------------------------------------------
790 Check mutable list sanity.
791 -------------------------------------------------------------------------- */
/* Check that every heap-allocated object on the mutable list belongs to
 * the expected generation. */
794 checkMutableList( bdescr *mut_bd, nat gen )
800     for (bd = mut_bd; bd != NULL; bd = bd->link) {
801 	for (q = bd->start; q < bd->free; q++) {
802 	    p = (StgClosure *)*q;
803 	    ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
809    Check the static objects list.
/* Walk the static-objects list (terminated by END_OF_STATIC_LIST),
 * following the per-type static-link field of each object; for IND_STATIC
 * additionally validate the indirectee.  Unknown types are fatal. */
812 checkStaticObjects ( StgClosure* static_objects )
814   StgClosure *p = static_objects;
817   while (p != END_OF_STATIC_LIST) {
820     switch (info->type) {
823 	StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);
825 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
826 	ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
827 	p = *IND_STATIC_LINK((StgClosure *)p);
832       p = *THUNK_STATIC_LINK((StgClosure *)p);
836       p = *FUN_STATIC_LINK((StgClosure *)p);
840       p = *STATIC_LINK(info,(StgClosure *)p);
      /* fixed typo in diagnostic: "checkStaticObjetcs" -> "checkStaticObjects" */
844       barf("checkStaticObjects: strange closure %p (%s)",
851    Check the sanity of a blocking queue starting at bqe with closure being
852    the closure holding the blocking queue.
853    Note that in GUM we can have several different closure types in a
858 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
860   rtsBool end = rtsFalse;
861   StgInfoTable *info = get_itbl(closure);
  /* only these closure types may own a blocking queue in this build */
863   ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
866     switch (get_itbl(bqe)->type) {
869       checkClosure((StgClosure *)bqe);
871       end = (bqe==END_BQ_QUEUE);
875       checkClosure((StgClosure *)bqe);
880       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
881 	   get_itbl(bqe)->type, closure, info_type(closure));
/* Non-parallel variant: blocking queues hang off MVars only and contain
 * TSOs, which are checked one by one until END_BQ_QUEUE. */
887 checkBQ (StgTSO *bqe, StgClosure *closure)
889   rtsBool end = rtsFalse;
890   StgInfoTable *info = get_itbl(closure);
892   ASSERT(info->type == MVAR);
895     switch (get_itbl(bqe)->type) {
898       checkClosure((StgClosure *)bqe);
900       end = (bqe==END_BQ_QUEUE);
904       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
905 	   get_itbl(bqe)->type, closure, info_type(closure));
914 This routine checks the sanity of the LAGA and GALA tables. They are
915 implemented as lists through one hash table, LAtoGALAtable, because entries
916 in both tables have the same structure:
917 - the LAGA table maps local addresses to global addresses; it starts
918 with liveIndirections
919 - the GALA table maps global addresses to local addresses; it starts
926 /* hidden in parallel/Global.c; only accessed for testing here */
927 extern GALA *liveIndirections;
928 extern GALA *liveRemoteGAs;
929 extern HashTable *LAtoGALAtable;
/* Check the LAGA and GALA tables (see the commentary above): every entry
 * must be consistent with the shared LAtoGALAtable hash table, must not
 * form a trivial self-loop, and (optionally) its closure is checked too.
 * NOTE(review): the function continues past the end of this view. */
932 checkLAGAtable(rtsBool check_closures)
935   nat n=0, m=0; // debugging
937   for (gala = liveIndirections; gala != NULL; gala = gala->next) {
939     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
940     ASSERT(!gala->preferred || gala == gala0);
941     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
942     ASSERT(gala->next!=gala); // detect direct loops
943     if ( check_closures ) {
944       checkClosure((StgClosure *)gala->la);
948   for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
950     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
951     ASSERT(!gala->preferred || gala == gala0);
952     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
953     ASSERT(gala->next!=gala); // detect direct loops
955     if ( check_closures ) {
956       checkClosure((StgClosure *)gala->la);