/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
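
/* A typical way to drive these checks from a debugger session with a
 * DEBUG RTS, as a rough editorial sketch only: the generation/step
 * layout is assumed from this era's storage manager and may differ.
 *
 *     nat g, s;
 *     for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
 *         for (s = 0; s < generations[g].n_steps; s++) {
 *             checkHeap(generations[g].steps[s].blocks);
 *         }
 *     }
 */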
#include "PosixSource.h"

#ifdef DEBUG /* whole file */

#include "BlockAlloc.h"
/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */
static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );
/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */
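
/* Note on the bitmap convention used below (an editorial reading aid,
 * not from the original source): in these liveness bitmaps a *clear*
 * bit marks a pointer word and a *set* bit marks a non-pointer.  For
 * example, with size == 4 and bitmap == 0b0110, payload[0] and
 * payload[3] are checked as closure pointers, while payload[1] and
 * payload[2] are skipped as non-pointers.
 */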
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for(i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}
static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}
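
/* Worked example (editorial note): with BITS_IN(W_) == 32, the liveness
 * bit for payload[40] is bit 8 of large_bitmap->bitmap[1], i.e. word
 * i / BITS_IN(W_), bit i % BITS_IN(W_) -- exactly the order in which
 * the nested loops above visit the bits.
 */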
/*
 * Check that it looks like a valid closure, without checking its payload;
 * used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}
// check an individual stack object
static nat
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;
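
        /* Editorial sketch of the layout assumed by the code below: an
         * StgRetDyn header, then RET_DYN_BITMAP_SIZE words whose liveness
         * is given by the small bitmap in r->liveness, then the saved
         * non-pointer registers, then the dynamic non-pointer words, and
         * finally the dynamic pointer words that the loop below follows.
         */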
        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
            RET_DYN_NONPTR_REGS_SIZE +
            RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }
    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        // small bitmap cases (<= 32 entries)
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;
    case RET_BCO: {
        StgBCO *bco;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }
    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;
    case RET_FUN:
    {
        StgRetFun *ret_fun;
        StgFunInfoTable *fun_info;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
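        /* Editorial note on how the argument liveness is found: ARG_GEN
         * keeps a small bitmap in the function's own info table,
         * ARG_GEN_BIG points at a separate StgLargeBitmap, and every
         * other fun_type is a standard argument pattern whose small
         * bitmap is looked up in the static stg_arg_bitmaps[] table.
         */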
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }
    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",
             c, info->i.type);
    }
}
// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}
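
/* A minimal usage sketch (editorial note): checking the live part of a
 * TSO's stack, exactly as checkTSO does further down in this file:
 *
 *     checkStackChunk(tso->sp, tso->stack + tso->stack_size);
 */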
static void
checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *p;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    p = (StgClosure *)payload;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }
}
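
/* Editorial note: for a PAP, n_args is the number of arguments applied
 * so far and may be smaller than the function's arity, so only the
 * first n_args bits of the argument bitmap are consulted above.  The
 * same checker serves both AP and PAP closures (see checkClosure).
 */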
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = get_itbl(p);
    switch (info->type) {
    case MVAR:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
#if defined(PAR)
        checkBQ((StgBlockingQueueElement *)mvar->head, p);
#else
        checkBQ(mvar->head, p);
#endif
        return sizeofW(StgMVar);
    }
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }
    case IND_OLDGEN_PERM:
    case SE_CAF_BLACKHOLE:
    case CONSTR_NOCAF_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }
    case BCO:
    {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }
    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);
    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }
    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();
    case IND:
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
    {
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");
    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }
    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }
    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }
    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);
#if defined(PAR)
    case BLOCKED_FETCH:
        ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
        return sizeofW(StgBlockedFetch);  // see size used in evacuate()

#ifdef DIST
    case REMOTE_REF:
        return sizeofW(StgFetchMe);
#endif /* DIST */

    case FETCH_ME:
        ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
        return sizeofW(StgFetchMe);  // see size used in evacuate()

    case FETCH_ME_BQ:
        checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
        return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
#endif /* PAR */
#if defined(GRAN) || defined(PAR)
    case RBH:
        /* In an RBH the BQ may be empty (i.e. END_BQ_QUEUE) but not NULL */
        ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
        if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
            checkBQ(((StgRBH *)p)->blocking_queue, p);
        ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
        return BLACKHOLE_sizeW();   // see size used in evacuate()
        // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
#endif
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
        return sizeofW(StgTVarWatchQueue);
    }
    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
        return sizeofW(StgInvariantCheckQueue);
    }
    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
        return sizeofW(StgAtomicInvariant);
    }
    case TVAR:
    {
        StgTVar *tv = (StgTVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
        return sizeofW(StgTVar);
    }
    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc -> next_entry_idx; i ++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }
    case TREC_HEADER:
    {
        StgTRecHeader *trec = (StgTRecHeader *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
        return sizeofW(StgTRecHeader);
    }
    case EVACUATED:
        barf("checkClosure: found EVACUATED closure %d",
             info->type);

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}
#if defined(PAR)

#define PVM_PE_MASK    0xfffc0000
#define MAX_PVM_PES    MAX_PES
#define MAX_PVM_TIDS   MAX_PES
#define MAX_SLOTS      100000
rtsBool
looks_like_tid(StgInt tid)
{
    StgInt hi = (tid & PVM_PE_MASK) >> 18;
    StgInt lo = (tid & ~PVM_PE_MASK);
    rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
    return ok;
}
rtsBool
looks_like_slot(StgInt slot)
{
    /* if the tid is known, better to use looks_like_ga! */
    rtsBool ok = slot < MAX_SLOTS;
    // This refers only to the no. of slots on the current PE
    // rtsBool ok = slot <= highest_slot();
    return ok;
}
rtsBool
looks_like_ga(globalAddr *ga)
{
    rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
    rtsBool is_slot = ((ga)->payload.gc.gtid == mytid) ?
                      (ga)->payload.gc.slot <= highest_slot() :
                      (ga)->payload.gc.slot < MAX_SLOTS;
    rtsBool ok = is_tid && is_slot;
    return ok;
}

#endif /* PAR */
/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */
void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap. */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;
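
            /* Editorial note: in a non-threaded DEBUG RTS the slop left
             * behind by updates is zeroed, so anything between here and
             * the next closure that does not look like an info pointer
             * is dead space; the loop below steps over it.
             */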
            /* skip over slop */
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
        }
    }
}
#if defined(PAR)

/*
  Check heap between start and end. Used after unpacking graphs.
*/
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    extern globalAddr *LAGAlookup(StgClosure *addr);
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
            *(p+1) == 0x0000eeee /* i.e. unpack garbage (see SetGAandCommonUp) */) {
            /* if it's a FM created during unpack and commoned up, it's not global */
            ASSERT(LAGAlookup((StgClosure*)p)==NULL);
            size = sizeofW(StgFetchMe);
        } else if (get_itbl((StgClosure*)p)->type == IND) {
            *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
            size = sizeofW(StgInd);
        } else {
            size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap. */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
        }
    }
}

#else /* !PAR */
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

#endif /* PAR */
void
checkChain(bdescr *bd)
{
    while (bd != NULL) {
        checkClosure((StgClosure *)bd->start);
        bd = bd->link;
    }
}
void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(stack <= sp && sp < stack_end);
#if defined(PAR)
    ASSERT(tso->par.magic==TSO_MAGIC);

    switch (tso->why_blocked) {
    case BlockedOnGA:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
               get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
        break;
    case BlockedOnGA_NoSend:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
        break;
    case BlockedOnBlackHole:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
               get_itbl(tso->block_info.closure)->type==RBH);
        break;
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        /* isOnBQ(blocked_queue) */
        break;
    case BlockedOnException:
        /* isOnSomeBQ(tso) */
        ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
        break;
    case BlockedOnMVar:
        ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
        break;
    case BlockedOnSTM:
        ASSERT(tso->block_info.closure == END_TSO_QUEUE);
        break;
    default:
        /*
           Could check other values of why_blocked but I am more
           lazy than paranoid (bad combination) -- HWL
        */
    }

    /* if the link field is non-nil it must point to one of these
       three closure types */
    ASSERT(tso->link == END_TSO_QUEUE ||
           get_itbl(tso->link)->type == TSO ||
           get_itbl(tso->link)->type == BLOCKED_FETCH ||
           get_itbl(tso->link)->type == CONSTR);
#endif

    checkStackChunk(sp, stack_end);
}
#if defined(GRAN)
void
checkTSOsSanity(void) {
    nat i, tsos;
    StgTSO *tso;

    debugBelch("Checking sanity of all runnable TSOs:");

    for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
        for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
            debugBelch("TSO %p on PE %d ...", tso, i);
            checkTSO(tso);
            debugBelch("OK, done\n");
            tsos++;
        }
    }
    debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
}
void
checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
{
    StgTSO *tso, *prev;

    /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
    ASSERT(run_queue_hds[proc]!=NULL);
    ASSERT(run_queue_tls[proc]!=NULL);
    /* if either head or tail is NIL then the other one must be NIL, too */
    ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
    ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
    for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
         tso!=END_TSO_QUEUE;
         prev=tso, tso=tso->link) {
        ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
               (prev==END_TSO_QUEUE || prev->link==tso));
        if (check_TSO_too)
            checkTSO(tso);
    }
    ASSERT(prev==run_queue_tls[proc]);
}
void
checkThreadQsSanity (rtsBool check_TSO_too)
{
    PEs p;

    for (p=0; p<RtsFlags.GranFlags.proc; p++)
        checkThreadQSanity(p, check_TSO_too);
}
#endif /* GRAN */
/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    extern StgTSO *all_threads;
    StgTSO *tso;

    for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
        ASSERT(get_itbl(tso)->type == TSO);
        if (checkTSOs)
            checkTSO(tso);
    }
}
/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */
void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
        }
    }
}
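
/* Editorial note on the invariant being checked: an object lands on
 * generation g's mutable list when it lives in generation g and may
 * point into a younger generation (e.g. a MUT_VAR in gen 1 written
 * with a gen-0 value), so everything on the list must itself reside
 * in generation g; static closures are exempt.
 */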
/* -----------------------------------------------------------------------------
   Check the static objects list.
   -------------------------------------------------------------------------- */
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
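
        /* Editorial note: static closures are chained through a link
         * word stored at a closure-type-specific offset, which is why
         * each case below uses its own *_STATIC_LINK accessor to fetch
         * the next element; the chain ends at END_OF_STATIC_LIST.
         */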
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }
        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;
        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;
        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;
        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}
/*
   Check the sanity of a blocking queue starting at bqe with closure being
   the closure holding the blocking queue.
   Note that in GUM we can have several different closure types in a
   blocking queue.
*/
#if defined(PAR)
void
checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
{
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);

    do {
        switch (get_itbl(bqe)->type) {
        case BLOCKED_FETCH:
        case TSO:
            checkClosure((StgClosure *)bqe);
            bqe = bqe->link;
            end = (bqe==END_BQ_QUEUE);
            break;

        case CONSTR:
            checkClosure((StgClosure *)bqe);
            end = rtsTrue;
            break;

        default:
            barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
                 get_itbl(bqe)->type, closure, info_type(closure));
        }
    } while (!end);
}
#else
void
checkBQ (StgTSO *bqe, StgClosure *closure)
{
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR);

    do {
        switch (get_itbl(bqe)->type) {
        case TSO:
            checkClosure((StgClosure *)bqe);
            bqe = bqe->link;
            end = (bqe==END_BQ_QUEUE);
            break;

        default:
            barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
                 get_itbl(bqe)->type, closure, info_type(closure));
        }
    } while (!end);
}
#endif /* PAR */
/*
  This routine checks the sanity of the LAGA and GALA tables. They are
  implemented as lists through one hash table, LAtoGALAtable, because entries
  in both tables have the same structure:
   - the LAGA table maps local addresses to global addresses; it starts
     with liveIndirections
   - the GALA table maps global addresses to local addresses; it starts
     with liveRemoteGAs
*/

#if defined(PAR)
/* hidden in parallel/Global.c; only accessed for testing here */
extern GALA *liveIndirections;
extern GALA *liveRemoteGAs;
extern HashTable *LAtoGALAtable;
void
checkLAGAtable(rtsBool check_closures)
{
    GALA *gala, *gala0;
    nat n=0, m=0; // debugging

    for (gala = liveIndirections; gala != NULL; gala = gala->next) {
        n++;
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next!=gala); // detect direct loops
        if ( check_closures ) {
            checkClosure((StgClosure *)gala->la);
        }
    }
    for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
        m++;
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next!=gala); // detect direct loops

        if ( check_closures ) {
            checkClosure((StgClosure *)gala->la);
        }
    }
}

#endif /* PAR */

#endif /* DEBUG */