/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
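/*
 * Illustration only: the basic invariant behind the two bullet points above,
 * mirroring what checkClosureShallow()/checkClosure() below actually do.  A
 * word is only accepted as a closure pointer if LOOKS_LIKE_CLOSURE_PTR()
 * holds, and a closure outside the dynamic heap must carry a static info
 * table while one inside it must not:
 *
 *   ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
 *   if (!HEAP_ALLOCED(p)) {
 *       ASSERT(closure_STATIC(p));     // text/data segment => static closure
 *   } else {
 *       ASSERT(!closure_STATIC(p));    // heap => dynamic closure
 *   }
 *
 * This is a sketch of the idea, not a separate entry point.
 */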
#include "PosixSource.h"

#ifdef DEBUG                                                   /* whole file */

#include "BlockAlloc.h"
/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );
/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */
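/*
 * A worked example of how the bitmap walks below are read (added for
 * illustration): bit i of the liveness bitmap describes payload[i].  A 0 bit
 * means "pointer", so the word is passed to checkClosureShallow(); a 1 bit
 * means "non-pointer" and the word is skipped.  For instance, with size == 3
 * and bitmap == 0b010, payload[0] and payload[2] are checked as closure
 * pointers while payload[1] is ignored.
 */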
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
    for(i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);

checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
    for (bmp=0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
/*
 * check that it looks like a valid closure - without checking its payload
 * used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */

checkClosureShallow( StgClosure* p )
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
// check an individual stack object

checkStackFrame( StgPtr c )
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));

    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        // small bitmap cases (<= 32 entries)
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);

        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);

        StgFunInfoTable *fun_info;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(ret_fun->fun);
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
        return sizeofW(StgRetFun) + size;

        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);

// check sections of stack between update frames

checkStackChunk( StgPtr sp, StgPtr stack_end )
    while (p < stack_end) {
        p += checkStackFrame( p );
    // ASSERT( p == stack_end ); -- HWL

checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
    StgFunInfoTable *fun_info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    p = (StgClosure *)payload;
    switch (fun_info->f.fun_type) {
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
        checkLargeBitmap( (StgPtr)payload,
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
checkClosure( StgClosure* p )
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));

    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    switch (info->type) {

        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        checkBQ((StgBlockingQueueElement *)mvar->head, p);
        checkBQ(mvar->head, p);
        return sizeofW(StgMVar);

        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        return thunk_sizeW_fromITBL(info);

    case IND_OLDGEN_PERM:
    case SE_CAF_BLACKHOLE:
    case CONSTR_NOCAF_STATIC:
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        return sizeW_fromITBL(info);

        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
        { StgWeak *w = (StgWeak *)p;
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
          return sizeW_fromITBL(info);

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);

        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);

        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);

        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        return mut_arr_ptrs_sizeW(a);

        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);

        ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
        return sizeofW(StgBlockedFetch);  // see size used in evacuate()

        return sizeofW(StgFetchMe);

        ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
        return sizeofW(StgFetchMe);  // see size used in evacuate()

        checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
        return sizeofW(StgFetchMeBlockingQueue);  // see size used in evacuate()

        /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
        ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
        if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
            checkBQ(((StgRBH *)p)->blocking_queue, p);
        ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
        return BLACKHOLE_sizeW();  // see size used in evacuate()
        // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
    case TVAR_WATCH_QUEUE:
        StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
        return sizeofW(StgTVarWatchQueue);

    case INVARIANT_CHECK_QUEUE:
        StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
        return sizeofW(StgInvariantCheckQueue);

    case ATOMIC_INVARIANT:
        StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
        return sizeofW(StgAtomicInvariant);

        StgTVar *tv = (StgTVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
        return sizeofW(StgTVar);

        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc -> next_entry_idx; i ++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        return sizeofW(StgTRecChunk);

        StgTRecHeader *trec = (StgTRecHeader *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
        return sizeofW(StgTRecHeader);

        barf("checkClosure: found EVACUATED closure %d",

        barf("checkClosure (closure type %d)", info->type);
#define PVM_PE_MASK    0xfffc0000
#define MAX_PVM_PES    MAX_PES
#define MAX_PVM_TIDS   MAX_PES
#define MAX_SLOTS      100000

looks_like_tid(StgInt tid)
    StgInt hi = (tid & PVM_PE_MASK) >> 18;
    StgInt lo = (tid & ~PVM_PE_MASK);
    rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);

looks_like_slot(StgInt slot)
    /* if the tid is known, it is better to use looks_like_ga!! */
    rtsBool ok = slot<MAX_SLOTS;
    // This refers only to the no. of slots on the current PE
    // rtsBool ok = slot<=highest_slot();

looks_like_ga(globalAddr *ga)
    rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
    rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
                      (ga)->payload.gc.slot<=highest_slot() :
                      (ga)->payload.gc.slot<MAX_SLOTS;
    rtsBool ok = is_tid && is_slot;
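/*
 * Worked example (illustration only) of the PVM tid layout assumed above:
 * the field selected by PVM_PE_MASK (0xfffc0000) identifies the PE, and the
 * low 18 bits hold the per-PE number.  For tid == 0x000c0005 we get
 * hi == (0x000c0000 >> 18) == 3 and lo == 5, so looks_like_tid() accepts it
 * as long as both values stay below MAX_PVM_TIDS and hi is non-zero.
 */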
/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */
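/*
 * A minimal usage sketch (hypothetical caller, not part of this file): after
 * a GC one might sanity-check every block chain that makes up the live heap,
 * e.g.
 *
 *   nat g, s;
 *   for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
 *       for (s = 0; s < generations[g].n_steps; s++) {
 *           checkHeap(generations[g].steps[s].blocks);
 *       }
 *   }
 *
 * The generation/step field names are assumptions about the surrounding
 * storage manager; only the call shape of checkHeap() is taken from below.
 */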
checkHeap(bdescr *bd)
#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).

    for (; bd != NULL; bd = bd->link) {
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );

            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
/* -----------------------------------------------------------------------------
   Check heap between start and end. Used after unpacking graphs.
   -------------------------------------------------------------------------- */

checkHeapChunk(StgPtr start, StgPtr end)
    extern globalAddr *LAGAlookup(StgClosure *addr);

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
            *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
            /* if it's a FM created during unpack and commoned up, it's not global */
            ASSERT(LAGAlookup((StgClosure*)p)==NULL);
            size = sizeofW(StgFetchMe);
        } else if (get_itbl((StgClosure*)p)->type == IND) {
            *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
            size = sizeofW(StgInd);
        } else {
            size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap. */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
checkHeapChunk(StgPtr start, StgPtr end)
    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );

checkChain(bdescr *bd)
    checkClosure((StgClosure *)bd->start);
checkTSO(StgTSO *tso)
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */

    ASSERT(stack <= sp && sp < stack_end);

    ASSERT(tso->par.magic==TSO_MAGIC);

    switch (tso->why_blocked) {
        checkClosureShallow(tso->block_info.closure);
        ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
               get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);

    case BlockedOnGA_NoSend:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);

    case BlockedOnBlackHole:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
               get_itbl(tso->block_info.closure)->type==RBH);

#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
        /* isOnBQ(blocked_queue) */

    case BlockedOnException:
        /* isOnSomeBQ(tso) */
        ASSERT(get_itbl(tso->block_info.tso)->type==TSO);

        ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);

        ASSERT(tso->block_info.closure == END_TSO_QUEUE);

        /*
           Could check other values of why_blocked but I am more
           lazy than paranoid (bad combination) -- HWL
        */

    /* if the link field is non-nil it must point to one of these
       three closure types */
    ASSERT(tso->link == END_TSO_QUEUE ||
           get_itbl(tso->link)->type == TSO ||
           get_itbl(tso->link)->type == BLOCKED_FETCH ||
           get_itbl(tso->link)->type == CONSTR);

    checkStackChunk(sp, stack_end);
checkTSOsSanity(void) {
    debugBelch("Checking sanity of all runnable TSOs:");

    for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
        for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
            debugBelch("TSO %p on PE %d ...", tso, i);

    debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);

checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
    /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
    ASSERT(run_queue_hds[proc]!=NULL);
    ASSERT(run_queue_tls[proc]!=NULL);
    /* if either head or tail is NIL then the other one must be NIL, too */
    ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
    ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
    for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
         prev=tso, tso=tso->link) {
        ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
               (prev==END_TSO_QUEUE || prev->link==tso));

    ASSERT(prev==run_queue_tls[proc]);

checkThreadQsSanity (rtsBool check_TSO_too)
    for (p=0; p<RtsFlags.GranFlags.proc; p++)
        checkThreadQSanity(p, check_TSO_too);
/* -----------------------------------------------------------------------------
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
   -------------------------------------------------------------------------- */

checkGlobalTSOList (rtsBool checkTSOs)
    extern StgTSO *all_threads;

    for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
        ASSERT(get_itbl(tso)->type == TSO);
/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */
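/*
 * A sketch of how this check might be driven (hypothetical caller, field
 * names assumed): every heap object on a generation's mutable list must
 * itself live in that generation (or be a static object), which is exactly
 * what checkMutableList() below asserts block by block.
 *
 *   nat g;
 *   for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
 *       checkMutableList(generations[g].mut_list, g);   // mut_list name assumed
 *   }
 */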
checkMutableList( bdescr *mut_bd, nat gen )
    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
/* -----------------------------------------------------------------------------
   Check the static objects list.
   -------------------------------------------------------------------------- */

checkStaticObjects ( StgClosure* static_objects )
    StgClosure *p = static_objects;

    while (p != END_OF_STATIC_LIST) {
        switch (info->type) {

            StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);

            p = *THUNK_STATIC_LINK((StgClosure *)p);

            p = *FUN_STATIC_LINK((StgClosure *)p);

            p = *STATIC_LINK(info,(StgClosure *)p);

            barf("checkStaticObjects: strange closure %p (%s)",
/* -----------------------------------------------------------------------------
   Check the sanity of a blocking queue starting at bqe with closure being
   the closure holding the blocking queue.
   Note that in GUM we can have several different closure types in a
   blocking queue.
   -------------------------------------------------------------------------- */

checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);

    switch (get_itbl(bqe)->type) {
        checkClosure((StgClosure *)bqe);
        end = (bqe==END_BQ_QUEUE);

        checkClosure((StgClosure *)bqe);

        barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
             get_itbl(bqe)->type, closure, info_type(closure));
checkBQ (StgTSO *bqe, StgClosure *closure)
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR);

    switch (get_itbl(bqe)->type) {
        checkClosure((StgClosure *)bqe);
        end = (bqe==END_BQ_QUEUE);

        barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
             get_itbl(bqe)->type, closure, info_type(closure));
/* -----------------------------------------------------------------------------
   This routine checks the sanity of the LAGA and GALA tables. They are
   implemented as lists through one hash table, LAtoGALAtable, because entries
   in both tables have the same structure:
    - the LAGA table maps local addresses to global addresses; it starts
      with liveIndirections
    - the GALA table maps global addresses to local addresses; it starts
      with liveRemoteGAs
   -------------------------------------------------------------------------- */

/* hidden in parallel/Global.c; only accessed for testing here */
extern GALA *liveIndirections;
extern GALA *liveRemoteGAs;
extern HashTable *LAtoGALAtable;
checkLAGAtable(rtsBool check_closures)
    nat n=0, m=0; // debugging

    for (gala = liveIndirections; gala != NULL; gala = gala->next) {
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next!=gala);  // detect direct loops
        if ( check_closures ) {
            checkClosure((StgClosure *)gala->la);

    for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next!=gala);  // detect direct loops

        if ( check_closures ) {
            checkClosure((StgClosure *)gala->la);