/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"

#ifdef DEBUG /* whole file */
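
/* Everything below is compiled only when the RTS is built with DEBUG
 * defined.  At run time these heap/stack checks are normally driven by
 * the sanity-checking debug flag (+RTS -DS). */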
#include "BlockAlloc.h"
/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void  checkClosureShallow ( StgClosure * );

/* -----------------------------------------------------------------------------
   Check stack sanity.
   -------------------------------------------------------------------------- */
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
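    /* In a liveness bitmap a clear bit (0) marks a pointer word and a set
     * bit marks a non-pointer, so only the 0-bit words are checked as
     * closure pointers below. */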
    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
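    /* A large bitmap is stored as an array of StgWords; walk it one word at
     * a time, consuming up to BITS_IN(W_) payload entries per bitmap word. */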
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
/*
 * check that it looks like a valid closure - without checking its payload;
 * used to avoid recursion between checking PAPs and checking stack chunks
 */
checkClosureShallow( StgClosure* p )
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
// check an individual stack object
checkStackFrame( StgPtr c )
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
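        /* A RET_DYN frame's payload is laid out as: the bitmapped register
         * save area (RET_DYN_BITMAP_SIZE words), the non-pointer register
         * save area (RET_DYN_NONPTR_REGS_SIZE words), then RET_DYN_NONPTRS(dyn)
         * non-pointer words followed by RET_DYN_PTRS(dyn) pointer words --
         * which is exactly the size returned at the end of this case. */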
        p = (P_)(r->payload);
        checkSmallBitmap(p, RET_DYN_LIVENESS(r->liveness), RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        // small bitmap cases (<= 32 entries)
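        // The bitmapped payload starts one word past the frame's return
        // address, hence the (StgPtr)c + 1 below.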
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        StgFunInfoTable *fun_info;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(ret_fun->fun);
        size = ret_fun->size;
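        // ret_fun->size is the number of payload words in the frame; the
        // function's argument bitmap determines which of them are pointers.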
        switch (fun_info->f.fun_type) {
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);

            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);

            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),

        return sizeofW(StgRetFun) + size;

        barf("checkStackFrame: weird activation record found on stack (%p %d).", c, info->i.type);
// check sections of stack between update frames
checkStackChunk( StgPtr sp, StgPtr stack_end )

    while (p < stack_end) {
        p += checkStackFrame( p );

    // ASSERT( p == stack_end ); -- HWL
checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
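    /* n_args is the number of argument words stored in the PAP/AP; they are
     * laid out just as they would be on the stack, so the function's argument
     * bitmap (small, large, or one of the standard patterns in
     * stg_arg_bitmaps) tells us which words are pointers. */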
    StgFunInfoTable *fun_info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    p = (StgClosure *)payload;
    switch (fun_info->f.fun_type) {
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );

        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),

        checkLargeBitmap( (StgPtr)payload,

        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
checkClosure( StgClosure* p )
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));

    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));

    switch (info->type) {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
#if defined(PAR)
        checkBQ((StgBlockingQueueElement *)mvar->head, p);
#else
        checkBQ(mvar->head, p);
#endif
        return sizeofW(StgMVar);
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        return thunk_sizeW_fromITBL(info);
    case IND_OLDGEN_PERM:
    case SE_CAF_BLACKHOLE:
    case CONSTR_NOCAF_STATIC:
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        return sizeW_fromITBL(info);
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
        { StgWeak *w = (StgWeak *)p;
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));

          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));

          return sizeW_fromITBL(info);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);

        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);

        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;

        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        return mut_arr_ptrs_sizeW(a);
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);
        ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
        return sizeofW(StgBlockedFetch);  // see size used in evacuate()

        return sizeofW(StgFetchMe);

        ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
        return sizeofW(StgFetchMe);  // see size used in evacuate()

        checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
        return sizeofW(StgFetchMeBlockingQueue);  // see size used in evacuate()

        /* In an RBH the BQ may be empty (i.e. END_BQ_QUEUE) but not NULL */
        ASSERT(((StgRBH *)p)->blocking_queue != NULL);
        if (((StgRBH *)p)->blocking_queue != END_BQ_QUEUE)
            checkBQ(((StgRBH *)p)->blocking_queue, p);
        ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
        return BLACKHOLE_sizeW();  // see size used in evacuate()
        // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
    case TVAR_WATCH_QUEUE:
        StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
        return sizeofW(StgTVarWatchQueue);

    case INVARIANT_CHECK_QUEUE:
        StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
        return sizeofW(StgInvariantCheckQueue);

    case ATOMIC_INVARIANT:
        StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
        return sizeofW(StgAtomicInvariant);

        StgTVar *tv = (StgTVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
        return sizeofW(StgTVar);

        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        return sizeofW(StgTRecChunk);

        StgTRecHeader *trec = (StgTRecHeader *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->enclosing_trec));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->current_chunk));
        return sizeofW(StgTRecHeader);
        barf("checkClosure: found EVACUATED closure %d",

        barf("checkClosure (closure type %d)", info->type);
#define PVM_PE_MASK    0xfffc0000
#define MAX_PVM_PES    MAX_PES
#define MAX_PVM_TIDS   MAX_PES
#define MAX_SLOTS      100000

looks_like_tid(StgInt tid)
    StgInt hi = (tid & PVM_PE_MASK) >> 18;
    StgInt lo = (tid & ~PVM_PE_MASK);
    rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);

looks_like_slot(StgInt slot)
    /* if the tid is known, better to use looks_like_ga!! */
    rtsBool ok = slot < MAX_SLOTS;
    // This refers only to the no. of slots on the current PE
    // rtsBool ok = slot <= highest_slot();

looks_like_ga(globalAddr *ga)
    rtsBool is_tid  = looks_like_tid((ga)->payload.gc.gtid);
    rtsBool is_slot = ((ga)->payload.gc.gtid == mytid) ?
                      (ga)->payload.gc.slot <= highest_slot() :
                      (ga)->payload.gc.slot < MAX_SLOTS;
    rtsBool ok = is_tid && is_slot;
/* -----------------------------------------------------------------------------
   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */
checkHeap(bdescr *bd)

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
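
            /* Skip over any slop after the closure: updates can leave dead
             * words behind that no longer look like info pointers, so advance
             * until the next plausible info pointer (cf. the note above about
             * not being able to zero the slop under THREADED_RTS). */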
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
/*
   Check heap between start and end.  Used after unpacking graphs.
*/
checkHeapChunk(StgPtr start, StgPtr end)
    extern globalAddr *LAGAlookup(StgClosure *addr);

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
            *(p+1) == 0x0000eeee /* i.e. unpack garbage (see SetGAandCommonUp) */) {
            /* if it's a FM created during unpack and commoned up, it's not global */
            ASSERT(LAGAlookup((StgClosure*)p) == NULL);
            size = sizeofW(StgFetchMe);
        } else if (get_itbl((StgClosure*)p)->type == IND) {
            *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
            size = sizeofW(StgInd);
        } else {
            size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap. */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
checkHeapChunk(StgPtr start, StgPtr end)

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
checkChain(bdescr *bd)
    checkClosure((StgClosure *)bd->start);
checkTSO(StgTSO *tso)
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */

    ASSERT(stack <= sp && sp < stack_end);
    ASSERT(tso->par.magic == TSO_MAGIC);

    switch (tso->why_blocked) {
        checkClosureShallow(tso->block_info.closure);
        ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
               get_itbl(tso->block_info.closure)->type == FETCH_ME_BQ);

    case BlockedOnGA_NoSend:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type == FETCH_ME_BQ);

    case BlockedOnBlackHole:
        checkClosureShallow(tso->block_info.closure);
        ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE ||
               get_itbl(tso->block_info.closure)->type == RBH);

#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        /* isOnBQ(blocked_queue) */

    case BlockedOnException:
        /* isOnSomeBQ(tso) */
        ASSERT(get_itbl(tso->block_info.tso)->type == TSO);

        ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);

        ASSERT(tso->block_info.closure == END_TSO_QUEUE);
    /*
       Could check other values of why_blocked but I am more
       lazy than paranoid (bad combination) -- HWL
    */

    /* if the link field is non-nil it must point to one of these
       three closure types */
    ASSERT(tso->link == END_TSO_QUEUE ||
           get_itbl(tso->link)->type == TSO ||
           get_itbl(tso->link)->type == BLOCKED_FETCH ||
           get_itbl(tso->link)->type == CONSTR);
    checkStackChunk(sp, stack_end);
checkTSOsSanity(void) {
    debugBelch("Checking sanity of all runnable TSOs:");

    for (i = 0, tsos = 0; i < RtsFlags.GranFlags.proc; i++) {
        for (tso = run_queue_hds[i]; tso != END_TSO_QUEUE; tso = tso->link) {
            debugBelch("TSO %p on PE %d ...", tso, i);

    debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
checkThreadQSanity (PEs proc, rtsBool check_TSO_too)

    /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
    ASSERT(run_queue_hds[proc] != NULL);
    ASSERT(run_queue_tls[proc] != NULL);
    /* if either head or tail is NIL then the other one must be NIL, too */
    ASSERT(run_queue_hds[proc] != END_TSO_QUEUE || run_queue_tls[proc] == END_TSO_QUEUE);
    ASSERT(run_queue_tls[proc] != END_TSO_QUEUE || run_queue_hds[proc] == END_TSO_QUEUE);
    for (tso = run_queue_hds[proc], prev = END_TSO_QUEUE;
         prev = tso, tso = tso->link) {
        ASSERT((prev != END_TSO_QUEUE || tso == run_queue_hds[proc]) &&
               (prev == END_TSO_QUEUE || prev->link == tso));

    ASSERT(prev == run_queue_tls[proc]);
checkThreadQsSanity (rtsBool check_TSO_too)

    for (p = 0; p < RtsFlags.GranFlags.proc; p++)
        checkThreadQSanity(p, check_TSO_too);
/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
checkGlobalTSOList (rtsBool checkTSOs)
    extern StgTSO *all_threads;

    for (tso = all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
        ASSERT(get_itbl(tso)->type == TSO);
/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

checkMutableList( bdescr *mut_bd, nat gen )
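    /* Every object reachable from generation gen's mutable list should
     * either be a static closure or live in a block that belongs to that
     * same generation; anything else means the mutable list is corrupted. */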
    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
/* -----------------------------------------------------------------------------
   Check the static objects list.
   -------------------------------------------------------------------------- */

checkStaticObjects ( StgClosure* static_objects )
    StgClosure *p = static_objects;

    while (p != END_OF_STATIC_LIST) {
        switch (info->type) {
            StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);

            p = *THUNK_STATIC_LINK((StgClosure *)p);

            p = *FUN_STATIC_LINK((StgClosure *)p);

            p = *STATIC_LINK(info, (StgClosure *)p);

            barf("checkStaticObjects: strange closure %p (%s)",
/*
   Check the sanity of a blocking queue starting at bqe with closure being
   the closure holding the blocking queue.
   Note that in GUM we can have several different closure types in a
   blocking queue.
*/
checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);

    switch (get_itbl(bqe)->type) {
        checkClosure((StgClosure *)bqe);

        end = (bqe == END_BQ_QUEUE);

        checkClosure((StgClosure *)bqe);

        barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
             get_itbl(bqe)->type, closure, info_type(closure));
checkBQ (StgTSO *bqe, StgClosure *closure)
    rtsBool end = rtsFalse;
    StgInfoTable *info = get_itbl(closure);

    ASSERT(info->type == MVAR);

    switch (get_itbl(bqe)->type) {
        checkClosure((StgClosure *)bqe);

        end = (bqe == END_BQ_QUEUE);

        barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
             get_itbl(bqe)->type, closure, info_type(closure));
/*
   This routine checks the sanity of the LAGA and GALA tables. They are
   implemented as lists through one hash table, LAtoGALAtable, because entries
   in both tables have the same structure:
    - the LAGA table maps local addresses to global addresses; it starts
      with liveIndirections
    - the GALA table maps global addresses to local addresses; it starts
      with liveRemoteGAs
*/

/* hidden in parallel/Global.c; only accessed for testing here */
extern GALA *liveIndirections;
extern GALA *liveRemoteGAs;
extern HashTable *LAtoGALAtable;
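
/* checkLAGAtable walks both lists: for each entry it checks that the hash
 * table lookup on the local address is consistent with the entry (a preferred
 * entry must be the one stored in LAtoGALAtable), that the local closure
 * still has a plausible info pointer, that the list does not loop on itself,
 * and, if check_closures is set, that the closure passes checkClosure. */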
checkLAGAtable(rtsBool check_closures)

    nat n = 0, m = 0; // debugging

    for (gala = liveIndirections; gala != NULL; gala = gala->next) {
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next != gala); // detect direct loops
        if ( check_closures ) {
            checkClosure((StgClosure *)gala->la);

    for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
        gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
        ASSERT(!gala->preferred || gala == gala0);
        ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
        ASSERT(gala->next != gala); // detect direct loops

        if ( check_closures ) {
            checkClosure((StgClosure *)gala->la);