1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2001
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
/* Walk SIZE words of PAYLOAD under a one-word bitmap.  The bitmap is
 * consumed LSB-first; a CLEAR bit (0) marks a pointer word, which is
 * shallow-checked.  (Set bits are non-pointers and are skipped.)
 */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49 for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 if ((bitmap & 1) == 0) {
51 checkClosureShallow((StgClosure *)payload[i]);
/* As checkSmallBitmap, but for bitmaps longer than one word: the outer
 * loop steps through the words of large_bitmap->bitmap[], the inner loop
 * consumes up to BITS_IN(W_) bits of the current word, LSB-first.  A clear
 * bit again marks a pointer word to be shallow-checked.
 */
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63 for (bmp=0; i < size; bmp++) {
64 StgWord bitmap = large_bitmap->bitmap[bmp];
66 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 if ((bitmap & 1) == 0) {
68 checkClosureShallow((StgClosure *)payload[i]);
75 * check that it looks like a valid closure - without checking its payload
76 * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
83 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
85 /* Is it a static closure? */
/* Static (non-heap-allocated) closures must carry the static flag in
 * their info table, and heap-allocated ones must not. */
86 if (!HEAP_ALLOCED(p)) {
87 ASSERT(closure_STATIC(p));
89 ASSERT(!closure_STATIC(p));
93 // check an individual stack object
/* Sanity-checks a single activation record at C and returns its total
 * size in words, so checkStackChunk can step to the next frame. */
95 checkStackFrame( StgPtr c )
98 const StgRetInfoTable* info;
100 info = get_ret_itbl((StgClosure *)c);
102 /* All activation records have 'bitmap' style layout info. */
103 switch (info->i.type) {
104 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
/* First check the words covered by the fixed-size liveness bitmap,
 * then step past the bitmap and saved non-pointer registers. */
113 p = (P_)(r->payload);
114 checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
115 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
117 // skip over the non-pointers
118 p += RET_DYN_NONPTRS(dyn);
120 // follow the ptr words
121 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
122 checkClosureShallow((StgClosure *)*p);
/* Frame size = fixed header + bitmap + saved regs + dynamic parts. */
126 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
127 RET_DYN_NONPTR_REGS_SIZE +
128 RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
132 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
133 case ATOMICALLY_FRAME:
134 case CATCH_RETRY_FRAME:
135 case CATCH_STM_FRAME:
137 // small bitmap cases (<= 32 entries)
141 size = BITMAP_SIZE(info->i.layout.bitmap);
142 checkSmallBitmap((StgPtr)c + 1,
143 BITMAP_BITS(info->i.layout.bitmap), size);
/* RET_BCO: the word after the return address is the BCO, which
 * carries its own (large) bitmap; payload begins at c+2. */
149 bco = (StgBCO *)*(c+1);
150 size = BCO_BITMAP_SIZE(bco);
151 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
155 case RET_BIG: // large bitmap (> 32 entries)
157 size = GET_LARGE_BITMAP(&info->i)->size;
158 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
/* RET_FUN: layout of the saved arguments is dictated by the
 * function's own info table, not the frame's. */
163 StgFunInfoTable *fun_info;
166 ret_fun = (StgRetFun *)c;
167 fun_info = get_fun_itbl(ret_fun->fun);
168 size = ret_fun->size;
169 switch (fun_info->f.fun_type) {
171 checkSmallBitmap((StgPtr)ret_fun->payload,
172 BITMAP_BITS(fun_info->f.b.bitmap), size);
175 checkLargeBitmap((StgPtr)ret_fun->payload,
176 GET_FUN_LARGE_BITMAP(fun_info), size);
/* Standard argument patterns: look the bitmap up by fun_type. */
179 checkSmallBitmap((StgPtr)ret_fun->payload,
180 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
184 return sizeofW(StgRetFun) + size;
188 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
192 // check sections of stack between update frames
/* Steps frame-by-frame from sp towards stack_end, letting
 * checkStackFrame validate each frame and report its size. */
194 checkStackChunk( StgPtr sp, StgPtr stack_end )
199 while (p < stack_end) {
200 p += checkStackFrame( p );
202 // ASSERT( p == stack_end ); -- HWL
/* Check the N_ARGS argument words stored in a PAP/AP payload.  Which
 * words are pointers is determined by FUN's argument bitmap: small or
 * large bitmap from the function's info table, or a standard pattern
 * looked up in stg_arg_bitmaps by fun_type.
 */
206 checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
209 StgFunInfoTable *fun_info;
211 ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
212 fun_info = get_fun_itbl(fun);
214 p = (StgClosure *)payload;
215 switch (fun_info->f.fun_type) {
217 checkSmallBitmap( (StgPtr)payload,
218 BITMAP_BITS(fun_info->f.b.bitmap), n_args );
221 checkLargeBitmap( (StgPtr)payload,
222 GET_FUN_LARGE_BITMAP(fun_info),
226 checkLargeBitmap( (StgPtr)payload,
231 checkSmallBitmap( (StgPtr)payload,
232 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
/* Sanity-check a single closure and return its size in words, so that
 * heap-walking callers (checkHeap, checkHeapChunk, checkChain) can step
 * to the next object.  Dispatches on the closure type from the info
 * table; each case asserts that every pointer field looks like a valid
 * closure pointer.
 */
240 checkClosure( StgClosure* p )
242 const StgInfoTable *info;
244 ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
246 /* Is it a static closure (i.e. in the data segment)? */
247 if (!HEAP_ALLOCED(p)) {
248 ASSERT(closure_STATIC(p));
250 ASSERT(!closure_STATIC(p));
254 switch (info->type) {
/* MVar: head/tail of the blocked-TSO queue plus the stored value. */
258 StgMVar *mvar = (StgMVar *)p;
259 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
260 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
261 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
264 checkBQ((StgBlockingQueueElement *)mvar->head, p);
266 checkBQ(mvar->head, p);
269 return sizeofW(StgMVar);
/* Thunks: check the pointer portion of the payload; the size is at
 * least the minimum updatable closure size. */
280 for (i = 0; i < info->layout.payload.ptrs; i++) {
281 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
283 return stg_max(thunk_sizeW_fromITBL(info), sizeofW(StgHeader)+MIN_UPD_SIZE);
300 case IND_OLDGEN_PERM:
303 case SE_CAF_BLACKHOLE:
310 case CONSTR_CHARLIKE:
312 case CONSTR_NOCAF_STATIC:
/* Generic case: pointers-first layout described by the info table. */
317 for (i = 0; i < info->layout.payload.ptrs; i++) {
318 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
320 return sizeW_fromITBL(info);
/* BCO: all four component arrays must be valid closures. */
324 StgBCO *bco = (StgBCO *)p;
325 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
326 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
327 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
328 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
329 return bco_sizeW(bco);
332 case IND_STATIC: /* (1, 0) closure */
333 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
334 return sizeW_fromITBL(info);
337 /* deal with these specially - the info table isn't
338 * representative of the actual layout.
340 { StgWeak *w = (StgWeak *)p;
341 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
342 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
343 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
345 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
347 return sizeW_fromITBL(info);
351 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
352 return THUNK_SELECTOR_sizeW();
356 /* we don't expect to see any of these after GC
357 * but they might appear during execution
360 StgInd *ind = (StgInd *)p;
361 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
362 q = (P_)p + sizeofW(StgInd);
363 while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
/* Stack frames should never be found free-standing in the heap. */
376 case ATOMICALLY_FRAME:
377 case CATCH_RETRY_FRAME:
378 case CATCH_STM_FRAME:
379 barf("checkClosure: stack frame");
/* AP/PAP: argument words are validated against fun's arg bitmap. */
383 StgAP* ap = (StgAP *)p;
384 checkPAP (ap->fun, ap->payload, ap->n_args);
390 StgPAP* pap = (StgPAP *)p;
391 checkPAP (pap->fun, pap->payload, pap->n_args);
392 return pap_sizeW(pap);
/* AP_STACK: the payload is a chunk of stack; check it as such. */
397 StgAP_STACK *ap = (StgAP_STACK *)p;
398 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
399 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
400 return ap_stack_sizeW(ap);
404 return arr_words_sizeW((StgArrWords *)p);
407 case MUT_ARR_PTRS_FROZEN:
408 case MUT_ARR_PTRS_FROZEN0:
410 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
412 for (i = 0; i < a->ptrs; i++) {
413 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
415 return mut_arr_ptrs_sizeW(a);
419 checkTSO((StgTSO *)p);
420 return tso_sizeW((StgTSO *)p);
/* PAR-only closure types (BLOCKED_FETCH, FETCH_ME, etc.). */
425 ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
426 ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
427 return sizeofW(StgBlockedFetch); // see size used in evacuate()
431 return sizeofW(StgFetchMe);
435 ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
436 return sizeofW(StgFetchMe); // see size used in evacuate()
439 checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
440 return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
443 /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
444 ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
445 if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
446 checkBQ(((StgRBH *)p)->blocking_queue, p);
447 ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
448 return BLACKHOLE_sizeW(); // see size used in evacuate()
449 // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
/* STM closure types: wait queues, TVars, TRec chunks/headers. */
453 case TVAR_WAIT_QUEUE:
455 StgTVarWaitQueue *wq = (StgTVarWaitQueue *)p;
456 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
457 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
458 return sizeofW(StgTVarWaitQueue);
463 StgTVar *tv = (StgTVar *)p;
464 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
465 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_wait_queue_entry));
466 return sizeofW(StgTVar);
472 StgTRecChunk *tc = (StgTRecChunk *)p;
473 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
474 for (i = 0; i < tc -> next_entry_idx; i ++) {
475 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
476 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
477 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
479 return sizeofW(StgTRecChunk);
484 StgTRecHeader *trec = (StgTRecHeader *)p;
485 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
486 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
487 return sizeofW(StgTRecHeader);
492 barf("checkClosure: found EVACUATED closure %d",
495 barf("checkClosure (closure type %d)", info->type);
/* PAR-only heuristics for validating PVM global addresses.  A PVM task
 * id packs the PE number in the high bits (above bit 18, per
 * PVM_PE_MASK) and a per-PE task number in the low bits. */
501 #define PVM_PE_MASK 0xfffc0000
502 #define MAX_PVM_PES MAX_PES
503 #define MAX_PVM_TIDS MAX_PES
504 #define MAX_SLOTS 100000
/* Plausibility check for a PVM task id: nonzero PE part, both halves
 * within the configured maximum. */
507 looks_like_tid(StgInt tid)
509 StgInt hi = (tid & PVM_PE_MASK) >> 18;
510 StgInt lo = (tid & ~PVM_PE_MASK);
511 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Plausibility check for a GA slot number alone (weaker than
 * looks_like_ga, which can also bound-check against highest_slot). */
516 looks_like_slot(StgInt slot)
518 /* if tid is known better use looks_like_ga!! */
519 rtsBool ok = slot<MAX_SLOTS;
520 // This refers only to the no. of slots on the current PE
521 // rtsBool ok = slot<=highest_slot();
/* Full plausibility check for a global address: valid tid plus a slot
 * bounded by highest_slot() when the GA is local (gtid==mytid). */
526 looks_like_ga(globalAddr *ga)
528 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
529 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
530 (ga)->payload.gc.slot<=highest_slot() :
531 (ga)->payload.gc.slot<MAX_SLOTS;
532 rtsBool ok = is_tid && is_slot;
539 /* -----------------------------------------------------------------------------
542 After garbage collection, the live heap is in a state where we can
543 run through and check that all the pointers point to the right
544 place. This function starts at a given position and sanity-checks
545 all the objects in the remainder of the chain.
546 -------------------------------------------------------------------------- */
/* Walks every block in the chain, checking each closure in turn; after
 * each object it skips over any slop words that don't look like info
 * pointers before resuming. */
549 checkHeap(bdescr *bd)
553 for (; bd != NULL; bd = bd->link) {
555 while (p < bd->free) {
556 nat size = checkClosure((StgClosure *)p);
557 /* This is the smallest size of closure that can live in the heap */
558 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Skip slop: words that are small integers or otherwise don't look
 * like info pointers cannot start a closure. */
562 while (p < bd->free &&
563 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
570 Check heap between start and end. Used after unpacking graphs.
/* PAR variant: tolerates FETCH_ME closures created and commoned-up
 * during graph unpacking (marked with the 0x0000eeee sentinel), and
 * deliberately stamps IND slop words with 0x0000ee11. */
573 checkHeapChunk(StgPtr start, StgPtr end)
575 extern globalAddr *LAGAlookup(StgClosure *addr);
579 for (p=start; p<end; p+=size) {
580 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
581 if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
582 *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
583 /* if it's a FM created during unpack and commoned up, it's not global */
584 ASSERT(LAGAlookup((StgClosure*)p)==NULL);
585 size = sizeofW(StgFetchMe);
586 } else if (get_itbl((StgClosure*)p)->type == IND) {
587 *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
590 size = checkClosure((StgClosure *)p);
591 /* This is the smallest size of closure that can live in the heap. */
592 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Non-PAR variant: simply check every closure between start and end,
 * stepping by each closure's reported size. */
598 checkHeapChunk(StgPtr start, StgPtr end)
603 for (p=start; p<end; p+=size) {
604 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
605 size = checkClosure((StgClosure *)p);
606 /* This is the smallest size of closure that can live in the heap. */
607 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Check the first closure of each block in a block chain (used for
 * large-object lists, where one object occupies the whole block). */
613 checkChain(bdescr *bd)
616 checkClosure((StgClosure *)bd->start);
/* Sanity-check a thread state object: its stack bounds, its
 * why_blocked/block_info consistency, its link field, and finally the
 * live portion of its stack. */
622 checkTSO(StgTSO *tso)
625 StgPtr stack = tso->stack;
626 StgOffset stack_size = tso->stack_size;
627 StgPtr stack_end = stack + stack_size;
/* A relocated TSO is just a forwarding pointer; nothing to check. */
629 if (tso->what_next == ThreadRelocated) {
634 if (tso->what_next == ThreadKilled) {
635 /* The garbage collector doesn't bother following any pointers
636 * from dead threads, so don't check sanity here.
641 ASSERT(stack <= sp && sp < stack_end);
644 ASSERT(tso->par.magic==TSO_MAGIC);
/* block_info must agree with the reason the thread is blocked. */
646 switch (tso->why_blocked) {
648 checkClosureShallow(tso->block_info.closure);
649 ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
650 get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
652 case BlockedOnGA_NoSend:
653 checkClosureShallow(tso->block_info.closure);
654 ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
656 case BlockedOnBlackHole:
657 checkClosureShallow(tso->block_info.closure);
658 ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
659 get_itbl(tso->block_info.closure)->type==RBH);
664 #if defined(mingw32_HOST_OS)
665 case BlockedOnDoProc:
667 /* isOnBQ(blocked_queue) */
669 case BlockedOnException:
670 /* isOnSomeBQ(tso) */
671 ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
674 ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
677 ASSERT(tso->block_info.closure == END_TSO_QUEUE);
681 Could check other values of why_blocked but I am more
682 lazy than paranoid (bad combination) -- HWL
686 /* if the link field is non-nil it must point to one of these
687 three closure types */
688 ASSERT(tso->link == END_TSO_QUEUE ||
689 get_itbl(tso->link)->type == TSO ||
690 get_itbl(tso->link)->type == BLOCKED_FETCH ||
691 get_itbl(tso->link)->type == CONSTR);
694 checkStackChunk(sp, stack_end);
/* (GranSim) Walk the run queue of every PE and sanity-check each
 * runnable TSO, reporting progress via debugBelch. */
699 checkTSOsSanity(void) {
703 debugBelch("Checking sanity of all runnable TSOs:");
705 for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
706 for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
707 debugBelch("TSO %p on PE %d ...", tso, i);
714 debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* (GranSim) Check the structural invariants of one PE's run queue:
 * head/tail are non-NULL, NIL-consistency between head and tail, every
 * link hangs together, and the tail really is the last element. */
721 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
725 /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
726 ASSERT(run_queue_hds[proc]!=NULL);
727 ASSERT(run_queue_tls[proc]!=NULL);
728 /* if either head or tail is NIL then the other one must be NIL, too */
729 ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
730 ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
731 for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
733 prev=tso, tso=tso->link) {
734 ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
735 (prev==END_TSO_QUEUE || prev->link==tso));
739 ASSERT(prev==run_queue_tls[proc]);
/* (GranSim) Apply checkThreadQSanity to every PE's run queue. */
743 checkThreadQsSanity (rtsBool check_TSO_too)
747 for (p=0; p<RtsFlags.GranFlags.proc; p++)
748 checkThreadQSanity(p, check_TSO_too);
753 Check that all TSOs have been evacuated.
754 Optionally also check the sanity of the TSOs.
/* Walk the global all_threads list via global_link, asserting each
 * entry looks like a valid TSO closure. */
757 checkGlobalTSOList (rtsBool checkTSOs)
759 extern StgTSO *all_threads;
761 for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
762 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
763 ASSERT(get_itbl(tso)->type == TSO);
769 /* -----------------------------------------------------------------------------
770 Check mutable list sanity.
771 -------------------------------------------------------------------------- */
/* Each word on a generation's mutable list must be either a static
 * closure or a heap closure residing in that same generation. */
774 checkMutableList( bdescr *mut_bd, nat gen )
780 for (bd = mut_bd; bd != NULL; bd = bd->link) {
781 for (q = bd->start; q < bd->free; q++) {
782 p = (StgClosure *)*q;
783 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
789 Check the static objects list.
/* Walk the static-objects list, following the per-type static link
 * field (IND_STATIC / THUNK_STATIC / FUN_STATIC / generic) until
 * END_OF_STATIC_LIST is reached. */
792 checkStaticObjects ( StgClosure* static_objects )
794 StgClosure *p = static_objects;
797 while (p != END_OF_STATIC_LIST) {
800 switch (info->type) {
803 StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
805 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
806 ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
807 p = *IND_STATIC_LINK((StgClosure *)p);
812 p = *THUNK_STATIC_LINK((StgClosure *)p);
816 p = *FUN_STATIC_LINK((StgClosure *)p);
820 p = *STATIC_LINK(info,(StgClosure *)p);
/* NOTE(review): "checkStaticObjetcs" below is a typo for
 * "checkStaticObjects"; it is runtime message text, left unchanged
 * here — fix in a behavior-changing pass. */
824 barf("checkStaticObjetcs: strange closure %p (%s)",
831 Check the sanity of a blocking queue starting at bqe with closure being
832 the closure holding the blocking queue.
833 Note that in GUM we can have several different closure types in a
/* GUM variant: the queue may hold several closure types (not just
 * TSOs), hence the StgBlockingQueueElement element type and the wider
 * set of legal owner types in the assertion below. */
838 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
840 rtsBool end = rtsFalse;
841 StgInfoTable *info = get_itbl(closure);
843 ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
846 switch (get_itbl(bqe)->type) {
849 checkClosure((StgClosure *)bqe);
851 end = (bqe==END_BQ_QUEUE);
855 checkClosure((StgClosure *)bqe);
860 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
861 get_itbl(bqe)->type, closure, info_type(closure));
/* Second conditional variant of checkBQ: the owning closure must be an
 * MVar and the queue is walked element-by-element, checking each one
 * and watching for the END_BQ_QUEUE terminator. */
867 checkBQ (StgTSO *bqe, StgClosure *closure)
869 rtsBool end = rtsFalse;
870 StgInfoTable *info = get_itbl(closure);
872 ASSERT(info->type == MVAR);
875 switch (get_itbl(bqe)->type) {
878 checkClosure((StgClosure *)bqe);
880 end = (bqe==END_BQ_QUEUE);
884 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
885 get_itbl(bqe)->type, closure, info_type(closure));
/* Default (non-parallel) variant of checkBQ: only TSOs may appear on
 * an MVar's blocking queue; terminator is END_TSO_QUEUE. */
891 checkBQ (StgTSO *bqe, StgClosure *closure)
893 rtsBool end = rtsFalse;
894 StgInfoTable *info = get_itbl(closure);
896 ASSERT(info->type == MVAR);
899 switch (get_itbl(bqe)->type) {
901 checkClosure((StgClosure *)bqe);
903 end = (bqe==END_TSO_QUEUE);
907 barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
908 get_itbl(bqe)->type, closure, info->type);
918 This routine checks the sanity of the LAGA and GALA tables. They are
919 implemented as lists through one hash table, LAtoGALAtable, because entries
920 in both tables have the same structure:
921 - the LAGA table maps local addresses to global addresses; it starts
922 with liveIndirections
923 - the GALA table maps global addresses to local addresses; it starts
930 /* hidden in parallel/Global.c; only accessed for testing here */
931 extern GALA *liveIndirections;
932 extern GALA *liveRemoteGAs;
933 extern HashTable *LAtoGALAtable;
/* For each entry in both lists: a "preferred" entry must be the one
 * the hash table actually maps its local address to, the local address
 * must look like a closure, and the list must not self-loop.  With
 * check_closures set, the pointed-to closure is fully checked too. */
936 checkLAGAtable(rtsBool check_closures)
939 nat n=0, m=0; // debugging
941 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
943 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
944 ASSERT(!gala->preferred || gala == gala0);
945 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
946 ASSERT(gala->next!=gala); // detect direct loops
947 if ( check_closures ) {
948 checkClosure((StgClosure *)gala->la);
952 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
954 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
955 ASSERT(!gala->preferred || gala == gala0);
956 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
957 ASSERT(gala->next!=gala); // detect direct loops
959 if ( check_closures ) {
960 checkClosure((StgClosure *)gala->la);