1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2001
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
/* Forward declarations: the bitmap walkers and the shallow closure check
 * are mutually used by the stack/PAP checking code below. */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
/* Walk `size` words of `payload` under a one-word liveness bitmap.
 * A 0 bit marks a pointer word: shallow-check it (no payload recursion). */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49     for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 	if ((bitmap & 1) == 0) {
51 	    checkClosureShallow((StgClosure *)payload[i]);
/* As checkSmallBitmap, but for a multi-word (large) bitmap: iterate the
 * bitmap words, consuming BITS_IN(W_) payload entries per bitmap word. */
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63     for (bmp=0; i < size; bmp++) {
64 	StgWord bitmap = large_bitmap->bitmap[bmp];
66 	for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 	    if ((bitmap & 1) == 0) {
68 		checkClosureShallow((StgClosure *)payload[i]);
75  * check that it looks like a valid closure - without checking its payload
76  * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
83     ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
85     /* Is it a static closure? Static closures live outside the heap and
     * must be flagged as such; heap closures must not be. */
86     if (!HEAP_ALLOCED(p)) {
87 	ASSERT(closure_STATIC(p));
89 	ASSERT(!closure_STATIC(p));
93 // check an individual stack object
// Returns the size of the frame in words so the caller (checkStackChunk)
// can advance to the next frame.
95 checkStackFrame( StgPtr c )
98     const StgRetInfoTable* info;
100     info = get_ret_itbl((StgClosure *)c);
102     /* All activation records have 'bitmap' style layout info. */
103     switch (info->i.type) {
104     case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
	// Check the register-save area under the liveness mask, then skip
	// the non-pointer words and shallow-check the pointer words.
113 	p = (P_)(r->payload);
114 	checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
115 	p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
117 	// skip over the non-pointers
118 	p += RET_DYN_NONPTRS(dyn);
120 	// follow the ptr words
121 	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
122 	    checkClosureShallow((StgClosure *)*p);
126 	return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
127 	    RET_DYN_NONPTR_REGS_SIZE +
128 	    RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
132 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
133     case ATOMICALLY_FRAME:
134     case CATCH_RETRY_FRAME:
135     case CATCH_STM_FRAME:
137 	// small bitmap cases (<= 32 entries)
141 	size = BITMAP_SIZE(info->i.layout.bitmap);
142 	checkSmallBitmap((StgPtr)c + 1,
143 			 BITMAP_BITS(info->i.layout.bitmap), size);
	// RET_BCO: the BCO sits in the first payload word and carries its
	// own bitmap describing the rest of the frame.
149 	bco = (StgBCO *)*(c+1);
150 	size = BCO_BITMAP_SIZE(bco);
151 	checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
155     case RET_BIG: // large bitmap (> 32 entries)
157 	size = GET_LARGE_BITMAP(&info->i)->size;
158 	checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
163 	StgFunInfoTable *fun_info;
	// RET_FUN: layout of the payload is dictated by the function's
	// own info table (fun_type selects small/large/standard bitmap).
166 	ret_fun = (StgRetFun *)c;
167 	fun_info = get_fun_itbl(ret_fun->fun);
168 	size = ret_fun->size;
169 	switch (fun_info->f.fun_type) {
171 	    checkSmallBitmap((StgPtr)ret_fun->payload,
172 			     BITMAP_BITS(fun_info->f.b.bitmap), size);
175 	    checkLargeBitmap((StgPtr)ret_fun->payload,
176 			     GET_FUN_LARGE_BITMAP(fun_info), size);
179 	    checkSmallBitmap((StgPtr)ret_fun->payload,
180 			     BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
184 	return sizeofW(StgRetFun) + size;
188 	barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
192 // check sections of stack between update frames
// Walks frame-by-frame from sp towards stack_end using the per-frame
// size returned by checkStackFrame.
194 checkStackChunk( StgPtr sp, StgPtr stack_end )
199     while (p < stack_end) {
200 	p += checkStackFrame( p );
202     // ASSERT( p == stack_end ); -- HWL
/* Check the argument payload of a PAP/AP against the function's
 * argument bitmap (small, large, BCO, or standard, per fun_type). */
206 checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
209     StgFunInfoTable *fun_info;
211     ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
212     fun_info = get_fun_itbl(fun);
214     p = (StgClosure *)payload;
215     switch (fun_info->f.fun_type) {
217 	checkSmallBitmap( (StgPtr)payload,
218 			  BITMAP_BITS(fun_info->f.b.bitmap), n_args );
221 	checkLargeBitmap( (StgPtr)payload,
222 			  GET_FUN_LARGE_BITMAP(fun_info),
226 	checkLargeBitmap( (StgPtr)payload,
231 	checkSmallBitmap( (StgPtr)payload,
232 			  BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
/* Sanity-check a single closure and return its size in words, so a heap
 * walker can step to the next object.  Dispatches on the info-table type. */
240 checkClosure( StgClosure* p )
242     const StgInfoTable *info;
244     ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
246     /* Is it a static closure (i.e. in the data segment)? */
247     if (!HEAP_ALLOCED(p)) {
248 	ASSERT(closure_STATIC(p));
250 	ASSERT(!closure_STATIC(p));
254     switch (info->type) {
258       { StgMVar *mvar = (StgMVar *)p;
259 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
260 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
261 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
264 	checkBQ((StgBlockingQueueElement *)mvar->head, p);
266 	checkBQ(mvar->head, p);
269 	return sizeofW(StgMVar);
	// Thunks: check each pointer word in the payload.
280 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
281 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
283 	    return stg_max(thunk_sizeW_fromITBL(info), sizeofW(StgHeader)+MIN_UPD_SIZE);
300     case IND_OLDGEN_PERM:
303     case SE_CAF_BLACKHOLE:
311     case CONSTR_CHARLIKE:
313     case CONSTR_NOCAF_STATIC:
	// Generic case: check every pointer in the payload per the
	// info-table layout, then return the ITBL-derived size.
318 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
319 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
321 	    return sizeW_fromITBL(info);
325 	StgBCO *bco = (StgBCO *)p;
326 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
327 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
328 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
329 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
330 	return bco_sizeW(bco);
333     case IND_STATIC: /* (1, 0) closure */
334 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
335 	return sizeW_fromITBL(info);
338 	/* deal with these specially - the info table isn't
339 	 * representative of the actual layout.
341 	{ StgWeak *w = (StgWeak *)p;
342 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
343 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
344 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
346 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
348 	  return sizeW_fromITBL(info);
352 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
353 	return THUNK_SELECTOR_sizeW();
357 	/* we don't expect to see any of these after GC
358 	 * but they might appear during execution
361 	    StgInd *ind = (StgInd *)p;
362 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
363 	    q = (P_)p + sizeofW(StgInd);
364 	    while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
377     case ATOMICALLY_FRAME:
378     case CATCH_RETRY_FRAME:
379     case CATCH_STM_FRAME:
	    // Stack-frame types must never appear as heap closures.
380 	    barf("checkClosure: stack frame");
384 	StgAP* ap = (StgAP *)p;
385 	checkPAP (ap->fun, ap->payload, ap->n_args);
391 	StgPAP* pap = (StgPAP *)p;
392 	checkPAP (pap->fun, pap->payload, pap->n_args);
393 	return pap_sizeW(pap);
398 	StgAP_STACK *ap = (StgAP_STACK *)p;
399 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
	// An AP_STACK payload is a saved stack chunk: check it frame-wise.
400 	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
401 	return ap_stack_sizeW(ap);
405 	return arr_words_sizeW((StgArrWords *)p);
408     case MUT_ARR_PTRS_FROZEN:
409     case MUT_ARR_PTRS_FROZEN0:
411 	    StgMutArrPtrs* a = (StgMutArrPtrs *)p;
413 	    for (i = 0; i < a->ptrs; i++) {
414 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
416 	    return mut_arr_ptrs_sizeW(a);
420 	checkTSO((StgTSO *)p);
421 	return tso_sizeW((StgTSO *)p);
	// Parallel (GUM/GranSim) closure types follow.
426 	ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
427 	ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
428 	return sizeofW(StgBlockedFetch);  // see size used in evacuate()
432 	return sizeofW(StgFetchMe);
436 	ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
437 	return sizeofW(StgFetchMe);  // see size used in evacuate()
440 	checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
441 	return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
444 	/* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
445 	ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
446 	if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
447 	  checkBQ(((StgRBH *)p)->blocking_queue, p);
448 	ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
449 	return BLACKHOLE_sizeW();   // see size used in evacuate()
450 	// sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
	// STM closure types follow.
454     case TVAR_WAIT_QUEUE:
456 	StgTVarWaitQueue *wq = (StgTVarWaitQueue *)p;
457 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
458 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
459 	return sizeofW(StgTVarWaitQueue);
464 	StgTVar *tv = (StgTVar *)p;
465 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
466 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_wait_queue_entry));
467 	return sizeofW(StgTVar);
473 	StgTRecChunk *tc = (StgTRecChunk *)p;
474 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
475 	for (i = 0; i < tc -> next_entry_idx; i ++) {
476 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
477 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
478 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
480 	return sizeofW(StgTRecChunk);
485 	StgTRecHeader *trec = (StgTRecHeader *)p;
486 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
487 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
488 	return sizeofW(StgTRecHeader);
493 	barf("checkClosure: found EVACUATED closure %d",
496 	barf("checkClosure (closure type %d)", info->type);
/* PVM task-id layout used by the parallel (GUM) sanity checks below:
 * the high bits above bit 18 encode the PE, the low bits the per-PE tid. */
502 #define PVM_PE_MASK    0xfffc0000
503 #define MAX_PVM_PES    MAX_PES
504 #define MAX_PVM_TIDS   MAX_PES
505 #define MAX_SLOTS      100000
/* Heuristic validity check for a PVM task id: split into PE (hi) and
 * per-PE tid (lo) parts and range-check both. */
508 looks_like_tid(StgInt tid)
510   StgInt hi = (tid & PVM_PE_MASK) >> 18;
511   StgInt lo = (tid & ~PVM_PE_MASK);
512   rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Heuristic validity check for a GA slot number (upper bound only). */
517 looks_like_slot(StgInt slot)
519   /* if tid is known better use looks_like_ga!! */
520   rtsBool ok = slot<MAX_SLOTS;
521   // This refers only to the no. of slots on the current PE
522   // rtsBool ok = slot<=highest_slot();
/* Validity check for a global address: tid must look plausible, and the
 * slot bound is tighter (highest_slot()) when the GA is local to this PE. */
527 looks_like_ga(globalAddr *ga)
529   rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
530   rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
531 		     (ga)->payload.gc.slot<=highest_slot() :
532 		     (ga)->payload.gc.slot<MAX_SLOTS;
533   rtsBool ok = is_tid && is_slot;
540 /* -----------------------------------------------------------------------------
543    After garbage collection, the live heap is in a state where we can
544    run through and check that all the pointers point to the right
545    place.  This function starts at a given position and sanity-checks
546    all the objects in the remainder of the chain.
547    -------------------------------------------------------------------------- */
550 checkHeap(bdescr *bd)
554     for (; bd != NULL; bd = bd->link) {
556 	while (p < bd->free) {
557 	    nat size = checkClosure((StgClosure *)p);
558 	    /* This is the smallest size of closure that can live in the heap */
559 	    ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
	    // Skip slop between objects: words that are small or don't
	    // look like an info pointer are treated as padding.
563 	    while (p < bd->free &&
564 		   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
571   Check heap between start and end. Used after unpacking graphs.
574 checkHeapChunk(StgPtr start, StgPtr end)
576   extern globalAddr *LAGAlookup(StgClosure *addr);
580   for (p=start; p<end; p+=size) {
581     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
582     if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
583 	*(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
584       /* if it's a FM created during unpack and commoned up, it's not global */
585       ASSERT(LAGAlookup((StgClosure*)p)==NULL);
586       size = sizeofW(StgFetchMe);
587     } else if (get_itbl((StgClosure*)p)->type == IND) {
588       *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
591       size = checkClosure((StgClosure *)p);
592       /* This is the smallest size of closure that can live in the heap. */
593       ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Check heap between start and end (non-parallel variant): walk object
 * by object using the size returned by checkClosure. */
599 checkHeapChunk(StgPtr start, StgPtr end)
604   for (p=start; p<end; p+=size) {
605     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
606     size = checkClosure((StgClosure *)p);
607     /* This is the smallest size of closure that can live in the heap. */
608     ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Check the first closure of each block in a block chain. */
614 checkChain(bdescr *bd)
617     checkClosure((StgClosure *)bd->start);
/* Sanity-check a TSO: stack bounds, blocking state, queue links, and the
 * stack contents (frame by frame). */
623 checkTSO(StgTSO *tso)
626     StgPtr stack = tso->stack;
627     StgOffset stack_size = tso->stack_size;
628     StgPtr stack_end = stack + stack_size;
630     if (tso->what_next == ThreadRelocated) {
635     if (tso->what_next == ThreadKilled) {
636       /* The garbage collector doesn't bother following any pointers
637        * from dead threads, so don't check sanity here.
642     ASSERT(stack <= sp && sp < stack_end);
645     ASSERT(tso->par.magic==TSO_MAGIC);
    // The meaning of block_info depends on why_blocked; check the
    // blocked-on closure accordingly.
647     switch (tso->why_blocked) {
649       checkClosureShallow(tso->block_info.closure);
650       ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
651 	     get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
653     case BlockedOnGA_NoSend:
654       checkClosureShallow(tso->block_info.closure);
655       ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
657     case BlockedOnBlackHole:
658       checkClosureShallow(tso->block_info.closure);
659       ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
660 	     get_itbl(tso->block_info.closure)->type==RBH);
665 #if defined(mingw32_HOST_OS)
666     case BlockedOnDoProc:
668       /* isOnBQ(blocked_queue) */
670     case BlockedOnException:
671       /* isOnSomeBQ(tso) */
672       ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
675       ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
678       ASSERT(tso->block_info.closure == END_TSO_QUEUE);
682       Could check other values of why_blocked but I am more
683       lazy than paranoid (bad combination) -- HWL
687     /* if the link field is non-nil it must point to one of these
688        three closure types */
689     ASSERT(tso->link == END_TSO_QUEUE ||
690 	   get_itbl(tso->link)->type == TSO ||
691 	   get_itbl(tso->link)->type == BLOCKED_FETCH ||
692 	   get_itbl(tso->link)->type == CONSTR);
695     checkStackChunk(sp, stack_end);
/* GranSim only: check every TSO on every PE's run queue. */
700 checkTSOsSanity(void) {
704   debugBelch("Checking sanity of all runnable TSOs:");
706   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
707     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
708       debugBelch("TSO %p on PE %d ...", tso, i);
715   debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* Check head/tail invariants and link consistency of one PE's run queue;
 * optionally sanity-check each TSO on it. */
722 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
726   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
727   ASSERT(run_queue_hds[proc]!=NULL);
728   ASSERT(run_queue_tls[proc]!=NULL);
729   /* if either head or tail is NIL then the other one must be NIL, too */
730   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
731   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
732   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
734        prev=tso, tso=tso->link) {
735     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
736 	   (prev==END_TSO_QUEUE || prev->link==tso));
    // After the loop, prev must be the recorded queue tail.
740   ASSERT(prev==run_queue_tls[proc]);
/* Check the run queues of all PEs. */
744 checkThreadQsSanity (rtsBool check_TSO_too)
748   for (p=0; p<RtsFlags.GranFlags.proc; p++)
749     checkThreadQSanity(p, check_TSO_too);
754   Check that all TSOs have been evacuated.
755   Optionally also check the sanity of the TSOs.
758 checkGlobalTSOList (rtsBool checkTSOs)
760   extern  StgTSO *all_threads;
762   for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
763       ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
764       ASSERT(get_itbl(tso)->type == TSO);
770 /* -----------------------------------------------------------------------------
771    Check mutable list sanity.
772    -------------------------------------------------------------------------- */
775 checkMutableList( bdescr *mut_bd, nat gen )
781     for (bd = mut_bd; bd != NULL; bd = bd->link) {
782 	for (q = bd->start; q < bd->free; q++) {
783 	    p = (StgClosure *)*q;
	    // Every heap-allocated entry must live in the generation
	    // whose mutable list this is.
784 	    ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
790    Check the static objects list.
/* Walk the static-object list, threaded through the *_STATIC_LINK
 * fields, until END_OF_STATIC_LIST; IND_STATIC entries additionally
 * have their indirectee checked. */
793 checkStaticObjects ( StgClosure* static_objects )
795   StgClosure *p = static_objects;
798   while (p != END_OF_STATIC_LIST) {
801     switch (info->type) {
804 	StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
806 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
807 	ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
808 	p = *IND_STATIC_LINK((StgClosure *)p);
813 	p = *THUNK_STATIC_LINK((StgClosure *)p);
817 	p = *FUN_STATIC_LINK((StgClosure *)p);
821 	p = *STATIC_LINK(info,(StgClosure *)p);
	// Fixed misspelled function name in the diagnostic message
	// (was "checkStaticObjetcs").
825 	barf("checkStaticObjects: strange closure %p (%s)",
832   Check the sanity of a blocking queue starting at bqe with closure being
833   the closure holding the blocking queue.
834   Note that in GUM we can have several different closure types in a
839 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
841   rtsBool end = rtsFalse;
842   StgInfoTable *info = get_itbl(closure);
844   ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
847     switch (get_itbl(bqe)->type) {
850       checkClosure((StgClosure *)bqe);
852       end = (bqe==END_BQ_QUEUE);
856       checkClosure((StgClosure *)bqe);
861       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
862 	   get_itbl(bqe)->type, closure, info_type(closure));
/* Blocking-queue check variant: only MVar queues, elements are TSOs. */
868 checkBQ (StgTSO *bqe, StgClosure *closure)
870   rtsBool end = rtsFalse;
871   StgInfoTable *info = get_itbl(closure);
873   ASSERT(info->type == MVAR);
876     switch (get_itbl(bqe)->type) {
879       checkClosure((StgClosure *)bqe);
881       end = (bqe==END_BQ_QUEUE);
885       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
886 	   get_itbl(bqe)->type, closure, info_type(closure));
/* Blocking-queue check variant: MVar queues of TSOs, terminated by
 * END_TSO_QUEUE rather than END_BQ_QUEUE. */
892 checkBQ (StgTSO *bqe, StgClosure *closure)
894   rtsBool end = rtsFalse;
895   StgInfoTable *info = get_itbl(closure);
897   ASSERT(info->type == MVAR);
900     switch (get_itbl(bqe)->type) {
902       checkClosure((StgClosure *)bqe);
904       end = (bqe==END_TSO_QUEUE);
908       barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
909 	   get_itbl(bqe)->type, closure, info->type);
919   This routine checks the sanity of the LAGA and GALA tables. They are
920   implemented as lists through one hash table, LAtoGALAtable, because entries
921   in both tables have the same structure:
922    - the LAGA table maps local addresses to global addresses; it starts
923      with liveIndirections
924    - the GALA table maps global addresses to local addresses; it starts
931 /* hidden in parallel/Global.c; only accessed for testing here */
932 extern GALA *liveIndirections;
933 extern GALA *liveRemoteGAs;
934 extern HashTable *LAtoGALAtable;
937 checkLAGAtable(rtsBool check_closures)
940   nat n=0, m=0; // debugging
  // First pass: entries on the LAGA (local-to-global) list.
942   for (gala = liveIndirections; gala != NULL; gala = gala->next) {
944     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
945     ASSERT(!gala->preferred || gala == gala0);
946     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
947     ASSERT(gala->next!=gala); // detect direct loops
948     if ( check_closures ) {
949       checkClosure((StgClosure *)gala->la);
  // Second pass: entries on the GALA (global-to-local) list.
953   for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
955     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
956     ASSERT(!gala->preferred || gala == gala0);
957     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
958     ASSERT(gala->next!=gala); // detect direct loops
960     if ( check_closures ) {
961       checkClosure((StgClosure *)gala->la);