/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49 for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 if ((bitmap & 1) == 0) {
51 checkClosureShallow((StgClosure *)payload[i]);
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63 for (bmp=0; i < size; bmp++) {
64 StgWord bitmap = large_bitmap->bitmap[bmp];
66 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 if ((bitmap & 1) == 0) {
68 checkClosureShallow((StgClosure *)payload[i]);
75 * check that it looks like a valid closure - without checking its payload
76 * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
83 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
85 /* Is it a static closure? */
86 if (!HEAP_ALLOCED(p)) {
87 ASSERT(closure_STATIC(p));
89 ASSERT(!closure_STATIC(p));
93 // check an individual stack object
95 checkStackFrame( StgPtr c )
98 const StgRetInfoTable* info;
100 info = get_ret_itbl((StgClosure *)c);
102 /* All activation records have 'bitmap' style layout info. */
103 switch (info->i.type) {
104 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
113 p = (P_)(r->payload);
114 checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
115 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
117 // skip over the non-pointers
118 p += RET_DYN_NONPTRS(dyn);
120 // follow the ptr words
121 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
122 checkClosureShallow((StgClosure *)*p);
126 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
127 RET_DYN_NONPTR_REGS_SIZE +
128 RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
132 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
133 case ATOMICALLY_FRAME:
134 case CATCH_RETRY_FRAME:
135 case CATCH_STM_FRAME:
137 // small bitmap cases (<= 32 entries)
141 size = BITMAP_SIZE(info->i.layout.bitmap);
142 checkSmallBitmap((StgPtr)c + 1,
143 BITMAP_BITS(info->i.layout.bitmap), size);
149 bco = (StgBCO *)*(c+1);
150 size = BCO_BITMAP_SIZE(bco);
151 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
155 case RET_BIG: // large bitmap (> 32 entries)
157 size = GET_LARGE_BITMAP(&info->i)->size;
158 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
163 StgFunInfoTable *fun_info;
166 ret_fun = (StgRetFun *)c;
167 fun_info = get_fun_itbl(ret_fun->fun);
168 size = ret_fun->size;
169 switch (fun_info->f.fun_type) {
171 checkSmallBitmap((StgPtr)ret_fun->payload,
172 BITMAP_BITS(fun_info->f.b.bitmap), size);
175 checkLargeBitmap((StgPtr)ret_fun->payload,
176 GET_FUN_LARGE_BITMAP(fun_info), size);
179 checkSmallBitmap((StgPtr)ret_fun->payload,
180 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
184 return sizeofW(StgRetFun) + size;
188 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
192 // check sections of stack between update frames
194 checkStackChunk( StgPtr sp, StgPtr stack_end )
199 while (p < stack_end) {
200 p += checkStackFrame( p );
202 // ASSERT( p == stack_end ); -- HWL
206 checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
209 StgFunInfoTable *fun_info;
211 ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
212 fun_info = get_fun_itbl(fun);
214 p = (StgClosure *)payload;
215 switch (fun_info->f.fun_type) {
217 checkSmallBitmap( (StgPtr)payload,
218 BITMAP_BITS(fun_info->f.b.bitmap), n_args );
221 checkLargeBitmap( (StgPtr)payload,
222 GET_FUN_LARGE_BITMAP(fun_info),
226 checkLargeBitmap( (StgPtr)payload,
231 checkSmallBitmap( (StgPtr)payload,
232 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
/*
 * checkClosure: sanity-check a single heap closure and return its size in
 * words, dispatching on info->type.  Asserts that the info pointer and every
 * pointer field look valid.
 *
 * NOTE(review): this excerpt is missing many lines, and residual original
 * line numbers are fused to the surviving text.  The code is kept
 * byte-identical below (comments only added) until the full text can be
 * recovered from the repository.
 */
240 checkClosure( StgClosure* p )
242     const StgInfoTable *info;
244     ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
246     /* Is it a static closure (i.e. in the data segment)? */
247     if (!HEAP_ALLOCED(p)) {
248 	ASSERT(closure_STATIC(p));
250 	ASSERT(!closure_STATIC(p));
254     switch (info->type) {
/* MVAR: head, tail and value must all look like closure pointers;
   parallel builds also walk the blocking queue. */
258 	StgMVar *mvar = (StgMVar *)p;
259 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
260 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
261 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
264 	checkBQ((StgBlockingQueueElement *)mvar->head, p);
266 	checkBQ(mvar->head, p);
269 	return sizeofW(StgMVar);
/* THUNK-like closures: every pointer in the payload must look valid. */
280 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
281 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
283 	    return thunk_sizeW_fromITBL(info);
300     case IND_OLDGEN_PERM:
303     case SE_CAF_BLACKHOLE:
311     case CONSTR_NOCAF_STATIC:
316 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
317 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
319 	    return sizeW_fromITBL(info);
/* BCO: instrs/literals/ptrs/itbls are all heap pointers. */
323 	StgBCO *bco = (StgBCO *)p;
324 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
325 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
326 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
327 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
328 	return bco_sizeW(bco);
331     case IND_STATIC: /* (1, 0) closure */
332       ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
333       return sizeW_fromITBL(info);
336       /* deal with these specially - the info table isn't
337        * representative of the actual layout.
339       { StgWeak *w = (StgWeak *)p;
340 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
341 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
342 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
344 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
346 	return sizeW_fromITBL(info);
350       ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
351       return THUNK_SELECTOR_sizeW();
355 	/* we don't expect to see any of these after GC
356 	 * but they might appear during execution
358 	StgInd *ind = (StgInd *)p;
359 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
360 	return sizeofW(StgInd);
/* Stack frames should never appear as free-standing heap closures. */
372     case ATOMICALLY_FRAME:
373     case CATCH_RETRY_FRAME:
374     case CATCH_STM_FRAME:
375 	    barf("checkClosure: stack frame");
/* AP / PAP: argument payloads are validated against the function's
   argument bitmap via checkPAP. */
379 	StgAP* ap = (StgAP *)p;
380 	checkPAP (ap->fun, ap->payload, ap->n_args);
386 	StgPAP* pap = (StgPAP *)p;
387 	checkPAP (pap->fun, pap->payload, pap->n_args);
388 	return pap_sizeW(pap);
393 	StgAP_STACK *ap = (StgAP_STACK *)p;
394 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
395 	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
396 	return ap_stack_sizeW(ap);
400 	return arr_words_sizeW((StgArrWords *)p);
402     case MUT_ARR_PTRS_CLEAN:
403     case MUT_ARR_PTRS_DIRTY:
404     case MUT_ARR_PTRS_FROZEN:
405     case MUT_ARR_PTRS_FROZEN0:
407 	    StgMutArrPtrs* a = (StgMutArrPtrs *)p;
409 	    for (i = 0; i < a->ptrs; i++) {
410 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
412 	    return mut_arr_ptrs_sizeW(a);
414     case TSO:
416         checkTSO((StgTSO *)p);
417         return tso_sizeW((StgTSO *)p);
/* Parallel (GUM) closure types: global-address and blocking-queue checks. */
422       ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
423       ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
424       return sizeofW(StgBlockedFetch);  // see size used in evacuate()
428       return sizeofW(StgFetchMe);
432       ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
433       return sizeofW(StgFetchMe);  // see size used in evacuate()
436       checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
437       return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
440       /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
441       ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
442       if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
443         checkBQ(((StgRBH *)p)->blocking_queue, p);
444       ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
445       return BLACKHOLE_sizeW();   // see size used in evacuate()
446       // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
/* STM closure types: wait queues, TVars, transaction records. */
450     case TVAR_WAIT_QUEUE:
452         StgTVarWaitQueue *wq = (StgTVarWaitQueue *)p;
453         ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
454         ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
455         return sizeofW(StgTVarWaitQueue);
460         StgTVar *tv = (StgTVar *)p;
461         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
462         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_wait_queue_entry));
463         return sizeofW(StgTVar);
469         StgTRecChunk *tc = (StgTRecChunk *)p;
470         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
471         for (i = 0; i < tc -> next_entry_idx; i ++) {
472           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
473           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
474           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
476         return sizeofW(StgTRecChunk);
481         StgTRecHeader *trec = (StgTRecHeader *)p;
482         ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
483         ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
484         return sizeofW(StgTRecHeader);
/* EVACUATED closures should not survive to a sanity check. */
489 	    barf("checkClosure: found EVACUATED closure %d",
492 	    barf("checkClosure (closure type %d)", info->type);
498 #define PVM_PE_MASK 0xfffc0000
499 #define MAX_PVM_PES MAX_PES
500 #define MAX_PVM_TIDS MAX_PES
501 #define MAX_SLOTS 100000
504 looks_like_tid(StgInt tid)
506 StgInt hi = (tid & PVM_PE_MASK) >> 18;
507 StgInt lo = (tid & ~PVM_PE_MASK);
508 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
513 looks_like_slot(StgInt slot)
515 /* if tid is known better use looks_like_ga!! */
516 rtsBool ok = slot<MAX_SLOTS;
517 // This refers only to the no. of slots on the current PE
518 // rtsBool ok = slot<=highest_slot();
523 looks_like_ga(globalAddr *ga)
525 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
526 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
527 (ga)->payload.gc.slot<=highest_slot() :
528 (ga)->payload.gc.slot<MAX_SLOTS;
529 rtsBool ok = is_tid && is_slot;
536 /* -----------------------------------------------------------------------------
539 After garbage collection, the live heap is in a state where we can
540 run through and check that all the pointers point to the right
541 place. This function starts at a given position and sanity-checks
542 all the objects in the remainder of the chain.
543 -------------------------------------------------------------------------- */
546 checkHeap(bdescr *bd)
550 #if defined(THREADED_RTS)
551 // heap sanity checking doesn't work with SMP, because we can't
552 // zero the slop (see Updates.h).
556 for (; bd != NULL; bd = bd->link) {
558 while (p < bd->free) {
559 nat size = checkClosure((StgClosure *)p);
560 /* This is the smallest size of closure that can live in the heap */
561 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
565 while (p < bd->free &&
566 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
573 Check heap between start and end. Used after unpacking graphs.
576 checkHeapChunk(StgPtr start, StgPtr end)
578 extern globalAddr *LAGAlookup(StgClosure *addr);
582 for (p=start; p<end; p+=size) {
583 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
584 if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
585 *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
586 /* if it's a FM created during unpack and commoned up, it's not global */
587 ASSERT(LAGAlookup((StgClosure*)p)==NULL);
588 size = sizeofW(StgFetchMe);
589 } else if (get_itbl((StgClosure*)p)->type == IND) {
590 *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
591 size = sizeofW(StgInd);
593 size = checkClosure((StgClosure *)p);
594 /* This is the smallest size of closure that can live in the heap. */
595 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
601 checkHeapChunk(StgPtr start, StgPtr end)
606 for (p=start; p<end; p+=size) {
607 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
608 size = checkClosure((StgClosure *)p);
609 /* This is the smallest size of closure that can live in the heap. */
610 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
616 checkChain(bdescr *bd)
619 checkClosure((StgClosure *)bd->start);
/*
 * checkTSO: sanity-check a thread state object -- its stack bounds, its
 * blocking state (per why_blocked), and, in parallel builds, its link field.
 *
 * NOTE(review): this excerpt is missing lines (GRAN/PAR/mingw #ifdef
 * structure in particular), with residual original line numbers fused to
 * the text; code kept byte-identical below, comments only added.
 */
625 checkTSO(StgTSO *tso)
628     StgPtr stack = tso->stack;
629     StgOffset stack_size = tso->stack_size;
630     StgPtr stack_end = stack + stack_size;
/* A relocated TSO is just a forwarding pointer; dead threads are not
   followed by the GC, so neither is checked further. */
632     if (tso->what_next == ThreadRelocated) {
637     if (tso->what_next == ThreadKilled) {
638       /* The garbage collector doesn't bother following any pointers
639        * from dead threads, so don't check sanity here.
/* The saved stack pointer must lie within the thread's own stack. */
644     ASSERT(stack <= sp && sp < stack_end);
647     ASSERT(tso->par.magic==TSO_MAGIC);
/* Check that block_info agrees with the blocking reason. */
649     switch (tso->why_blocked) {
651       checkClosureShallow(tso->block_info.closure);
652       ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
653 	     get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
655     case BlockedOnGA_NoSend:
656       checkClosureShallow(tso->block_info.closure);
657       ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
659     case BlockedOnBlackHole:
660       checkClosureShallow(tso->block_info.closure);
661       ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
662              get_itbl(tso->block_info.closure)->type==RBH);
667 #if defined(mingw32_HOST_OS)
668     case BlockedOnDoProc:
670       /* isOnBQ(blocked_queue) */
672     case BlockedOnException:
673       /* isOnSomeBQ(tso) */
674       ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
677       ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
680       ASSERT(tso->block_info.closure == END_TSO_QUEUE);
684 	 Could check other values of why_blocked but I am more 
685 	 lazy than paranoid (bad combination) -- HWL 
689     /* if the link field is non-nil it most point to one of these
690        three closure types */
691     ASSERT(tso->link == END_TSO_QUEUE ||
692 	   get_itbl(tso->link)->type == TSO ||
693 	   get_itbl(tso->link)->type == BLOCKED_FETCH ||
694 	   get_itbl(tso->link)->type == CONSTR);
/* Finally, walk and validate every frame on the thread's stack. */
697     checkStackChunk(sp, stack_end);
/*
 * checkTSOsSanity (GranSim builds): run checkTSO over every thread on every
 * processor's run queue, reporting progress via debugBelch.
 *
 * NOTE(review): excerpt is missing lines (declarations, braces, per-TSO
 * check call); code kept byte-identical below, comments only added.
 */
702 checkTSOsSanity(void) {
706   debugBelch("Checking sanity of all runnable TSOs:");
708   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
709     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
710       debugBelch("TSO %p on PE %d ...", tso, i);
717   debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/*
 * checkThreadQSanity (GranSim builds): check the doubly-linked-ness
 * invariants of one processor's run queue -- head/tail are never NULL
 * (END_TSO_QUEUE is the nil value), head and tail are nil together, and
 * every node's predecessor links back to it.
 *
 * NOTE(review): excerpt is missing lines; code kept byte-identical below,
 * comments only added.
 */
724 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
728   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
729   ASSERT(run_queue_hds[proc]!=NULL);
730   ASSERT(run_queue_tls[proc]!=NULL);
731   /* if either head or tail is NIL then the other one must be NIL, too */
732   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
733   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
734   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE; 
736        prev=tso, tso=tso->link) {
737     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
738 	   (prev==END_TSO_QUEUE || prev->link==tso));
/* After the walk, the last node seen must be the recorded tail. */
742   ASSERT(prev==run_queue_tls[proc]);
/*
 * checkThreadQsSanity (GranSim builds): apply checkThreadQSanity to every
 * processor's run queue.
 *
 * NOTE(review): excerpt is missing lines; code kept byte-identical below.
 */
746 checkThreadQsSanity (rtsBool check_TSO_too)
750   for (p=0; p<RtsFlags.GranFlags.proc; p++)
751     checkThreadQSanity(p, check_TSO_too);
756 Check that all TSOs have been evacuated.
757 Optionally also check the sanity of the TSOs.
760 checkGlobalTSOList (rtsBool checkTSOs)
762 extern StgTSO *all_threads;
764 for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
765 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
766 ASSERT(get_itbl(tso)->type == TSO);
772 /* -----------------------------------------------------------------------------
773 Check mutable list sanity.
774 -------------------------------------------------------------------------- */
777 checkMutableList( bdescr *mut_bd, nat gen )
783 for (bd = mut_bd; bd != NULL; bd = bd->link) {
784 for (q = bd->start; q < bd->free; q++) {
785 p = (StgClosure *)*q;
786 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
/*
 * checkStaticObjects: walk the static objects list, checking each entry and
 * following the appropriate static-link field per closure type.
 *
 * NOTE(review): excerpt is missing lines (case labels, info assignment,
 * braces); code kept byte-identical below, comments only added.  Also note
 * the barf message spells "checkStaticObjetcs" (sic) -- a typo present in
 * the runtime string; worth fixing once the full text is editable.
 */
792    Check the static objects list.
795 checkStaticObjects ( StgClosure* static_objects )
797   StgClosure *p = static_objects;
800   while (p != END_OF_STATIC_LIST) {
803     switch (info->type) {
806 	StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
808 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
809 	ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
810 	p = *IND_STATIC_LINK((StgClosure *)p);
815       p = *THUNK_STATIC_LINK((StgClosure *)p);
819       p = *FUN_STATIC_LINK((StgClosure *)p);
823       p = *STATIC_LINK(info,(StgClosure *)p);
827       barf("checkStaticObjetcs: strange closure %p (%s)", 
/*
 * checkBQ (parallel/GUM variant): sanity-check a blocking queue hanging off
 * 'closure' -- which must be an MVAR, FETCH_ME_BQ or RBH -- walking the
 * queue elements and closure-checking each.
 *
 * NOTE(review): excerpt is missing lines (the traversal loop and case
 * labels); code kept byte-identical below, comments only added.
 */
834    Check the sanity of a blocking queue starting at bqe with closure being
835    the closure holding the blocking queue.
836    Note that in GUM we can have several different closure types in a 
841 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure) 
843   rtsBool end = rtsFalse;
844   StgInfoTable *info = get_itbl(closure);
846   ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
849     switch (get_itbl(bqe)->type) {
852       checkClosure((StgClosure *)bqe);
854       end = (bqe==END_BQ_QUEUE);
858       checkClosure((StgClosure *)bqe);
863       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n", 
864 	   get_itbl(bqe)->type, closure, info_type(closure));
/*
 * checkBQ (non-PAR variant): sanity-check an MVAR's blocking queue of TSOs,
 * closure-checking each element until END_BQ_QUEUE is reached.
 *
 * NOTE(review): excerpt is missing lines (the traversal loop structure);
 * code kept byte-identical below, comments only added.
 */
870 checkBQ (StgTSO *bqe, StgClosure *closure) 
872   rtsBool end = rtsFalse;
873   StgInfoTable *info = get_itbl(closure);
875   ASSERT(info->type == MVAR);
878     switch (get_itbl(bqe)->type) {
881       checkClosure((StgClosure *)bqe);
883       end = (bqe==END_BQ_QUEUE);
887       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n", 
888 	   get_itbl(bqe)->type, closure, info_type(closure));
/* -----------------------------------------------------------------------------
   This routine checks the sanity of the LAGA and GALA tables. They are
   implemented as lists through one hash table, LAtoGALAtable, because entries
   in both tables have the same structure:
    - the LAGA table maps local addresses to global addresses; it starts
      with liveIndirections
    - the GALA table maps global addresses to local addresses; it starts
      with liveRemoteGAs
   -------------------------------------------------------------------------- */
909 /* hidden in parallel/Global.c; only accessed for testing here */
910 extern GALA *liveIndirections;
911 extern GALA *liveRemoteGAs;
912 extern HashTable *LAtoGALAtable;
915 checkLAGAtable(rtsBool check_closures)
918 nat n=0, m=0; // debugging
920 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
922 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
923 ASSERT(!gala->preferred || gala == gala0);
924 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
925 ASSERT(gala->next!=gala); // detect direct loops
926 if ( check_closures ) {
927 checkClosure((StgClosure *)gala->la);
931 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
933 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
934 ASSERT(!gala->preferred || gala == gala0);
935 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
936 ASSERT(gala->next!=gala); // detect direct loops
938 if ( check_closures ) {
939 checkClosure((StgClosure *)gala->la);