1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2001
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49 for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 if ((bitmap & 1) == 0) {
51 checkClosureShallow((StgClosure *)payload[i]);
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63 for (bmp=0; i < size; bmp++) {
64 StgWord bitmap = large_bitmap->bitmap[bmp];
66 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 if ((bitmap & 1) == 0) {
68 checkClosureShallow((StgClosure *)payload[i]);
75 * check that it looks like a valid closure - without checking its payload
76 * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
83 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
85 /* Is it a static closure? */
86 if (!HEAP_ALLOCED(p)) {
87 ASSERT(closure_STATIC(p));
89 ASSERT(!closure_STATIC(p));
93 // check an individual stack object
95 checkStackFrame( StgPtr c )
98 const StgRetInfoTable* info;
100 info = get_ret_itbl((StgClosure *)c);
102 /* All activation records have 'bitmap' style layout info. */
103 switch (info->i.type) {
104 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
113 p = (P_)(r->payload);
114 checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
115 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
117 // skip over the non-pointers
118 p += RET_DYN_NONPTRS(dyn);
120 // follow the ptr words
121 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
122 checkClosureShallow((StgClosure *)*p);
126 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
127 RET_DYN_NONPTR_REGS_SIZE +
128 RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
132 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
133 case ATOMICALLY_FRAME:
134 case CATCH_RETRY_FRAME:
135 case CATCH_STM_FRAME:
137 // small bitmap cases (<= 32 entries)
141 size = BITMAP_SIZE(info->i.layout.bitmap);
142 checkSmallBitmap((StgPtr)c + 1,
143 BITMAP_BITS(info->i.layout.bitmap), size);
149 bco = (StgBCO *)*(c+1);
150 size = BCO_BITMAP_SIZE(bco);
151 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
155 case RET_BIG: // large bitmap (> 32 entries)
157 size = GET_LARGE_BITMAP(&info->i)->size;
158 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
163 StgFunInfoTable *fun_info;
166 ret_fun = (StgRetFun *)c;
167 fun_info = get_fun_itbl(ret_fun->fun);
168 size = ret_fun->size;
169 switch (fun_info->f.fun_type) {
171 checkSmallBitmap((StgPtr)ret_fun->payload,
172 BITMAP_BITS(fun_info->f.b.bitmap), size);
175 checkLargeBitmap((StgPtr)ret_fun->payload,
176 GET_FUN_LARGE_BITMAP(fun_info), size);
179 checkSmallBitmap((StgPtr)ret_fun->payload,
180 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
184 return sizeofW(StgRetFun) + size;
188 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
192 // check sections of stack between update frames
194 checkStackChunk( StgPtr sp, StgPtr stack_end )
199 while (p < stack_end) {
200 p += checkStackFrame( p );
202 // ASSERT( p == stack_end ); -- HWL
206 checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
209 StgFunInfoTable *fun_info;
211 ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
212 fun_info = get_fun_itbl(fun);
214 p = (StgClosure *)payload;
215 switch (fun_info->f.fun_type) {
217 checkSmallBitmap( (StgPtr)payload,
218 BITMAP_BITS(fun_info->f.b.bitmap), n_args );
221 checkLargeBitmap( (StgPtr)payload,
222 GET_FUN_LARGE_BITMAP(fun_info),
226 checkLargeBitmap( (StgPtr)payload,
231 checkSmallBitmap( (StgPtr)payload,
232 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
240 checkClosure( StgClosure* p )
242 const StgInfoTable *info;
244 ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
246 /* Is it a static closure (i.e. in the data segment)? */
247 if (!HEAP_ALLOCED(p)) {
248 ASSERT(closure_STATIC(p));
250 ASSERT(!closure_STATIC(p));
254 switch (info->type) {
258 StgMVar *mvar = (StgMVar *)p;
259 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
260 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
261 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
264 checkBQ((StgBlockingQueueElement *)mvar->head, p);
266 checkBQ(mvar->head, p);
269 return sizeofW(StgMVar);
280 for (i = 0; i < info->layout.payload.ptrs; i++) {
281 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
283 return stg_max(thunk_sizeW_fromITBL(info), sizeofW(StgHeader)+MIN_UPD_SIZE);
300 case IND_OLDGEN_PERM:
303 case SE_CAF_BLACKHOLE:
310 case CONSTR_CHARLIKE:
312 case CONSTR_NOCAF_STATIC:
317 for (i = 0; i < info->layout.payload.ptrs; i++) {
318 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
320 return sizeW_fromITBL(info);
324 StgBCO *bco = (StgBCO *)p;
325 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
326 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
327 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
328 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
329 return bco_sizeW(bco);
332 case IND_STATIC: /* (1, 0) closure */
333 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
334 return sizeW_fromITBL(info);
337 /* deal with these specially - the info table isn't
338 * representative of the actual layout.
340 { StgWeak *w = (StgWeak *)p;
341 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
342 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
343 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
345 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
347 return sizeW_fromITBL(info);
351 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
352 return THUNK_SELECTOR_sizeW();
356 /* we don't expect to see any of these after GC
357 * but they might appear during execution
359 StgInd *ind = (StgInd *)p;
360 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
361 return sizeofW(StgHeader) + MIN_UPD_SIZE;
373 case ATOMICALLY_FRAME:
374 case CATCH_RETRY_FRAME:
375 case CATCH_STM_FRAME:
376 barf("checkClosure: stack frame");
380 StgAP* ap = (StgAP *)p;
381 checkPAP (ap->fun, ap->payload, ap->n_args);
387 StgPAP* pap = (StgPAP *)p;
388 checkPAP (pap->fun, pap->payload, pap->n_args);
389 return pap_sizeW(pap);
394 StgAP_STACK *ap = (StgAP_STACK *)p;
395 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
396 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
397 return ap_stack_sizeW(ap);
401 return arr_words_sizeW((StgArrWords *)p);
403 case MUT_ARR_PTRS_CLEAN:
404 case MUT_ARR_PTRS_DIRTY:
405 case MUT_ARR_PTRS_FROZEN:
406 case MUT_ARR_PTRS_FROZEN0:
408 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
410 for (i = 0; i < a->ptrs; i++) {
411 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
413 return mut_arr_ptrs_sizeW(a);
417 checkTSO((StgTSO *)p);
418 return tso_sizeW((StgTSO *)p);
423 ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
424 ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
425 return sizeofW(StgBlockedFetch); // see size used in evacuate()
429 return sizeofW(StgFetchMe);
433 ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
434 return sizeofW(StgFetchMe); // see size used in evacuate()
437 checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
438 return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
441 /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
442 ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
443 if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
444 checkBQ(((StgRBH *)p)->blocking_queue, p);
445 ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
446 return BLACKHOLE_sizeW(); // see size used in evacuate()
447 // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
451 case TVAR_WAIT_QUEUE:
453 StgTVarWaitQueue *wq = (StgTVarWaitQueue *)p;
454 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
455 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
456 return sizeofW(StgTVarWaitQueue);
461 StgTVar *tv = (StgTVar *)p;
462 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
463 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_wait_queue_entry));
464 return sizeofW(StgTVar);
470 StgTRecChunk *tc = (StgTRecChunk *)p;
471 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
472 for (i = 0; i < tc -> next_entry_idx; i ++) {
473 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
474 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
475 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
477 return sizeofW(StgTRecChunk);
482 StgTRecHeader *trec = (StgTRecHeader *)p;
483 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
484 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
485 return sizeofW(StgTRecHeader);
490 barf("checkClosure: found EVACUATED closure %d",
493 barf("checkClosure (closure type %d)", info->type);
499 #define PVM_PE_MASK 0xfffc0000
500 #define MAX_PVM_PES MAX_PES
501 #define MAX_PVM_TIDS MAX_PES
502 #define MAX_SLOTS 100000
505 looks_like_tid(StgInt tid)
507 StgInt hi = (tid & PVM_PE_MASK) >> 18;
508 StgInt lo = (tid & ~PVM_PE_MASK);
509 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
514 looks_like_slot(StgInt slot)
516 /* if tid is known better use looks_like_ga!! */
517 rtsBool ok = slot<MAX_SLOTS;
518 // This refers only to the no. of slots on the current PE
519 // rtsBool ok = slot<=highest_slot();
524 looks_like_ga(globalAddr *ga)
526 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
527 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
528 (ga)->payload.gc.slot<=highest_slot() :
529 (ga)->payload.gc.slot<MAX_SLOTS;
530 rtsBool ok = is_tid && is_slot;
537 /* -----------------------------------------------------------------------------
540 After garbage collection, the live heap is in a state where we can
541 run through and check that all the pointers point to the right
542 place. This function starts at a given position and sanity-checks
543 all the objects in the remainder of the chain.
544 -------------------------------------------------------------------------- */
547 checkHeap(bdescr *bd)
552 // heap sanity checking doesn't work with SMP, because we can't
553 // zero the slop (see Updates.h).
557 for (; bd != NULL; bd = bd->link) {
559 while (p < bd->free) {
560 nat size = checkClosure((StgClosure *)p);
561 /* This is the smallest size of closure that can live in the heap */
562 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
566 while (p < bd->free &&
567 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
574 Check heap between start and end. Used after unpacking graphs.
577 checkHeapChunk(StgPtr start, StgPtr end)
579 extern globalAddr *LAGAlookup(StgClosure *addr);
583 for (p=start; p<end; p+=size) {
584 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
585 if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
586 *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
587 /* if it's a FM created during unpack and commoned up, it's not global */
588 ASSERT(LAGAlookup((StgClosure*)p)==NULL);
589 size = sizeofW(StgFetchMe);
590 } else if (get_itbl((StgClosure*)p)->type == IND) {
591 *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
594 size = checkClosure((StgClosure *)p);
595 /* This is the smallest size of closure that can live in the heap. */
596 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
602 checkHeapChunk(StgPtr start, StgPtr end)
607 for (p=start; p<end; p+=size) {
608 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
609 size = checkClosure((StgClosure *)p);
610 /* This is the smallest size of closure that can live in the heap. */
611 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
617 checkChain(bdescr *bd)
620 checkClosure((StgClosure *)bd->start);
626 checkTSO(StgTSO *tso)
629 StgPtr stack = tso->stack;
630 StgOffset stack_size = tso->stack_size;
631 StgPtr stack_end = stack + stack_size;
633 if (tso->what_next == ThreadRelocated) {
638 if (tso->what_next == ThreadKilled) {
639 /* The garbage collector doesn't bother following any pointers
640 * from dead threads, so don't check sanity here.
645 ASSERT(stack <= sp && sp < stack_end);
648 ASSERT(tso->par.magic==TSO_MAGIC);
650 switch (tso->why_blocked) {
652 checkClosureShallow(tso->block_info.closure);
653 ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
654 get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
656 case BlockedOnGA_NoSend:
657 checkClosureShallow(tso->block_info.closure);
658 ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
660 case BlockedOnBlackHole:
661 checkClosureShallow(tso->block_info.closure);
662 ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
663 get_itbl(tso->block_info.closure)->type==RBH);
668 #if defined(mingw32_HOST_OS)
669 case BlockedOnDoProc:
671 /* isOnBQ(blocked_queue) */
673 case BlockedOnException:
674 /* isOnSomeBQ(tso) */
675 ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
678 ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
681 ASSERT(tso->block_info.closure == END_TSO_QUEUE);
685 Could check other values of why_blocked but I am more
686 lazy than paranoid (bad combination) -- HWL
690 /* if the link field is non-nil it most point to one of these
691 three closure types */
692 ASSERT(tso->link == END_TSO_QUEUE ||
693 get_itbl(tso->link)->type == TSO ||
694 get_itbl(tso->link)->type == BLOCKED_FETCH ||
695 get_itbl(tso->link)->type == CONSTR);
698 checkStackChunk(sp, stack_end);
703 checkTSOsSanity(void) {
707 debugBelch("Checking sanity of all runnable TSOs:");
709 for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
710 for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
711 debugBelch("TSO %p on PE %d ...", tso, i);
718 debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
725 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
729 /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
730 ASSERT(run_queue_hds[proc]!=NULL);
731 ASSERT(run_queue_tls[proc]!=NULL);
732 /* if either head or tail is NIL then the other one must be NIL, too */
733 ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
734 ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
735 for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
737 prev=tso, tso=tso->link) {
738 ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
739 (prev==END_TSO_QUEUE || prev->link==tso));
743 ASSERT(prev==run_queue_tls[proc]);
747 checkThreadQsSanity (rtsBool check_TSO_too)
751 for (p=0; p<RtsFlags.GranFlags.proc; p++)
752 checkThreadQSanity(p, check_TSO_too);
757 Check that all TSOs have been evacuated.
758 Optionally also check the sanity of the TSOs.
761 checkGlobalTSOList (rtsBool checkTSOs)
763 extern StgTSO *all_threads;
765 for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
766 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
767 ASSERT(get_itbl(tso)->type == TSO);
773 /* -----------------------------------------------------------------------------
774 Check mutable list sanity.
775 -------------------------------------------------------------------------- */
778 checkMutableList( bdescr *mut_bd, nat gen )
784 for (bd = mut_bd; bd != NULL; bd = bd->link) {
785 for (q = bd->start; q < bd->free; q++) {
786 p = (StgClosure *)*q;
787 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
793 Check the static objects list.
796 checkStaticObjects ( StgClosure* static_objects )
798 StgClosure *p = static_objects;
801 while (p != END_OF_STATIC_LIST) {
804 switch (info->type) {
807 StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
809 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
810 ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
811 p = *IND_STATIC_LINK((StgClosure *)p);
816 p = *THUNK_STATIC_LINK((StgClosure *)p);
820 p = *FUN_STATIC_LINK((StgClosure *)p);
824 p = *STATIC_LINK(info,(StgClosure *)p);
828 barf("checkStaticObjetcs: strange closure %p (%s)",
835 Check the sanity of a blocking queue starting at bqe with closure being
836 the closure holding the blocking queue.
837 Note that in GUM we can have several different closure types in a
842 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
844 rtsBool end = rtsFalse;
845 StgInfoTable *info = get_itbl(closure);
847 ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
850 switch (get_itbl(bqe)->type) {
853 checkClosure((StgClosure *)bqe);
855 end = (bqe==END_BQ_QUEUE);
859 checkClosure((StgClosure *)bqe);
864 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
865 get_itbl(bqe)->type, closure, info_type(closure));
871 checkBQ (StgTSO *bqe, StgClosure *closure)
873 rtsBool end = rtsFalse;
874 StgInfoTable *info = get_itbl(closure);
876 ASSERT(info->type == MVAR);
879 switch (get_itbl(bqe)->type) {
882 checkClosure((StgClosure *)bqe);
884 end = (bqe==END_BQ_QUEUE);
888 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
889 get_itbl(bqe)->type, closure, info_type(closure));
898 This routine checks the sanity of the LAGA and GALA tables. They are
899 implemented as lists through one hash table, LAtoGALAtable, because entries
900 in both tables have the same structure:
901 - the LAGA table maps local addresses to global addresses; it starts
902 with liveIndirections
903 - the GALA table maps global addresses to local addresses; it starts
910 /* hidden in parallel/Global.c; only accessed for testing here */
911 extern GALA *liveIndirections;
912 extern GALA *liveRemoteGAs;
913 extern HashTable *LAtoGALAtable;
916 checkLAGAtable(rtsBool check_closures)
919 nat n=0, m=0; // debugging
921 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
923 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
924 ASSERT(!gala->preferred || gala == gala0);
925 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
926 ASSERT(gala->next!=gala); // detect direct loops
927 if ( check_closures ) {
928 checkClosure((StgClosure *)gala->la);
932 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
934 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
935 ASSERT(!gala->preferred || gala == gala0);
936 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
937 ASSERT(gala->next!=gala); // detect direct loops
939 if ( check_closures ) {
940 checkClosure((StgClosure *)gala->la);