1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2001
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49 for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 if ((bitmap & 1) == 0) {
51 checkClosureShallow((StgClosure *)payload[i]);
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63 for (bmp=0; i < size; bmp++) {
64 StgWord bitmap = large_bitmap->bitmap[bmp];
66 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 if ((bitmap & 1) == 0) {
68 checkClosureShallow((StgClosure *)payload[i]);
75 * check that it looks like a valid closure - without checking its payload
76 * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
83 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
85 /* Is it a static closure? */
86 if (!HEAP_ALLOCED(p)) {
87 ASSERT(closure_STATIC(p));
89 ASSERT(!closure_STATIC(p));
93 // check an individual stack object
/* NOTE(review): judging from the visible `return`s, this yields the
 * frame's size in words so the caller (checkStackChunk) can step to the
 * next frame.  This extract is truncated: the return-type line, several
 * case labels, `break`s and closing braces are missing, and every
 * surviving line carries its original line number as a prefix.  Code
 * lines are left byte-for-byte untouched; only comments were added. */
95 checkStackFrame( StgPtr c )
98 const StgRetInfoTable* info;
100 info = get_ret_itbl((StgClosure *)c);
102 /* All activation records have 'bitmap' style layout info. */
103 switch (info->i.type) {
104 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
/* NOTE(review): `r`, `dyn`, `p` and `size` are declared on elided lines. */
113 p = (P_)(r->payload);
114 checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
115 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
117 // skip over the non-pointers
118 p += RET_DYN_NONPTRS(dyn);
120 // follow the ptr words
121 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
122 checkClosureShallow((StgClosure *)*p);
126 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
127 RET_DYN_NONPTR_REGS_SIZE +
128 RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
/* Update frame: only the updatee pointer needs checking; the frame
 * then falls through to the small-bitmap cases below. */
132 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
133 case ATOMICALLY_FRAME:
134 case CATCH_RETRY_FRAME:
135 case CATCH_STM_FRAME:
137 // small bitmap cases (<= 32 entries)
141 size = BITMAP_SIZE(info->i.layout.bitmap);
142 checkSmallBitmap((StgPtr)c + 1,
143 BITMAP_BITS(info->i.layout.bitmap), size);
/* Presumably the RET_BCO case (label elided): the BCO lives in the word
 * after the frame header and supplies its own bitmap -- TODO confirm. */
149 bco = (StgBCO *)*(c+1);
150 size = BCO_BITMAP_SIZE(bco);
151 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
155 case RET_BIG: // large bitmap (> 32 entries)
157 size = GET_LARGE_BITMAP(&info->i)->size;
158 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
/* RET_FUN (label elided): layout comes from the function's info table. */
163 StgFunInfoTable *fun_info;
166 ret_fun = (StgRetFun *)c;
167 fun_info = get_fun_itbl(ret_fun->fun);
168 size = ret_fun->size;
169 switch (fun_info->f.fun_type) {
171 checkSmallBitmap((StgPtr)ret_fun->payload,
172 BITMAP_BITS(fun_info->f.b.bitmap), size);
175 checkLargeBitmap((StgPtr)ret_fun->payload,
176 GET_FUN_LARGE_BITMAP(fun_info), size);
/* default arm: standard argument pattern, bitmap from the static table. */
179 checkSmallBitmap((StgPtr)ret_fun->payload,
180 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
184 return sizeofW(StgRetFun) + size;
188 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
192 // check sections of stack between update frames
194 checkStackChunk( StgPtr sp, StgPtr stack_end )
199 while (p < stack_end) {
200 p += checkStackFrame( p );
202 // ASSERT( p == stack_end ); -- HWL
/* NOTE(review): sanity-check one closure and, judging from the visible
 * `return`s, yield its size in words.  This extract is heavily
 * truncated: most case labels, `break`s, braces, #if/#else lines and
 * the closing `}` are missing, and every surviving line carries its
 * original line number as a prefix.  Code lines left byte-for-byte
 * untouched; only comments were added. */
206 checkClosure( StgClosure* p )
208 const StgInfoTable *info;
210 ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
212 /* Is it a static closure (i.e. in the data segment)? */
213 if (!HEAP_ALLOCED(p)) {
214 ASSERT(closure_STATIC(p));
216 ASSERT(!closure_STATIC(p));
220 switch (info->type) {
/* MVar: head, tail and value must all look like closure pointers. */
224 StgMVar *mvar = (StgMVar *)p;
225 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
226 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
227 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
/* NOTE(review): the two checkBQ calls below appear to be alternative
 * build variants (different blocking-queue element types); the
 * conditional-compilation lines between them were elided. */
230 checkBQ((StgBlockingQueueElement *)mvar->head, p);
232 checkBQ(mvar->head, p);
235 return sizeofW(StgMVar);
/* Generic pointers-first layout: check each pointer in the payload. */
246 for (i = 0; i < info->layout.payload.ptrs; i++) {
247 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
249 return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
266 case IND_OLDGEN_PERM:
269 case SE_CAF_BLACKHOLE:
277 case CONSTR_CHARLIKE:
279 case CONSTR_NOCAF_STATIC:
284 for (i = 0; i < info->layout.payload.ptrs; i++) {
285 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
287 return sizeW_fromITBL(info);
/* BCO: all four component arrays must look like closures. */
291 StgBCO *bco = (StgBCO *)p;
292 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
293 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
294 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
295 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
296 return bco_sizeW(bco);
299 case IND_STATIC: /* (1, 0) closure */
300 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
301 return sizeW_fromITBL(info);
304 /* deal with these specially - the info table isn't
305 * representative of the actual layout.
307 { StgWeak *w = (StgWeak *)p;
308 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
309 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
310 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
312 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
314 return sizeW_fromITBL(info);
/* THUNK_SELECTOR (label elided): check the selectee pointer. */
318 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
319 return THUNK_SELECTOR_sizeW();
323 /* we don't expect to see any of these after GC
324 * but they might appear during execution
327 StgInd *ind = (StgInd *)p;
328 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
329 q = (P_)p + sizeofW(StgInd);
330 while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
/* Stack frames should never be seen free-standing on the heap. */
343 case ATOMICALLY_FRAME:
344 case CATCH_RETRY_FRAME:
345 case CATCH_STM_FRAME:
346 barf("checkClosure: stack frame");
348 case AP: /* we can treat this as being the same as a PAP */
351 StgFunInfoTable *fun_info;
352 StgPAP* pap = (StgPAP *)p;
354 ASSERT(LOOKS_LIKE_CLOSURE_PTR(pap->fun));
355 fun_info = get_fun_itbl(pap->fun);
357 p = (StgClosure *)pap->payload;
/* Check the PAP's argument payload using the function's bitmap. */
358 switch (fun_info->f.fun_type) {
360 checkSmallBitmap( (StgPtr)pap->payload,
361 BITMAP_BITS(fun_info->f.b.bitmap), pap->n_args );
364 checkLargeBitmap( (StgPtr)pap->payload,
365 GET_FUN_LARGE_BITMAP(fun_info),
369 checkLargeBitmap( (StgPtr)pap->payload,
370 BCO_BITMAP(pap->fun),
374 checkSmallBitmap( (StgPtr)pap->payload,
375 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
379 return pap_sizeW(pap);
/* AP_STACK: its payload is a copied stack chunk; check it as one. */
384 StgAP_STACK *ap = (StgAP_STACK *)p;
385 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
386 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
387 return ap_stack_sizeW(ap);
/* ARR_WORDS: no pointers to check, just return the size. */
391 return arr_words_sizeW((StgArrWords *)p);
394 case MUT_ARR_PTRS_FROZEN:
395 case MUT_ARR_PTRS_FROZEN0:
397 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
399 for (i = 0; i < a->ptrs; i++) {
400 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
402 return mut_arr_ptrs_sizeW(a);
/* TSO: delegate to the dedicated checker. */
406 checkTSO((StgTSO *)p);
407 return tso_sizeW((StgTSO *)p);
/* Parallel-RTS closure types (BLOCKED_FETCH, FETCH_ME, ...). */
412 ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
413 ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
414 return sizeofW(StgBlockedFetch); // see size used in evacuate()
418 return sizeofW(StgFetchMe);
422 ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
423 return sizeofW(StgFetchMe); // see size used in evacuate()
426 checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
427 return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
430 /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
431 ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
432 if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
433 checkBQ(((StgRBH *)p)->blocking_queue, p);
434 ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
435 return BLACKHOLE_sizeW(); // see size used in evacuate()
436 // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
/* STM closure types. */
440 case TVAR_WAIT_QUEUE:
442 StgTVarWaitQueue *wq = (StgTVarWaitQueue *)p;
443 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
444 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
445 return sizeofW(StgTVarWaitQueue);
450 StgTVar *tv = (StgTVar *)p;
451 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
452 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_wait_queue_entry));
453 return sizeofW(StgTVar);
459 StgTRecChunk *tc = (StgTRecChunk *)p;
460 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
461 for (i = 0; i < tc -> next_entry_idx; i ++) {
462 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
463 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
464 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
466 return sizeofW(StgTRecChunk);
471 StgTRecHeader *trec = (StgTRecHeader *)p;
472 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
473 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
474 return sizeofW(StgTRecHeader);
/* EVACUATED closures and unknown types are fatal errors here. */
479 barf("checkClosure: found EVACUATED closure %d",
482 barf("checkClosure (closure type %d)", info->type);
488 #define PVM_PE_MASK 0xfffc0000
489 #define MAX_PVM_PES MAX_PES
490 #define MAX_PVM_TIDS MAX_PES
491 #define MAX_SLOTS 100000
494 looks_like_tid(StgInt tid)
496 StgInt hi = (tid & PVM_PE_MASK) >> 18;
497 StgInt lo = (tid & ~PVM_PE_MASK);
498 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
503 looks_like_slot(StgInt slot)
505 /* if tid is known better use looks_like_ga!! */
506 rtsBool ok = slot<MAX_SLOTS;
507 // This refers only to the no. of slots on the current PE
508 // rtsBool ok = slot<=highest_slot();
513 looks_like_ga(globalAddr *ga)
515 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
516 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
517 (ga)->payload.gc.slot<=highest_slot() :
518 (ga)->payload.gc.slot<MAX_SLOTS;
519 rtsBool ok = is_tid && is_slot;
526 /* -----------------------------------------------------------------------------
529 After garbage collection, the live heap is in a state where we can
530 run through and check that all the pointers point to the right
531 place. This function starts at a given position and sanity-checks
532 all the objects in the remainder of the chain.
533 -------------------------------------------------------------------------- */
536 checkHeap(bdescr *bd)
540 for (; bd != NULL; bd = bd->link) {
542 while (p < bd->free) {
543 nat size = checkClosure((StgClosure *)p);
544 /* This is the smallest size of closure that can live in the heap */
545 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
549 while (p < bd->free &&
550 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
/* NOTE(review): parallel-RTS variant of checkHeapChunk; checks the heap
 * between start and end after unpacking a graph.  This extract is
 * truncated: the surrounding comment delimiters, the function's return
 * type, the declarations of `p`/`size`, an `else` arm and the closing
 * braces are missing, and every line keeps its original line-number
 * prefix.  Code lines left byte-for-byte untouched. */
557 Check heap between start and end. Used after unpacking graphs.
560 checkHeapChunk(StgPtr start, StgPtr end)
562 extern globalAddr *LAGAlookup(StgClosure *addr);
566 for (p=start; p<end; p+=size) {
567 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
568 if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
569 *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
570 /* if it's a FM created during unpack and commoned up, it's not global */
571 ASSERT(LAGAlookup((StgClosure*)p)==NULL);
572 size = sizeofW(StgFetchMe);
573 } else if (get_itbl((StgClosure*)p)->type == IND) {
574 *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
577 size = checkClosure((StgClosure *)p);
578 /* This is the smallest size of closure that can live in the heap. */
579 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
585 checkHeapChunk(StgPtr start, StgPtr end)
590 for (p=start; p<end; p+=size) {
591 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
592 size = checkClosure((StgClosure *)p);
593 /* This is the smallest size of closure that can live in the heap. */
594 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
600 checkChain(bdescr *bd)
603 checkClosure((StgClosure *)bd->start);
/* NOTE(review): sanity-check a single TSO: its stack bounds, its
 * why_blocked state against block_info, its link field, and finally its
 * stack contents.  This extract is truncated: the return type, the
 * declaration of `sp`, early `return`s for relocated/killed threads,
 * case labels, `break`s, #if/#endif lines and closing braces are
 * missing; lines keep their original line-number prefix.  Code lines
 * left byte-for-byte untouched. */
609 checkTSO(StgTSO *tso)
612 StgPtr stack = tso->stack;
613 StgOffset stack_size = tso->stack_size;
614 StgPtr stack_end = stack + stack_size;
/* Relocated and killed threads are skipped (bodies elided here). */
616 if (tso->what_next == ThreadRelocated) {
621 if (tso->what_next == ThreadKilled) {
622 /* The garbage collector doesn't bother following any pointers
623 * from dead threads, so don't check sanity here.
628 ASSERT(stack <= sp && sp < stack_end);
/* Parallel build only: the TSO carries a magic number. */
631 ASSERT(tso->par.magic==TSO_MAGIC);
/* Check block_info is consistent with the blocking reason. */
633 switch (tso->why_blocked) {
635 checkClosureShallow(tso->block_info.closure);
636 ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
637 get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
639 case BlockedOnGA_NoSend:
640 checkClosureShallow(tso->block_info.closure);
641 ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
643 case BlockedOnBlackHole:
644 checkClosureShallow(tso->block_info.closure);
645 ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
646 get_itbl(tso->block_info.closure)->type==RBH);
651 #if defined(mingw32_HOST_OS)
652 case BlockedOnDoProc:
654 /* isOnBQ(blocked_queue) */
656 case BlockedOnException:
657 /* isOnSomeBQ(tso) */
658 ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
660 ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
664 ASSERT(tso->block_info.closure == END_TSO_QUEUE);
668 Could check other values of why_blocked but I am more
669 lazy than paranoid (bad combination) -- HWL
673 /* if the link field is non-nil it most point to one of these
674 three closure types */
675 ASSERT(tso->link == END_TSO_QUEUE ||
676 get_itbl(tso->link)->type == TSO ||
677 get_itbl(tso->link)->type == BLOCKED_FETCH ||
678 get_itbl(tso->link)->type == CONSTR);
681 checkStackChunk(sp, stack_end);
/* NOTE(review): GranSim-only: walk the run queue of every simulated PE
 * and check each runnable TSO, reporting progress via debugBelch.  The
 * extract is truncated (return type, declarations of i/tsos/tso, the
 * per-TSO check call and closing braces are missing); lines keep their
 * original line-number prefix.  Code left untouched. */
686 checkTSOsSanity(void) {
690 debugBelch("Checking sanity of all runnable TSOs:");
692 for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
693 for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
694 debugBelch("TSO %p on PE %d ...", tso, i);
701 debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* NOTE(review): check one PE's run queue: head/tail are non-NULL, head
 * and tail are consistently NIL-or-not together, the list is properly
 * doubly-consistent (each node's predecessor links to it), and the last
 * node is the recorded tail.  Truncated extract: return type,
 * declarations of tso/prev, the loop's termination condition line and
 * braces are missing; lines keep their original line-number prefix.
 * Code left untouched. */
708 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
712 /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
713 ASSERT(run_queue_hds[proc]!=NULL);
714 ASSERT(run_queue_tls[proc]!=NULL);
715 /* if either head or tail is NIL then the other one must be NIL, too */
716 ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
717 ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
718 for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
720 prev=tso, tso=tso->link) {
721 ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
722 (prev==END_TSO_QUEUE || prev->link==tso));
/* check_TSO_too presumably gates a per-TSO checkTSO call on an elided
 * line -- TODO confirm against the full file. */
726 ASSERT(prev==run_queue_tls[proc]);
730 checkThreadQsSanity (rtsBool check_TSO_too)
734 for (p=0; p<RtsFlags.GranFlags.proc; p++)
735 checkThreadQSanity(p, check_TSO_too);
740 Check that all TSOs have been evacuated.
741 Optionally also check the sanity of the TSOs.
744 checkGlobalTSOList (rtsBool checkTSOs)
746 extern StgTSO *all_threads;
748 for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
749 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
750 ASSERT(get_itbl(tso)->type == TSO);
756 /* -----------------------------------------------------------------------------
757 Check mutable list sanity.
758 -------------------------------------------------------------------------- */
761 checkMutableList( bdescr *mut_bd, nat gen )
767 for (bd = mut_bd; bd != NULL; bd = bd->link) {
768 for (q = bd->start; q < bd->free; q++) {
769 p = (StgClosure *)*q;
770 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
776 Check the static objects list.
779 checkStaticObjects ( StgClosure* static_objects )
781 StgClosure *p = static_objects;
784 while (p != END_OF_STATIC_LIST) {
787 switch (info->type) {
790 StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
792 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
793 ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
794 p = IND_STATIC_LINK((StgClosure *)p);
799 p = THUNK_STATIC_LINK((StgClosure *)p);
803 p = FUN_STATIC_LINK((StgClosure *)p);
807 p = STATIC_LINK(info,(StgClosure *)p);
811 barf("checkStaticObjetcs: strange closure %p (%s)",
/* NOTE(review): first of three build-variant definitions of checkBQ in
 * this file (the #if lines selecting them were elided).  This one
 * handles the parallel (GUM) build, where a blocking queue may contain
 * several closure types.  Truncated extract: the comment delimiters,
 * return type, loop construct around the switch, case labels, `break`s
 * and braces are missing; lines keep their original line-number prefix.
 * Code left untouched. */
818 Check the sanity of a blocking queue starting at bqe with closure being
819 the closure holding the blocking queue.
820 Note that in GUM we can have several different closure types in a
825 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
827 rtsBool end = rtsFalse;
828 StgInfoTable *info = get_itbl(closure);
830 ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
833 switch (get_itbl(bqe)->type) {
836 checkClosure((StgClosure *)bqe);
838 end = (bqe==END_BQ_QUEUE);
842 checkClosure((StgClosure *)bqe);
847 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
848 get_itbl(bqe)->type, closure, info_type(closure));
/* NOTE(review): second build-variant of checkBQ, presumably for the
 * GranSim build -- here the queue elements are plain TSOs and only
 * MVars carry blocking queues.  Truncated extract (return type, loop,
 * case labels, `break`s, braces missing); lines keep their original
 * line-number prefix.  Code left untouched. */
854 checkBQ (StgTSO *bqe, StgClosure *closure)
856 rtsBool end = rtsFalse;
857 StgInfoTable *info = get_itbl(closure);
859 ASSERT(info->type == MVAR);
862 switch (get_itbl(bqe)->type) {
865 checkClosure((StgClosure *)bqe);
867 end = (bqe==END_BQ_QUEUE);
871 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
872 get_itbl(bqe)->type, closure, info_type(closure));
/* NOTE(review): third (default, sequential-build) variant of checkBQ.
 * Truncated extract as above; lines keep their original line-number
 * prefix and code is left untouched.  Observation: the barf() below
 * passes three value arguments but its format string contains only two
 * conversions (%d, %p) -- the trailing `info->type` argument is unused.
 * Worth fixing once the full file is available. */
878 checkBQ (StgTSO *bqe, StgClosure *closure)
880 rtsBool end = rtsFalse;
881 StgInfoTable *info = get_itbl(closure);
883 ASSERT(info->type == MVAR);
886 switch (get_itbl(bqe)->type) {
888 checkClosure((StgClosure *)bqe);
890 end = (bqe==END_TSO_QUEUE);
894 barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
895 get_itbl(bqe)->type, closure, info->type);
905 This routine checks the sanity of the LAGA and GALA tables. They are
906 implemented as lists through one hash table, LAtoGALAtable, because entries
907 in both tables have the same structure:
908 - the LAGA table maps local addresses to global addresses; it starts
909 with liveIndirections
910 - the GALA table maps global addresses to local addresses; it starts
917 /* hidden in parallel/Global.c; only accessed for testing here */
918 extern GALA *liveIndirections;
919 extern GALA *liveRemoteGAs;
920 extern HashTable *LAtoGALAtable;
923 checkLAGAtable(rtsBool check_closures)
926 nat n=0, m=0; // debugging
928 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
930 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
931 ASSERT(!gala->preferred || gala == gala0);
932 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
933 ASSERT(gala->next!=gala); // detect direct loops
934 if ( check_closures ) {
935 checkClosure((StgClosure *)gala->la);
939 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
941 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
942 ASSERT(!gala->preferred || gala == gala0);
943 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
944 ASSERT(gala->next!=gala); // detect direct loops
946 if ( check_closures ) {
947 checkClosure((StgClosure *)gala->la);