1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2001
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49 for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 if ((bitmap & 1) == 0) {
51 checkClosureShallow((StgClosure *)payload[i]);
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63 for (bmp=0; i < size; bmp++) {
64 StgWord bitmap = large_bitmap->bitmap[bmp];
66 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 if ((bitmap & 1) == 0) {
68 checkClosureShallow((StgClosure *)payload[i]);
75 * check that it looks like a valid closure - without checking its payload
76 * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
83 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
85 /* Is it a static closure? */
86 if (!HEAP_ALLOCED(p)) {
87 ASSERT(closure_STATIC(p));
89 ASSERT(!closure_STATIC(p));
93 // check an individual stack object
// Sanity-check one stack frame (activation record) at c and return its
// size in words, so the caller can advance to the next frame.
// NOTE(review): this dump is missing many interior lines of the function
// (locals, several case labels, braces); the surviving lines are kept
// byte-identical below.  Reconstruct from the pristine source before use.
95 checkStackFrame( StgPtr c )
98   const StgRetInfoTable* info;
100   info = get_ret_itbl((StgClosure *)c);
102   /* All activation records have 'bitmap' style layout info. */
103   switch (info->i.type) {
104   case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
// The RET_DYN liveness mask describes which of the saved regs/words
// are pointers; pointer words get a shallow closure check.
113     p = (P_)(r->payload);
114     checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
115     p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
117     // skip over the non-pointers
118     p += RET_DYN_NONPTRS(dyn);
120     // follow the ptr words
121     for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
122       checkClosureShallow((StgClosure *)*p);
// Frame size = fixed header + bitmap + saved regs + dynamic words.
126     return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
127       RET_DYN_NONPTR_REGS_SIZE +
128       RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
132     ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
// STM frames fall through to the generic small-bitmap handling.
133   case ATOMICALLY_FRAME:
134   case CATCH_RETRY_FRAME:
135   case CATCH_STM_FRAME:
137     // small bitmap cases (<= 32 entries)
141     size = BITMAP_SIZE(info->i.layout.bitmap);
142     checkSmallBitmap((StgPtr)c + 1,
143                      BITMAP_BITS(info->i.layout.bitmap), size);
// RET_BCO: the bitmap lives in the BCO pointed to by the word after
// the return address.
149     bco = (StgBCO *)*(c+1);
150     size = BCO_BITMAP_SIZE(bco);
151     checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
155   case RET_BIG: // large bitmap (> 32 entries)
157     size = GET_LARGE_BITMAP(&info->i)->size;
158     checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
// RET_FUN: layout is determined by the function closure's info table.
163     StgFunInfoTable *fun_info;
166     ret_fun = (StgRetFun *)c;
167     fun_info = get_fun_itbl(ret_fun->fun);
168     size = ret_fun->size;
169     switch (fun_info->f.fun_type) {
171       checkSmallBitmap((StgPtr)ret_fun->payload,
172                        BITMAP_BITS(fun_info->f.b.bitmap), size);
175       checkLargeBitmap((StgPtr)ret_fun->payload,
176                        GET_FUN_LARGE_BITMAP(fun_info), size);
// Default: standard argument pattern; bitmap comes from the static
// stg_arg_bitmaps table indexed by fun_type.
179       checkSmallBitmap((StgPtr)ret_fun->payload,
180                        BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
184     return sizeofW(StgRetFun) + size;
188     barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
192 // check sections of stack between update frames
194 checkStackChunk( StgPtr sp, StgPtr stack_end )
199 while (p < stack_end) {
200 p += checkStackFrame( p );
202 // ASSERT( p == stack_end ); -- HWL
// Sanity-check the n_args argument words of a PAP/AP payload, using the
// function closure's info table to decide which words are pointers.
// NOTE(review): this dump is missing the function's braces, several
// ARG_* case labels, and the closing lines; surviving lines are kept
// byte-identical.  Reconstruct from the pristine source before use.
206 checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
209   StgFunInfoTable *fun_info;
211   ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
212   fun_info = get_fun_itbl(fun);
214   p = (StgClosure *)payload;
215   switch (fun_info->f.fun_type) {
// Function-specific small bitmap:
217     checkSmallBitmap( (StgPtr)payload,
218                       BITMAP_BITS(fun_info->f.b.bitmap), n_args );
// Function-specific large bitmap:
221     checkLargeBitmap( (StgPtr)payload,
222                       GET_FUN_LARGE_BITMAP(fun_info),
// (second large-bitmap arm — presumably the ARG_BCO case; confirm)
226     checkLargeBitmap( (StgPtr)payload,
// Default: standard argument pattern from the static bitmap table.
231     checkSmallBitmap( (StgPtr)payload,
232                       BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
// The central sanity check: verify one heap closure of any type and
// return its size in words so callers can walk a heap linearly.
// Dispatches on the closure's info-table type; each arm asserts that
// every pointer field looks like a valid closure pointer.
// NOTE(review): this dump is missing many interior lines (locals,
// dozens of case labels, braces); surviving lines are byte-identical.
// Reconstruct from the pristine source before use.
240 checkClosure( StgClosure* p )
242   const StgInfoTable *info;
244   ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
246   /* Is it a static closure (i.e. in the data segment)? */
247   if (!HEAP_ALLOCED(p)) {
248     ASSERT(closure_STATIC(p));
250     ASSERT(!closure_STATIC(p));
254   switch (info->type) {
// MVar: head/tail of the blocked-TSO queue plus the stored value.
258       StgMVar *mvar = (StgMVar *)p;
259       ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
260       ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
261       ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
// PAR builds check the blocking queue with the BQE variant of checkBQ;
// other builds use the TSO variant (see the two checkBQ definitions).
264       checkBQ((StgBlockingQueueElement *)mvar->head, p);
266       checkBQ(mvar->head, p);
269       return sizeofW(StgMVar);
// Thunks: check each pointer word of the payload.
280 	for (i = 0; i < info->layout.payload.ptrs; i++) {
281 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
// A thunk must be large enough to be updated in place later.
283 	return stg_max(thunk_sizeW_fromITBL(info), sizeofW(StgHeader)+MIN_UPD_SIZE);
// Generic pointers-first closures (constructors, functions, etc.):
300     case IND_OLDGEN_PERM:
303     case SE_CAF_BLACKHOLE:
310     case CONSTR_CHARLIKE:
312     case CONSTR_NOCAF_STATIC:
317 	for (i = 0; i < info->layout.payload.ptrs; i++) {
318 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
320 	return sizeW_fromITBL(info);
// BCO: all four component arrays must look like closures.
324 	StgBCO *bco = (StgBCO *)p;
325 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
326 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
327 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
328 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
329 	return bco_sizeW(bco);
332     case IND_STATIC: /* (1, 0) closure */
333       ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
334       return sizeW_fromITBL(info);
337       /* deal with these specially - the info table isn't
338        * representative of the actual layout.
// Weak pointer: key/value/finalizer/link are all pointer fields.
340       { StgWeak *w = (StgWeak *)p;
341 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
342 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
343 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
345 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
347 	return sizeW_fromITBL(info);
351       ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
352       return THUNK_SELECTOR_sizeW();
356 	/* we don't expect to see any of these after GC
357 	 * but they might appear during execution
359 	StgInd *ind = (StgInd *)p;
360 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
361 	return sizeofW(StgHeader) + MIN_UPD_SIZE;
// Stack frames must never appear in the heap proper.
373     case ATOMICALLY_FRAME:
374     case CATCH_RETRY_FRAME:
375     case CATCH_STM_FRAME:
376 	    barf("checkClosure: stack frame");
// AP/PAP: delegate the argument words to checkPAP.
380 	StgAP* ap = (StgAP *)p;
381 	checkPAP (ap->fun, ap->payload, ap->n_args);
387 	StgPAP* pap = (StgPAP *)p;
388 	checkPAP (pap->fun, pap->payload, pap->n_args);
389 	return pap_sizeW(pap);
// AP_STACK carries a frozen stack chunk in its payload.
394 	StgAP_STACK *ap = (StgAP_STACK *)p;
395 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
396 	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
397 	return ap_stack_sizeW(ap);
// ARR_WORDS has no pointers — only its size matters.
401 	return arr_words_sizeW((StgArrWords *)p);
404     case MUT_ARR_PTRS_FROZEN:
405     case MUT_ARR_PTRS_FROZEN0:
407 	StgMutArrPtrs* a = (StgMutArrPtrs *)p;
409 	for (i = 0; i < a->ptrs; i++) {
410 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
412 	return mut_arr_ptrs_sizeW(a);
416       checkTSO((StgTSO *)p);
417       return tso_sizeW((StgTSO *)p);
// The following closure types exist only in PAR builds (GUM):
422       ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
423       ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
424       return sizeofW(StgBlockedFetch);  // see size used in evacuate()
428       return sizeofW(StgFetchMe);
432       ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
433       return sizeofW(StgFetchMe);  // see size used in evacuate()
436       checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
437       return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
440       /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
441       ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
442       if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
443 	checkBQ(((StgRBH *)p)->blocking_queue, p);
444       ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
445       return BLACKHOLE_sizeW();   // see size used in evacuate()
446       // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
// STM closure types:
450     case TVAR_WAIT_QUEUE:
452         StgTVarWaitQueue *wq = (StgTVarWaitQueue *)p;
453         ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
454         ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
455         return sizeofW(StgTVarWaitQueue);
460         StgTVar *tv = (StgTVar *)p;
461         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
462         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_wait_queue_entry));
463         return sizeofW(StgTVar);
469         StgTRecChunk *tc = (StgTRecChunk *)p;
470         ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
471         for (i = 0; i < tc -> next_entry_idx; i ++) {
472           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
473           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
474           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
476         return sizeofW(StgTRecChunk);
481         StgTRecHeader *trec = (StgTRecHeader *)p;
482         ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
483         ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
484         return sizeofW(StgTRecHeader);
// EVACUATED closures should never survive to a sanity pass.
489       barf("checkClosure: found EVACUATED closure %d",
492       barf("checkClosure (closure type %d)", info->type);
498 #define PVM_PE_MASK 0xfffc0000
499 #define MAX_PVM_PES MAX_PES
500 #define MAX_PVM_TIDS MAX_PES
501 #define MAX_SLOTS 100000
504 looks_like_tid(StgInt tid)
506 StgInt hi = (tid & PVM_PE_MASK) >> 18;
507 StgInt lo = (tid & ~PVM_PE_MASK);
508 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
513 looks_like_slot(StgInt slot)
515 /* if tid is known better use looks_like_ga!! */
516 rtsBool ok = slot<MAX_SLOTS;
517 // This refers only to the no. of slots on the current PE
518 // rtsBool ok = slot<=highest_slot();
523 looks_like_ga(globalAddr *ga)
525 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
526 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
527 (ga)->payload.gc.slot<=highest_slot() :
528 (ga)->payload.gc.slot<MAX_SLOTS;
529 rtsBool ok = is_tid && is_slot;
536 /* -----------------------------------------------------------------------------
539 After garbage collection, the live heap is in a state where we can
540 run through and check that all the pointers point to the right
541 place. This function starts at a given position and sanity-checks
542 all the objects in the remainder of the chain.
543 -------------------------------------------------------------------------- */
546 checkHeap(bdescr *bd)
550 for (; bd != NULL; bd = bd->link) {
552 while (p < bd->free) {
553 nat size = checkClosure((StgClosure *)p);
554 /* This is the smallest size of closure that can live in the heap */
555 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
559 while (p < bd->free &&
560 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
567 Check heap between start and end. Used after unpacking graphs.
// PAR-build variant of checkHeapChunk: check the heap between start and
// end after unpacking a graph received from another PE.  FETCH_ME
// closures created and commoned-up during unpacking are treated
// specially (they carry the 0x0000eeee garbage marker and must not be
// in the LAGA table).
// NOTE(review): this dump is missing the locals, the else-branch tail
// and the closing braces; surviving lines are kept byte-identical.
// Reconstruct from the pristine source before use.
570 checkHeapChunk(StgPtr start, StgPtr end)
572   extern globalAddr *LAGAlookup(StgClosure *addr);
576   for (p=start; p<end; p+=size) {
577     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
578     if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
579 	*(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
580       /* if it's a FM created during unpack and commoned up, it's not global */
581       ASSERT(LAGAlookup((StgClosure*)p)==NULL);
582       size = sizeofW(StgFetchMe);
583     } else if (get_itbl((StgClosure*)p)->type == IND) {
584       *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
// Ordinary closure: delegate to the generic checker.
587       size = checkClosure((StgClosure *)p);
588       /* This is the smallest size of closure that can live in the heap. */
589       ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
595 checkHeapChunk(StgPtr start, StgPtr end)
600 for (p=start; p<end; p+=size) {
601 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
602 size = checkClosure((StgClosure *)p);
603 /* This is the smallest size of closure that can live in the heap. */
604 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
610 checkChain(bdescr *bd)
613 checkClosure((StgClosure *)bd->start);
// Sanity-check one TSO (thread state object): its stack bounds, the
// closure it is blocked on (which must agree with why_blocked), and
// finally its whole stack chunk.
// NOTE(review): this dump is missing the braces, several case labels
// and #ifdef arms; surviving lines are kept byte-identical.
// Reconstruct from the pristine source before use.
619 checkTSO(StgTSO *tso)
622   StgPtr stack = tso->stack;
623   StgOffset stack_size = tso->stack_size;
624   StgPtr stack_end = stack + stack_size;
// A relocated TSO forwards to its new copy; dead threads are skipped
// entirely since the GC does not follow their pointers.
626   if (tso->what_next == ThreadRelocated) {
631   if (tso->what_next == ThreadKilled) {
632     /* The garbage collector doesn't bother following any pointers
633      * from dead threads, so don't check sanity here.
638   ASSERT(stack <= sp && sp < stack_end);
// PAR builds stamp each TSO with a magic number; verify it.
641   ASSERT(tso->par.magic==TSO_MAGIC);
643   switch (tso->why_blocked) {
// Blocked on a global address (GUM): block_info must be a FETCH_ME_BQ.
645     checkClosureShallow(tso->block_info.closure);
646     ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
647 	   get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
649   case BlockedOnGA_NoSend:
650     checkClosureShallow(tso->block_info.closure);
651     ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
653   case BlockedOnBlackHole:
654     checkClosureShallow(tso->block_info.closure);
655     ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
656 	   get_itbl(tso->block_info.closure)->type==RBH);
661 #if defined(mingw32_HOST_OS)
662   case BlockedOnDoProc:
664     /* isOnBQ(blocked_queue) */
666   case BlockedOnException:
667     /* isOnSomeBQ(tso) */
668     ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
671     ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
674     ASSERT(tso->block_info.closure == END_TSO_QUEUE);
678       Could check other values of why_blocked but I am more
679       lazy than paranoid (bad combination) -- HWL
// most -> must (typo preserved in the original comment below)
683   /* if the link field is non-nil it most point to one of these
684      three closure types */
685   ASSERT(tso->link == END_TSO_QUEUE ||
686 	 get_itbl(tso->link)->type == TSO ||
687 	 get_itbl(tso->link)->type == BLOCKED_FETCH ||
688 	 get_itbl(tso->link)->type == CONSTR);
691   checkStackChunk(sp, stack_end);
// GranSim-only: sanity-check every TSO on every processor's run queue,
// reporting progress via debugBelch.
// NOTE(review): this dump is missing the locals, the checkTSO call in
// the loop body, and braces; surviving lines are kept byte-identical.
// Reconstruct from the pristine source before use.
696 checkTSOsSanity(void) {
700   debugBelch("Checking sanity of all runnable TSOs:");
702   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
703     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
704       debugBelch("TSO %p on PE %d ...", tso, i);
711   debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
// GranSim-only: check the doubly-consistent structure of one PE's run
// queue — head/tail are never NULL (END_TSO_QUEUE is the nil value),
// head and tail are nil together, and each node's link matches the
// traversal.  Optionally sanity-checks each TSO too (check_TSO_too).
// NOTE(review): this dump is missing the locals, the for-loop's
// condition line, the check_TSO_too body and braces; surviving lines
// are kept byte-identical.  Reconstruct from the pristine source.
718 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
722   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
723   ASSERT(run_queue_hds[proc]!=NULL);
724   ASSERT(run_queue_tls[proc]!=NULL);
725   /* if either head or tail is NIL then the other one must be NIL, too */
726   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
727   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
728   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
730        prev=tso, tso=tso->link) {
731     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
732 	   (prev==END_TSO_QUEUE || prev->link==tso));
// After the walk, the last node seen must be the recorded tail.
736   ASSERT(prev==run_queue_tls[proc]);
740 checkThreadQsSanity (rtsBool check_TSO_too)
744 for (p=0; p<RtsFlags.GranFlags.proc; p++)
745 checkThreadQSanity(p, check_TSO_too);
750 Check that all TSOs have been evacuated.
751 Optionally also check the sanity of the TSOs.
754 checkGlobalTSOList (rtsBool checkTSOs)
756 extern StgTSO *all_threads;
758 for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
759 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
760 ASSERT(get_itbl(tso)->type == TSO);
766 /* -----------------------------------------------------------------------------
767 Check mutable list sanity.
768 -------------------------------------------------------------------------- */
771 checkMutableList( bdescr *mut_bd, nat gen )
777 for (bd = mut_bd; bd != NULL; bd = bd->link) {
778 for (q = bd->start; q < bd->free; q++) {
779 p = (StgClosure *)*q;
780 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
786 Check the static objects list.
// Walk the static-objects list (threaded through the closures' static
// link fields), checking each entry and following the appropriate
// link for its closure type until END_OF_STATIC_LIST.
// NOTE(review): this dump is missing the info-table lookup, several
// case labels (presumably THUNK_STATIC / FUN_STATIC / CONSTR_STATIC)
// and braces; surviving lines are kept byte-identical.  Also note the
// pre-existing "checkStaticObjetcs" typo inside barf's message string
// (runtime string — left untouched).  Reconstruct from the pristine
// source before use.
789 checkStaticObjects ( StgClosure* static_objects )
791   StgClosure *p = static_objects;
794   while (p != END_OF_STATIC_LIST) {
797     switch (info->type) {
// IND_STATIC: the indirectee itself must be a sane closure.
800 	StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
802 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
803 	ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
804 	p = *IND_STATIC_LINK((StgClosure *)p);
809       p = *THUNK_STATIC_LINK((StgClosure *)p);
813       p = *FUN_STATIC_LINK((StgClosure *)p);
817       p = *STATIC_LINK(info,(StgClosure *)p);
821       barf("checkStaticObjetcs: strange closure %p (%s)",
828 Check the sanity of a blocking queue starting at bqe with closure being
829 the closure holding the blocking queue.
830 Note that in GUM we can have several different closure types in a
// PAR-build variant: check a blocking queue hanging off 'closure'
// (which must be an MVAR, FETCH_ME_BQ or RBH).  In GUM a queue may
// contain several element types (TSOs, BLOCKED_FETCHes, ...), hence
// the dispatch on each element's closure type.
// NOTE(review): this dump is missing the traversal loop, the case
// labels and braces; surviving lines are kept byte-identical.
// Reconstruct from the pristine source before use.
835 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
837   rtsBool end = rtsFalse;
838   StgInfoTable *info = get_itbl(closure);
840   ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
843     switch (get_itbl(bqe)->type) {
846       checkClosure((StgClosure *)bqe);
// END_BQ_QUEUE terminates the queue.
848       end = (bqe==END_BQ_QUEUE);
852       checkClosure((StgClosure *)bqe);
857       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
858 	   get_itbl(bqe)->type, closure, info_type(closure));
// Non-PAR variant: check a blocking queue of TSOs hanging off an MVAR
// — the only closure type that carries a blocking queue here.
// NOTE(review): this dump is missing the traversal loop, the case
// labels and braces; surviving lines are kept byte-identical.
// Reconstruct from the pristine source before use.
864 checkBQ (StgTSO *bqe, StgClosure *closure)
866   rtsBool end = rtsFalse;
867   StgInfoTable *info = get_itbl(closure);
869   ASSERT(info->type == MVAR);
872     switch (get_itbl(bqe)->type) {
875       checkClosure((StgClosure *)bqe);
// END_TSO_QUEUE/END_BQ_QUEUE terminates the queue.
877       end = (bqe==END_BQ_QUEUE);
881       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
882 	   get_itbl(bqe)->type, closure, info_type(closure));
891 This routine checks the sanity of the LAGA and GALA tables. They are
892 implemented as lists through one hash table, LAtoGALAtable, because entries
893 in both tables have the same structure:
894 - the LAGA table maps local addresses to global addresses; it starts
895 with liveIndirections
896 - the GALA table maps global addresses to local addresses; it starts
903 /* hidden in parallel/Global.c; only accessed for testing here */
904 extern GALA *liveIndirections;
905 extern GALA *liveRemoteGAs;
906 extern HashTable *LAtoGALAtable;
909 checkLAGAtable(rtsBool check_closures)
912 nat n=0, m=0; // debugging
914 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
916 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
917 ASSERT(!gala->preferred || gala == gala0);
918 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
919 ASSERT(gala->next!=gala); // detect direct loops
920 if ( check_closures ) {
921 checkClosure((StgClosure *)gala->la);
925 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
927 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
928 ASSERT(!gala->preferred || gala == gala0);
929 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
930 ASSERT(gala->next!=gala); // detect direct loops
932 if ( check_closures ) {
933 checkClosure((StgClosure *)gala->la);