1 /* -----------------------------------------------------------------------------
2 * $Id: Sanity.c,v 1.29 2001/07/24 05:04:59 ken Exp $
4 * (c) The GHC Team, 1998-2001
6 * Sanity checking code for the heap and stack.
8 * Used when debugging: check that everything is reasonable.
10 * - All things that are supposed to be pointers look like pointers.
12 * - Objects in text space are marked as static closures, those
13 * in the heap are dynamic.
15 * ---------------------------------------------------------------------------*/
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
28 #include "StoragePriv.h" // for END_OF_STATIC_LIST
30 /* -----------------------------------------------------------------------------
31 A valid pointer is either:
33 - a pointer to a static closure, or
34 - a pointer into the heap, and
35 - the block is not free
36 - either: - the object is large, or
37 - it is not after the free pointer in the block
38 - the contents of the pointer is not 0xaaaaaaaa
40 -------------------------------------------------------------------------- */
/* Heuristic pointer-validity check (see the comment block above).
 * NOTE(review): part of the macro body is elided in this view; the
 * visible clauses accept static closures, and reject heap words that
 * still hold the 0xaaaaaaaa debug fill pattern (freshly-freed memory). */
42 #define LOOKS_LIKE_PTR(r) \
43 ({ bdescr *bd = Bdescr((P_)r); \
44 LOOKS_LIKE_STATIC_CLOSURE(r) || \
47 && ((StgWord)(*(StgPtr)r)!=0xaaaaaaaa) \
51 // NOT always true, but can be useful for spotting bugs: (generally
52 // true after GC, but not for things just allocated using allocate(),
54 // (bd->flags & BF_LARGE || bd->free > (P_)r)
56 /* -----------------------------------------------------------------------------
58 -------------------------------------------------------------------------- */
/* Forward declarations.  Each check* routine returns the size in words
 * (StgOffset) of the object it checked, except the shallow check which
 * only validates the header. */
60 static StgOffset checkStackClosure ( StgClosure* c );
61 static StgOffset checkStackObject ( StgPtr sp );
62 static StgOffset checkSmallBitmap ( StgPtr payload, StgWord bitmap );
63 static StgOffset checkLargeBitmap ( StgPtr payload, StgLargeBitmap* );
64 static void checkClosureShallow ( StgClosure* p );
66 /* -----------------------------------------------------------------------------
68 -------------------------------------------------------------------------- */
/* Walk a one-word liveness bitmap over 'payload': a 0 bit marks a
 * pointer slot, which is sanity-checked via checkClosure.
 * NOTE(review): the declaration of 'i' and the return statement are
 * elided in this view. */
71 checkSmallBitmap( StgPtr payload, StgWord bitmap )
76 for(; bitmap != 0; ++i, bitmap >>= 1 ) {
77 if ((bitmap & 1) == 0) {
78 checkClosure((StgClosure *)payload[i]);
/* As checkSmallBitmap, but for bitmaps wider than one word: iterate
 * over every word of the large bitmap, checking each 0 (pointer) bit. */
85 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap )
91 for (bmp=0; bmp<large_bitmap->size; bmp++) {
92 StgWord bitmap = large_bitmap->bitmap[bmp];
93 for(; bitmap != 0; ++i, bitmap >>= 1 ) {
94 if ((bitmap & 1) == 0) {
95 checkClosure((StgClosure *)payload[i]);
/* Sanity-check a single activation record on the stack and return its
 * size in words.  Dispatches on the frame's info-table type.
 * NOTE(review): several case labels and returns are elided in this
 * view; the final fall-through treats an unrecognised word as a
 * closure pointer and checks it shallowly rather than barf'ing. */
103 checkStackClosure( StgClosure* c )
105 const StgInfoTable* info = get_itbl(c);
107 /* All activation records have 'bitmap' style layout info. */
108 switch (info->type) {
109 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
111 StgRetDyn* r = (StgRetDyn *)c;
112 return sizeofW(StgRetDyn) +
113 checkSmallBitmap(r->payload,r->liveness);
115 case RET_BCO: /* small bitmap (<= 32 entries) */
118 return 1 + checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
121 ASSERT(LOOKS_LIKE_PTR(((StgUpdateFrame*)c)->updatee));
124 /* check that the link field points to another stack frame */
125 ASSERT(get_itbl(((StgFrame*)c)->link)->type == UPDATE_FRAME ||
126 get_itbl(((StgFrame*)c)->link)->type == CATCH_FRAME ||
127 get_itbl(((StgFrame*)c)->link)->type == STOP_FRAME ||
128 get_itbl(((StgFrame*)c)->link)->type == SEQ_FRAME);
136 checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
137 case RET_BIG: /* large bitmap (> 32 entries) */
139 return 1 + checkLargeBitmap((StgPtr)c + 1,info->layout.large_bitmap);
141 case FUN_STATIC: /* probably a slow-entry point return address: */
142 #if 0 && defined(GRAN)
148 /* if none of the above, maybe it's a closure which looks a
149 * little like an infotable
151 checkClosureShallow(*(StgClosure **)c);
153 /* barf("checkStackClosure: weird activation record found on stack (%p).",c); */
158 * check that it looks like a valid closure - without checking its payload
159 * used to avoid recursion between checking PAPs and checking stack
/* Validates only the closure header: a plausible info pointer, and
 * agreement between "looks static" and the info table's static flag. */
164 checkClosureShallow( StgClosure* p )
167 ASSERT(LOOKS_LIKE_GHC_INFO(GET_INFO(p))
168 || IS_HUGS_CONSTR_INFO(GET_INFO(p)));
170 /* Is it a static closure (i.e. in the data segment)? */
171 if (LOOKS_LIKE_STATIC(p)) {
172 ASSERT(closure_STATIC(p));
174 ASSERT(!closure_STATIC(p));
175 ASSERT(LOOKS_LIKE_PTR(p));
179 // check an individual stack object
/* Classify the word at 'sp': a tagged argument block, an activation
 * record (info pointer), or an untagged closure pointer.  Returns the
 * number of words the object occupies on the stack.
 * NOTE(review): the return for the final branch is elided in this view. */
181 checkStackObject( StgPtr sp )
183 if (IS_ARG_TAG(*sp)) {
184 // Tagged words might be "stubbed" pointers, so there's no
185 // point checking to see whether they look like pointers or
186 // not (some of them will).
187 return ARG_SIZE(*sp) + 1;
188 } else if (LOOKS_LIKE_GHC_INFO(*(StgPtr *)sp)) {
189 return checkStackClosure((StgClosure *)sp);
190 } else { // must be an untagged closure pointer in the stack
191 checkClosureShallow(*(StgClosure **)sp);
196 // check sections of stack between update frames
/* Walk [sp, stack_end), checking each stack object and advancing by
 * its reported size. */
198 checkStackChunk( StgPtr sp, StgPtr stack_end )
203 while (p < stack_end) {
204 p += checkStackObject( p );
206 // ASSERT( p == stack_end ); -- HWL
/* The main per-closure sanity check: validate the header, then
 * dispatch on closure type to check type-specific pointer fields.
 * Returns the closure's size in words (used by the heap walkers).
 * NOTE(review): many case labels and closing braces are elided in this
 * view, so several statement groups below belong to cases whose labels
 * are not visible here. */
210 checkClosure( StgClosure* p )
212 const StgInfoTable *info;
214 ASSERT(LOOKS_LIKE_GHC_INFO(p->header.info));
216 /* Is it a static closure (i.e. in the data segment)? */
217 if (LOOKS_LIKE_STATIC(p)) {
218 ASSERT(closure_STATIC(p));
220 ASSERT(!closure_STATIC(p));
221 ASSERT(LOOKS_LIKE_PTR(p));
225 switch (info->type) {
229 StgMVar *mvar = (StgMVar *)p;
230 ASSERT(LOOKS_LIKE_PTR(mvar->head));
231 ASSERT(LOOKS_LIKE_PTR(mvar->tail));
232 ASSERT(LOOKS_LIKE_PTR(mvar->value));
235 checkBQ((StgBlockingQueueElement *)mvar->head, p);
237 checkBQ(mvar->head, p);
240 return sizeofW(StgMVar);
251 for (i = 0; i < info->layout.payload.ptrs; i++) {
252 ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
254 return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
258 checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
259 /* fall through to basic ptr check */
274 case IND_OLDGEN_PERM:
277 case SE_CAF_BLACKHOLE:
287 case CONSTR_CHARLIKE:
289 case CONSTR_NOCAF_STATIC:
/* generic case: check every pointer field named by the info table */
294 for (i = 0; i < info->layout.payload.ptrs; i++) {
295 ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
297 return sizeW_fromITBL(info);
300 case IND_STATIC: /* (1, 0) closure */
301 ASSERT(LOOKS_LIKE_PTR(((StgIndStatic*)p)->indirectee));
302 return sizeW_fromITBL(info);
305 /* deal with these specially - the info table isn't
306 * representative of the actual layout.
308 { StgWeak *w = (StgWeak *)p;
309 ASSERT(LOOKS_LIKE_PTR(w->key));
310 ASSERT(LOOKS_LIKE_PTR(w->value));
311 ASSERT(LOOKS_LIKE_PTR(w->finalizer));
313 ASSERT(LOOKS_LIKE_PTR(w->link));
315 return sizeW_fromITBL(info);
319 ASSERT(LOOKS_LIKE_PTR(((StgSelector *)p)->selectee));
320 return sizeofW(StgHeader) + MIN_UPD_SIZE;
324 /* we don't expect to see any of these after GC
325 * but they might appear during execution
328 StgInd *ind = (StgInd *)p;
329 ASSERT(LOOKS_LIKE_PTR(ind->indirectee));
330 q = (P_)p + sizeofW(StgInd);
331 while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
345 barf("checkClosure: stack frame");
347 case AP_UPD: /* we can treat this as being the same as a PAP */
350 StgPAP *pap = (StgPAP *)p;
351 ASSERT(LOOKS_LIKE_PTR(pap->fun));
/* a PAP's payload is a saved stack chunk; check it as such */
352 checkStackChunk((StgPtr)pap->payload,
353 (StgPtr)pap->payload + pap->n_args
355 return pap_sizeW(pap);
359 return arr_words_sizeW((StgArrWords *)p);
362 case MUT_ARR_PTRS_FROZEN:
364 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
366 for (i = 0; i < a->ptrs; i++) {
367 ASSERT(LOOKS_LIKE_PTR(a->payload[i]));
369 return mut_arr_ptrs_sizeW(a);
373 checkTSO((StgTSO *)p);
374 return tso_sizeW((StgTSO *)p);
379 ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
380 ASSERT(LOOKS_LIKE_PTR((((StgBlockedFetch *)p)->node)));
381 return sizeofW(StgBlockedFetch); // see size used in evacuate()
385 return sizeofW(StgFetchMe);
389 ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
390 return sizeofW(StgFetchMe); // see size used in evacuate()
393 checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
394 return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
397 /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
398 ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
399 if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
400 checkBQ(((StgRBH *)p)->blocking_queue, p);
401 ASSERT(LOOKS_LIKE_GHC_INFO(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
402 return BLACKHOLE_sizeW(); // see size used in evacuate()
403 // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
408 barf("checkClosure: found EVACUATED closure %d",
411 barf("checkClosure (closure type %d)", info->type);
417 #define PVM_PE_MASK 0xfffc0000
418 #define MAX_PVM_PES MAX_PES
419 #define MAX_PVM_TIDS MAX_PES
420 #define MAX_SLOTS 100000
/* Heuristic: does 'tid' look like a valid PVM task id?  The PE part
 * (top bits, extracted with PVM_PE_MASK and a shift of 18) must be
 * non-zero, and both parts must be below MAX_PVM_TIDS. */
423 looks_like_tid(StgInt tid)
425 StgInt hi = (tid & PVM_PE_MASK) >> 18;
426 StgInt lo = (tid & ~PVM_PE_MASK);
427 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Weak heuristic: a GA slot number is plausible if below MAX_SLOTS.
 * Prefer looks_like_ga when the tid is also known. */
432 looks_like_slot(StgInt slot)
434 /* if tid is known better use looks_like_ga!! */
435 rtsBool ok = slot<MAX_SLOTS;
436 // This refers only to the no. of slots on the current PE
437 // rtsBool ok = slot<=highest_slot();
/* Heuristic validity check for a global address: the tid must look
 * valid, and the slot must be within the local PE's highest slot when
 * the GA is local (gtid==mytid), or below MAX_SLOTS otherwise. */
442 looks_like_ga(globalAddr *ga)
444 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
445 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
446 (ga)->payload.gc.slot<=highest_slot() :
447 (ga)->payload.gc.slot<MAX_SLOTS;
448 rtsBool ok = is_tid && is_slot;
455 /* -----------------------------------------------------------------------------
458 After garbage collection, the live heap is in a state where we can
459 run through and check that all the pointers point to the right
460 place. This function starts at a given position and sanity-checks
461 all the objects in the remainder of the chain.
462 -------------------------------------------------------------------------- */
/* Walk every block in the chain, checking each closure up to bd->free.
 * The trailing loop skips slop: words that are too small to be info
 * pointers or don't look like GHC info tables. */
465 checkHeap(bdescr *bd)
469 for (; bd != NULL; bd = bd->link) {
471 while (p < bd->free) {
472 nat size = checkClosure((StgClosure *)p);
473 /* This is the smallest size of closure that can live in the heap */
474 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
478 while (p < bd->free &&
479 (*p < 0x1000 || !LOOKS_LIKE_GHC_INFO((void*)*p))) { p++; }
486 Check heap between start and end. Used after unpacking graphs.
/* Parallel-RTS variant: tolerates FETCH_ME closures created during
 * graph unpacking (marked with the 0x0000eeee sentinel) and marks IND
 * slop words as garbage before checking. */
489 checkHeapChunk(StgPtr start, StgPtr end)
491 extern globalAddr *LAGAlookup(StgClosure *addr);
495 for (p=start; p<end; p+=size) {
496 ASSERT(LOOKS_LIKE_GHC_INFO((void*)*p));
497 if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
498 *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
499 /* if it's a FM created during unpack and commoned up, it's not global */
500 ASSERT(LAGAlookup((StgClosure*)p)==NULL);
501 size = sizeofW(StgFetchMe);
502 } else if (get_itbl((StgClosure*)p)->type == IND) {
503 *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
506 size = checkClosure((StgClosure *)p);
507 /* This is the smallest size of closure that can live in the heap. */
508 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Sequential variant: check every closure in [start, end). */
514 checkHeapChunk(StgPtr start, StgPtr end)
519 for (p=start; p<end; p+=size) {
520 ASSERT(LOOKS_LIKE_GHC_INFO((void*)*p));
521 size = checkClosure((StgClosure *)p);
522 /* This is the smallest size of closure that can live in the heap. */
523 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Check the first closure of each block in a chain (e.g. large objects,
 * where one object occupies the whole block). */
529 checkChain(bdescr *bd)
532 checkClosure((StgClosure *)bd->start);
537 /* check stack - making sure that update frames are linked correctly */
/* Walk the update-frame chain from 'su' to the stack end, checking the
 * stack chunk before each frame and the linkage between frames. */
539 checkStack(StgPtr sp, StgPtr stack_end, StgUpdateFrame* su )
541 /* check everything down to the first update frame */
542 checkStackChunk( sp, (StgPtr)su );
543 while ( (StgPtr)su < stack_end) {
545 switch (get_itbl(su)->type) {
550 su = ((StgSeqFrame *)su)->link;
553 su = ((StgCatchFrame *)su)->link;
556 /* not quite: ASSERT((StgPtr)su == stack_end); */
559 barf("checkStack: weird record found on update frame list.");
561 checkStackChunk( sp, (StgPtr)su );
563 ASSERT((StgPtr)su == stack_end);
/* Sanity-check a thread state object: stack pointers, blocking state,
 * and the run-queue link field; then check the thread's stack.
 * Relocated and dead threads are skipped (see the early returns).
 * NOTE(review): several case labels and #ifdef lines are elided here. */
568 checkTSO(StgTSO *tso)
571 StgPtr stack = tso->stack;
572 StgUpdateFrame* su = tso->su;
573 StgOffset stack_size = tso->stack_size;
574 StgPtr stack_end = stack + stack_size;
576 if (tso->what_next == ThreadRelocated) {
581 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
582 /* The garbage collector doesn't bother following any pointers
583 * from dead threads, so don't check sanity here.
588 ASSERT(stack <= sp && sp < stack_end);
589 ASSERT(sp <= (StgPtr)su);
592 ASSERT(tso->par.magic==TSO_MAGIC);
594 switch (tso->why_blocked) {
596 checkClosureShallow(tso->block_info.closure);
597 ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
598 get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
600 case BlockedOnGA_NoSend:
601 checkClosureShallow(tso->block_info.closure);
602 ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
604 case BlockedOnBlackHole:
605 checkClosureShallow(tso->block_info.closure);
606 ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
607 get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
608 get_itbl(tso->block_info.closure)->type==RBH);
613 /* isOnBQ(blocked_queue) */
615 case BlockedOnException:
616 /* isOnSomeBQ(tso) */
617 ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
620 ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
624 Could check other values of why_blocked but I am more
625 lazy than paranoid (bad combination) -- HWL
629 /* if the link field is non-nil it must point to one of these
630 three closure types */
631 ASSERT(tso->link == END_TSO_QUEUE ||
632 get_itbl(tso->link)->type == TSO ||
633 get_itbl(tso->link)->type == BLOCKED_FETCH ||
634 get_itbl(tso->link)->type == CONSTR);
637 checkStack(sp, stack_end, su);
/* GranSim: run checkTSO over every TSO on every processor's run queue,
 * reporting progress to stderr. */
642 checkTSOsSanity(void) {
646 belch("Checking sanity of all runnable TSOs:");
648 for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
649 for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
650 fprintf(stderr, "TSO %p on PE %d ...", tso, i);
652 fprintf(stderr, "OK, ");
657 belch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* Check one processor's run queue for structural sanity: head/tail are
 * non-NULL (END_TSO_QUEUE is the nil value), head and tail agree on
 * emptiness, the links form a consistent chain, and the chain ends at
 * the recorded tail. */
664 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
668 /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
669 ASSERT(run_queue_hds[proc]!=NULL);
670 ASSERT(run_queue_tls[proc]!=NULL);
671 /* if either head or tail is NIL then the other one must be NIL, too */
672 ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
673 ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
674 for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
676 prev=tso, tso=tso->link) {
677 ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
678 (prev==END_TSO_QUEUE || prev->link==tso));
682 ASSERT(prev==run_queue_tls[proc]);
/* Check the run queues of all processors. */
686 checkThreadQsSanity (rtsBool check_TSO_too)
690 for (p=0; p<RtsFlags.GranFlags.proc; p++)
691 checkThreadQSanity(p, check_TSO_too);
696 Check that all TSOs have been evacuated.
697 Optionally also check the sanity of the TSOs.
/* Walk the global all_threads list via global_link, checking that each
 * entry still looks like a valid TSO pointer. */
700 checkGlobalTSOList (rtsBool checkTSOs)
702 extern StgTSO *all_threads;
704 for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
705 ASSERT(LOOKS_LIKE_PTR(tso));
706 ASSERT(get_itbl(tso)->type == TSO);
712 /* -----------------------------------------------------------------------------
713 Check mutable list sanity.
714 -------------------------------------------------------------------------- */
/* Every object on a generation's mutable list must be mutable, live in
 * that generation, and have a plausible mut_link. */
717 checkMutableList( StgMutClosure *p, nat gen )
721 for (; p != END_MUT_LIST; p = p->mut_link) {
723 ASSERT(closure_MUTABLE(p));
724 ASSERT(bd->gen_no == gen);
725 ASSERT(LOOKS_LIKE_PTR(p->mut_link));
/* The mutate-once list holds immutable objects (old-generation
 * indirections etc.); verify each entry's generation and link, and
 * barf on closure types that should never appear here.
 * NOTE(review): some case labels of the switch are elided in this view. */
730 checkMutOnceList( StgMutClosure *p, nat gen )
735 for (; p != END_MUT_LIST; p = p->mut_link) {
739 ASSERT(!closure_MUTABLE(p));
740 ASSERT(ip_STATIC(info) || bd->gen_no == gen);
741 ASSERT(LOOKS_LIKE_PTR(p->mut_link));
743 switch (info->type) {
746 case IND_OLDGEN_PERM:
750 barf("checkMutOnceList: strange closure %p (%s)",
751 p, info_type((StgClosure *)p));
756 /* -----------------------------------------------------------------------------
757 Check Blackhole Sanity
759 Test whether an object is already on the update list.
760 It isn't necessarily an rts error if it is - it might be a programming
763 Future versions might be able to test for a blackhole without traversing
764 the update frame list.
766 -------------------------------------------------------------------------- */
/* Traverse the TSO's update-frame chain looking for a frame whose
 * updatee is 'p'. */
768 isBlackhole( StgTSO* tso, StgClosure* p )
770 StgUpdateFrame* su = tso->su;
772 switch (get_itbl(su)->type) {
774 if (su->updatee == p) {
781 su = ((StgSeqFrame *)su)->link;
784 su = ((StgCatchFrame *)su)->link;
789 barf("isBlackhole: weird record found on update frame list.");
795 Check the static objects list.
/* Walk the static-objects list (threaded through per-type link fields),
 * checking IND_STATIC indirectees and advancing via the appropriate
 * link macro for each closure type.
 * NOTE(review): some case labels are elided in this view.
 * Fix: the barf message previously misspelled the function name as
 * "checkStaticObjetcs". */
798 checkStaticObjects ( StgClosure* static_objects )
800 StgClosure *p = static_objects;
803 while (p != END_OF_STATIC_LIST) {
806 switch (info->type) {
809 StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
811 ASSERT(LOOKS_LIKE_PTR(indirectee));
812 ASSERT(LOOKS_LIKE_GHC_INFO(indirectee->header.info));
813 p = IND_STATIC_LINK((StgClosure *)p);
818 p = THUNK_STATIC_LINK((StgClosure *)p);
822 p = FUN_STATIC_LINK((StgClosure *)p);
826 p = STATIC_LINK(info,(StgClosure *)p);
830 barf("checkStaticObjects: strange closure %p (%s)",
837 Check the sanity of a blocking queue starting at bqe with closure being
838 the closure holding the blocking queue.
839 Note that in GUM we can have several different closure types in a
/* GUM/GranSim variant: the holding closure must be one of the four
 * BQ-bearing types; each queue element is fully checked.
 * NOTE(review): case labels of the inner switch are elided here. */
844 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
846 rtsBool end = rtsFalse;
847 StgInfoTable *info = get_itbl(closure);
849 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
850 || info->type == FETCH_ME_BQ || info->type == RBH);
853 switch (get_itbl(bqe)->type) {
856 checkClosure((StgClosure *)bqe);
858 end = (bqe==END_BQ_QUEUE);
862 checkClosure((StgClosure *)bqe);
867 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
868 get_itbl(bqe)->type, closure, info_type(closure));
/* Second checkBQ variant (TSO-only queues): holder must be a
 * BLACKHOLE_BQ or MVAR; queue terminated by END_BQ_QUEUE. */
874 checkBQ (StgTSO *bqe, StgClosure *closure)
876 rtsBool end = rtsFalse;
877 StgInfoTable *info = get_itbl(closure);
879 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
882 switch (get_itbl(bqe)->type) {
885 checkClosure((StgClosure *)bqe);
887 end = (bqe==END_BQ_QUEUE);
891 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
892 get_itbl(bqe)->type, closure, info_type(closure));
/* Third checkBQ variant: as above but the queue is terminated by
 * END_TSO_QUEUE, and the error message omits info_type. */
898 checkBQ (StgTSO *bqe, StgClosure *closure)
900 rtsBool end = rtsFalse;
901 StgInfoTable *info = get_itbl(closure);
903 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
906 switch (get_itbl(bqe)->type) {
908 checkClosure((StgClosure *)bqe);
910 end = (bqe==END_TSO_QUEUE);
914 barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
915 get_itbl(bqe)->type, closure, info->type);
925 This routine checks the sanity of the LAGA and GALA tables. They are
926 implemented as lists through one hash table, LAtoGALAtable, because entries
927 in both tables have the same structure:
928 - the LAGA table maps local addresses to global addresses; it starts
929 with liveIndirections
930 - the GALA table maps global addresses to local addresses; it starts
937 /* hidden in parallel/Global.c; only accessed for testing here */
938 extern GALA *liveIndirections;
939 extern GALA *liveRemoteGAs;
940 extern HashTable *LAtoGALAtable;
/* Check both lists: a preferred entry must be the one registered in the
 * hash table for its local address, the local address must carry a
 * valid info pointer, and the list must not contain a direct loop.
 * Optionally also run a full closure check on each local address. */
943 checkLAGAtable(rtsBool check_closures)
946 nat n=0, m=0; // debugging
948 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
950 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
951 ASSERT(!gala->preferred || gala == gala0);
952 ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
953 ASSERT(gala->next!=gala); // detect direct loops
954 if ( check_closures ) {
955 checkClosure((StgClosure *)gala->la);
959 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
961 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
962 ASSERT(!gala->preferred || gala == gala0);
963 ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
964 ASSERT(gala->next!=gala); // detect direct loops
966 if ( check_closures ) {
967 checkClosure((StgClosure *)gala->la);