1 /* -----------------------------------------------------------------------------
2 * $Id: Sanity.c,v 1.30 2001/08/14 13:40:09 sewardj Exp $
4 * (c) The GHC Team, 1998-2001
6 * Sanity checking code for the heap and stack.
8 * Used when debugging: check that everything is reasonable.
10 * - All things that are supposed to be pointers look like pointers.
12 * - Objects in text space are marked as static closures, those
13 * in the heap are dynamic.
15 * ---------------------------------------------------------------------------*/
17 #include "PosixSource.h"
20 #ifdef DEBUG /* whole file */
24 #include "BlockAlloc.h"
29 #include "StoragePriv.h" // for END_OF_STATIC_LIST
31 /* -----------------------------------------------------------------------------
32 A valid pointer is either:
34 - a pointer to a static closure, or
35 - a pointer into the heap, and
36 - the block is not free
37 - either: - the object is large, or
38 - it is not after the free pointer in the block
39 - the contents of the pointer is not 0xaaaaaaaa
41 -------------------------------------------------------------------------- */
/* LOOKS_LIKE_PTR(r): heuristic validity predicate for a closure pointer.
 * True when r is a static closure, or (per the comment block above) a
 * heap pointer into a non-free block whose first word is not the
 * 0xaaaaaaaa "freed memory" fill pattern.
 * NOTE(review): several continuation lines of this macro (original lines
 * 46-47 and 49-51) are elided in this view — do not edit the macro body
 * without consulting the full source. */
43 #define LOOKS_LIKE_PTR(r) \
44 ({ bdescr *bd = Bdescr((P_)r); \
45 LOOKS_LIKE_STATIC_CLOSURE(r) || \
48 && ((StgWord)(*(StgPtr)r)!=0xaaaaaaaa) \
52 // NOT always true, but can be useful for spotting bugs: (generally
53 // true after GC, but not for things just allocated using allocate(),
55 // (bd->flags & BF_LARGE || bd->free > (P_)r)
57 /* -----------------------------------------------------------------------------
59 -------------------------------------------------------------------------- */
/* Forward declarations for the mutually-recursive sanity walkers below
 * (stack frames contain closures, PAPs contain stack chunks, etc.). */
61 static StgOffset checkStackClosure ( StgClosure* c );
62 static StgOffset checkStackObject ( StgPtr sp );
63 static StgOffset checkSmallBitmap ( StgPtr payload, StgWord bitmap );
64 static StgOffset checkLargeBitmap ( StgPtr payload, StgLargeBitmap* );
65 static void checkClosureShallow ( StgClosure* p );
67 /* -----------------------------------------------------------------------------
69 -------------------------------------------------------------------------- */
/* checkSmallBitmap: scan a payload described by a one-word bitmap.
 * A clear (0) bit marks a pointer word, which is sanity-checked with
 * checkClosure(); set bits are non-pointers and skipped.
 * NOTE(review): the return type, the declaration of i, and the closing
 * braces/return are elided in this view. */
72 checkSmallBitmap( StgPtr payload, StgWord bitmap )
77     for(; bitmap != 0; ++i, bitmap >>= 1 ) {
78 	if ((bitmap & 1) == 0) {
79 	    checkClosure((StgClosure *)payload[i]);
/* checkLargeBitmap: like checkSmallBitmap, but for bitmaps larger than
 * one word — iterates over each word of large_bitmap->bitmap[] and
 * checks every word whose bit is clear (i.e. a pointer).
 * NOTE(review): return type, declarations of i/bmp and closing
 * braces/return are elided in this view. */
86 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap )
92     for (bmp=0; bmp<large_bitmap->size; bmp++) {
93 	StgWord bitmap = large_bitmap->bitmap[bmp];
94 	for(; bitmap != 0; ++i, bitmap >>= 1 ) {
95 	    if ((bitmap & 1) == 0) {
96 		checkClosure((StgClosure *)payload[i]);
/* checkStackClosure: sanity-check a single activation record on the
 * stack and return its size in words.  Dispatches on the frame's info
 * table type: RET_DYN carries its liveness mask in the frame itself;
 * RET_BCO/RET_SMALL use the info table's small bitmap; RET_BIG uses the
 * large bitmap; update/catch/stop/seq frames have their link fields
 * checked.  Anything unrecognised is shallow-checked as a closure.
 * NOTE(review): many case labels and braces are elided in this view —
 * consult the full source before editing. */
104 checkStackClosure( StgClosure* c )
106     const StgInfoTable* info = get_itbl(c);
108     /* All activation records have 'bitmap' style layout info. */
109     switch (info->type) {
110     case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
112 	StgRetDyn* r = (StgRetDyn *)c;
113 	return sizeofW(StgRetDyn) +
114 	    checkSmallBitmap(r->payload,r->liveness);
116     case RET_BCO: /* small bitmap (<= 32 entries) */
119 	return 1 + checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
122 	ASSERT(LOOKS_LIKE_PTR(((StgUpdateFrame*)c)->updatee));
125 	/* check that the link field points to another stack frame */
126 	ASSERT(get_itbl(((StgFrame*)c)->link)->type == UPDATE_FRAME ||
127 	       get_itbl(((StgFrame*)c)->link)->type == CATCH_FRAME ||
128 	       get_itbl(((StgFrame*)c)->link)->type == STOP_FRAME ||
129 	       get_itbl(((StgFrame*)c)->link)->type == SEQ_FRAME);
137 	checkSmallBitmap((StgPtr)c + 1,info->layout.bitmap);
138     case RET_BIG: /* large bitmap (> 32 entries) */
140 	return 1 + checkLargeBitmap((StgPtr)c + 1,info->layout.large_bitmap);
142     case FUN_STATIC: /* probably a slow-entry point return address: */
143 #if 0 && defined(GRAN)
149       /* if none of the above, maybe it's a closure which looks a
150        * little like an infotable
152       checkClosureShallow(*(StgClosure **)c);
154       /* barf("checkStackClosure: weird activation record found on stack (%p).",c); */
159  * check that it looks like a valid closure - without checking its payload
160  * used to avoid recursion between checking PAPs and checking stack
/* checkClosureShallow: assert that p has a plausible info pointer, and
 * that its static/dynamic classification agrees with its address
 * (static closures live in the data segment; dynamic ones must satisfy
 * LOOKS_LIKE_PTR).  Payload is deliberately NOT checked, to break the
 * PAP <-> stack-chunk checking recursion.
 * NOTE(review): the `} else {` between the two closure_STATIC asserts
 * (original line 174) is elided in this view. */
165 checkClosureShallow( StgClosure* p )
168     ASSERT(LOOKS_LIKE_GHC_INFO(GET_INFO(p))
169 	   || IS_HUGS_CONSTR_INFO(GET_INFO(p)));
171     /* Is it a static closure (i.e. in the data segment)? */
172     if (LOOKS_LIKE_STATIC(p)) {
173 	ASSERT(closure_STATIC(p));
175 	ASSERT(!closure_STATIC(p));
176 	ASSERT(LOOKS_LIKE_PTR(p));
180 // check an individual stack object
/* checkStackObject: classify and check the stack word(s) at sp, and
 * return how many words the object occupies.  Three cases: a tagged
 * argument section (size from ARG_SIZE, contents not checkable), an
 * info pointer (a whole activation record — recurse into
 * checkStackClosure), or an untagged closure pointer (shallow check).
 * NOTE(review): the return for the final branch is elided in this
 * view (presumably 1 word). */
182 checkStackObject( StgPtr sp )
184     if (IS_ARG_TAG(*sp)) {
185 	// Tagged words might be "stubbed" pointers, so there's no
186 	// point checking to see whether they look like pointers or
187 	// not (some of them will).
188 	return ARG_SIZE(*sp) + 1;
189     } else if (LOOKS_LIKE_GHC_INFO(*(StgPtr *)sp)) {
190 	return checkStackClosure((StgClosure *)sp);
191     } else { // must be an untagged closure pointer in the stack
192 	checkClosureShallow(*(StgClosure **)sp);
197 // check sections of stack between update frames
/* checkStackChunk: walk the stack from sp up to stack_end, checking
 * each object in turn; checkStackObject returns each object's size so
 * the walk advances object-by-object. */
199 checkStackChunk( StgPtr sp, StgPtr stack_end )
204     while (p < stack_end) {
205 	p += checkStackObject( p );
207     // ASSERT( p == stack_end ); -- HWL
/* checkClosure: the central heap-object sanity check.  Asserts that p
 * has a valid info pointer and a consistent static/dynamic address,
 * then dispatches on closure type: pointer fields are checked with
 * LOOKS_LIKE_PTR, blocking queues with checkBQ, TSOs with checkTSO,
 * PAP payloads with checkStackChunk.  Returns the closure's size in
 * words (used by the heap walkers to advance to the next object).
 * NOTE(review): large numbers of case labels, braces and #ifdef'd PAR/
 * GRAN lines are elided in this view — verify against the full source
 * before making any change here. */
211 checkClosure( StgClosure* p )
213     const StgInfoTable *info;
215     ASSERT(LOOKS_LIKE_GHC_INFO(p->header.info));
217     /* Is it a static closure (i.e. in the data segment)? */
218     if (LOOKS_LIKE_STATIC(p)) {
219 	ASSERT(closure_STATIC(p));
221 	ASSERT(!closure_STATIC(p));
222 	ASSERT(LOOKS_LIKE_PTR(p));
226     switch (info->type) {
230       StgMVar *mvar = (StgMVar *)p;
231       ASSERT(LOOKS_LIKE_PTR(mvar->head));
232       ASSERT(LOOKS_LIKE_PTR(mvar->tail));
233       ASSERT(LOOKS_LIKE_PTR(mvar->value));
236       checkBQ((StgBlockingQueueElement *)mvar->head, p);
238       checkBQ(mvar->head, p);
241       return sizeofW(StgMVar);
252 	for (i = 0; i < info->layout.payload.ptrs; i++) {
253 	  ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
255 	return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
259     checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
260     /* fall through to basic ptr check */
275   case IND_OLDGEN_PERM:
278   case SE_CAF_BLACKHOLE:
288   case CONSTR_CHARLIKE:
290   case CONSTR_NOCAF_STATIC:
295 	for (i = 0; i < info->layout.payload.ptrs; i++) {
296 	  ASSERT(LOOKS_LIKE_PTR(p->payload[i]));
298 	return sizeW_fromITBL(info);
301   case IND_STATIC: /* (1, 0) closure */
302     ASSERT(LOOKS_LIKE_PTR(((StgIndStatic*)p)->indirectee));
303     return sizeW_fromITBL(info);
306     /* deal with these specially - the info table isn't
307      * representative of the actual layout.
309     { StgWeak *w = (StgWeak *)p;
310       ASSERT(LOOKS_LIKE_PTR(w->key));
311       ASSERT(LOOKS_LIKE_PTR(w->value));
312       ASSERT(LOOKS_LIKE_PTR(w->finalizer));
314 	ASSERT(LOOKS_LIKE_PTR(w->link));
316       return sizeW_fromITBL(info);
320     ASSERT(LOOKS_LIKE_PTR(((StgSelector *)p)->selectee));
321     return sizeofW(StgHeader) + MIN_UPD_SIZE;
325     /* we don't expect to see any of these after GC
326      * but they might appear during execution
329       StgInd *ind = (StgInd *)p;
330       ASSERT(LOOKS_LIKE_PTR(ind->indirectee));
331       q = (P_)p + sizeofW(StgInd);
332       while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
346     barf("checkClosure: stack frame");
348   case AP_UPD: /* we can treat this as being the same as a PAP */
351       StgPAP *pap = (StgPAP *)p;
352       ASSERT(LOOKS_LIKE_PTR(pap->fun));
353       checkStackChunk((StgPtr)pap->payload,
354 		      (StgPtr)pap->payload + pap->n_args
356       return pap_sizeW(pap);
360     return arr_words_sizeW((StgArrWords *)p);
363   case MUT_ARR_PTRS_FROZEN:
365       StgMutArrPtrs* a = (StgMutArrPtrs *)p;
367       for (i = 0; i < a->ptrs; i++) {
368 	ASSERT(LOOKS_LIKE_PTR(a->payload[i]));
370       return mut_arr_ptrs_sizeW(a);
374     checkTSO((StgTSO *)p);
375     return tso_sizeW((StgTSO *)p);
380       ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
381       ASSERT(LOOKS_LIKE_PTR((((StgBlockedFetch *)p)->node)));
382       return sizeofW(StgBlockedFetch);  // see size used in evacuate()
386     return sizeofW(StgFetchMe);
390       ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
391       return sizeofW(StgFetchMe);  // see size used in evacuate()
394     checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
395     return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
398     /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
399     ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
400     if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
401       checkBQ(((StgRBH *)p)->blocking_queue, p);
402     ASSERT(LOOKS_LIKE_GHC_INFO(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
403     return BLACKHOLE_sizeW();   // see size used in evacuate()
404     // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
409       barf("checkClosure: found EVACUATED closure %d",
412       barf("checkClosure (closure type %d)", info->type);
/* PVM task-id sanity bounds (parallel/GUM build): a PVM tid packs the
 * PE number in the top bits (PVM_PE_MASK) and a per-PE id in the rest.
 * MAX_SLOTS bounds GA slot numbers checked by looks_like_slot/ga. */
418 #define PVM_PE_MASK    0xfffc0000
419 #define MAX_PVM_PES    MAX_PES
420 #define MAX_PVM_TIDS   MAX_PES
421 #define MAX_SLOTS      100000
/* looks_like_tid: heuristic validity check for a PVM task id — splits
 * tid into PE number (hi, shifted down from PVM_PE_MASK) and per-PE id
 * (lo) and requires both to be in range, with hi nonzero.
 * NOTE(review): return type and return statement are elided in this
 * view (presumably `rtsBool` / `return ok;`). */
424 looks_like_tid(StgInt tid)
426   StgInt hi = (tid & PVM_PE_MASK) >> 18;
427   StgInt lo = (tid & ~PVM_PE_MASK);
428   rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* looks_like_slot: weak range check on a GA slot number when the owning
 * tid is unknown (otherwise looks_like_ga gives a tighter bound).
 * NOTE(review): return type and return statement elided in this view. */
433 looks_like_slot(StgInt slot)
435   /* if tid is known better use looks_like_ga!! */
436   rtsBool ok = slot<MAX_SLOTS;
437   // This refers only to the no. of slots on the current PE
438   // rtsBool ok = slot<=highest_slot();
/* looks_like_ga: validity check for a global address — the tid must
 * look plausible, and the slot must be within the tight local bound
 * (highest_slot()) when the GA belongs to this PE (gtid==mytid), or
 * the loose MAX_SLOTS bound otherwise.
 * NOTE(review): return type and return statement elided in this view. */
443 looks_like_ga(globalAddr *ga)
445   rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
446   rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
447                     (ga)->payload.gc.slot<=highest_slot() :
448                     (ga)->payload.gc.slot<MAX_SLOTS;
449   rtsBool ok = is_tid && is_slot;
456 /* -----------------------------------------------------------------------------
459 After garbage collection, the live heap is in a state where we can
460 run through and check that all the pointers point to the right
461 place. This function starts at a given position and sanity-checks
462 all the objects in the remainder of the chain.
463 -------------------------------------------------------------------------- */
/* checkHeap: walk a chain of block descriptors, sanity-checking every
 * closure in each block up to bd->free.  The trailing inner loop skips
 * over slop: words that are small integers or don't look like info
 * pointers (padding left behind by the GC).
 * NOTE(review): the initialisation of p per block (original line ~471)
 * and the loop-closing braces are elided in this view. */
466 checkHeap(bdescr *bd)
470     for (; bd != NULL; bd = bd->link) {
472 	while (p < bd->free) {
473 	    nat size = checkClosure((StgClosure *)p);
474 	    /* This is the smallest size of closure that can live in the heap */
475 	    ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
479 	    while (p < bd->free &&
480 		   (*p < 0x1000 || !LOOKS_LIKE_GHC_INFO((void*)*p))) { p++; }
487   Check heap between start and end. Used after unpacking graphs.
/* checkHeapChunk (parallel variant): walk closures in [start,end).
 * Special-cases FETCH_ME closures created during graph unpacking and
 * then commoned up (recognised by the 0x0000eeee garbage marker in
 * word 1) — these must not appear in the LAGA table — and marks the
 * slop word of INDs with 0x0000ee11 before the general check. */
490 checkHeapChunk(StgPtr start, StgPtr end)
492   extern globalAddr *LAGAlookup(StgClosure *addr);
496   for (p=start; p<end; p+=size) {
497     ASSERT(LOOKS_LIKE_GHC_INFO((void*)*p));
498     if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
499 	*(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
500       /* if it's a FM created during unpack and commoned up, it's not global */
501       ASSERT(LAGAlookup((StgClosure*)p)==NULL);
502       size = sizeofW(StgFetchMe);
503     } else if (get_itbl((StgClosure*)p)->type == IND) {
504       *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
507       size = checkClosure((StgClosure *)p);
508       /* This is the smallest size of closure that can live in the heap. */
509       ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* checkHeapChunk (sequential variant): walk closures in [start,end),
 * checking each with checkClosure and asserting the minimum heap
 * closure size.  checkClosure's return value is the stride. */
515 checkHeapChunk(StgPtr start, StgPtr end)
520   for (p=start; p<end; p+=size) {
521     ASSERT(LOOKS_LIKE_GHC_INFO((void*)*p));
522     size = checkClosure((StgClosure *)p);
523     /* This is the smallest size of closure that can live in the heap. */
524     ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* checkChain: check the first closure of each block in a chain —
 * used for large-object lists, where each block holds one object.
 * NOTE(review): the enclosing loop over bd is elided in this view. */
530 checkChain(bdescr *bd)
533     checkClosure((StgClosure *)bd->start);
538 /* check stack - making sure that update frames are linked correctly */
/* checkStack: check the whole stack of a thread.  Alternates between
 * checking the chunk of stack objects down to the next update frame
 * (checkStackChunk) and following the su (update frame) chain; each
 * frame's link must itself be a seq/catch/update/stop frame, and the
 * chain must terminate exactly at stack_end. */
540 checkStack(StgPtr sp, StgPtr stack_end, StgUpdateFrame* su )
542     /* check everything down to the first update frame */
543     checkStackChunk( sp, (StgPtr)su );
544     while ( (StgPtr)su < stack_end) {
546 	switch (get_itbl(su)->type) {
551 	    su = ((StgSeqFrame *)su)->link;
554 	    su = ((StgCatchFrame *)su)->link;
557 	    /* not quite: ASSERT((StgPtr)su == stack_end); */
560 	    barf("checkStack: weird record found on update frame list.");
562 	checkStackChunk( sp, (StgPtr)su );
564     ASSERT((StgPtr)su == stack_end);
/* checkTSO: sanity-check a thread state object.  Skips relocated and
 * dead (complete/killed) threads; otherwise checks that sp lies within
 * the stack, that the update-frame pointer su is above sp, that
 * block_info is consistent with why_blocked, that the link field points
 * to a legal queue element, and finally checks the whole stack.
 * NOTE(review): several case labels and the sp initialisation are
 * elided in this view. */
569 checkTSO(StgTSO *tso)
572     StgPtr stack = tso->stack;
573     StgUpdateFrame* su = tso->su;
574     StgOffset stack_size = tso->stack_size;
575     StgPtr stack_end = stack + stack_size;
577     if (tso->what_next == ThreadRelocated) {
582     if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
583 	/* The garbage collector doesn't bother following any pointers
584 	 * from dead threads, so don't check sanity here.
589     ASSERT(stack <= sp && sp < stack_end);
590     ASSERT(sp <= (StgPtr)su);
593   ASSERT(tso->par.magic==TSO_MAGIC);
595   switch (tso->why_blocked) {
597     checkClosureShallow(tso->block_info.closure);
598     ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
599 	   get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
601   case BlockedOnGA_NoSend:
602     checkClosureShallow(tso->block_info.closure);
603     ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
605   case BlockedOnBlackHole:
606     checkClosureShallow(tso->block_info.closure);
607     ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
608 	   get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
609 	   get_itbl(tso->block_info.closure)->type==RBH);
614     /* isOnBQ(blocked_queue) */
616   case BlockedOnException:
617     /* isOnSomeBQ(tso) */
618     ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
621     ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
625     Could check other values of why_blocked but I am more
626     lazy than paranoid (bad combination) -- HWL
630   /* if the link field is non-nil it must point to one of these
631      three closure types */
632   ASSERT(tso->link == END_TSO_QUEUE ||
633 	 get_itbl(tso->link)->type == TSO ||
634 	 get_itbl(tso->link)->type == BLOCKED_FETCH ||
635 	 get_itbl(tso->link)->type == CONSTR);
638     checkStack(sp, stack_end, su);
/* checkTSOsSanity (GranSim): run checkTSO over every TSO on every
 * processor's run queue, reporting progress to stderr and counting the
 * number checked.
 * NOTE(review): declarations of i/tsos/tso and the checkTSO call inside
 * the loop are elided in this view. */
643 checkTSOsSanity(void) {
647   belch("Checking sanity of all runnable TSOs:");
649   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
650     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
651       fprintf(stderr, "TSO %p on PE %d ...", tso, i);
653       fprintf(stderr, "OK, ");
658   belch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* checkThreadQSanity (GranSim): validate the run queue of one PE —
 * head/tail must never be NULL (the NIL value is END_TSO_QUEUE), head
 * and tail must agree about emptiness, the list must be properly
 * back-linked (prev->link == tso), and the last element must be the
 * recorded tail.  When check_TSO_too is set each TSO is presumably
 * also checked with checkTSO (call elided in this view — confirm). */
665 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
669   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
670   ASSERT(run_queue_hds[proc]!=NULL);
671   ASSERT(run_queue_tls[proc]!=NULL);
672   /* if either head or tail is NIL then the other one must be NIL, too */
673   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
674   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
675   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
677        prev=tso, tso=tso->link) {
678     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
679 	   (prev==END_TSO_QUEUE || prev->link==tso));
683   ASSERT(prev==run_queue_tls[proc]);
/* checkThreadQsSanity (GranSim): check every PE's run queue. */
687 checkThreadQsSanity (rtsBool check_TSO_too)
691   for (p=0; p<RtsFlags.GranFlags.proc; p++)
692     checkThreadQSanity(p, check_TSO_too);
697   Check that all TSOs have been evacuated.
698   Optionally also check the sanity of the TSOs.
/* checkGlobalTSOList: walk the all_threads list via global_link,
 * asserting each element looks like a pointer and really is a TSO.
 * NOTE(review): the checkTSOs-conditional call to checkTSO is elided
 * in this view. */
701 checkGlobalTSOList (rtsBool checkTSOs)
703   extern  StgTSO *all_threads;
705   for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
706       ASSERT(LOOKS_LIKE_PTR(tso));
707       ASSERT(get_itbl(tso)->type == TSO);
713 /* -----------------------------------------------------------------------------
714 Check mutable list sanity.
715 -------------------------------------------------------------------------- */
/* checkMutableList: every closure on a generation's mutable list must
 * actually be mutable, live in that generation (gen), and have a
 * plausible mut_link.
 * NOTE(review): the assignment bd = Bdescr((P_)p) inside the loop is
 * elided in this view. */
718 checkMutableList( StgMutClosure *p, nat gen )
722     for (; p != END_MUT_LIST; p = p->mut_link) {
724 	ASSERT(closure_MUTABLE(p));
725 	ASSERT(bd->gen_no == gen);
726 	ASSERT(LOOKS_LIKE_PTR(p->mut_link));
/* checkMutOnceList: the mut-once list holds closures mutated at most
 * once (old-gen indirections etc.) — each must be non-mutable, in the
 * right generation (unless static), with a valid mut_link, and of one
 * of the expected closure types (the accepted case labels before
 * IND_OLDGEN_PERM are elided in this view). */
731 checkMutOnceList( StgMutClosure *p, nat gen )
736     for (; p != END_MUT_LIST; p = p->mut_link) {
740 	ASSERT(!closure_MUTABLE(p));
741 	ASSERT(ip_STATIC(info) || bd->gen_no == gen);
742 	ASSERT(LOOKS_LIKE_PTR(p->mut_link));
744 	switch (info->type) {
747 	case IND_OLDGEN_PERM:
751 	    barf("checkMutOnceList: strange closure %p (%s)",
752 		 p, info_type((StgClosure *)p));
757 /* -----------------------------------------------------------------------------
758 Check Blackhole Sanity
760 Test whether an object is already on the update list.
761 It isn't necessarily an rts error if it is - it might be a programming
764 Future versions might be able to test for a blackhole without traversing
765 the update frame list.
767 -------------------------------------------------------------------------- */
/* isBlackhole: walk tso's update-frame chain looking for a frame whose
 * updatee is p — i.e. test whether p is already scheduled to be
 * updated (blackholed) by this thread.  Returns from inside the loop;
 * return type and several case labels are elided in this view. */
769 isBlackhole( StgTSO* tso, StgClosure* p )
771   StgUpdateFrame* su = tso->su;
773     switch (get_itbl(su)->type) {
775       if (su->updatee == p) {
782       su = ((StgSeqFrame *)su)->link;
785       su = ((StgCatchFrame *)su)->link;
790       barf("isBlackhole: weird record found on update frame list.");
796 Check the static objects list.
/* checkStaticObjects: walk the static-objects list (threaded through
 * each static closure's link word, terminated by END_OF_STATIC_LIST).
 * IND_STATICs additionally have their indirectee checked; the walk
 * advances with the type-appropriate *_STATIC_LINK accessor.
 * NOTE(review): the barf message below misspells the function name
 * ("checkStaticObjetcs") — a string-literal fix for a later pass. */
799 checkStaticObjects ( StgClosure* static_objects )
801   StgClosure *p = static_objects;
804   while (p != END_OF_STATIC_LIST) {
807     switch (info->type) {
810 	StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
812 	ASSERT(LOOKS_LIKE_PTR(indirectee));
813 	ASSERT(LOOKS_LIKE_GHC_INFO(indirectee->header.info));
814 	p = IND_STATIC_LINK((StgClosure *)p);
819       p = THUNK_STATIC_LINK((StgClosure *)p);
823       p = FUN_STATIC_LINK((StgClosure *)p);
827       p = STATIC_LINK(info,(StgClosure *)p);
831       barf("checkStaticObjetcs: strange closure %p (%s)",
838 Check the sanity of a blocking queue starting at bqe with closure being
839 the closure holding the blocking queue.
840 Note that in GUM we can have several different closure types in a
/* checkBQ (GUM variant): sanity-check a blocking queue hanging off
 * `closure` (which must be a BLACKHOLE_BQ, MVAR, FETCH_ME_BQ or RBH).
 * In GUM a queue may contain several element types, each of which is
 * checked with checkClosure; traversal/advance lines are elided in
 * this view. */
845 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
847   rtsBool end = rtsFalse;
848   StgInfoTable *info = get_itbl(closure);
850   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
851 	 || info->type == FETCH_ME_BQ || info->type == RBH);
854     switch (get_itbl(bqe)->type) {
857       checkClosure((StgClosure *)bqe);
859       end = (bqe==END_BQ_QUEUE);
863       checkClosure((StgClosure *)bqe);
868       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
869 	   get_itbl(bqe)->type, closure, info_type(closure));
/* checkBQ (GranSim variant): as above, but queues contain only TSOs
 * and the owning closure must be a BLACKHOLE_BQ or MVAR. */
875 checkBQ (StgTSO *bqe, StgClosure *closure)
877   rtsBool end = rtsFalse;
878   StgInfoTable *info = get_itbl(closure);
880   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
883     switch (get_itbl(bqe)->type) {
886       checkClosure((StgClosure *)bqe);
888       end = (bqe==END_BQ_QUEUE);
892       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
893 	   get_itbl(bqe)->type, closure, info_type(closure));
/* checkBQ (sequential variant): TSO-only queues, terminated by
 * END_TSO_QUEUE (note the different sentinel from the parallel
 * variants above); the barf here prints info->type rather than a
 * type name string. */
899 checkBQ (StgTSO *bqe, StgClosure *closure)
901   rtsBool end = rtsFalse;
902   StgInfoTable *info = get_itbl(closure);
904   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
907     switch (get_itbl(bqe)->type) {
909       checkClosure((StgClosure *)bqe);
911       end = (bqe==END_TSO_QUEUE);
915       barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
916 	   get_itbl(bqe)->type, closure, info->type);
926 This routine checks the sanity of the LAGA and GALA tables. They are
927 implemented as lists through one hash table, LAtoGALAtable, because entries
928 in both tables have the same structure:
929 - the LAGA table maps local addresses to global addresses; it starts
930 with liveIndirections
931 - the GALA table maps global addresses to local addresses; it starts
938 /* hidden in parallel/Global.c; only accessed for testing here */
939 extern GALA *liveIndirections;
940 extern GALA *liveRemoteGAs;
941 extern HashTable *LAtoGALAtable;
944 checkLAGAtable(rtsBool check_closures)
947 nat n=0, m=0; // debugging
949 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
951 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
952 ASSERT(!gala->preferred || gala == gala0);
953 ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
954 ASSERT(gala->next!=gala); // detect direct loops
955 if ( check_closures ) {
956 checkClosure((StgClosure *)gala->la);
960 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
962 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
963 ASSERT(!gala->preferred || gala == gala0);
964 ASSERT(LOOKS_LIKE_GHC_INFO(((StgClosure *)gala->la)->header.info));
965 ASSERT(gala->next!=gala); // detect direct loops
967 if ( check_closures ) {
968 checkClosure((StgClosure *)gala->la);