1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2001
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
/* Forward declarations for the internal walkers defined below.
 * NOTE(review): this listing is elided and each line carries its original
 * file line number as a prefix; gaps in those numbers are missing source. */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
/* Walk `size` payload words under a small (word-sized) bitmap; a 0 bit
 * marks a pointer word, which is shallow-checked.  NOTE(review): the
 * return-type line, local declarations and closing braces are elided
 * from this listing. */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49     for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 	if ((bitmap & 1) == 0) {
            /* bit clear => this payload word is a closure pointer */
51 	    checkClosureShallow((StgClosure *)payload[i]);
/* Same as checkSmallBitmap but for a multi-word StgLargeBitmap: iterate
 * bitmap words (`bmp`), consuming up to BITS_IN(W_) bits per word.
 * NOTE(review): declarations of i/j/bmp and the closing braces are
 * elided from this listing. */
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63     for (bmp=0; i < size; bmp++) {
64 	StgWord bitmap = large_bitmap->bitmap[bmp];
66 	for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 	    if ((bitmap & 1) == 0) {
                /* bit clear => pointer word, as in the small-bitmap case */
68 		checkClosureShallow((StgClosure *)payload[i]);
75  * check that it looks like a valid closure - without checking its payload
76  * used to avoid recursion between checking PAPs and checking stack
/* Shallow validity check: the pointer looks like a closure, and its
 * static/dynamic flag agrees with whether it is heap-allocated.
 * NOTE(review): the signature's return-type line and the else branch
 * joining L35/L36 are elided from this listing. */
81 checkClosureShallow( StgClosure* p )
83     ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
85     /* Is it a static closure? */
86     if (!HEAP_ALLOCED(p)) {
87 	ASSERT(closure_STATIC(p));
        /* heap-allocated closures must NOT be marked static */
89 	ASSERT(!closure_STATIC(p));
93 // check an individual stack object
/* Validate one stack frame at `c` and return its size in words, so the
 * caller (checkStackChunk) can step to the next frame.  Dispatches on the
 * frame's return-info-table type.  NOTE(review): this listing is heavily
 * elided -- several case labels, local declarations (p, size, dyn, r, bco)
 * and break/closing-brace lines are missing between the numbered lines. */
95 checkStackFrame( StgPtr c )
98     const StgRetInfoTable* info;
100     info = get_ret_itbl((StgClosure *)c);
102     /* All activation records have 'bitmap' style layout info. */
103     switch (info->i.type) {
104     case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
        /* First the register-save area under the liveness mask... */
113 	p = (P_)(r->payload);
114 	checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
115 	p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
117 	// skip over the non-pointers
118 	p += RET_DYN_NONPTRS(dyn);
120 	// follow the ptr words
121 	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
122 	    checkClosureShallow((StgClosure *)*p);
        /* frame size = fixed header + saved regs + nonptrs + ptrs */
126 	return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
127 	    RET_DYN_NONPTR_REGS_SIZE +
128 	    RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
        /* NOTE(review): the UPDATE_FRAME case label is elided before this
         * assert; it presumably falls through to the small-bitmap cases. */
132 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
133     case ATOMICALLY_FRAME:
134     case CATCH_RETRY_FRAME:
135     case CATCH_STM_FRAME:
137 	// small bitmap cases (<= 32 entries)
141 	size = BITMAP_SIZE(info->i.layout.bitmap);
142 	checkSmallBitmap((StgPtr)c + 1,
143 			 BITMAP_BITS(info->i.layout.bitmap), size);
        /* NOTE(review): RET_BCO case label elided; the BCO itself sits in
         * the word after the return address. */
149 	bco = (StgBCO *)*(c+1);
150 	size = BCO_BITMAP_SIZE(bco);
151 	checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
155     case RET_BIG: // large bitmap (> 32 entries)
157 	size = GET_LARGE_BITMAP(&info->i)->size;
158 	checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        /* NOTE(review): RET_FUN case label elided here. */
163 	StgFunInfoTable *fun_info;
166 	ret_fun = (StgRetFun *)c;
167 	fun_info = get_fun_itbl(ret_fun->fun);
168 	size = ret_fun->size;
169 	switch (fun_info->f.fun_type) {
        /* ARG_GEN: bitmap stored in the function's info table */
171 	    checkSmallBitmap((StgPtr)ret_fun->payload,
172 			     BITMAP_BITS(fun_info->f.bitmap), size);
        /* ARG_GEN_BIG: large bitmap via the fun info table */
175 	    checkLargeBitmap((StgPtr)ret_fun->payload,
176 			     GET_FUN_LARGE_BITMAP(fun_info), size);
        /* default: standard argument pattern, looked up by fun_type */
179 	    checkSmallBitmap((StgPtr)ret_fun->payload,
180 			     BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
184 	return sizeofW(StgRetFun) + size;
        /* unknown frame type on the stack: die loudly */
188 	barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
192 // check sections of stack between update frames
/* Walk the stack from sp to stack_end, validating each frame and
 * advancing by the size checkStackFrame reports.  NOTE(review): the
 * declaration/initialisation of `p` is elided from this listing. */
194 checkStackChunk( StgPtr sp, StgPtr stack_end )
199     while (p < stack_end) {
200 	p += checkStackFrame( p );
202     // ASSERT( p == stack_end ); -- HWL
/* Deep-check one heap closure `p` and return its size in words so heap
 * walkers (checkHeap / checkHeapChunk) can step to the next object.
 * Dispatches on the closure's info-table type.  NOTE(review): this
 * listing is heavily elided -- many case labels, local declarations
 * (info fetch, i, q, size) and closing braces are missing between the
 * numbered lines; comments below hedge where labels are invisible. */
206 checkClosure( StgClosure* p )
208     const StgInfoTable *info;
210     ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
212     /* Is it a static closure (i.e. in the data segment)? */
213     if (!HEAP_ALLOCED(p)) {
214 	ASSERT(closure_STATIC(p));
        /* dynamic (heap) closures must not carry the static flag */
216 	ASSERT(!closure_STATIC(p));
220     switch (info->type) {
        /* MVAR (label elided): head/tail/value must all look like closures */
224       StgMVar *mvar = (StgMVar *)p;
225       ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
226       ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
227       ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        /* NOTE(review): the two checkBQ calls below are presumably under
         * different #if branches (PAR vs. not) -- the #if lines are elided. */
230       checkBQ((StgBlockingQueueElement *)mvar->head, p);
232       checkBQ(mvar->head, p);
235       return sizeofW(StgMVar);
        /* THUNK-ish cases (labels elided): check pointer payload words */
246 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
247 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        /* thunks must be at least big enough to be updated in place */
249 	    return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
        /* BLACKHOLE_BQ-style case (label elided): validate its queue */
253     checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
254     /* fall through to basic ptr check */
269     case IND_OLDGEN_PERM:
272     case SE_CAF_BLACKHOLE:
280     case CONSTR_CHARLIKE:
282     case CONSTR_NOCAF_STATIC:
        /* generic case: check every pointer word of the payload */
287 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
288 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
290 	    return sizeW_fromITBL(info);
        /* BCO (label elided): its four sub-objects must look like closures */
294 	StgBCO *bco = (StgBCO *)p;
295 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
296 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
297 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
298 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
299 	return bco_sizeW(bco);
302     case IND_STATIC: /* (1, 0) closure */
303       ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
304       return sizeW_fromITBL(info);
        /* WEAK (label elided): */
307       /* deal with these specially - the info table isn't
308        * representative of the actual layout.
310       { StgWeak *w = (StgWeak *)p;
311 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
312 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
313 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
315 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
317 	return sizeW_fromITBL(info);
        /* THUNK_SELECTOR (label elided): */
321       ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
322       return THUNK_SELECTOR_sizeW();
        /* IND (label elided): */
326 	/* we don't expect to see any of these after GC
327 	 * but they might appear during execution
330 	    StgInd *ind = (StgInd *)p;
331 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
332 	    q = (P_)p + sizeofW(StgInd);
333 	    while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
        /* stack frames must never appear as heap closures */
346     case ATOMICALLY_FRAME:
347     case CATCH_RETRY_FRAME:
348     case CATCH_STM_FRAME:
349 	    barf("checkClosure: stack frame");
351     case AP: /* we can treat this as being the same as a PAP */
        /* PAP case label elided; shares this body with AP */
354 	StgFunInfoTable *fun_info;
355 	StgPAP* pap = (StgPAP *)p;
357 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(pap->fun));
358 	fun_info = get_fun_itbl(pap->fun);
360 	p = (StgClosure *)pap->payload;
361 	switch (fun_info->f.fun_type) {
        /* ARG_GEN: small bitmap from fun info table */
363 	    checkSmallBitmap( (StgPtr)pap->payload,
364 			      BITMAP_BITS(fun_info->f.bitmap), pap->n_args );
        /* ARG_GEN_BIG: large bitmap from fun info table */
367 	    checkLargeBitmap( (StgPtr)pap->payload,
368 			      GET_FUN_LARGE_BITMAP(fun_info),
        /* ARG_BCO (elided): bitmap lives on the BCO itself */
372 	    checkLargeBitmap( (StgPtr)pap->payload,
373 			      BCO_BITMAP(pap->fun),
        /* default: standard argument bitmap by fun_type */
377 	    checkSmallBitmap( (StgPtr)pap->payload,
378 			      BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
382 	return pap_sizeW(pap);
        /* AP_STACK (label elided): payload is a stack chunk */
387 	StgAP_STACK *ap = (StgAP_STACK *)p;
388 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
389 	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
390 	return ap_stack_sizeW(ap);
        /* ARR_WORDS (label elided): no pointers, just return its size */
394 	return arr_words_sizeW((StgArrWords *)p);
397     case MUT_ARR_PTRS_FROZEN:
        /* mutable-array cases: check every element pointer */
399 	    StgMutArrPtrs* a = (StgMutArrPtrs *)p;
401 	    for (i = 0; i < a->ptrs; i++) {
402 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
404 	    return mut_arr_ptrs_sizeW(a);
        /* TSO (label elided): delegate to checkTSO */
408 	checkTSO((StgTSO *)p);
409 	return tso_sizeW((StgTSO *)p);
        /* parallel-RTS closure types (BLOCKED_FETCH etc., labels elided) */
414       ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
415       ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
416       return sizeofW(StgBlockedFetch);  // see size used in evacuate()
420       return sizeofW(StgFetchMe);
424       ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
425       return sizeofW(StgFetchMe);  // see size used in evacuate()
428       checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
429       return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
432       /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
433       ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
434       if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
435 	checkBQ(((StgRBH *)p)->blocking_queue, p);
436       ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
437       return BLACKHOLE_sizeW();  // see size used in evacuate()
438       // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
        /* STM closure types */
442   case TVAR_WAIT_QUEUE:
444       StgTVarWaitQueue *wq = (StgTVarWaitQueue *)p;
445       ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
446       ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
447       return sizeofW(StgTVarWaitQueue);
        /* TVAR (label elided) */
452       StgTVar *tv = (StgTVar *)p;
453       ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
454       ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_wait_queue_entry));
455       return sizeofW(StgTVar);
        /* TREC_CHUNK (label elided): check each transactional entry */
461       StgTRecChunk *tc = (StgTRecChunk *)p;
462       ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
463       for (i = 0; i < tc -> next_entry_idx; i ++) {
464 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
465 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
466 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
468       return sizeofW(StgTRecChunk);
        /* TREC_HEADER (label elided) */
473       StgTRecHeader *trec = (StgTRecHeader *)p;
474       ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
475       ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
476       return sizeofW(StgTRecHeader);
        /* EVACUATED must never survive into a sanity-checked heap */
481       barf("checkClosure: found EVACUATED closure %d",
484       barf("checkClosure (closure type %d)", info->type);
/* PVM task-id decoding for the parallel (GUM) RTS: the top bits above
 * bit 18 encode the PE, the rest the per-PE tid.  MAX_SLOTS bounds the
 * GA slot numbers accepted by looks_like_slot below. */
490 #define PVM_PE_MASK    0xfffc0000
491 #define MAX_PVM_PES    MAX_PES
492 #define MAX_PVM_TIDS   MAX_PES
493 #define MAX_SLOTS      100000
/* Heuristic validity check for a PVM task id: both the PE part (hi)
 * and the per-PE part (lo) must be in range, and the PE part non-zero.
 * NOTE(review): return-type line and the `return ok;`/closing brace
 * are elided from this listing. */
496 looks_like_tid(StgInt tid)
498   StgInt hi = (tid & PVM_PE_MASK) >> 18;
499   StgInt lo = (tid & ~PVM_PE_MASK);
500   rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Heuristic validity check for a GA slot number -- only an upper bound,
 * since without the tid we cannot know which PE's slot range applies.
 * NOTE(review): return statement and closing brace elided. */
505 looks_like_slot(StgInt slot)
507   /* if tid is known better use looks_like_ga!! */
508   rtsBool ok = slot<MAX_SLOTS;
509   // This refers only to the no. of slots on the current PE
510   // rtsBool ok = slot<=highest_slot();
/* Validity check for a full global address: tid must look valid, and
 * the slot is bounded by highest_slot() when the GA is local (mytid)
 * or by MAX_SLOTS otherwise.  NOTE(review): return statement and
 * closing brace elided from this listing. */
515 looks_like_ga(globalAddr *ga)
517   rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
518   rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
519 		    (ga)->payload.gc.slot<=highest_slot() :
520 	            (ga)->payload.gc.slot<MAX_SLOTS;
521   rtsBool ok = is_tid && is_slot;
528 /* -----------------------------------------------------------------------------
531    After garbage collection, the live heap is in a state where we can
532    run through and check that all the pointers point to the right
533    place.  This function starts at a given position and sanity-checks
534    all the objects in the remainder of the chain.
535    -------------------------------------------------------------------------- */
/* Walk a chain of blocks, sanity-checking every closure up to each
 * block's free pointer.  NOTE(review): return-type line, declaration
 * and initialisation of `p`, and the step `p += size` are elided;
 * the tail loop at L239-L240 appears to skip slop words that do not
 * look like info pointers -- confirm against the full source. */
538 checkHeap(bdescr *bd)
542     for (; bd != NULL; bd = bd->link) {
544 	while (p < bd->free) {
545 	    nat size = checkClosure((StgClosure *)p);
546 	    /* This is the smallest size of closure that can live in the heap */
547 	    ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
551 	    while (p < bd->free &&
552 		   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
559   Check heap between start and end. Used after unpacking graphs.
/* GUM variant of checkHeapChunk: tolerates FETCH_ME closures created
 * during graph unpacking (marked with the 0x0000eeee sentinel) and
 * marks IND slop words as garbage.  NOTE(review): the #if guard
 * selecting this variant, the declarations of p/size, and closing
 * braces are elided from this listing. */
562 checkHeapChunk(StgPtr start, StgPtr end)
564   extern globalAddr *LAGAlookup(StgClosure *addr);
568   for (p=start; p<end; p+=size) {
569     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
570     if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
571 	*(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
572       /* if it's a FM created during unpack and commoned up, it's not global */
573       ASSERT(LAGAlookup((StgClosure*)p)==NULL);
574       size = sizeofW(StgFetchMe);
575     } else if (get_itbl((StgClosure*)p)->type == IND) {
576       *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
        /* NOTE(review): the size assignment for the IND branch is elided. */
579       size = checkClosure((StgClosure *)p);
580       /* This is the smallest size of closure that can live in the heap. */
581       ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Plain (non-GUM) variant: every word range [start,end) must be a
 * valid sequence of closures.  NOTE(review): the #else guard, return
 * type, declarations of p/size, and closing braces are elided. */
587 checkHeapChunk(StgPtr start, StgPtr end)
592   for (p=start; p<end; p+=size) {
593     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
594     size = checkClosure((StgClosure *)p);
595     /* This is the smallest size of closure that can live in the heap. */
596     ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Check the first closure of every block in a chain (used for large
 * objects, where one object occupies the whole block).  NOTE(review):
 * return type, the loop over bd->link, and braces are elided. */
602 checkChain(bdescr *bd)
605     checkClosure((StgClosure *)bd->start);
/* Sanity-check one thread: its stack pointer is inside its stack,
 * its blocking state is consistent with the closure it is blocked on,
 * and its whole stack validates as a chunk.  NOTE(review): heavily
 * elided -- return type, `sp` declaration, early-return bodies for
 * ThreadRelocated/ThreadKilled, case labels for the why_blocked
 * switch, break statements, and #if PAR/GRAN guards are all missing
 * between the numbered lines. */
611 checkTSO(StgTSO *tso)
614     StgPtr stack = tso->stack;
615     StgOffset stack_size = tso->stack_size;
616     StgPtr stack_end = stack + stack_size;
618     if (tso->what_next == ThreadRelocated) {
623     if (tso->what_next == ThreadKilled) {
624       /* The garbage collector doesn't bother following any pointers
625        * from dead threads, so don't check sanity here.
630     ASSERT(stack <= sp && sp < stack_end);
        /* parallel-RTS only: magic number guards against corrupt TSOs */
633     ASSERT(tso->par.magic==TSO_MAGIC);
635     switch (tso->why_blocked) {
        /* BlockedOnGA (label elided): must be blocked on a FETCH_ME_BQ */
637       checkClosureShallow(tso->block_info.closure);
638       ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
639 	     get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
641     case BlockedOnGA_NoSend:
642       checkClosureShallow(tso->block_info.closure);
643       ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
645     case BlockedOnBlackHole:
646       checkClosureShallow(tso->block_info.closure);
647       ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
648 	     get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
649 	     get_itbl(tso->block_info.closure)->type==RBH);
654 #if defined(mingw32_HOST_OS)
655     case BlockedOnDoProc:
657       /* isOnBQ(blocked_queue) */
659     case BlockedOnException:
660       /* isOnSomeBQ(tso) */
661       ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
        /* BlockedOnMVar (label elided) */
664       ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
        /* BlockedOnSTM or not-blocked (label elided) */
667       ASSERT(tso->block_info.closure == END_TSO_QUEUE);
671       Could check other values of why_blocked but I am more
672       lazy than paranoid (bad combination) -- HWL
676   /* if the link field is non-nil it most point to one of these
677      three closure types */
678   ASSERT(tso->link == END_TSO_QUEUE ||
679 	 get_itbl(tso->link)->type == TSO ||
680 	 get_itbl(tso->link)->type == BLOCKED_FETCH ||
681 	 get_itbl(tso->link)->type == CONSTR);
684     checkStackChunk(sp, stack_end);
/* GranSim only: walk every processor's run queue and sanity-check each
 * TSO on it, reporting progress to the debug log.  NOTE(review): the
 * declarations of i/tsos/tso, the checkTSO call inside the loop, and
 * closing braces are elided from this listing. */
689 checkTSOsSanity(void) {
693   debugBelch("Checking sanity of all runnable TSOs:");
695   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
696     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
697       debugBelch("TSO %p on PE %d ...", tso, i);
704   debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* Check run-queue invariants for one PE: head/tail are never NULL,
 * head and tail are END_TSO_QUEUE together or not at all, and the list
 * is properly doubly-consistent (prev->link == tso).  Optionally
 * deep-checks each TSO.  NOTE(review): the loop condition, the
 * conditional checkTSO call, and closing braces are elided. */
711 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
715   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
716   ASSERT(run_queue_hds[proc]!=NULL);
717   ASSERT(run_queue_tls[proc]!=NULL);
718   /* if either head or tail is NIL then the other one must be NIL, too */
719   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
720   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
721   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
723        prev=tso, tso=tso->link) {
724     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
725 	   (prev==END_TSO_QUEUE || prev->link==tso));
        /* after the loop, prev must be the recorded tail */
729   ASSERT(prev==run_queue_tls[proc]);
/* Check every PE's run queue (GranSim).  NOTE(review): return type and
 * the declaration of `p` are elided from this listing. */
733 checkThreadQsSanity (rtsBool check_TSO_too)
737   for (p=0; p<RtsFlags.GranFlags.proc; p++)
738     checkThreadQSanity(p, check_TSO_too);
743   Check that all TSOs have been evacuated.
744   Optionally also check the sanity of the TSOs.
/* Walk the global thread list via global_link, asserting each entry is
 * a live, valid TSO.  NOTE(review): the declaration of `tso`, the
 * conditional checkTSO(tso) call implied by `checkTSOs`, and closing
 * braces are elided from this listing. */
747 checkGlobalTSOList (rtsBool checkTSOs)
749   extern  StgTSO *all_threads;
751   for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
752       ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
753       ASSERT(get_itbl(tso)->type == TSO);
759 /* -----------------------------------------------------------------------------
760    Check mutable list sanity.
761    -------------------------------------------------------------------------- */
/* Each word on a generation's mutable list must point at an object in
 * that same generation (or be static).  NOTE(review): return type,
 * declarations of bd/q/p, and closing braces are elided. */
764 checkMutableList( bdescr *mut_bd, nat gen )
770     for (bd = mut_bd; bd != NULL; bd = bd->link) {
771 	for (q = bd->start; q < bd->free; q++) {
772 	    p = (StgClosure *)*q;
773 	    ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
779   Check the static objects list.
/* Walk the static-objects list (linked through the per-type static
 * link fields) until END_OF_STATIC_LIST, validating each entry.
 * NOTE(review): the fetch of `info`, the case labels (IND_STATIC /
 * THUNK_STATIC / FUN_STATIC / CONSTR_STATIC presumably), breaks and
 * closing braces are elided from this listing. */
782 checkStaticObjects ( StgClosure* static_objects )
784   StgClosure *p = static_objects;
787   while (p != END_OF_STATIC_LIST) {
790     switch (info->type) {
        /* IND_STATIC: indirectee must be a valid closure */
793 	StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
795 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
796 	ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
797 	p = IND_STATIC_LINK((StgClosure *)p);
802       p = THUNK_STATIC_LINK((StgClosure *)p);
806       p = FUN_STATIC_LINK((StgClosure *)p);
810       p = STATIC_LINK(info,(StgClosure *)p);
        /* note: "Objetcs" typo below is in the original barf message;
         * left untouched since message text is runtime behavior */
814       barf("checkStaticObjetcs: strange closure %p (%s)",
821   Check the sanity of a blocking queue starting at bqe with closure being
822   the closure holding the blocking queue.
823   Note that in GUM we can have several different closure types in a
/* GUM variant of checkBQ: blocking queues may contain TSOs,
 * BLOCKED_FETCHs and CONSTRs.  NOTE(review): the #if guard selecting
 * this variant, the do/while loop structure, case labels, and closing
 * braces are elided from this listing. */
828 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
830   rtsBool end = rtsFalse;
831   StgInfoTable *info = get_itbl(closure);
833   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
834 	 || info->type == FETCH_ME_BQ || info->type == RBH);
837     switch (get_itbl(bqe)->type) {
        /* TSO / BLOCKED_FETCH (labels elided): valid queue elements */
840       checkClosure((StgClosure *)bqe);
842       end = (bqe==END_BQ_QUEUE);
        /* CONSTR (label elided): also legal in a GUM queue */
846       checkClosure((StgClosure *)bqe);
851       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
852 	   get_itbl(bqe)->type, closure, info_type(closure));
/* GranSim variant of checkBQ: queues hold only TSOs.  NOTE(review):
 * the #elif guard, loop structure, case labels, and closing braces
 * are elided from this listing. */
858 checkBQ (StgTSO *bqe, StgClosure *closure)
860   rtsBool end = rtsFalse;
861   StgInfoTable *info = get_itbl(closure);
863   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
866     switch (get_itbl(bqe)->type) {
        /* TSO (label elided): the only legal queue element here */
869       checkClosure((StgClosure *)bqe);
871       end = (bqe==END_BQ_QUEUE);
875       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
876 	   get_itbl(bqe)->type, closure, info_type(closure));
/* Sequential-RTS variant of checkBQ: TSO-only queues, terminated by
 * END_TSO_QUEUE (note the different sentinel from the variants above).
 * NOTE(review): the #else guard, loop structure, case labels, and
 * closing braces are elided from this listing. */
882 checkBQ (StgTSO *bqe, StgClosure *closure)
884   rtsBool end = rtsFalse;
885   StgInfoTable *info = get_itbl(closure);
887   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
890     switch (get_itbl(bqe)->type) {
892       checkClosure((StgClosure *)bqe);
894       end = (bqe==END_TSO_QUEUE);
898       barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
899 	   get_itbl(bqe)->type, closure, info->type);
909 This routine checks the sanity of the LAGA and GALA tables. They are
910 implemented as lists through one hash table, LAtoGALAtable, because entries
911 in both tables have the same structure:
912 - the LAGA table maps local addresses to global addresses; it starts
913 with liveIndirections
914 - the GALA table maps global addresses to local addresses; it starts
921 /* hidden in parallel/Global.c; only accessed for testing here */
922 extern GALA *liveIndirections;
923 extern GALA *liveRemoteGAs;
924 extern HashTable *LAtoGALAtable;
927 checkLAGAtable(rtsBool check_closures)
930 nat n=0, m=0; // debugging
932 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
934 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
935 ASSERT(!gala->preferred || gala == gala0);
936 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
937 ASSERT(gala->next!=gala); // detect direct loops
938 if ( check_closures ) {
939 checkClosure((StgClosure *)gala->la);
943 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
945 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
946 ASSERT(!gala->preferred || gala == gala0);
947 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
948 ASSERT(gala->next!=gala); // detect direct loops
950 if ( check_closures ) {
951 checkClosure((StgClosure *)gala->la);