1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
/* Scan 'size' words of 'payload' under a single-word ("small") bitmap.
 * A 0 bit marks the corresponding word as a pointer, which is
 * shallow-checked as a closure; 1 bits are non-pointer words and skipped.
 * NOTE(review): interior lines (declaration of 'i', closing braces) are
 * elided in this extract. */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49 for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 if ((bitmap & 1) == 0) {
51 checkClosureShallow((StgClosure *)payload[i]);
/* Scan 'size' words of 'payload' under a multi-word ("large") bitmap.
 * The outer loop walks the bitmap words; the inner loop consumes up to
 * BITS_IN(W_) bits per word. As with checkSmallBitmap, a 0 bit marks a
 * pointer word to be shallow-checked.
 * NOTE(review): the initialisation of 'i' and 'j' and the closing braces
 * are elided in this extract. */
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63 for (bmp=0; i < size; bmp++) {
64 StgWord bitmap = large_bitmap->bitmap[bmp];
66 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 if ((bitmap & 1) == 0) {
68 checkClosureShallow((StgClosure *)payload[i]);
75 * check that it looks like a valid closure - without checking its payload
76 * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
86 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
88 /* Is it a static closure? */
89 if (!HEAP_ALLOCED(q)) {
90 ASSERT(closure_STATIC(q));
92 ASSERT(!closure_STATIC(q));
96 // check an individual stack object
/* Check a single stack frame at 'c' and return its size in words, so the
 * caller (checkStackChunk) can advance to the next frame. Dispatches on
 * the frame's info-table type; each case checks the frame's pointer words
 * via the appropriate bitmap helper.
 * NOTE(review): several case labels, local declarations ('p', 'r', 'dyn',
 * 'size', 'bco', 'ret_fun') and closing braces are elided in this
 * extract. */
98 checkStackFrame( StgPtr c )
101 const StgRetInfoTable* info;
103 info = get_ret_itbl((StgClosure *)c);
105 /* All activation records have 'bitmap' style layout info. */
106 switch (info->i.type) {
107 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
116 p = (P_)(r->payload);
117 checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
118 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
120 // skip over the non-pointers
121 p += RET_DYN_NONPTRS(dyn);
123 // follow the ptr words
124 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
125 checkClosureShallow((StgClosure *)*p);
129 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
130 RET_DYN_NONPTR_REGS_SIZE +
131 RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
/* NOTE(review): the UPDATE_FRAME case label is elided above this assert;
 * presumably it falls through into the small-bitmap cases below. */
135 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
136 case ATOMICALLY_FRAME:
137 case CATCH_RETRY_FRAME:
138 case CATCH_STM_FRAME:
140 // small bitmap cases (<= 32 entries)
143 size = BITMAP_SIZE(info->i.layout.bitmap);
144 checkSmallBitmap((StgPtr)c + 1,
145 BITMAP_BITS(info->i.layout.bitmap), size);
/* BCO frame: the BCO itself sits in the word after the return address,
 * and its payload is checked against the BCO's own bitmap. */
151 bco = (StgBCO *)*(c+1);
152 size = BCO_BITMAP_SIZE(bco);
153 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
157 case RET_BIG: // large bitmap (> 32 entries)
158 size = GET_LARGE_BITMAP(&info->i)->size;
159 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
/* RET_FUN frame: layout is determined by the function's info table
 * (fun_type selects small/large/generic argument bitmap). */
164 StgFunInfoTable *fun_info;
167 ret_fun = (StgRetFun *)c;
168 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
169 size = ret_fun->size;
170 switch (fun_info->f.fun_type) {
172 checkSmallBitmap((StgPtr)ret_fun->payload,
173 BITMAP_BITS(fun_info->f.b.bitmap), size);
176 checkLargeBitmap((StgPtr)ret_fun->payload,
177 GET_FUN_LARGE_BITMAP(fun_info), size);
180 checkSmallBitmap((StgPtr)ret_fun->payload,
181 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
185 return sizeofW(StgRetFun) + size;
/* Unknown frame type: fatal in a sanity build. */
189 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
193 // check sections of stack between update frames
/* Walk a contiguous chunk of stack from 'sp' up to 'stack_end', checking
 * each frame in turn; checkStackFrame returns each frame's size so 'p'
 * advances frame by frame.
 * NOTE(review): the declaration/initialisation of 'p' is elided in this
 * extract; presumably p starts at sp. */
195 checkStackChunk( StgPtr sp, StgPtr stack_end )
200 while (p < stack_end) {
201 p += checkStackFrame( p );
203 // ASSERT( p == stack_end ); -- HWL
/* Check the payload of a PAP/AP: 'fun' is the function closure, 'payload'
 * holds the n_args pending arguments. The argument words are checked
 * against the function's bitmap (small, large, BCO, or generic, selected
 * by fun_type). Finally the closure tag on 'fun' is validated against the
 * function's arity.
 * NOTE(review): the case labels of the fun_type switch and some closing
 * braces are elided in this extract. */
207 checkPAP (StgClosure *fun, StgClosure** payload, StgWord n_args)
210 StgFunInfoTable *fun_info;
212 fun = UNTAG_CLOSURE(fun);
213 ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
214 fun_info = get_fun_itbl(fun);
216 p = (StgClosure *)payload;
217 switch (fun_info->f.fun_type) {
219 checkSmallBitmap( (StgPtr)payload,
220 BITMAP_BITS(fun_info->f.b.bitmap), n_args );
223 checkLargeBitmap( (StgPtr)payload,
224 GET_FUN_LARGE_BITMAP(fun_info),
228 checkLargeBitmap( (StgPtr)payload,
233 checkSmallBitmap( (StgPtr)payload,
234 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
/* Tag invariant: functions with arity beyond TAG_MASK carry tag 1;
 * otherwise the tag equals the arity. */
239 ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(fun) == 1
240 : GET_CLOSURE_TAG(fun) == fun_info->f.arity);
/* Full sanity check of one closure: validate its info pointer, its
 * static/dynamic placement, and (per closure type) the pointer fields of
 * its payload. Returns the closure's size in words so heap walkers can
 * advance to the next object.
 * NOTE(review): many case labels, local declarations ('i', 'info' binding,
 * etc.) and closing braces are elided in this extract; the per-type
 * comments below describe only what the visible lines show. */
245 checkClosure( StgClosure* p )
247 const StgInfoTable *info;
249 ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
251 p = UNTAG_CLOSURE(p);
252 /* Is it a static closure (i.e. in the data segment)? */
253 if (!HEAP_ALLOCED(p)) {
254 ASSERT(closure_STATIC(p));
256 ASSERT(!closure_STATIC(p));
260 switch (info->type) {
/* MVAR: head/tail/value must all look like closure pointers; the
 * blocking queue is checked separately (PAR vs. non-PAR variants). */
265 { StgMVar *mvar = (StgMVar *)p;
266 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
267 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
268 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
271 checkBQ((StgBlockingQueueElement *)mvar->head, p);
273 checkBQ(mvar->head, p);
276 return sizeofW(StgMVar);
/* Thunk-like closures: check each pointer word of the payload. */
287 for (i = 0; i < info->layout.payload.ptrs; i++) {
288 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
290 return thunk_sizeW_fromITBL(info);
/* Generic closures (constructors, functions, blackholes, ...):
 * check pointer payload words then size from the info table. */
307 case IND_OLDGEN_PERM:
310 case SE_CAF_BLACKHOLE:
318 case CONSTR_NOCAF_STATIC:
323 for (i = 0; i < info->layout.payload.ptrs; i++) {
324 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
326 return sizeW_fromITBL(info);
/* BCO: instrs, literals and ptrs arrays must be closure pointers. */
330 StgBCO *bco = (StgBCO *)p;
331 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
332 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
333 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
334 return bco_sizeW(bco);
337 case IND_STATIC: /* (1, 0) closure */
338 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
339 return sizeW_fromITBL(info);
/* WEAK: checked field-by-field because the info table's layout does
 * not describe the actual pointer layout. */
342 /* deal with these specially - the info table isn't
343 * representative of the actual layout.
345 { StgWeak *w = (StgWeak *)p;
346 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
347 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
348 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
350 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
352 return sizeW_fromITBL(info);
356 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
357 return THUNK_SELECTOR_sizeW();
/* IND: tolerated during execution even though GC removes them. */
361 /* we don't expect to see any of these after GC
362 * but they might appear during execution
364 StgInd *ind = (StgInd *)p;
365 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
366 return sizeofW(StgInd);
/* Stack frames must never appear in the heap proper. */
376 case ATOMICALLY_FRAME:
377 case CATCH_RETRY_FRAME:
378 case CATCH_STM_FRAME:
379 barf("checkClosure: stack frame");
383 StgAP* ap = (StgAP *)p;
384 checkPAP (ap->fun, ap->payload, ap->n_args);
390 StgPAP* pap = (StgPAP *)p;
391 checkPAP (pap->fun, pap->payload, pap->n_args);
392 return pap_sizeW(pap);
/* AP_STACK: its payload is a saved stack chunk, checked as such. */
397 StgAP_STACK *ap = (StgAP_STACK *)p;
398 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
399 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
400 return ap_stack_sizeW(ap);
404 return arr_words_sizeW((StgArrWords *)p);
406 case MUT_ARR_PTRS_CLEAN:
407 case MUT_ARR_PTRS_DIRTY:
408 case MUT_ARR_PTRS_FROZEN:
409 case MUT_ARR_PTRS_FROZEN0:
411 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
413 for (i = 0; i < a->ptrs; i++) {
414 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
416 return mut_arr_ptrs_sizeW(a);
420 checkTSO((StgTSO *)p);
421 return tso_sizeW((StgTSO *)p);
/* Parallel-RTS closure types (GUM): BLOCKED_FETCH, FETCH_ME, etc. */
426 ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
427 ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
428 return sizeofW(StgBlockedFetch); // see size used in evacuate()
432 return sizeofW(StgFetchMe);
436 ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
437 return sizeofW(StgFetchMe); // see size used in evacuate()
440 checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
441 return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
444 /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
445 ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
446 if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
447 checkBQ(((StgRBH *)p)->blocking_queue, p);
448 ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
449 return BLACKHOLE_sizeW(); // see size used in evacuate()
450 // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
/* STM closure types: check their pointer fields individually. */
454 case TVAR_WATCH_QUEUE:
456 StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
457 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
458 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
459 return sizeofW(StgTVarWatchQueue);
462 case INVARIANT_CHECK_QUEUE:
464 StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
465 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
466 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
467 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
468 return sizeofW(StgInvariantCheckQueue);
471 case ATOMIC_INVARIANT:
473 StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
474 ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
475 ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
476 return sizeofW(StgAtomicInvariant);
481 StgTVar *tv = (StgTVar *)p;
482 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
483 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
484 return sizeofW(StgTVar);
490 StgTRecChunk *tc = (StgTRecChunk *)p;
491 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
492 for (i = 0; i < tc -> next_entry_idx; i ++) {
493 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
494 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
495 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
497 return sizeofW(StgTRecChunk);
502 StgTRecHeader *trec = (StgTRecHeader *)p;
503 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
504 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
505 return sizeofW(StgTRecHeader);
/* EVACUATED closures and unknown types are fatal in a sanity build. */
510 barf("checkClosure: found EVACUATED closure %d",
513 barf("checkClosure (closure type %d)", info->type);
519 #define PVM_PE_MASK 0xfffc0000
520 #define MAX_PVM_PES MAX_PES
521 #define MAX_PVM_TIDS MAX_PES
522 #define MAX_SLOTS 100000
/* PVM plausibility check: split a PVM task id into PE number (high bits
 * above bit 18, per PVM_PE_MASK) and per-PE index, and require both to be
 * in range. NOTE(review): the return statement is elided in this extract;
 * presumably it returns 'ok'. */
525 looks_like_tid(StgInt tid)
527 StgInt hi = (tid & PVM_PE_MASK) >> 18;
528 StgInt lo = (tid & ~PVM_PE_MASK);
529 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Plausibility check for a GALA slot number: only an upper bound, since
 * without the tid we cannot know the owning PE's actual slot count.
 * NOTE(review): the return statement is elided in this extract. */
534 looks_like_slot(StgInt slot)
536 /* if tid is known better use looks_like_ga!! */
537 rtsBool ok = slot<MAX_SLOTS;
538 // This refers only to the no. of slots on the current PE
539 // rtsBool ok = slot<=highest_slot();
/* Plausibility check for a global address: tid must look valid, and the
 * slot is bounded tightly (highest_slot()) when the GA belongs to this PE,
 * loosely (MAX_SLOTS) otherwise.
 * NOTE(review): the return statement is elided in this extract. */
544 looks_like_ga(globalAddr *ga)
546 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
547 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
548 (ga)->payload.gc.slot<=highest_slot() :
549 (ga)->payload.gc.slot<MAX_SLOTS;
550 rtsBool ok = is_tid && is_slot;
557 /* -----------------------------------------------------------------------------
560 After garbage collection, the live heap is in a state where we can
561 run through and check that all the pointers point to the right
562 place. This function starts at a given position and sanity-checks
563 all the objects in the remainder of the chain.
564 -------------------------------------------------------------------------- */
/* Walk a chain of heap blocks and sanity-check every closure in them.
 * Disabled (early return, elided here) under THREADED_RTS because slop
 * cannot be zeroed there. After each closure, skips over slop words that
 * do not look like info pointers.
 * NOTE(review): the declaration/initialisation of 'p' per block and the
 * advance of 'p' by 'size' are elided in this extract. */
567 checkHeap(bdescr *bd)
571 #if defined(THREADED_RTS)
572 // heap sanity checking doesn't work with SMP, because we can't
573 // zero the slop (see Updates.h).
577 for (; bd != NULL; bd = bd->link) {
579 while (p < bd->free) {
580 nat size = checkClosure((StgClosure *)p);
581 /* This is the smallest size of closure that can live in the heap */
582 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
/* skip over slop: words that are too small or don't look like info ptrs */
586 while (p < bd->free &&
587 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
594 Check heap between start and end. Used after unpacking graphs.
/* Parallel-RTS variant: check the heap between 'start' and 'end', used
 * after unpacking graphs received from other PEs. FETCH_MEs created and
 * commoned-up during unpacking must not appear in the LAGA table, and
 * slop in INDs is explicitly marked with a sentinel word.
 * NOTE(review): the surrounding #if/#else and the declarations of 'p' and
 * 'size' are elided in this extract. */
597 checkHeapChunk(StgPtr start, StgPtr end)
599 extern globalAddr *LAGAlookup(StgClosure *addr);
603 for (p=start; p<end; p+=size) {
604 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
605 if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
606 *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
607 /* if it's a FM created during unpack and commoned up, it's not global */
608 ASSERT(LAGAlookup((StgClosure*)p)==NULL);
609 size = sizeofW(StgFetchMe);
610 } else if (get_itbl((StgClosure*)p)->type == IND) {
611 *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
612 size = sizeofW(StgInd);
614 size = checkClosure((StgClosure *)p);
615 /* This is the smallest size of closure that can live in the heap. */
616 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
/* Plain variant: sanity-check every closure between 'start' and 'end'.
 * NOTE(review): the declarations of 'p' and 'size' are elided in this
 * extract. */
622 checkHeapChunk(StgPtr start, StgPtr end)
627 for (p=start; p<end; p+=size) {
628 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
629 size = checkClosure((StgClosure *)p);
630 /* This is the smallest size of closure that can live in the heap. */
631 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
/* Check the closure at the start of each block in a block chain.
 * NOTE(review): the loop over bd->link is elided in this extract. */
637 checkChain(bdescr *bd)
640 checkClosure((StgClosure *)bd->start);
/* Sanity-check a TSO: stack-pointer bounds, blocking state consistent
 * with block_info, the link field, and finally the TSO's stack contents.
 * Relocated and killed threads are skipped (GC does not follow their
 * pointers). Parallel-RTS (GranSim/GUM) cases check PAR-specific fields.
 * NOTE(review): the declaration of 'sp', several case labels and 'break'
 * statements, and the early returns for the Relocated/Killed cases are
 * elided in this extract. */
646 checkTSO(StgTSO *tso)
649 StgPtr stack = tso->stack;
650 StgOffset stack_size = tso->stack_size;
651 StgPtr stack_end = stack + stack_size;
653 if (tso->what_next == ThreadRelocated) {
658 if (tso->what_next == ThreadKilled) {
659 /* The garbage collector doesn't bother following any pointers
660 * from dead threads, so don't check sanity here.
665 ASSERT(stack <= sp && sp < stack_end);
668 ASSERT(tso->par.magic==TSO_MAGIC);
670 switch (tso->why_blocked) {
672 checkClosureShallow(tso->block_info.closure);
673 ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
674 get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
676 case BlockedOnGA_NoSend:
677 checkClosureShallow(tso->block_info.closure);
678 ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
680 case BlockedOnBlackHole:
681 checkClosureShallow(tso->block_info.closure);
682 ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
683 get_itbl(tso->block_info.closure)->type==RBH);
688 #if defined(mingw32_HOST_OS)
689 case BlockedOnDoProc:
691 /* isOnBQ(blocked_queue) */
693 case BlockedOnException:
694 /* isOnSomeBQ(tso) */
695 ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
698 ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
701 ASSERT(tso->block_info.closure == END_TSO_QUEUE);
705 Could check other values of why_blocked but I am more
706 lazy than paranoid (bad combination) -- HWL
710 /* if the link field is non-nil it must point to one of these
711 three closure types */
712 ASSERT(tso->link == END_TSO_QUEUE ||
713 get_itbl(tso->link)->type == TSO ||
714 get_itbl(tso->link)->type == BLOCKED_FETCH ||
715 get_itbl(tso->link)->type == CONSTR);
718 checkStackChunk(sp, stack_end);
/* GranSim-only: run checkTSO over every TSO on every PE's run queue,
 * reporting progress via debugBelch.
 * NOTE(review): the declarations of 'i', 'tso' and 'tsos', the checkTSO
 * call inside the loop, and the tsos counter update are elided in this
 * extract. */
723 checkTSOsSanity(void) {
727 debugBelch("Checking sanity of all runnable TSOs:");
729 for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
730 for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
731 debugBelch("TSO %p on PE %d ...", tso, i);
738 debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* GranSim-only: check the structural invariants of one PE's run queue —
 * head/tail non-NULL (NIL is END_TSO_QUEUE, never NULL), head and tail
 * NIL-consistent, every link pointing at the next element, and the last
 * element equal to the recorded tail. When check_TSO_too is set each TSO
 * is presumably also checked (that call is elided in this extract).
 * NOTE(review): the declarations of 'tso' and 'prev' and the loop's
 * termination condition are elided in this extract. */
745 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
749 /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
750 ASSERT(run_queue_hds[proc]!=NULL);
751 ASSERT(run_queue_tls[proc]!=NULL);
752 /* if either head or tail is NIL then the other one must be NIL, too */
753 ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
754 ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
755 for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
757 prev=tso, tso=tso->link) {
758 ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
759 (prev==END_TSO_QUEUE || prev->link==tso));
763 ASSERT(prev==run_queue_tls[proc]);
/* GranSim-only: run checkThreadQSanity over every PE's run queue.
 * NOTE(review): the declaration of 'p' is elided in this extract. */
767 checkThreadQsSanity (rtsBool check_TSO_too)
771 for (p=0; p<RtsFlags.GranFlags.proc; p++)
772 checkThreadQSanity(p, check_TSO_too);
777 Check that all TSOs have been evacuated.
778 Optionally also check the sanity of the TSOs.
/* Walk the global all_threads list and verify every entry looks like a
 * TSO closure; when checkTSOs is set, each TSO is presumably also
 * deep-checked (that call is elided in this extract).
 * NOTE(review): the declaration of 'tso' is elided in this extract. */
781 checkGlobalTSOList (rtsBool checkTSOs)
783 extern StgTSO *all_threads;
785 for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
786 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
787 ASSERT(get_itbl(tso)->type == TSO);
793 /* -----------------------------------------------------------------------------
794 Check mutable list sanity.
795 -------------------------------------------------------------------------- */
/* Check a generation's mutable list: every heap-allocated closure pointer
 * recorded on the list must live in generation 'gen'.
 * NOTE(review): the declarations of 'bd', 'q' and 'p' are elided in this
 * extract. */
798 checkMutableList( bdescr *mut_bd, nat gen )
804 for (bd = mut_bd; bd != NULL; bd = bd->link) {
805 for (q = bd->start; q < bd->free; q++) {
806 p = (StgClosure *)*q;
807 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
813 Check the static objects list.
/* Walk the static-objects list, following each object's static link field
 * (which differs by closure type) until END_OF_STATIC_LIST. For
 * IND_STATICs the indirectee is validated as a closure.
 * NOTE(review): the binding of 'info', several case labels and closing
 * braces are elided in this extract. The barf message below contains a
 * typo ("checkStaticObjetcs") in the original runtime string; left
 * unchanged here since this edit is documentation-only. */
816 checkStaticObjects ( StgClosure* static_objects )
818 StgClosure *p = static_objects;
821 while (p != END_OF_STATIC_LIST) {
824 switch (info->type) {
827 StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);
829 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
830 ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
831 p = *IND_STATIC_LINK((StgClosure *)p);
836 p = *THUNK_STATIC_LINK((StgClosure *)p);
840 p = *FUN_STATIC_LINK((StgClosure *)p);
844 p = *STATIC_LINK(info,(StgClosure *)p);
848 barf("checkStaticObjetcs: strange closure %p (%s)",
855 Check the sanity of a blocking queue starting at bqe with closure being
856 the closure holding the blocking queue.
857 Note that in GUM we can have several different closure types in a
/* Parallel-RTS variant: check a blocking queue hanging off 'closure'
 * (an MVAR, FETCH_ME_BQ or RBH). In GUM the queue may contain several
 * different closure types, hence the per-element type dispatch; unknown
 * element types are fatal.
 * NOTE(review): the loop over the queue, its case labels, the advance of
 * 'bqe' and the 'end' termination handling are partly elided in this
 * extract. */
862 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
864 rtsBool end = rtsFalse;
865 StgInfoTable *info = get_itbl(closure);
867 ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
870 switch (get_itbl(bqe)->type) {
873 checkClosure((StgClosure *)bqe);
875 end = (bqe==END_BQ_QUEUE);
879 checkClosure((StgClosure *)bqe);
884 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
885 get_itbl(bqe)->type, closure, info_type(closure));
/* Plain variant: check an MVAR's blocking queue of TSOs; each element is
 * checked as a closure, and any non-TSO element type is fatal.
 * NOTE(review): the loop structure, case labels and the advance of 'bqe'
 * are partly elided in this extract. */
891 checkBQ (StgTSO *bqe, StgClosure *closure)
893 rtsBool end = rtsFalse;
894 StgInfoTable *info = get_itbl(closure);
896 ASSERT(info->type == MVAR);
899 switch (get_itbl(bqe)->type) {
902 checkClosure((StgClosure *)bqe);
904 end = (bqe==END_BQ_QUEUE);
908 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
909 get_itbl(bqe)->type, closure, info_type(closure));
918 This routine checks the sanity of the LAGA and GALA tables. They are
919 implemented as lists through one hash table, LAtoGALAtable, because entries
920 in both tables have the same structure:
921 - the LAGA table maps local addresses to global addresses; it starts
922 with liveIndirections
923 - the GALA table maps global addresses to local addresses; it starts
930 /* hidden in parallel/Global.c; only accessed for testing here */
931 extern GALA *liveIndirections;
932 extern GALA *liveRemoteGAs;
933 extern HashTable *LAtoGALAtable;
936 checkLAGAtable(rtsBool check_closures)
939 nat n=0, m=0; // debugging
941 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
943 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
944 ASSERT(!gala->preferred || gala == gala0);
945 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
946 ASSERT(gala->next!=gala); // detect direct loops
947 if ( check_closures ) {
948 checkClosure((StgClosure *)gala->la);
952 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
954 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
955 ASSERT(!gala->preferred || gala == gala0);
956 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
957 ASSERT(gala->next!=gala); // detect direct loops
959 if ( check_closures ) {
960 checkClosure((StgClosure *)gala->la);