1 /* -----------------------------------------------------------------------------
2 * $Id: Sanity.c,v 1.32 2003/03/24 14:46:56 simonmar Exp $
4 * (c) The GHC Team, 1998-2001
6 * Sanity checking code for the heap and stack.
8  * Used when debugging: check that everything is reasonable.
10 * - All things that are supposed to be pointers look like pointers.
12 * - Objects in text space are marked as static closures, those
13 * in the heap are dynamic.
15 * ---------------------------------------------------------------------------*/
17 #include "PosixSource.h"
20 #ifdef DEBUG /* whole file */
24 #include "BlockAlloc.h"
29 #include "StoragePriv.h" // for END_OF_STATIC_LIST
32 /* -----------------------------------------------------------------------------
34 -------------------------------------------------------------------------- */
36 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
37 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
38 static void checkClosureShallow ( StgClosure * );
40 /* -----------------------------------------------------------------------------
42 -------------------------------------------------------------------------- */
/* Check `size` payload words against a single-word ("small") bitmap.
 * Convention (see GHC RTS bitmaps): a CLEARED bit marks a pointer word,
 * a set bit marks a non-pointer.  Pointer words get a shallow closure check. */
45 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
51     for(i = 0; i < size; i++, bitmap >>= 1 ) {
52 	if ((bitmap & 1) == 0) {
	    /* bit == 0 => pointer word; shallow check avoids recursing into it */
53 	    checkClosureShallow((StgClosure *)payload[i]);
/* Check `size` payload words against a multi-word ("large") bitmap.
 * Outer loop walks the bitmap words; inner loop walks the bits of the
 * current word (BITS_IN(W_) per word), same 0 == pointer convention as
 * checkSmallBitmap. */
59 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
65     for (bmp=0; i < size; bmp++) {
66 	StgWord bitmap = large_bitmap->bitmap[bmp];
68 	for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
69 	    if ((bitmap & 1) == 0) {
70 		checkClosureShallow((StgClosure *)payload[i]);
77  * check that it looks like a valid closure - without checking its payload
78  * used to avoid recursion between checking PAPs and checking stack
83 checkClosureShallow( StgClosure* p )
85   ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
87   /* Is it a static closure? */
88   if (!HEAP_ALLOCED(p)) {
      /* closures outside the heap must carry the static flag ... */
89       ASSERT(closure_STATIC(p));
      /* ... and heap-allocated closures must not (else branch) */
91       ASSERT(!closure_STATIC(p));
95 // check an individual stack object
/* Sanity-checks one activation record at `c` and returns its size in words,
 * so the caller (checkStackChunk) can step to the next frame.
 * Dispatches on the frame's return-info-table type. */
97 checkStackFrame( StgPtr c )
100     const StgRetInfoTable* info;
102     info = get_ret_itbl((StgClosure *)c);
104     /* All activation records have 'bitmap' style layout info. */
105     switch (info->i.type) {
106     case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
	/* first check the fixed RET_DYN_SIZE words via the liveness mask ... */
115 	p = (P_)(r->payload);
116 	checkSmallBitmap(p,GET_LIVENESS(r->liveness),RET_DYN_SIZE);
119 	// skip over the non-pointers
120 	p += GET_NONPTRS(dyn);
122 	// follow the ptr words
123 	for (size = GET_PTRS(dyn); size > 0; size--) {
124 	    checkClosureShallow((StgClosure *)*p);
	/* total frame size = header + fixed payload + dynamic non-ptrs + ptrs */
128 	return sizeofW(StgRetDyn) + RET_DYN_SIZE +
129 	   GET_NONPTRS(dyn) + GET_PTRS(dyn);
	/* update frame: just check the updatee pointer looks like a closure */
133 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
135     // small bitmap cases (<= 32 entries)
139 	size = BITMAP_SIZE(info->i.layout.bitmap);
140 	checkSmallBitmap((StgPtr)c + 1,
141 			BITMAP_BITS(info->i.layout.bitmap), size);
	/* RET_BCO (presumably): the BCO sits in the first payload word and
	 * supplies its own bitmap -- TODO confirm, case label elided here */
147 	bco = (StgBCO *)*(c+1);
148 	size = BCO_BITMAP_SIZE(bco);
149 	checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
153     case RET_BIG: // large bitmap (> 32 entries)
155 	size = info->i.layout.large_bitmap->size;
156 	checkLargeBitmap((StgPtr)c + 1, info->i.layout.large_bitmap, size);
	/* RET_FUN frame: layout of the payload is described by the info
	 * table of the function closure it holds, not by this frame */
161 	StgFunInfoTable *fun_info;
164 	ret_fun = (StgRetFun *)c;
165 	fun_info = get_fun_itbl(ret_fun->fun);
166 	size = ret_fun->size;
167 	switch (fun_info->fun_type) {
169 	    checkSmallBitmap((StgPtr)ret_fun->payload,
170 			     BITMAP_BITS(fun_info->bitmap), size);
173 	    checkLargeBitmap((StgPtr)ret_fun->payload,
174 			     (StgLargeBitmap *)fun_info->bitmap, size);
	    /* generic arg kinds: use the precomputed standard bitmaps */
177 	    checkSmallBitmap((StgPtr)ret_fun->payload,
178 			     BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]),
182 	return sizeofW(StgRetFun) + size;
186 	barf("checkStackFrame: weird activation record found on stack (%p).",c);
190 // check sections of stack between update frames
/* Walks the stack from `sp` to `stack_end`, letting checkStackFrame
 * validate each frame and report how far to advance. */
192 checkStackChunk( StgPtr sp, StgPtr stack_end )
197     while (p < stack_end) {
198 	p += checkStackFrame( p );
200     // ASSERT( p == stack_end ); -- HWL
/* Full sanity check of a single closure: validates its info pointer,
 * static/dynamic placement, and (per closure type) its pointer fields.
 * Returns the closure's size in words so callers can walk the heap. */
204 checkClosure( StgClosure* p )
206     const StgInfoTable *info;
208     ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
210     /* Is it a static closure (i.e. in the data segment)? */
211     if (!HEAP_ALLOCED(p)) {
212 	ASSERT(closure_STATIC(p));
214 	ASSERT(!closure_STATIC(p));
218     switch (info->type) {
	/* MVAR: head/tail of the blocked-TSO queue plus the value word */
222 	StgMVar *mvar = (StgMVar *)p;
223 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
224 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
225 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
	/* parallel builds also walk the blocking queue (two variants,
	 * selected by elided #ifdefs) */
228 	checkBQ((StgBlockingQueueElement *)mvar->head, p);
230 	checkBQ(mvar->head, p);
233 	return sizeofW(StgMVar);
	/* thunk-like case: check the pointer fields given by the info table;
	 * size is padded up to MIN_UPD_SIZE so the thunk can be updated in place */
244 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
245 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
247 	    return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
251 	checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
252 	/* fall through to basic ptr check */
	/* generic "pointers first" closures: FUN/CONSTR/IND variants etc.
	 * (most case labels elided in this view) */
267     case IND_OLDGEN_PERM:
270     case SE_CAF_BLACKHOLE:
279     case CONSTR_CHARLIKE:
281     case CONSTR_NOCAF_STATIC:
286 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
287 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
289 	    return sizeW_fromITBL(info);
	/* BCO: all four fields are pointers to heap objects */
293 	StgBCO *bco = (StgBCO *)p;
294 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
295 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
296 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
297 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
298 	return bco_sizeW(bco);
301     case IND_STATIC: /* (1, 0) closure */
302 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
303 	return sizeW_fromITBL(info);
306 	/* deal with these specially - the info table isn't
307 	 * representative of the actual layout.
309 	{ StgWeak *w = (StgWeak *)p;
310 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
311 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
312 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
314 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
316 	  return sizeW_fromITBL(info);
320 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
321 	return sizeofW(StgHeader) + MIN_UPD_SIZE;
325 	/* we don't expect to see any of these after GC
326 	 * but they might appear during execution
329 	    StgInd *ind = (StgInd *)p;
330 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
331 	    q = (P_)p + sizeofW(StgInd);
332 	    while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
	/* stack frame types should never appear in the heap proper */
345 	    barf("checkClosure: stack frame");
347     case AP: /* we can treat this as being the same as a PAP */
	/* PAP/AP: payload layout is described by the applied function's
	 * info table (mirrors the RET_FUN logic in checkStackFrame) */
350 	    StgFunInfoTable *fun_info;
351 	    StgPAP* pap = (StgPAP *)p;
353 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(pap->fun));
354 	    fun_info = get_fun_itbl(pap->fun);
356 	    p = (StgClosure *)pap->payload;
357 	    switch (fun_info->fun_type) {
359 		checkSmallBitmap( (StgPtr)pap->payload,
360 				  BITMAP_BITS(fun_info->bitmap), pap->n_args );
363 		checkLargeBitmap( (StgPtr)pap->payload,
364 				  (StgLargeBitmap *)fun_info->bitmap,
368 		checkLargeBitmap( (StgPtr)pap->payload,
369 				  BCO_BITMAP(pap->fun),
373 		checkSmallBitmap( (StgPtr)pap->payload,
374 				  BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]),
378 	    return pap_sizeW(pap);
	/* AP_STACK: payload is a saved stack chunk; check it as a stack */
383 	    StgAP_STACK *ap = (StgAP_STACK *)p;
384 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
385 	    checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
386 	    return ap_stack_sizeW(ap);
	/* ARR_WORDS holds no pointers; nothing to check beyond its size */
390 	return arr_words_sizeW((StgArrWords *)p);
393     case MUT_ARR_PTRS_FROZEN:
395 	    StgMutArrPtrs* a = (StgMutArrPtrs *)p;
397 	    for (i = 0; i < a->ptrs; i++) {
398 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
400 	    return mut_arr_ptrs_sizeW(a);
404 	checkTSO((StgTSO *)p);
405 	return tso_sizeW((StgTSO *)p);
	/* GUM (parallel) closure types: BLOCKED_FETCH / FETCH_ME /
	 * FETCH_ME_BQ / RBH (case labels elided in this view) */
410 	ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
411 	ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
412 	return sizeofW(StgBlockedFetch);  // see size used in evacuate()
416 	return sizeofW(StgFetchMe);
420 	ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
421 	return sizeofW(StgFetchMe);  // see size used in evacuate()
424 	checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
425 	return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
428 	/* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
429 	ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
430 	if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
431 	  checkBQ(((StgRBH *)p)->blocking_queue, p);
432 	ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
433 	return BLACKHOLE_sizeW();   // see size used in evacuate()
434 	// sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
	/* EVACUATED closures must not survive into a sanity-checked heap */
439 	    barf("checkClosure: found EVACUATED closure %d",
442 	    barf("checkClosure (closure type %d)", info->type);
/* PVM task-id sanity limits (parallel builds only).  A PVM tid packs the
 * PE number in the high bits (PVM_PE_MASK, shifted by 18 in looks_like_tid)
 * and the per-PE tid in the low bits -- presumably matching PVM's own
 * encoding; TODO confirm against the PVM headers in use. */
448 #define PVM_PE_MASK    0xfffc0000
449 #define MAX_PVM_PES    MAX_PES
450 #define MAX_PVM_TIDS   MAX_PES
451 #define MAX_SLOTS      100000
/* Heuristic validity check for a PVM task id: split into PE number (hi)
 * and per-PE id (lo) and require both to be in range, with hi nonzero. */
454 looks_like_tid(StgInt tid)
456   StgInt hi = (tid & PVM_PE_MASK) >> 18;
457   StgInt lo = (tid & ~PVM_PE_MASK);
458   rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Heuristic validity check for a GA slot number when the owning tid is
 * unknown: only bounds it by the global MAX_SLOTS constant. */
463 looks_like_slot(StgInt slot)
465   /* if tid is known better use looks_like_ga!! */
466   rtsBool ok = slot<MAX_SLOTS;
467   // This refers only to the no. of slots on the current PE
468   // rtsBool ok = slot<=highest_slot();
/* Heuristic validity check for a global address: tid must look valid, and
 * the slot bound is tighter (highest_slot()) when the GA is local to this
 * PE (gtid == mytid), looser (MAX_SLOTS) for remote GAs. */
473 looks_like_ga(globalAddr *ga)
475   rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
476   rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
477 		     (ga)->payload.gc.slot<=highest_slot() :
478 	             (ga)->payload.gc.slot<MAX_SLOTS;
479   rtsBool ok = is_tid && is_slot;
486 /* -----------------------------------------------------------------------------
489    After garbage collection, the live heap is in a state where we can
490    run through and check that all the pointers point to the right
491    place.  This function starts at a given position and sanity-checks
492    all the objects in the remainder of the chain.
493    -------------------------------------------------------------------------- */
496 checkHeap(bdescr *bd)
500   for (; bd != NULL; bd = bd->link) {
502 	while (p < bd->free) {
503 	    nat size = checkClosure((StgClosure *)p);
504 	    /* This is the smallest size of closure that can live in the heap */
505 	    ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
	    /* skip slop words between closures until something that looks
	     * like a real info pointer turns up (small values are padding) */
509 	    while (p < bd->free &&
510 		   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
517   Check heap between start and end. Used after unpacking graphs.
/* Parallel (GUM) variant: tolerates FETCH_ME closures created and then
 * commoned-up during graph unpacking, and marks slop in INDs. */
520 checkHeapChunk(StgPtr start, StgPtr end)
522   extern globalAddr *LAGAlookup(StgClosure *addr);
526   for (p=start; p<end; p+=size) {
527     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
528     if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
529 	*(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
530       /* if it's a FM created during unpack and commoned up, it's not global */
531       ASSERT(LAGAlookup((StgClosure*)p)==NULL);
532       size = sizeofW(StgFetchMe);
533     } else if (get_itbl((StgClosure*)p)->type == IND) {
534       *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
537       size = checkClosure((StgClosure *)p);
538       /* This is the smallest size of closure that can live in the heap. */
539       ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Sequential variant: sanity-check every closure between start and end. */
545 checkHeapChunk(StgPtr start, StgPtr end)
550   for (p=start; p<end; p+=size) {
551     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
552     size = checkClosure((StgClosure *)p);
553     /* This is the smallest size of closure that can live in the heap. */
554     ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Check the first closure of each block in a block-descriptor chain. */
560 checkChain(bdescr *bd)
563     checkClosure((StgClosure *)bd->start);
/* Sanity-check a thread state object: its stack pointer bounds, its
 * blocking state (parallel builds), its link field, and finally its
 * live stack contents via checkStackChunk. */
569 checkTSO(StgTSO *tso)
572     StgPtr stack = tso->stack;
573     StgOffset stack_size = tso->stack_size;
574     StgPtr stack_end = stack + stack_size;
    /* a relocated TSO is just a forwarding pointer; nothing to check here */
576     if (tso->what_next == ThreadRelocated) {
581     if (tso->what_next == ThreadKilled) {
582       /* The garbage collector doesn't bother following any pointers
583        * from dead threads, so don't check sanity here.
    /* sp must lie within the stack proper */
588     ASSERT(stack <= sp && sp < stack_end);
591     ASSERT(tso->par.magic==TSO_MAGIC);
    /* parallel builds: the closure a thread blocks on must have the
     * matching blocking-queue closure type */
593     switch (tso->why_blocked) {
595       checkClosureShallow(tso->block_info.closure);
596       ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
597 	     get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
599     case BlockedOnGA_NoSend:
600       checkClosureShallow(tso->block_info.closure);
601       ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
603     case BlockedOnBlackHole:
604       checkClosureShallow(tso->block_info.closure);
605       ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
606 	     get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
607              get_itbl(tso->block_info.closure)->type==RBH);
612       /* isOnBQ(blocked_queue) */
614     case BlockedOnException:
615       /* isOnSomeBQ(tso) */
616       ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
619       ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
623 	Could check other values of why_blocked but I am more
624 	lazy than paranoid (bad combination) -- HWL
628     /* if the link field is non-nil it must point to one of these
629        three closure types */
630     ASSERT(tso->link == END_TSO_QUEUE ||
631 	   get_itbl(tso->link)->type == TSO ||
632 	   get_itbl(tso->link)->type == BLOCKED_FETCH ||
633 	   get_itbl(tso->link)->type == CONSTR);
636     checkStackChunk(sp, stack_end);
/* GranSim: sanity-check every TSO on every processor's run queue,
 * reporting progress to stderr and a summary via belch. */
641 checkTSOsSanity(void) {
645   belch("Checking sanity of all runnable TSOs:");
647   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
648     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
649       fprintf(stderr, "TSO %p on PE %d ...", tso, i);
651       fprintf(stderr, "OK, ");
656   belch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* Check the run queue of one processor: head/tail consistency and the
 * integrity of the link chain (each node's predecessor links to it);
 * optionally also checkTSO each thread. */
663 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
667   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
668   ASSERT(run_queue_hds[proc]!=NULL);
669   ASSERT(run_queue_tls[proc]!=NULL);
670   /* if either head or tail is NIL then the other one must be NIL, too */
671   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
672   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
673   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
675        prev=tso, tso=tso->link) {
676     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
677 	   (prev==END_TSO_QUEUE || prev->link==tso));
    /* after the walk, the last node seen must be the recorded tail */
681   ASSERT(prev==run_queue_tls[proc]);
/* Run checkThreadQSanity over every processor's run queue. */
685 checkThreadQsSanity (rtsBool check_TSO_too)
689   for (p=0; p<RtsFlags.GranFlags.proc; p++)
690     checkThreadQSanity(p, check_TSO_too);
695   Check that all TSOs have been evacuated.
696   Optionally also check the sanity of the TSOs.
699 checkGlobalTSOList (rtsBool checkTSOs)
701   extern  StgTSO *all_threads;
703   for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
      /* every thread on the global list must still be a live TSO closure */
704       ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
705       ASSERT(get_itbl(tso)->type == TSO);
711 /* -----------------------------------------------------------------------------
712    Check mutable list sanity.
713    -------------------------------------------------------------------------- */
/* Every closure on a generation's mutable list must be mutable, live in
 * that generation, and have a plausible mut_link. */
716 checkMutableList( StgMutClosure *p, nat gen )
720     for (; p != END_MUT_LIST; p = p->mut_link) {
722 	ASSERT(closure_MUTABLE(p));
723 	ASSERT(bd->gen_no == gen);
724 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
/* The mut-once list holds closures mutated at most once (IND_OLDGEN and
 * friends); they must NOT be marked mutable, and only the expected
 * closure types may appear (labels partly elided in this view). */
729 checkMutOnceList( StgMutClosure *p, nat gen )
734     for (; p != END_MUT_LIST; p = p->mut_link) {
738 	ASSERT(!closure_MUTABLE(p));
739 	ASSERT(ip_STATIC(info) || bd->gen_no == gen);
740 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
742 	switch (info->type) {
745 	case IND_OLDGEN_PERM:
749 	    barf("checkMutOnceList: strange closure %p (%s)",
750 		 p, info_type((StgClosure *)p));
756   Check the static objects list.
/* Walks the static-objects list GC builds, following the per-type static
 * link field, and checks IND_STATIC indirectees along the way. */
759 checkStaticObjects ( StgClosure* static_objects )
761   StgClosure *p = static_objects;
764   while (p != END_OF_STATIC_LIST) {
767     switch (info->type) {
770 	StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
772 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
773 	ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
774 	p = IND_STATIC_LINK((StgClosure *)p);
779       p = THUNK_STATIC_LINK((StgClosure *)p);
783       p = FUN_STATIC_LINK((StgClosure *)p);
787       p = STATIC_LINK(info,(StgClosure *)p);
      /* NOTE(review): "Objetcs" typo in the barf message below (runtime
       * string, left untouched here) */
791       barf("checkStaticObjetcs: strange closure %p (%s)",
798   Check the sanity of a blocking queue starting at bqe with closure being
799   the closure holding the blocking queue.
800   Note that in GUM we can have several different closure types in a
/* GUM variant: queue elements may be TSOs, BLOCKED_FETCHes, or constrs,
 * so dispatch on each element's closure type (labels partly elided). */
805 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
807   rtsBool end = rtsFalse;
808   StgInfoTable *info = get_itbl(closure);
  /* only these closure types may own a blocking queue */
810   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
811 	 || info->type == FETCH_ME_BQ || info->type == RBH);
814     switch (get_itbl(bqe)->type) {
817       checkClosure((StgClosure *)bqe);
819       end = (bqe==END_BQ_QUEUE);
823       checkClosure((StgClosure *)bqe);
828       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
829 	   get_itbl(bqe)->type, closure, info_type(closure));
/* GranSim variant: blocking queues contain TSOs only; the owning closure
 * must be a BLACKHOLE_BQ or MVAR. */
835 checkBQ (StgTSO *bqe, StgClosure *closure)
837   rtsBool end = rtsFalse;
838   StgInfoTable *info = get_itbl(closure);
840   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
843     switch (get_itbl(bqe)->type) {
846       checkClosure((StgClosure *)bqe);
848       end = (bqe==END_BQ_QUEUE);
852       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
853 	   get_itbl(bqe)->type, closure, info_type(closure));
/* Sequential variant: queue of TSOs terminated by END_TSO_QUEUE.
 * NOTE(review): the barf below passes three args for two conversions
 * (%d, %p) -- the trailing info->type is never printed; confirm intent. */
859 checkBQ (StgTSO *bqe, StgClosure *closure)
861   rtsBool end = rtsFalse;
862   StgInfoTable *info = get_itbl(closure);
864   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
867     switch (get_itbl(bqe)->type) {
869       checkClosure((StgClosure *)bqe);
871       end = (bqe==END_TSO_QUEUE);
875       barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
876 	   get_itbl(bqe)->type, closure, info->type);
886   This routine checks the sanity of the LAGA and GALA tables. They are
887   implemented as lists through one hash table, LAtoGALAtable, because entries
888   in both tables have the same structure:
889   - the LAGA table maps local addresses to global addresses; it starts
890     with liveIndirections
891   - the GALA table maps global addresses to local addresses; it starts
898 /* hidden in parallel/Global.c; only accessed for testing here */
899 extern GALA *liveIndirections;
900 extern GALA *liveRemoteGAs;
901 extern HashTable *LAtoGALAtable;
/* Walk both GALA lists; for each entry, the hash-table lookup on its
 * local address must agree with the entry when it is the preferred one,
 * its local closure must carry a valid info pointer, and trivial
 * self-loops in the list are rejected.  Optionally deep-check closures. */
904 checkLAGAtable(rtsBool check_closures)
907   nat n=0, m=0; // debugging
909   for (gala = liveIndirections; gala != NULL; gala = gala->next) {
911     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
912     ASSERT(!gala->preferred || gala == gala0);
913     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
914     ASSERT(gala->next!=gala); // detect direct loops
915     if ( check_closures ) {
916       checkClosure((StgClosure *)gala->la);
  /* same checks for the remote-GA list */
920   for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
922     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
923     ASSERT(!gala->preferred || gala == gala0);
924     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
925     ASSERT(gala->next!=gala); // detect direct loops
927     if ( check_closures ) {
928       checkClosure((StgClosure *)gala->la);