1 /* -----------------------------------------------------------------------------
2 * $Id: Sanity.c,v 1.33 2003/04/22 16:25:12 simonmar Exp $
4 * (c) The GHC Team, 1998-2001
6 * Sanity checking code for the heap and stack.
8 * Used when debugging: check that everything is reasonable.
10 * - All things that are supposed to be pointers look like pointers.
12 * - Objects in text space are marked as static closures, those
13 * in the heap are dynamic.
15 * ---------------------------------------------------------------------------*/
17 #include "PosixSource.h"
20 #ifdef DEBUG /* whole file */
24 #include "BlockAlloc.h"
29 #include "StoragePriv.h" // for END_OF_STATIC_LIST
32 /* -----------------------------------------------------------------------------
34 -------------------------------------------------------------------------- */
36 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
37 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
38 static void checkClosureShallow ( StgClosure * );
40 /* -----------------------------------------------------------------------------
42 -------------------------------------------------------------------------- */
45 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
51 for(i = 0; i < size; i++, bitmap >>= 1 ) {
52 if ((bitmap & 1) == 0) {
53 checkClosureShallow((StgClosure *)payload[i]);
59 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
65 for (bmp=0; i < size; bmp++) {
66 StgWord bitmap = large_bitmap->bitmap[bmp];
68 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
69 if ((bitmap & 1) == 0) {
70 checkClosureShallow((StgClosure *)payload[i]);
77 * check that it looks like a valid closure - without checking its payload
78 * used to avoid recursion between checking PAPs and checking stack
83 checkClosureShallow( StgClosure* p )
85 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
87 /* Is it a static closure? */
88 if (!HEAP_ALLOCED(p)) {
89 ASSERT(closure_STATIC(p));
91 ASSERT(!closure_STATIC(p));
95 // check an individual stack object
97 checkStackFrame( StgPtr c )
100 const StgRetInfoTable* info;
102 info = get_ret_itbl((StgClosure *)c);
104 /* All activation records have 'bitmap' style layout info. */
105 switch (info->i.type) {
106 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
115 p = (P_)(r->payload);
116 checkSmallBitmap(p,GET_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
117 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
119 // skip over the non-pointers
120 p += GET_NONPTRS(dyn);
122 // follow the ptr words
123 for (size = GET_PTRS(dyn); size > 0; size--) {
124 checkClosureShallow((StgClosure *)*p);
128 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
129 RET_DYN_NONPTR_REGS_SIZE +
130 GET_NONPTRS(dyn) + GET_PTRS(dyn);
134 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
136 // small bitmap cases (<= 32 entries)
140 size = BITMAP_SIZE(info->i.layout.bitmap);
141 checkSmallBitmap((StgPtr)c + 1,
142 BITMAP_BITS(info->i.layout.bitmap), size);
148 bco = (StgBCO *)*(c+1);
149 size = BCO_BITMAP_SIZE(bco);
150 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
154 case RET_BIG: // large bitmap (> 32 entries)
156 size = info->i.layout.large_bitmap->size;
157 checkLargeBitmap((StgPtr)c + 1, info->i.layout.large_bitmap, size);
162 StgFunInfoTable *fun_info;
165 ret_fun = (StgRetFun *)c;
166 fun_info = get_fun_itbl(ret_fun->fun);
167 size = ret_fun->size;
168 switch (fun_info->fun_type) {
170 checkSmallBitmap((StgPtr)ret_fun->payload,
171 BITMAP_BITS(fun_info->bitmap), size);
174 checkLargeBitmap((StgPtr)ret_fun->payload,
175 (StgLargeBitmap *)fun_info->bitmap, size);
178 checkSmallBitmap((StgPtr)ret_fun->payload,
179 BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]),
183 return sizeofW(StgRetFun) + size;
187 barf("checkStackFrame: weird activation record found on stack (%p).",c);
191 // check sections of stack between update frames
193 checkStackChunk( StgPtr sp, StgPtr stack_end )
198 while (p < stack_end) {
199 p += checkStackFrame( p );
201 // ASSERT( p == stack_end ); -- HWL
205 checkClosure( StgClosure* p )
207 const StgInfoTable *info;
209 ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
211 /* Is it a static closure (i.e. in the data segment)? */
212 if (!HEAP_ALLOCED(p)) {
213 ASSERT(closure_STATIC(p));
215 ASSERT(!closure_STATIC(p));
219 switch (info->type) {
223 StgMVar *mvar = (StgMVar *)p;
224 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
225 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
226 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
229 checkBQ((StgBlockingQueueElement *)mvar->head, p);
231 checkBQ(mvar->head, p);
234 return sizeofW(StgMVar);
245 for (i = 0; i < info->layout.payload.ptrs; i++) {
246 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
248 return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
252 checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
253 /* fall through to basic ptr check */
268 case IND_OLDGEN_PERM:
271 case SE_CAF_BLACKHOLE:
280 case CONSTR_CHARLIKE:
282 case CONSTR_NOCAF_STATIC:
287 for (i = 0; i < info->layout.payload.ptrs; i++) {
288 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
290 return sizeW_fromITBL(info);
294 StgBCO *bco = (StgBCO *)p;
295 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
296 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
297 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
298 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
299 return bco_sizeW(bco);
302 case IND_STATIC: /* (1, 0) closure */
303 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
304 return sizeW_fromITBL(info);
307 /* deal with these specially - the info table isn't
308 * representative of the actual layout.
310 { StgWeak *w = (StgWeak *)p;
311 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
312 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
313 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
315 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
317 return sizeW_fromITBL(info);
321 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
322 return sizeofW(StgHeader) + MIN_UPD_SIZE;
326 /* we don't expect to see any of these after GC
327 * but they might appear during execution
330 StgInd *ind = (StgInd *)p;
331 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
332 q = (P_)p + sizeofW(StgInd);
333 while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
346 barf("checkClosure: stack frame");
348 case AP: /* we can treat this as being the same as a PAP */
351 StgFunInfoTable *fun_info;
352 StgPAP* pap = (StgPAP *)p;
354 ASSERT(LOOKS_LIKE_CLOSURE_PTR(pap->fun));
355 fun_info = get_fun_itbl(pap->fun);
357 p = (StgClosure *)pap->payload;
358 switch (fun_info->fun_type) {
360 checkSmallBitmap( (StgPtr)pap->payload,
361 BITMAP_BITS(fun_info->bitmap), pap->n_args );
364 checkLargeBitmap( (StgPtr)pap->payload,
365 (StgLargeBitmap *)fun_info->bitmap,
369 checkLargeBitmap( (StgPtr)pap->payload,
370 BCO_BITMAP(pap->fun),
374 checkSmallBitmap( (StgPtr)pap->payload,
375 BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]),
379 return pap_sizeW(pap);
384 StgAP_STACK *ap = (StgAP_STACK *)p;
385 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
386 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
387 return ap_stack_sizeW(ap);
391 return arr_words_sizeW((StgArrWords *)p);
394 case MUT_ARR_PTRS_FROZEN:
396 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
398 for (i = 0; i < a->ptrs; i++) {
399 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
401 return mut_arr_ptrs_sizeW(a);
405 checkTSO((StgTSO *)p);
406 return tso_sizeW((StgTSO *)p);
411 ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
412 ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
413 return sizeofW(StgBlockedFetch); // see size used in evacuate()
417 return sizeofW(StgFetchMe);
421 ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
422 return sizeofW(StgFetchMe); // see size used in evacuate()
425 checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
426 return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
429 /* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
430 ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
431 if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
432 checkBQ(((StgRBH *)p)->blocking_queue, p);
433 ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
434 return BLACKHOLE_sizeW(); // see size used in evacuate()
435 // sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
440 barf("checkClosure: found EVACUATED closure %d",
443 barf("checkClosure (closure type %d)", info->type);
449 #define PVM_PE_MASK 0xfffc0000
450 #define MAX_PVM_PES MAX_PES
451 #define MAX_PVM_TIDS MAX_PES
452 #define MAX_SLOTS 100000
455 looks_like_tid(StgInt tid)
457 StgInt hi = (tid & PVM_PE_MASK) >> 18;
458 StgInt lo = (tid & ~PVM_PE_MASK);
459 rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
464 looks_like_slot(StgInt slot)
466 /* if tid is known better use looks_like_ga!! */
467 rtsBool ok = slot<MAX_SLOTS;
468 // This refers only to the no. of slots on the current PE
469 // rtsBool ok = slot<=highest_slot();
474 looks_like_ga(globalAddr *ga)
476 rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
477 rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
478 (ga)->payload.gc.slot<=highest_slot() :
479 (ga)->payload.gc.slot<MAX_SLOTS;
480 rtsBool ok = is_tid && is_slot;
487 /* -----------------------------------------------------------------------------
490 After garbage collection, the live heap is in a state where we can
491 run through and check that all the pointers point to the right
492 place. This function starts at a given position and sanity-checks
493 all the objects in the remainder of the chain.
494 -------------------------------------------------------------------------- */
497 checkHeap(bdescr *bd)
501 for (; bd != NULL; bd = bd->link) {
503 while (p < bd->free) {
504 nat size = checkClosure((StgClosure *)p);
505 /* This is the smallest size of closure that can live in the heap */
506 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
510 while (p < bd->free &&
511 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
518 Check heap between start and end. Used after unpacking graphs.
521 checkHeapChunk(StgPtr start, StgPtr end)
523 extern globalAddr *LAGAlookup(StgClosure *addr);
527 for (p=start; p<end; p+=size) {
528 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
529 if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
530 *(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
531 /* if it's a FM created during unpack and commoned up, it's not global */
532 ASSERT(LAGAlookup((StgClosure*)p)==NULL);
533 size = sizeofW(StgFetchMe);
534 } else if (get_itbl((StgClosure*)p)->type == IND) {
535 *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
538 size = checkClosure((StgClosure *)p);
539 /* This is the smallest size of closure that can live in the heap. */
540 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
546 checkHeapChunk(StgPtr start, StgPtr end)
551 for (p=start; p<end; p+=size) {
552 ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
553 size = checkClosure((StgClosure *)p);
554 /* This is the smallest size of closure that can live in the heap. */
555 ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
561 checkChain(bdescr *bd)
564 checkClosure((StgClosure *)bd->start);
570 checkTSO(StgTSO *tso)
573 StgPtr stack = tso->stack;
574 StgOffset stack_size = tso->stack_size;
575 StgPtr stack_end = stack + stack_size;
577 if (tso->what_next == ThreadRelocated) {
582 if (tso->what_next == ThreadKilled) {
583 /* The garbage collector doesn't bother following any pointers
584 * from dead threads, so don't check sanity here.
589 ASSERT(stack <= sp && sp < stack_end);
592 ASSERT(tso->par.magic==TSO_MAGIC);
594 switch (tso->why_blocked) {
596 checkClosureShallow(tso->block_info.closure);
597 ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
598 get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
600 case BlockedOnGA_NoSend:
601 checkClosureShallow(tso->block_info.closure);
602 ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
604 case BlockedOnBlackHole:
605 checkClosureShallow(tso->block_info.closure);
606 ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
607 get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
608 get_itbl(tso->block_info.closure)->type==RBH);
613 /* isOnBQ(blocked_queue) */
615 case BlockedOnException:
616 /* isOnSomeBQ(tso) */
617 ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
620 ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
624 Could check other values of why_blocked but I am more
625 lazy than paranoid (bad combination) -- HWL
629 /* if the link field is non-nil it most point to one of these
630 three closure types */
631 ASSERT(tso->link == END_TSO_QUEUE ||
632 get_itbl(tso->link)->type == TSO ||
633 get_itbl(tso->link)->type == BLOCKED_FETCH ||
634 get_itbl(tso->link)->type == CONSTR);
637 checkStackChunk(sp, stack_end);
642 checkTSOsSanity(void) {
646 belch("Checking sanity of all runnable TSOs:");
648 for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
649 for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
650 fprintf(stderr, "TSO %p on PE %d ...", tso, i);
652 fprintf(stderr, "OK, ");
657 belch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
664 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
668 /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
669 ASSERT(run_queue_hds[proc]!=NULL);
670 ASSERT(run_queue_tls[proc]!=NULL);
671 /* if either head or tail is NIL then the other one must be NIL, too */
672 ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
673 ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
674 for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
676 prev=tso, tso=tso->link) {
677 ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
678 (prev==END_TSO_QUEUE || prev->link==tso));
682 ASSERT(prev==run_queue_tls[proc]);
686 checkThreadQsSanity (rtsBool check_TSO_too)
690 for (p=0; p<RtsFlags.GranFlags.proc; p++)
691 checkThreadQSanity(p, check_TSO_too);
696 Check that all TSOs have been evacuated.
697 Optionally also check the sanity of the TSOs.
700 checkGlobalTSOList (rtsBool checkTSOs)
702 extern StgTSO *all_threads;
704 for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
705 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
706 ASSERT(get_itbl(tso)->type == TSO);
712 /* -----------------------------------------------------------------------------
713 Check mutable list sanity.
714 -------------------------------------------------------------------------- */
717 checkMutableList( StgMutClosure *p, nat gen )
721 for (; p != END_MUT_LIST; p = p->mut_link) {
723 ASSERT(closure_MUTABLE(p));
724 ASSERT(bd->gen_no == gen);
725 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
730 checkMutOnceList( StgMutClosure *p, nat gen )
735 for (; p != END_MUT_LIST; p = p->mut_link) {
739 ASSERT(!closure_MUTABLE(p));
740 ASSERT(ip_STATIC(info) || bd->gen_no == gen);
741 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
743 switch (info->type) {
746 case IND_OLDGEN_PERM:
750 barf("checkMutOnceList: strange closure %p (%s)",
751 p, info_type((StgClosure *)p));
757 Check the static objects list.
760 checkStaticObjects ( StgClosure* static_objects )
762 StgClosure *p = static_objects;
765 while (p != END_OF_STATIC_LIST) {
768 switch (info->type) {
771 StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
773 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
774 ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
775 p = IND_STATIC_LINK((StgClosure *)p);
780 p = THUNK_STATIC_LINK((StgClosure *)p);
784 p = FUN_STATIC_LINK((StgClosure *)p);
788 p = STATIC_LINK(info,(StgClosure *)p);
792 barf("checkStaticObjetcs: strange closure %p (%s)",
799 Check the sanity of a blocking queue starting at bqe with closure being
800 the closure holding the blocking queue.
801 Note that in GUM we can have several different closure types in a
806 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
808 rtsBool end = rtsFalse;
809 StgInfoTable *info = get_itbl(closure);
811 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
812 || info->type == FETCH_ME_BQ || info->type == RBH);
815 switch (get_itbl(bqe)->type) {
818 checkClosure((StgClosure *)bqe);
820 end = (bqe==END_BQ_QUEUE);
824 checkClosure((StgClosure *)bqe);
829 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
830 get_itbl(bqe)->type, closure, info_type(closure));
836 checkBQ (StgTSO *bqe, StgClosure *closure)
838 rtsBool end = rtsFalse;
839 StgInfoTable *info = get_itbl(closure);
841 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
844 switch (get_itbl(bqe)->type) {
847 checkClosure((StgClosure *)bqe);
849 end = (bqe==END_BQ_QUEUE);
853 barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
854 get_itbl(bqe)->type, closure, info_type(closure));
860 checkBQ (StgTSO *bqe, StgClosure *closure)
862 rtsBool end = rtsFalse;
863 StgInfoTable *info = get_itbl(closure);
865 ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
868 switch (get_itbl(bqe)->type) {
870 checkClosure((StgClosure *)bqe);
872 end = (bqe==END_TSO_QUEUE);
876 barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
877 get_itbl(bqe)->type, closure, info->type);
887 This routine checks the sanity of the LAGA and GALA tables. They are
888 implemented as lists through one hash table, LAtoGALAtable, because entries
889 in both tables have the same structure:
890 - the LAGA table maps local addresses to global addresses; it starts
891 with liveIndirections
892 - the GALA table maps global addresses to local addresses; it starts
899 /* hidden in parallel/Global.c; only accessed for testing here */
900 extern GALA *liveIndirections;
901 extern GALA *liveRemoteGAs;
902 extern HashTable *LAtoGALAtable;
905 checkLAGAtable(rtsBool check_closures)
908 nat n=0, m=0; // debugging
910 for (gala = liveIndirections; gala != NULL; gala = gala->next) {
912 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
913 ASSERT(!gala->preferred || gala == gala0);
914 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
915 ASSERT(gala->next!=gala); // detect direct loops
916 if ( check_closures ) {
917 checkClosure((StgClosure *)gala->la);
921 for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
923 gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
924 ASSERT(!gala->preferred || gala == gala0);
925 ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
926 ASSERT(gala->next!=gala); // detect direct loops
928 if ( check_closures ) {
929 checkClosure((StgClosure *)gala->la);