1 /* -----------------------------------------------------------------------------
2 * $Id: Sanity.c,v 1.34 2003/07/03 15:14:58 sof Exp $
4 * (c) The GHC Team, 1998-2001
6 * Sanity checking code for the heap and stack.
8 * Used when debugging: check that everything is reasonable.
10 * - All things that are supposed to be pointers look like pointers.
12 * - Objects in text space are marked as static closures, those
13 * in the heap are dynamic.
15 * ---------------------------------------------------------------------------*/
17 #include "PosixSource.h"
20 #ifdef DEBUG /* whole file */
24 #include "BlockAlloc.h"
29 #include "StoragePriv.h" // for END_OF_STATIC_LIST
32 /* -----------------------------------------------------------------------------
34 -------------------------------------------------------------------------- */
/* Forward declarations: the two bitmap walkers and the shallow closure
 * check are used by both the stack- and heap-checking code below. */
36 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
37 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
38 static void checkClosureShallow ( StgClosure * );
40 /* -----------------------------------------------------------------------------
42 -------------------------------------------------------------------------- */
/* Walk 'size' payload words under a one-word bitmap: a 0 bit marks a
 * pointer word, which gets a shallow closure check; 1 bits (non-pointers)
 * are skipped.
 * NOTE(review): this listing is elided (embedded line numbers jump), so
 * the declaration of 'i' and the closing braces are not visible here. */
45 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
51     for(i = 0; i < size; i++, bitmap >>= 1 ) {
52 	if ((bitmap & 1) == 0) {
53 	    checkClosureShallow((StgClosure *)payload[i]);
/* Walk 'size' payload words under a multi-word bitmap, consuming one
 * StgWord of bits ('bmp') per outer iteration; 0 bits mark pointer words
 * and get a shallow closure check.
 * NOTE(review): elided listing -- the initialisation of 'i' and 'j'
 * happens on lines not shown here; do not assume they start unset. */
59 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
65     for (bmp=0; i < size; bmp++) {
66 	StgWord bitmap = large_bitmap->bitmap[bmp];
68 	for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
69 	    if ((bitmap & 1) == 0) {
70 		checkClosureShallow((StgClosure *)payload[i]);
77  * check that it looks like a valid closure - without checking its payload
78  * used to avoid recursion between checking PAPs and checking stack
/* Shallow check: the pointer must look like a closure pointer, and its
 * static/dynamic flag must agree with where it lives -- objects outside
 * the heap must be marked static, heap objects must not be.
 * NOTE(review): the 'else' line between the two ASSERTs is elided. */
83 checkClosureShallow( StgClosure* p )
85     ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
87     /* Is it a static closure? */
88     if (!HEAP_ALLOCED(p)) {
89 	ASSERT(closure_STATIC(p));
91 	ASSERT(!closure_STATIC(p));
95 // check an individual stack object
/* Sanity-check one stack frame at 'c' and return its size in words, so
 * the caller (checkStackChunk) can step to the next frame.  Dispatches on
 * the frame's return-info-table type: RET_DYN frames carry their liveness
 * mask on the stack; RET_SMALL/RET_BIG use the info table's bitmap;
 * RET_BCO reads the bitmap from the BCO itself; RET_FUN uses the
 * function's info table.
 * NOTE(review): elided listing -- several case labels, local declarations
 * (e.g. 'r', 'dyn', 'p', 'size', 'bco', 'ret_fun') and closing braces are
 * on lines not shown here. */
97 checkStackFrame( StgPtr c )
100     const StgRetInfoTable* info;
102     info = get_ret_itbl((StgClosure *)c);
104     /* All activation records have 'bitmap' style layout info. */
105     switch (info->i.type) {
106     case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
115 	p = (P_)(r->payload);
116 	checkSmallBitmap(p,GET_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
117 	p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
119 	// skip over the non-pointers
120 	p += GET_NONPTRS(dyn);
122 	// follow the ptr words
123 	for (size = GET_PTRS(dyn); size > 0; size--) {
124 	    checkClosureShallow((StgClosure *)*p);
128 	return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
129 	       RET_DYN_NONPTR_REGS_SIZE +
130 	       GET_NONPTRS(dyn) + GET_PTRS(dyn);
134 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
136 	// small bitmap cases (<= 32 entries)
140 	size = BITMAP_SIZE(info->i.layout.bitmap);
141 	checkSmallBitmap((StgPtr)c + 1,
142 			 BITMAP_BITS(info->i.layout.bitmap), size);
/* BCO frame: the BCO closure sits in the word after the header; its
 * bitmap describes the rest of the frame. */
148 	bco = (StgBCO *)*(c+1);
149 	size = BCO_BITMAP_SIZE(bco);
150 	checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
154     case RET_BIG: // large bitmap (> 32 entries)
156 	size = info->i.layout.large_bitmap->size;
157 	checkLargeBitmap((StgPtr)c + 1, info->i.layout.large_bitmap, size);
/* RET_FUN frame: layout is determined by the *function* closure's info
 * table (small bitmap, large bitmap, or a standard argument pattern). */
162 	StgFunInfoTable *fun_info;
165 	ret_fun = (StgRetFun *)c;
166 	fun_info = get_fun_itbl(ret_fun->fun);
167 	size = ret_fun->size;
168 	switch (fun_info->fun_type) {
170 	    checkSmallBitmap((StgPtr)ret_fun->payload,
171 			     BITMAP_BITS(fun_info->bitmap), size);
174 	    checkLargeBitmap((StgPtr)ret_fun->payload,
175 			     (StgLargeBitmap *)fun_info->bitmap, size);
178 	    checkSmallBitmap((StgPtr)ret_fun->payload,
179 			     BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]),
183 	return sizeofW(StgRetFun) + size;
187 	barf("checkStackFrame: weird activation record found on stack (%p).",c);
191 // check sections of stack between update frames
/* Walk a stack chunk frame-by-frame from 'sp' to 'stack_end', using each
 * frame's reported size to advance.  The final equality assert was
 * disabled by HWL (see comment below). */
193 checkStackChunk( StgPtr sp, StgPtr stack_end )
198     while (p < stack_end) {
199 	p += checkStackFrame( p );
201     // ASSERT( p == stack_end ); -- HWL
/* Sanity-check a single heap closure and return its size in words (so
 * heap walkers can step to the next object).  Dispatches on the info
 * table's closure type; for each type it asserts that every pointer
 * field LOOKS_LIKE_CLOSURE_PTR and, where relevant, checks attached
 * blocking queues / stacks / GAs.
 * NOTE(review): this listing is elided (embedded line numbers jump):
 * many case labels, #ifdef PAR/GRAN guards, local declarations and
 * closing braces are on lines not shown here.  Comments below are
 * limited to what the visible lines establish. */
205 checkClosure( StgClosure* p )
207     const StgInfoTable *info;
209     ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
211     /* Is it a static closure (i.e. in the data segment)? */
212     if (!HEAP_ALLOCED(p)) {
213 	ASSERT(closure_STATIC(p));
215 	ASSERT(!closure_STATIC(p));
219     switch (info->type) {
/* MVAR: head, tail and value are all closure pointers; the blocked-TSO
 * queue hanging off 'head' is checked too (two call forms -- presumably
 * PAR vs. non-PAR builds; the #if lines are elided). */
223 	StgMVar *mvar = (StgMVar *)p;
224 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
225 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
226 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
229 	checkBQ((StgBlockingQueueElement *)mvar->head, p);
231 	checkBQ(mvar->head, p);
234 	return sizeofW(StgMVar);
/* Thunk-like closures: check each pointer in the payload; size must be
 * at least header + MIN_UPD_SIZE so the closure can later be updated. */
245 	for (i = 0; i < info->layout.payload.ptrs; i++) {
246 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
248 	return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
252 	checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
253 	/* fall through to basic ptr check */
268     case IND_OLDGEN_PERM:
271     case SE_CAF_BLACKHOLE:
280     case CONSTR_CHARLIKE:
282     case CONSTR_NOCAF_STATIC:
/* Generic pointers-first layout: check every pointer word. */
287 	for (i = 0; i < info->layout.payload.ptrs; i++) {
288 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
290 	return sizeW_fromITBL(info);
/* BCO: all four component fields are closure pointers. */
294 	StgBCO *bco = (StgBCO *)p;
295 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
296 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
297 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
298 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
299 	return bco_sizeW(bco);
302     case IND_STATIC: /* (1, 0) closure */
303 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
304 	return sizeW_fromITBL(info);
307 	/* deal with these specially - the info table isn't
308 	 * representative of the actual layout.
/* Weak pointer: key/value/finalizer/link are all closure pointers. */
310 	{ StgWeak *w = (StgWeak *)p;
311 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
312 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
313 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
315 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
317 	return sizeW_fromITBL(info);
/* THUNK_SELECTOR: only the selectee pointer; updatable, so minimum
 * update size applies. */
321 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
322 	return sizeofW(StgHeader) + MIN_UPD_SIZE;
326 	/* we don't expect to see any of these after GC
327 	 * but they might appear during execution
/* IND: check the indirectee, then skip any zeroed slop words left by
 * the garbage collector to find the object's true extent. */
330 	StgInd *ind = (StgInd *)p;
331 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
332 	q = (P_)p + sizeofW(StgInd);
333 	while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
/* Stack frames must never appear as heap closures. */
346 	barf("checkClosure: stack frame");
348     case AP: /* we can treat this as being the same as a PAP */
/* PAP/AP: the argument layout is dictated by the applied function's
 * info table -- same four-way dispatch as RET_FUN in checkStackFrame. */
351 	StgFunInfoTable *fun_info;
352 	StgPAP* pap = (StgPAP *)p;
354 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(pap->fun));
355 	fun_info = get_fun_itbl(pap->fun);
357 	p = (StgClosure *)pap->payload;
358 	switch (fun_info->fun_type) {
360 	    checkSmallBitmap( (StgPtr)pap->payload,
361 			      BITMAP_BITS(fun_info->bitmap), pap->n_args );
364 	    checkLargeBitmap( (StgPtr)pap->payload,
365 			      (StgLargeBitmap *)fun_info->bitmap,
369 	    checkLargeBitmap( (StgPtr)pap->payload,
370 			      BCO_BITMAP(pap->fun),
374 	    checkSmallBitmap( (StgPtr)pap->payload,
375 			      BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]),
379 	return pap_sizeW(pap);
/* AP_STACK: payload is a chunk of stack -- check it as such. */
384 	StgAP_STACK *ap = (StgAP_STACK *)p;
385 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
386 	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
387 	return ap_stack_sizeW(ap);
/* ARR_WORDS: raw data, no pointers to check. */
391 	return arr_words_sizeW((StgArrWords *)p);
394     case MUT_ARR_PTRS_FROZEN:
/* Mutable pointer array: every element is a closure pointer. */
396 	StgMutArrPtrs* a = (StgMutArrPtrs *)p;
398 	for (i = 0; i < a->ptrs; i++) {
399 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
401 	return mut_arr_ptrs_sizeW(a);
405 	checkTSO((StgTSO *)p);
406 	return tso_sizeW((StgTSO *)p);
/* Parallel-RTS closures (BLOCKED_FETCH / FETCH_ME / FETCH_ME_BQ / RBH);
 * the #if defined(PAR)/GRAN guards are on elided lines. */
411 	ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
412 	ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
413 	return sizeofW(StgBlockedFetch); // see size used in evacuate()
417 	return sizeofW(StgFetchMe);
421 	ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
422 	return sizeofW(StgFetchMe); // see size used in evacuate()
425 	checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
426 	return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
429 	/* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
430 	ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
431 	if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
432 	    checkBQ(((StgRBH *)p)->blocking_queue, p);
433 	ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
434 	return BLACKHOLE_sizeW(); // see size used in evacuate()
435 	// sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
/* EVACUATED closures must never survive into a sanity-checked heap. */
440 	barf("checkClosure: found EVACUATED closure %d",
443 	barf("checkClosure (closure type %d)", info->type);
/* PVM task-id plausibility checks for the parallel (GUM) RTS.  A PVM tid
 * packs the PE number in the top bits (PVM_PE_MASK, shifted by 18) and a
 * per-PE index in the low bits. */
449 #define PVM_PE_MASK 0xfffc0000
450 #define MAX_PVM_PES MAX_PES
451 #define MAX_PVM_TIDS MAX_PES
452 #define MAX_SLOTS 100000
/* A tid is plausible when its PE part is non-zero and both halves are in
 * range.  NOTE(review): the return statement is on an elided line. */
455 looks_like_tid(StgInt tid)
457     StgInt hi = (tid & PVM_PE_MASK) >> 18;
458     StgInt lo = (tid & ~PVM_PE_MASK);
459     rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Weak slot check against the global MAX_SLOTS bound only. */
464 looks_like_slot(StgInt slot)
466     /* if tid is known better use looks_like_ga!! */
467     rtsBool ok = slot<MAX_SLOTS;
468     // This refers only to the no. of slots on the current PE
469     // rtsBool ok = slot<=highest_slot();
/* Full global-address check: tid must be plausible, and the slot bound
 * is tighter (highest_slot()) when the GA belongs to this PE. */
474 looks_like_ga(globalAddr *ga)
476     rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
477     rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
478 		      (ga)->payload.gc.slot<=highest_slot() :
479 		      (ga)->payload.gc.slot<MAX_SLOTS;
480     rtsBool ok = is_tid && is_slot;
487 /* -----------------------------------------------------------------------------
490    After garbage collection, the live heap is in a state where we can
491    run through and check that all the pointers point to the right
492    place.  This function starts at a given position and sanity-checks
493    all the objects in the remainder of the chain.
494    -------------------------------------------------------------------------- */
/* Walk every block in the chain starting at 'bd', checking each closure
 * between the block start and bd->free.  After each closure, skip words
 * that do not look like info pointers (GC slop / block padding).
 * NOTE(review): elided listing -- the initialisation of 'p' and closing
 * braces are on lines not shown here. */
497 checkHeap(bdescr *bd)
501     for (; bd != NULL; bd = bd->link) {
503 	while (p < bd->free) {
504 	    nat size = checkClosure((StgClosure *)p);
505 	    /* This is the smallest size of closure that can live in the heap */
506 	    ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
510 	    while (p < bd->free &&
511 		   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
518    Check heap between start and end. Used after unpacking graphs.
/* Parallel-RTS variant: unpacked graphs may contain commoned-up FETCH_ME
 * closures (payload word set to the 0x0000eeee garbage marker by
 * SetGAandCommonUp) that must have no LAGA entry, and IND closures whose
 * slop word gets marked as garbage here.  All other closures go through
 * the normal checkClosure path. */
521 checkHeapChunk(StgPtr start, StgPtr end)
523   extern globalAddr *LAGAlookup(StgClosure *addr);
527   for (p=start; p<end; p+=size) {
528     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
529     if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
530 	*(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
531       /* if it's a FM created during unpack and commoned up, it's not global */
532       ASSERT(LAGAlookup((StgClosure*)p)==NULL);
533       size = sizeofW(StgFetchMe);
534     } else if (get_itbl((StgClosure*)p)->type == IND) {
535       *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
538       size = checkClosure((StgClosure *)p);
539       /* This is the smallest size of closure that can live in the heap. */
540       ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Standard variant: step through [start, end) closure by closure,
 * checking each one and enforcing the minimum heap-object size. */
546 checkHeapChunk(StgPtr start, StgPtr end)
551   for (p=start; p<end; p+=size) {
552     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
553     size = checkClosure((StgClosure *)p);
554     /* This is the smallest size of closure that can live in the heap. */
555     ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Check the first closure of each block in a block chain (e.g. the
 * large-object list, where each block holds one object). */
561 checkChain(bdescr *bd)
564     checkClosure((StgClosure *)bd->start);
/* Sanity-check a thread state object: its stack bounds, its blocking
 * state (block_info must match why_blocked), its run-queue link, and
 * finally the stack contents themselves.
 * NOTE(review): elided listing -- early returns for ThreadRelocated /
 * ThreadKilled, the 'sp' initialisation, case labels and several #if
 * PAR/GRAN guards are on lines not shown here. */
570 checkTSO(StgTSO *tso)
573     StgPtr stack = tso->stack;
574     StgOffset stack_size = tso->stack_size;
575     StgPtr stack_end = stack + stack_size;
577     if (tso->what_next == ThreadRelocated) {
582     if (tso->what_next == ThreadKilled) {
583 	/* The garbage collector doesn't bother following any pointers
584 	 * from dead threads, so don't check sanity here.
589     ASSERT(stack <= sp && sp < stack_end);
592     ASSERT(tso->par.magic==TSO_MAGIC);
594     switch (tso->why_blocked) {
/* Blocked on a global address: the closure we are queued on must be a
 * FETCH_ME_BQ (it cannot still be a FETCH_ME -- see comment). */
596       checkClosureShallow(tso->block_info.closure);
597       ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
598 	     get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
600     case BlockedOnGA_NoSend:
601       checkClosureShallow(tso->block_info.closure);
602       ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
604     case BlockedOnBlackHole:
605       checkClosureShallow(tso->block_info.closure);
606       ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
607 	     get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
608 	     get_itbl(tso->block_info.closure)->type==RBH);
613 #if defined(mingw32_TARGET_OS)
614     case BlockedOnDoProc:
616       /* isOnBQ(blocked_queue) */
618     case BlockedOnException:
619       /* isOnSomeBQ(tso) */
620       ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
623       ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
627       Could check other values of why_blocked but I am more
628       lazy than paranoid (bad combination) -- HWL
632   /* if the link field is non-nil it must point to one of these
633      three closure types */
634   ASSERT(tso->link == END_TSO_QUEUE ||
635 	 get_itbl(tso->link)->type == TSO ||
636 	 get_itbl(tso->link)->type == BLOCKED_FETCH ||
637 	 get_itbl(tso->link)->type == CONSTR);
640     checkStackChunk(sp, stack_end);
/* GranSim-only: run checkTSO over every TSO on every PE's run queue,
 * reporting progress to stderr and a summary count at the end.
 * NOTE(review): declarations of 'i', 'tso', 'tsos' and the checkTSO call
 * itself are on elided lines. */
645 checkTSOsSanity(void) {
649   belch("Checking sanity of all runnable TSOs:");
651   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
652     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
653       fprintf(stderr, "TSO %p on PE %d ...", tso, i);
655       fprintf(stderr, "OK, ");
660   belch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* Check the run queue of one PE: head/tail must be non-NULL (the nil
 * sentinel is END_TSO_QUEUE, never NULL), head and tail must be nil
 * together, every link must be consistent with its predecessor, and the
 * last element reached must be the recorded tail.  'check_TSO_too'
 * presumably gates a per-TSO checkTSO call on an elided line -- confirm
 * against the full source. */
667 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
671   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
672   ASSERT(run_queue_hds[proc]!=NULL);
673   ASSERT(run_queue_tls[proc]!=NULL);
674   /* if either head or tail is NIL then the other one must be NIL, too */
675   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
676   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
677   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
679        prev=tso, tso=tso->link) {
680     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
681 	   (prev==END_TSO_QUEUE || prev->link==tso));
685   ASSERT(prev==run_queue_tls[proc]);
/* Run checkThreadQSanity over every PE's run queue. */
689 checkThreadQsSanity (rtsBool check_TSO_too)
693   for (p=0; p<RtsFlags.GranFlags.proc; p++)
694     checkThreadQSanity(p, check_TSO_too);
699    Check that all TSOs have been evacuated.
700    Optionally also check the sanity of the TSOs.
/* Walk the global all_threads list via global_link: every entry must
 * look like a closure pointer and actually be a TSO.  The optional
 * checkTSO call gated by 'checkTSOs' is presumably on an elided line. */
703 checkGlobalTSOList (rtsBool checkTSOs)
705   extern  StgTSO *all_threads;
707   for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
708       ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
709       ASSERT(get_itbl(tso)->type == TSO);
715 /* -----------------------------------------------------------------------------
716    Check mutable list sanity.
717    -------------------------------------------------------------------------- */
/* Every closure on a generation's mutable list must be flagged MUTABLE,
 * live in a block belonging to that generation, and have a sane
 * mut_link.  NOTE(review): the lookup that sets 'bd' is on an elided
 * line. */
720 checkMutableList( StgMutClosure *p, nat gen )
724     for (; p != END_MUT_LIST; p = p->mut_link) {
726 	ASSERT(closure_MUTABLE(p));
727 	ASSERT(bd->gen_no == gen);
728 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
/* The mut-once list holds closures mutated at most once (e.g. old-gen
 * indirections): each must NOT be flagged MUTABLE, must be static or in
 * the right generation, and must be one of the closure types accepted
 * by the switch (most case labels are on elided lines; anything else is
 * a fatal error). */
733 checkMutOnceList( StgMutClosure *p, nat gen )
738     for (; p != END_MUT_LIST; p = p->mut_link) {
742 	ASSERT(!closure_MUTABLE(p));
743 	ASSERT(ip_STATIC(info) || bd->gen_no == gen);
744 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
746 	switch (info->type) {
749 	case IND_OLDGEN_PERM:
753 	    barf("checkMutOnceList: strange closure %p (%s)",
754 		 p, info_type((StgClosure *)p));
760    Check the static objects list.
/* Walk the static-objects list (threaded through the closures' static
 * link fields, chosen per closure type) until END_OF_STATIC_LIST,
 * checking IND_STATIC indirectees along the way.
 * NOTE(review): the barf message below misspells the function name
 * ("checkStaticObjetcs") -- fix the string when editing the real source;
 * a comment-only pass cannot touch it. */
763 checkStaticObjects ( StgClosure* static_objects )
765   StgClosure *p = static_objects;
768   while (p != END_OF_STATIC_LIST) {
771     switch (info->type) {
774 	StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
776 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
777 	ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
778 	p = IND_STATIC_LINK((StgClosure *)p);
783       p = THUNK_STATIC_LINK((StgClosure *)p);
787       p = FUN_STATIC_LINK((StgClosure *)p);
791       p = STATIC_LINK(info,(StgClosure *)p);
795       barf("checkStaticObjetcs: strange closure %p (%s)",
802    Check the sanity of a blocking queue starting at bqe with closure being
803    the closure holding the blocking queue.
804    Note that in GUM we can have several different closure types in a
/* GUM (parallel) variant: the holding closure must be one of the four
 * blocking-queue-bearing types; queue elements are fully checked and the
 * END_BQ_QUEUE sentinel terminates the walk.  Case labels and the loop
 * header are on elided lines. */
809 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
811   rtsBool end = rtsFalse;
812   StgInfoTable *info = get_itbl(closure);
814   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
815 	 || info->type == FETCH_ME_BQ || info->type == RBH);
818     switch (get_itbl(bqe)->type) {
821       checkClosure((StgClosure *)bqe);
823       end = (bqe==END_BQ_QUEUE);
827       checkClosure((StgClosure *)bqe);
832       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
833 	   get_itbl(bqe)->type, closure, info_type(closure));
/* Second checkBQ variant (presumably the GranSim build -- #if lines are
 * elided): queue elements are plain TSOs; only BLACKHOLE_BQ and MVAR may
 * carry a blocking queue here. */
839 checkBQ (StgTSO *bqe, StgClosure *closure)
841   rtsBool end = rtsFalse;
842   StgInfoTable *info = get_itbl(closure);
844   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
847     switch (get_itbl(bqe)->type) {
850       checkClosure((StgClosure *)bqe);
852       end = (bqe==END_BQ_QUEUE);
856       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
857 	   get_itbl(bqe)->type, closure, info_type(closure));
/* Third checkBQ variant (presumably the standard sequential build):
 * identical shape to the previous one but terminates on END_TSO_QUEUE
 * and reports the raw info->type in the error message. */
863 checkBQ (StgTSO *bqe, StgClosure *closure)
865   rtsBool end = rtsFalse;
866   StgInfoTable *info = get_itbl(closure);
868   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
871     switch (get_itbl(bqe)->type) {
873       checkClosure((StgClosure *)bqe);
875       end = (bqe==END_TSO_QUEUE);
879       barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
880 	   get_itbl(bqe)->type, closure, info->type);
890    This routine checks the sanity of the LAGA and GALA tables. They are
891    implemented as lists through one hash table, LAtoGALAtable, because entries
892    in both tables have the same structure:
893     - the LAGA table maps local addresses to global addresses; it starts
894       with liveIndirections
895     - the GALA table maps global addresses to local addresses; it starts
902 /* hidden in parallel/Global.c; only accessed for testing here */
903 extern GALA *liveIndirections;
904 extern GALA *liveRemoteGAs;
905 extern HashTable *LAtoGALAtable;
/* Walk both GALA lists; for every entry, a preferred entry must be the
 * one registered in the hash table, the local address must carry a valid
 * info pointer, and the list must not contain a direct self-loop.
 * Optionally deep-check each referenced closure. */
908 checkLAGAtable(rtsBool check_closures)
911   nat n=0, m=0; // debugging
913   for (gala = liveIndirections; gala != NULL; gala = gala->next) {
915     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
916     ASSERT(!gala->preferred || gala == gala0);
917     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
918     ASSERT(gala->next!=gala);  // detect direct loops
919     if ( check_closures ) {
920       checkClosure((StgClosure *)gala->la);
924   for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
926     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
927     ASSERT(!gala->preferred || gala == gala0);
928     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
929     ASSERT(gala->next!=gala);  // detect direct loops
931     if ( check_closures ) {
932       checkClosure((StgClosure *)gala->la);