1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2001
5 * Sanity checking code for the heap and stack.
7  * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
23 #include "BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
/* Forward declarations: these helpers are used by checkStackFrame /
 * checkClosure before their definitions appear below. */
34 static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
/* Walk 'size' words starting at 'payload'; a 0 bit in 'bitmap' marks a
 * pointer word, which is shallow-checked. Bit 0 of 'bitmap' describes
 * payload[0], and the bitmap is shifted right one bit per word.
 * NOTE(review): this listing is missing interleaved source lines (the
 * embedded original line numbers are non-consecutive). */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49     for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 	if ((bitmap & 1) == 0) {
51 	    checkClosureShallow((StgClosure *)payload[i]);
/* Like checkSmallBitmap, but for bitmaps larger than one word: the
 * bitmap is stored as an array of StgWords ('large_bitmap->bitmap'),
 * consumed BITS_IN(W_) payload words per bitmap word. A 0 bit again
 * marks a pointer word. */
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63     for (bmp=0; i < size; bmp++) {
64 	StgWord bitmap = large_bitmap->bitmap[bmp];
66 	for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 	    if ((bitmap & 1) == 0) {
68 		checkClosureShallow((StgClosure *)payload[i]);
/*
75  * check that it looks like a valid closure - without checking its payload
76  * used to avoid recursion between checking PAPs and checking stack
 * chunks (a PAP's payload is a stack chunk, whose frames may in turn
 * reference PAPs).  Asserts the pointer looks closure-like and that the
 * static/dynamic flag agrees with where the closure lives.
 */
81 checkClosureShallow( StgClosure* p )
83     ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
85     /* Is it a static closure? */
86     if (!HEAP_ALLOCED(p)) {
87 	ASSERT(closure_STATIC(p));
	/* heap-allocated closures must NOT be marked static */
89 	ASSERT(!closure_STATIC(p));
93 // check an individual stack object
/* Validates one activation record at 'c' and returns its size in words,
 * so the caller (checkStackChunk) can step to the next frame.  Dispatches
 * on the frame's return-info-table type. */
95 checkStackFrame( StgPtr c )
98     const StgRetInfoTable* info;
100     info = get_ret_itbl((StgClosure *)c);
102     /* All activation records have 'bitmap' style layout info. */
103     switch (info->i.type) {
104     case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
	/* layout: StgRetDyn header, then bitmap-described register words,
	 * then non-pointer words, then pointer words */
113 	p = (P_)(r->payload);
114 	checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
115 	p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
117 	// skip over the non-pointers
118 	p += RET_DYN_NONPTRS(dyn);
120 	// follow the ptr words
121 	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
122 	    checkClosureShallow((StgClosure *)*p);
126 	return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
127 	       RET_DYN_NONPTR_REGS_SIZE +
128 	       RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
	/* update frame: just check the updatee pointer */
132 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
134 	// small bitmap cases (<= 32 entries)
138 	size = BITMAP_SIZE(info->i.layout.bitmap);
139 	checkSmallBitmap((StgPtr)c + 1,
140 			 BITMAP_BITS(info->i.layout.bitmap), size);
	/* RET_BCO: the BCO sits in the word after the return address and
	 * carries its own (large) bitmap */
146 	bco = (StgBCO *)*(c+1);
147 	size = BCO_BITMAP_SIZE(bco);
148 	checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
152     case RET_BIG: // large bitmap (> 32 entries)
154 	size = GET_LARGE_BITMAP(&info->i)->size;
155 	checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
	/* RET_FUN: layout of the payload depends on the function's
	 * argument info table */
160 	StgFunInfoTable *fun_info;
163 	ret_fun = (StgRetFun *)c;
164 	fun_info = get_fun_itbl(ret_fun->fun);
165 	size = ret_fun->size;
166 	switch (fun_info->f.fun_type) {
168 	    checkSmallBitmap((StgPtr)ret_fun->payload,
169 			     BITMAP_BITS(fun_info->f.bitmap), size);
172 	    checkLargeBitmap((StgPtr)ret_fun->payload,
173 			     GET_FUN_LARGE_BITMAP(fun_info), size);
	    /* default: a standard argument pattern, bitmap taken from the
	     * static table indexed by fun_type */
176 	    checkSmallBitmap((StgPtr)ret_fun->payload,
177 			     BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
181 	return sizeofW(StgRetFun) + size;
185 	barf("checkStackFrame: weird activation record found on stack (%p).",c);
189 // check sections of stack between update frames
/* Walks frames from 'sp' up to 'stack_end', letting checkStackFrame
 * report each frame's size.  The final equality assert is disabled
 * (see the HWL comment below). */
191 checkStackChunk( StgPtr sp, StgPtr stack_end )
196     while (p < stack_end) {
197 	p += checkStackFrame( p );
199     // ASSERT( p == stack_end ); -- HWL
/* Fully sanity-check one closure and return its size in words so the
 * caller can advance to the next heap object.  Dispatches on the info
 * table's closure type; unknown types barf. */
203 checkClosure( StgClosure* p )
205     const StgInfoTable *info;
207     ASSERT(LOOKS_LIKE_INFO_PTR(p->header.info));
209     /* Is it a static closure (i.e. in the data segment)? */
210     if (!HEAP_ALLOCED(p)) {
211 	ASSERT(closure_STATIC(p));
213 	ASSERT(!closure_STATIC(p));
217     switch (info->type) {
	/* MVar: head/tail of the blocked-TSO queue plus the value */
221 	StgMVar *mvar = (StgMVar *)p;
222 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
223 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
224 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
227 	checkBQ((StgBlockingQueueElement *)mvar->head, p);
229 	checkBQ(mvar->head, p);
232 	return sizeofW(StgMVar);
	/* thunks: check the pointer payload; size is at least the
	 * minimum updatable closure size */
243 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
244 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
246 	    return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
250 	checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
251 	/* fall through to basic ptr check */
266     case IND_OLDGEN_PERM:
269     case SE_CAF_BLACKHOLE:
278     case CONSTR_CHARLIKE:
280     case CONSTR_NOCAF_STATIC:
	    /* generic case: pointers-first layout, described by the
	     * info table */
285 	    for (i = 0; i < info->layout.payload.ptrs; i++) {
286 		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
288 	    return sizeW_fromITBL(info);
	/* byte-code object: four pointer fields to check */
292 	StgBCO *bco = (StgBCO *)p;
293 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
294 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
295 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
296 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->itbls));
297 	return bco_sizeW(bco);
300     case IND_STATIC: /* (1, 0) closure */
301 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
302 	return sizeW_fromITBL(info);
305 	/* deal with these specially - the info table isn't
306 	 * representative of the actual layout.
	 */
308 	{ StgWeak *w = (StgWeak *)p;
309 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
310 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
311 	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
313 	      ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
315 	  return sizeW_fromITBL(info);
319 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
320 	return sizeofW(StgHeader) + MIN_UPD_SIZE;
324 	/* we don't expect to see any of these after GC
325 	 * but they might appear during execution
	 */
328 	    StgInd *ind = (StgInd *)p;
329 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
330 	    q = (P_)p + sizeofW(StgInd);
331 	    while (!*q) { q++; }; /* skip padding words (see GC.c: evacuate())*/
	/* stack frame types should never appear as heap closures */
344 	barf("checkClosure: stack frame");
346     case AP: /* we can treat this as being the same as a PAP */
	/* PAP/AP: argument layout determined by the function's arg
	 * bitmap, same dispatch as RET_FUN in checkStackFrame */
349 	StgFunInfoTable *fun_info;
350 	StgPAP* pap = (StgPAP *)p;
352 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(pap->fun));
353 	fun_info = get_fun_itbl(pap->fun);
355 	p = (StgClosure *)pap->payload;
356 	switch (fun_info->f.fun_type) {
358 	    checkSmallBitmap( (StgPtr)pap->payload,
359 			      BITMAP_BITS(fun_info->f.bitmap), pap->n_args );
362 	    checkLargeBitmap( (StgPtr)pap->payload,
363 			      GET_FUN_LARGE_BITMAP(fun_info),
367 	    checkLargeBitmap( (StgPtr)pap->payload,
368 			      BCO_BITMAP(pap->fun),
372 	    checkSmallBitmap( (StgPtr)pap->payload,
373 			      BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
377 	return pap_sizeW(pap);
	/* AP_STACK: payload is a stack chunk; check it frame by frame */
382 	StgAP_STACK *ap = (StgAP_STACK *)p;
383 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
384 	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
385 	return ap_stack_sizeW(ap);
	/* ARR_WORDS contains no pointers: size only */
389 	return arr_words_sizeW((StgArrWords *)p);
392     case MUT_ARR_PTRS_FROZEN:
394 	StgMutArrPtrs* a = (StgMutArrPtrs *)p;
396 	for (i = 0; i < a->ptrs; i++) {
397 	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
399 	return mut_arr_ptrs_sizeW(a);
403 	checkTSO((StgTSO *)p);
404 	return tso_sizeW((StgTSO *)p);
	/* parallel (GUM) closure types below */
409 	ASSERT(LOOKS_LIKE_GA(&(((StgBlockedFetch *)p)->ga)));
410 	ASSERT(LOOKS_LIKE_CLOSURE_PTR((((StgBlockedFetch *)p)->node)));
411 	return sizeofW(StgBlockedFetch);  // see size used in evacuate()
415 	return sizeofW(StgFetchMe);
419 	ASSERT(LOOKS_LIKE_GA(((StgFetchMe *)p)->ga));
420 	return sizeofW(StgFetchMe);  // see size used in evacuate()
423 	checkBQ(((StgFetchMeBlockingQueue *)p)->blocking_queue, (StgClosure *)p);
424 	return sizeofW(StgFetchMeBlockingQueue); // see size used in evacuate()
427 	/* In an RBH the BQ may be empty (ie END_BQ_QUEUE) but not NULL */
428 	ASSERT(((StgRBH *)p)->blocking_queue!=NULL);
429 	if (((StgRBH *)p)->blocking_queue!=END_BQ_QUEUE)
430 	  checkBQ(((StgRBH *)p)->blocking_queue, p);
431 	ASSERT(LOOKS_LIKE_INFO_PTR(REVERT_INFOPTR(get_itbl((StgClosure *)p))));
432 	return BLACKHOLE_sizeW();   // see size used in evacuate()
433 	// sizeW_fromITBL(REVERT_INFOPTR(get_itbl((StgClosure *)p)));
438 	barf("checkClosure: found EVACUATED closure %d",
441 	barf("checkClosure (closure type %d)", info->type);
/* PVM task-id encoding used by the GUM sanity checks below:
 * the high bits (above bit 18) select the PE, the low bits the TID. */
447 #define PVM_PE_MASK    0xfffc0000
448 #define MAX_PVM_PES    MAX_PES
449 #define MAX_PVM_TIDS   MAX_PES
450 #define MAX_SLOTS      100000
/* Heuristic plausibility check for a PVM task id: splits 'tid' into
 * PE (high bits) and local id (low bits) and range-checks both. */
453 looks_like_tid(StgInt tid)
455   StgInt hi = (tid & PVM_PE_MASK) >> 18;
456   StgInt lo = (tid & ~PVM_PE_MASK);
457   rtsBool ok = (hi != 0) && (lo < MAX_PVM_TIDS) && (hi < MAX_PVM_TIDS);
/* Heuristic plausibility check for a GALA slot number (upper bound
 * only; see looks_like_ga for the tid-aware version). */
462 looks_like_slot(StgInt slot)
464   /* if tid is known better use looks_like_ga!! */
465   rtsBool ok = slot<MAX_SLOTS;
466   // This refers only to the no. of slots on the current PE
467   // rtsBool ok = slot<=highest_slot();
/* Plausibility check for a global address: tid must look valid, and the
 * slot bound is tighter (highest_slot()) when the GA is local to this
 * PE (gtid == mytid). */
472 looks_like_ga(globalAddr *ga)
474   rtsBool is_tid = looks_like_tid((ga)->payload.gc.gtid);
475   rtsBool is_slot = ((ga)->payload.gc.gtid==mytid) ?
476 		     (ga)->payload.gc.slot<=highest_slot() :
477 		     (ga)->payload.gc.slot<MAX_SLOTS;
478   rtsBool ok = is_tid && is_slot;
485 /* -----------------------------------------------------------------------------
   Check Heap Sanity
488    After garbage collection, the live heap is in a state where we can
489    run through and check that all the pointers point to the right
490    place.  This function starts at a given position and sanity-checks
491    all the objects in the remainder of the chain.
492    -------------------------------------------------------------------------- */
495 checkHeap(bdescr *bd)
499     for (; bd != NULL; bd = bd->link) {
501 	while (p < bd->free) {
502 	    nat size = checkClosure((StgClosure *)p);
503 	    /* This is the smallest size of closure that can live in the heap */
504 	    ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
	    /* skip slop: words that are zero or don't look like info
	     * pointers are padding between closures */
508 	    while (p < bd->free &&
509 		   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR((void*)*p))) { p++; }
516    Check heap between start and end. Used after unpacking graphs.
/* PAR variant: tolerates FETCH_ME closures created (and commoned-up)
 * during graph unpacking, and marks slop in INDs as garbage. */
519 checkHeapChunk(StgPtr start, StgPtr end)
521   extern globalAddr *LAGAlookup(StgClosure *addr);
525   for (p=start; p<end; p+=size) {
526     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
527     if (get_itbl((StgClosure*)p)->type == FETCH_ME &&
528 	*(p+1) == 0x0000eeee /* ie. unpack garbage (see SetGAandCommonUp) */) {
529       /* if it's a FM created during unpack and commoned up, it's not global */
530       ASSERT(LAGAlookup((StgClosure*)p)==NULL);
531       size = sizeofW(StgFetchMe);
532     } else if (get_itbl((StgClosure*)p)->type == IND) {
533       *(p+2) = 0x0000ee11; /* mark slop in IND as garbage */
536       size = checkClosure((StgClosure *)p);
537       /* This is the smallest size of closure that can live in the heap. */
538       ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Non-PAR variant: simply check every closure between start and end. */
544 checkHeapChunk(StgPtr start, StgPtr end)
549   for (p=start; p<end; p+=size) {
550     ASSERT(LOOKS_LIKE_INFO_PTR((void*)*p));
551     size = checkClosure((StgClosure *)p);
552     /* This is the smallest size of closure that can live in the heap. */
553     ASSERT( size >= MIN_NONUPD_SIZE + sizeofW(StgHeader) );
/* Check the first closure of each block in a block-descriptor chain
 * (used for chains of large objects, one object per block). */
559 checkChain(bdescr *bd)
562     checkClosure((StgClosure *)bd->start);
/* Sanity-check a thread state object: skip relocated/killed threads,
 * check the stack pointer is in range, validate the blocking state
 * against the blocked-on closure, then check the whole stack chunk. */
568 checkTSO(StgTSO *tso)
571     StgPtr stack = tso->stack;
572     StgOffset stack_size = tso->stack_size;
573     StgPtr stack_end = stack + stack_size;
575     if (tso->what_next == ThreadRelocated) {
	/* relocated TSOs are just forwarding pointers; follow/skip them */
580     if (tso->what_next == ThreadKilled) {
581       /* The garbage collector doesn't bother following any pointers
582        * from dead threads, so don't check sanity here.
       */
587     ASSERT(stack <= sp && sp < stack_end);
590     ASSERT(tso->par.magic==TSO_MAGIC);
592     switch (tso->why_blocked) {
	/* blocked on a global address fetch */
594       checkClosureShallow(tso->block_info.closure);
595       ASSERT(/* Can't be a FETCH_ME because *this* closure is on its BQ */
596 	     get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
598     case BlockedOnGA_NoSend:
599       checkClosureShallow(tso->block_info.closure);
600       ASSERT(get_itbl(tso->block_info.closure)->type==FETCH_ME_BQ);
602     case BlockedOnBlackHole:
603       checkClosureShallow(tso->block_info.closure);
604       ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
605 	     get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
606 	     get_itbl(tso->block_info.closure)->type==RBH);
611 #if defined(mingw32_TARGET_OS)
612     case BlockedOnDoProc:
614       /* isOnBQ(blocked_queue) */
616     case BlockedOnException:
617       /* isOnSomeBQ(tso) */
618       ASSERT(get_itbl(tso->block_info.tso)->type==TSO);
621       ASSERT(get_itbl(tso->block_info.closure)->type==MVAR);
625       Could check other values of why_blocked but I am more
626       lazy than paranoid (bad combination) -- HWL
    */
630     /* if the link field is non-nil it must point to one of these
631        three closure types */
632     ASSERT(tso->link == END_TSO_QUEUE ||
633 	   get_itbl(tso->link)->type == TSO ||
634 	   get_itbl(tso->link)->type == BLOCKED_FETCH ||
635 	   get_itbl(tso->link)->type == CONSTR);
638     checkStackChunk(sp, stack_end);
/* GranSim: run checkTSO over every TSO on every processor's run queue,
 * reporting progress via debugBelch. */
643 checkTSOsSanity(void) {
647   debugBelch("Checking sanity of all runnable TSOs:");
649   for (i=0, tsos=0; i<RtsFlags.GranFlags.proc; i++) {
650     for (tso=run_queue_hds[i]; tso!=END_TSO_QUEUE; tso=tso->link) {
651       debugBelch("TSO %p on PE %d ...", tso, i);
658   debugBelch(" checked %d TSOs on %d PEs; ok\n", tsos, RtsFlags.GranFlags.proc);
/* GranSim: check the run queue of one processor is a well-formed
 * singly-linked list (consistent head/tail, correct back-links);
 * optionally sanity-check each TSO too. */
665 checkThreadQSanity (PEs proc, rtsBool check_TSO_too)
669   /* the NIL value for TSOs is END_TSO_QUEUE; thus, finding NULL is an error */
670   ASSERT(run_queue_hds[proc]!=NULL);
671   ASSERT(run_queue_tls[proc]!=NULL);
672   /* if either head or tail is NIL then the other one must be NIL, too */
673   ASSERT(run_queue_hds[proc]!=END_TSO_QUEUE || run_queue_tls[proc]==END_TSO_QUEUE);
674   ASSERT(run_queue_tls[proc]!=END_TSO_QUEUE || run_queue_hds[proc]==END_TSO_QUEUE);
675   for (tso=run_queue_hds[proc], prev=END_TSO_QUEUE;
677        prev=tso, tso=tso->link) {
678     ASSERT((prev!=END_TSO_QUEUE || tso==run_queue_hds[proc]) &&
679 	   (prev==END_TSO_QUEUE || prev->link==tso));
    /* after the walk, the last node must be the recorded tail */
683   ASSERT(prev==run_queue_tls[proc]);
/* GranSim: check every processor's run queue (see checkThreadQSanity). */
687 checkThreadQsSanity (rtsBool check_TSO_too)
691   for (p=0; p<RtsFlags.GranFlags.proc; p++)
692     checkThreadQSanity(p, check_TSO_too);
697    Check that all TSOs have been evacuated.
698    Optionally also check the sanity of the TSOs.
/* Walks the global all_threads list via global_link, asserting each
 * entry still looks like a TSO. */
701 checkGlobalTSOList (rtsBool checkTSOs)
703   extern  StgTSO *all_threads;
705   for (tso=all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
706       ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
707       ASSERT(get_itbl(tso)->type == TSO);
713 /* -----------------------------------------------------------------------------
714    Check mutable list sanity.
715    -------------------------------------------------------------------------- */
/* Each entry must be mutable, live in generation 'gen', and have a
 * plausible mut_link to the next entry. */
718 checkMutableList( StgMutClosure *p, nat gen )
722     for (; p != END_MUT_LIST; p = p->mut_link) {
724 	ASSERT(closure_MUTABLE(p));
725 	ASSERT(bd->gen_no == gen);
726 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
/* Check the mutable-once list: entries must NOT be mutable, must be in
 * generation 'gen' (unless static), and may only be one of the closure
 * types listed in the switch below. */
731 checkMutOnceList( StgMutClosure *p, nat gen )
736     for (; p != END_MUT_LIST; p = p->mut_link) {
740 	ASSERT(!closure_MUTABLE(p));
741 	ASSERT(ip_STATIC(info) || bd->gen_no == gen);
742 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
744 	switch (info->type) {
747 	case IND_OLDGEN_PERM:
751 	    barf("checkMutOnceList: strange closure %p (%s)",
752 		 p, info_type((StgClosure *)p));
758    Check the static objects list.
/* Walks the static-objects list, following the per-type static link
 * field, checking IND_STATIC indirectees along the way. */
761 checkStaticObjects ( StgClosure* static_objects )
763   StgClosure *p = static_objects;
766   while (p != END_OF_STATIC_LIST) {
769     switch (info->type) {
772 	StgClosure *indirectee = ((StgIndStatic *)p)->indirectee;
774 	ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
775 	ASSERT(LOOKS_LIKE_INFO_PTR(indirectee->header.info));
776 	p = IND_STATIC_LINK((StgClosure *)p);
781       p = THUNK_STATIC_LINK((StgClosure *)p);
785       p = FUN_STATIC_LINK((StgClosure *)p);
789       p = STATIC_LINK(info,(StgClosure *)p);
      /* NOTE(review): "checkStaticObjetcs" below is a typo in the
       * diagnostic string ("Objects"); left as-is since runtime strings
       * must not change in a documentation-only pass. */
793       barf("checkStaticObjetcs: strange closure %p (%s)",
800    Check the sanity of a blocking queue starting at bqe with closure being
801    the closure holding the blocking queue.
802    Note that in GUM we can have several different closure types in a
   blocking queue (TSOs, BLOCKED_FETCHes, constructors), hence the switch.
807 checkBQ (StgBlockingQueueElement *bqe, StgClosure *closure)
809   rtsBool end = rtsFalse;
810   StgInfoTable *info = get_itbl(closure);
  /* only these closure types may own a blocking queue */
812   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
813 	 || info->type == FETCH_ME_BQ || info->type == RBH);
816     switch (get_itbl(bqe)->type) {
819       checkClosure((StgClosure *)bqe);
821       end = (bqe==END_BQ_QUEUE);
825       checkClosure((StgClosure *)bqe);
830       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
831 	   get_itbl(bqe)->type, closure, info_type(closure));
/* Second checkBQ variant (TSO-element queues): only BLACKHOLE_BQ and
 * MVAR closures may own the queue, and entries are checked as
 * closures until END_BQ_QUEUE is reached. */
837 checkBQ (StgTSO *bqe, StgClosure *closure)
839   rtsBool end = rtsFalse;
840   StgInfoTable *info = get_itbl(closure);
842   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
845     switch (get_itbl(bqe)->type) {
848       checkClosure((StgClosure *)bqe);
850       end = (bqe==END_BQ_QUEUE);
854       barf("checkBQ: strange closure %d in blocking queue for closure %p (%s)\n",
855 	   get_itbl(bqe)->type, closure, info_type(closure));
/* Third checkBQ variant: like the one above but terminates on
 * END_TSO_QUEUE and reports the owner's type number in the barf
 * message instead of its printed name. */
861 checkBQ (StgTSO *bqe, StgClosure *closure)
863   rtsBool end = rtsFalse;
864   StgInfoTable *info = get_itbl(closure);
866   ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
869     switch (get_itbl(bqe)->type) {
871       checkClosure((StgClosure *)bqe);
873       end = (bqe==END_TSO_QUEUE);
877       barf("checkBQ: strange closure %d in blocking queue for closure %p\n",
878 	   get_itbl(bqe)->type, closure, info->type);
888   This routine checks the sanity of the LAGA and GALA tables. They are
889   implemented as lists through one hash table, LAtoGALAtable, because entries
890   in both tables have the same structure:
891    - the LAGA table maps local addresses to global addresses; it starts
892      with liveIndirections
893    - the GALA table maps global addresses to local addresses; it starts
     with liveRemoteGAs.
900 /* hidden in parallel/Global.c; only accessed for testing here */
901 extern GALA *liveIndirections;
902 extern GALA *liveRemoteGAs;
903 extern HashTable *LAtoGALAtable;
/* For each entry on both lists: preferred entries must be the one the
 * hash table returns, the local address must carry a valid info pointer,
 * self-loops are rejected, and optionally the closure is fully checked. */
906 checkLAGAtable(rtsBool check_closures)
909   nat n=0, m=0; // debugging
911   for (gala = liveIndirections; gala != NULL; gala = gala->next) {
913     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
914     ASSERT(!gala->preferred || gala == gala0);
915     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
916     ASSERT(gala->next!=gala); // detect direct loops
917     if ( check_closures ) {
918       checkClosure((StgClosure *)gala->la);
922   for (gala = liveRemoteGAs; gala != NULL; gala = gala->next) {
924     gala0 = lookupHashTable(LAtoGALAtable, (StgWord) gala->la);
925     ASSERT(!gala->preferred || gala == gala0);
926     ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)gala->la)->header.info));
927     ASSERT(gala->next!=gala); // detect direct loops
929     if ( check_closures ) {
930       checkClosure((StgClosure *)gala->la);