/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
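/* Note (editorial, not from the original source): in a DEBUG build of the
 * runtime these checks are typically driven by the sanity-checking debug
 * flag (+RTS -DS), which runs checkSanity() around garbage collections. */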
#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"
/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void      checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void      checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void      checkClosureShallow ( StgClosure * );
static void      checkSTACK          ( StgStack *stack );
/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}
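/* Illustration (editorial sketch, not from the original source): a clear
 * bit in the bitmap means "this word is a pointer".  With bitmap = 0b0110
 * and size = 4:
 *
 *     payload[0]  bit 0  -> checked with checkClosureShallow()
 *     payload[1]  bit 1  -> non-pointer word, skipped
 *     payload[2]  bit 1  -> non-pointer word, skipped
 *     payload[3]  bit 0  -> checked with checkClosureShallow()
 */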
static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}
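/* Editorial note: a StgLargeBitmap is a size field followed by an array of
 * bitmap words, each covering BITS_IN(W_) payload entries (64 on a 64-bit
 * machine); the outer loop above walks the words and the inner loop the
 * bits within the current word. */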
/*
 * Check that p looks like a valid closure, without checking its payload;
 * used to avoid infinite recursion between checking PAPs and checking
 * stack chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}
// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }
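    /* Editorial note on RET_DYN layout: the frame is the fixed StgRetDyn
     * header, then RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE words of
     * saved registers (covered by the static part of the liveness mask),
     * then RET_DYN_NONPTRS(dyn) non-pointer words, and finally
     * RET_DYN_PTRS(dyn) pointer words -- exactly the terms summed in the
     * return statement above. */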
    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;
    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",
             c, info->i.type);
    }
}
// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}
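/* Example (editorial sketch): checking every frame of a thread's stack,
 * relying on checkStackFrame() returning each frame's size in words:
 *
 *     StgStack *stack = tso->stackobj;
 *     checkStackChunk(stack->sp, stack->stack + stack->stack_size);
 */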
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info), n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun), n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}
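/* The final assertion is the pointer-tagging invariant (editorial note):
 * a function's arity is stored in the tag bits of pointers to it when it
 * fits, i.e. when arity <= TAG_MASK (TAG_MASK is 7 on 64-bit targets, 3 on
 * 32-bit ones).  For example, on a 64-bit machine:
 *
 *     arity 2  ->  GET_CLOSURE_TAG(tagged_fun) == 2
 *     arity 9  ->  GET_CLOSURE_TAG(tagged_fun) == 0   (untagged)
 */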
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_2_0:
    case THUNK_1_1:
    case THUNK_0_2:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case CONSTR:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
        /* the generic pointers-first layout: check each pointer word */
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }
    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl(bq->owner)->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl(bq->link)->type == IND ||
               get_itbl(bq->link)->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }
    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);
    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC,
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        // stack frames should never appear as heap closures
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }
    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);
    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}
/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */
void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        if (!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop: words that are too small to be info
                   pointers, or that don't look like info pointers at all */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        // pinned blocks are skipped: they may hold more than one object
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}
static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}
void
checkTSO(StgTSO *tso)
{
    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE ||
           tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
           tso->_link->header.info == &stg_TSO_info);

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}
/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso = generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);
            if (checkTSOs) {
                checkTSO(tso);
            }

            // If this TSO is dirty and in an old generation, it had better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }
        }
    }
}
/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

static void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            if (get_itbl(p)->type == TSO) {
                ((StgTSO *)p)->flags |= TSO_MARKED;
            }
        }
    }
}
static void
checkMutableLists (rtsBool checkTSOs)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(generations[g].mut_list, g);
        for (i = 0; i < n_capabilities; i++) {
            checkMutableList(capabilities[i].mut_lists[g], g);
        }
    }
    checkGlobalTSOList(checkTSOs);
}
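/* Editorial note: checkMutableList() sets TSO_MARKED on every TSO it finds
 * on a mutable list, and checkGlobalTSOList() then asserts that every
 * dirty TSO outside generation 0 carries that mark, clearing it again as
 * it goes.  The marking loops must therefore run first, which is why
 * checkGlobalTSOList() is called last here. */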
/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }
        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;
        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;
        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;
        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}
/* Nursery sanity check */
static void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}
/* Full heap sanity check. */
void
checkSanity( rtsBool check_heap )
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        ASSERT(countBlocks(generations[g].blocks)
               == generations[g].n_blocks);
        ASSERT(countBlocks(generations[g].large_objects)
               == generations[g].n_large_blocks);
        if (check_heap) {
            checkHeap(generations[g].blocks);
        }
        checkLargeObjects(generations[g].large_objects);
    }

    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }

    checkFreeListSanity();

#if defined(THREADED_RTS)
    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    checkMutableLists(rtsTrue);
#else
    if (check_heap) {
        checkMutableLists(rtsFalse);
    } else {
        checkMutableLists(rtsTrue);
    }
#endif
}
// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
        }
        markBlocks(generations[g].mut_list);
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //     markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}
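/* Example (editorial sketch): in a DEBUG build this can also be invoked by
 * hand from a debugger once memInventory() has reported a leak:
 *
 *     (gdb) call findMemoryLeak()
 *
 * Unmarked (i.e. unaccounted-for) blocks are then reported via
 * reportUnmarkedBlocks(). */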
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;

    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}
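/* Editorial note: the run queue is doubly linked -- tso->_link points
 * forwards and tso->block_info.prev points backwards -- so the two
 * assertions in the loop check that both directions agree, and the final
 * assertion checks that run_queue_tl really is the last element. */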
/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */
// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}
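/* Example (editorial sketch): from gdb, scanning generation 0 for blocks
 * with more than 1KB of unused space (slop is measured in words, so the
 * 1024/sizeof(W_) threshold above corresponds to 1KB):
 *
 *     (gdb) call findSlop(generations[0].blocks)
 */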
static lnat
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}
void
memInventory (rtsBool show)
{
    nat g, i;
    lnat gen_blocks[RtsFlags.GcFlags.generations];
    lnat nursery_blocks, retainer_blocks,
         arena_blocks, exec_blocks;
    lnat live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
        }
        gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
        gen_blocks[g] += genBlocks(&generations[g]);
    }
    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }
    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
                   retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
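    /* Worked example (editorial): with 4KB blocks on a 64-bit machine,
     * BLOCK_SIZE_W = 512 words and sizeof(W_) = 8, so
     * MB(n) = n*512 / (1048576/8) = n/256 -- that is, 256 blocks per
     * megabyte, consistent with 256 * 4KB = 1MB. */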
    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5lu blocks (%lu MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5lu blocks (%lu MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5lu blocks (%lu MB)\n",
                   live_blocks + free_blocks, MB(live_blocks + free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}

#endif /* DEBUG */