/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"
/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void      checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void      checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void      checkClosureShallow ( StgClosure * );
/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}
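/* Note: in GHC's stack bitmaps a clear bit means "pointer" and a set
 * bit means "non-pointer".  E.g. (illustrative values): bitmap 0x2 with
 * size 2 says payload[0] is a pointer and payload[1] is not, so only
 * payload[0] gets checked above. */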
static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i = 0, j;

    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        for (j = 0; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}
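/* A sketch (assumed helper, not used by the RTS) of the indexing the
 * loop above performs incrementally: bit i of a large bitmap lives in
 * word i / BITS_IN(W_), at bit position i % BITS_IN(W_). */
#if 0
static StgWord largeBitmapBit (StgLargeBitmap *lb, nat i)
{
    return (lb->bitmap[i / BITS_IN(W_)] >> (i % BITS_IN(W_))) & 1;
}
#endif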
/*
 * Check that it looks like a valid closure - without checking its payload.
 * Used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */
static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}
// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }
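    /* A RET_DYN frame is thus laid out as:
     *     StgRetDyn header | bitmapped slots | non-ptr reg slots
     *                      | extra non-ptrs  | extra ptrs
     * and the size returned above is just the sum of those parts. */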
    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;
    case RET_BCO:
    {
        StgBCO *bco;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }
    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;
    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}
// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}
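/* Usage sketch: an entire thread stack can be validated with
 *     checkStackChunk(tso->sp, tso->stack + tso->stack_size);
 * which is exactly what checkTSO() below does. */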
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgClosure *p;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    p = (StgClosure *)payload;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}
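/* The assertion above is the pointer-tagging invariant: a function
 * whose arity fits in the tag bits (arity <= TAG_MASK) has its arity
 * stored in the tag of pointers to it; otherwise the tag is 0.  E.g.
 * on a 64-bit platform (TAG_MASK == 7) a 2-ary function's pointer
 * carries tag 2. */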
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK: case THUNK_1_0: case THUNK_0_1: case THUNK_1_1:
    case THUNK_0_2: case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN: case FUN_1_0: case FUN_0_1: case FUN_1_1:
    case FUN_0_2: case FUN_2_0:
    case CONSTR: case CONSTR_1_0: case CONSTR_0_1: case CONSTR_1_1:
    case CONSTR_0_2: case CONSTR_2_0:
    case IND_PERM: case BLACKHOLE: case PRIM: case MUT_PRIM:
    case MUT_VAR_CLEAN: case MUT_VAR_DIRTY:
    case CONSTR_STATIC: case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC: case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }
    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl(bq->owner)->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || get_itbl(bq->queue)->type == TSO);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl(bq->link)->type == IND ||
               get_itbl(bq->link)->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }
    case BCO:
    {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }
    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);
    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }
    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();
    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }
    case RET_BCO: case RET_SMALL: case RET_BIG: case RET_DYN:
    case UPDATE_FRAME: case STOP_FRAME: case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");
    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }
    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }
    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);
    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}
/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */
void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;

            /* skip over slop */
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
        }
    }
}
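/* Slop arises when an object is overwritten in place by a smaller one
 * (e.g. a thunk updated with an indirection).  The mutator zeroes the
 * freed-off words (see Updates.h), so a run of slop can never be
 * mistaken for an info pointer by the scan above. */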
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p=start; p<end; p+=size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}
void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}
void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->_link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE ||
           tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
           tso->_link->header.info == &stg_TSO_info);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));

    ASSERT(stack <= sp && sp < stack_end);

    checkStackChunk(sp, stack_end);
}
/* -----------------------------------------------------------------------------
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
   -------------------------------------------------------------------------- */

void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);
            if (checkTSOs) {
                checkTSO(tso);
            }

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }
        }
    }
}
/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */
void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            if (get_itbl(p)->type == TSO) {
                ((StgTSO *)p)->flags |= TSO_MARKED;
            }
        }
    }
}
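/* TSO_MARKED is a handshake with checkGlobalTSOList(): checkMutableLists()
 * below first walks every mutable list, marking the TSOs found there;
 * checkGlobalTSOList() then asserts that each dirty TSO in an old
 * generation carries the mark, and clears it again. */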
void
checkMutableLists (rtsBool checkTSOs)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(generations[g].mut_list, g);
        for (i = 0; i < n_capabilities; i++) {
            checkMutableList(capabilities[i].mut_lists[g], g);
        }
    }
    checkGlobalTSOList(checkTSOs);
}
/* -----------------------------------------------------------------------------
   Check the static objects list.
   -------------------------------------------------------------------------- */

void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {

        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}
/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}
/* Full heap sanity check. */
void
checkSanity( rtsBool check_heap )
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        ASSERT(countBlocks(generations[g].blocks)
               == generations[g].n_blocks);
        ASSERT(countBlocks(generations[g].large_objects)
               == generations[g].n_large_blocks);
        if (check_heap) {
            checkHeap(generations[g].blocks);
        }
        checkLargeObjects(generations[g].large_objects);
    }

    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }

    checkFreeListSanity();

#if defined(THREADED_RTS)
    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    checkMutableLists(rtsTrue);
#else
    if (check_heap) {
        checkMutableLists(rtsFalse);
    } else {
        checkMutableLists(rtsTrue);
    }
#endif
}
// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and searching through memory to
// find blocks that are not marked.  In the debugger this can help to
// give us a clue about what kind of block leaked.  In the future we
// might annotate blocks with their allocation site to give more
// helpful info.
static void
findMemoryLeak (void)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
        }
        markBlocks(generations[g].mut_list);
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
    }

    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }

    // count the blocks allocated by the arena allocator
    // TODO: markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;

    prev = END_TSO_QUEUE;
    // the run queue is doubly linked: _link forwards, block_info.prev back
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}
/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */
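/* In block units, the invariant tested is
 *
 *     live_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK
 *
 * i.e. every block of every allocated megablock is either in use
 * somewhere in the RTS or sitting on the block allocator's free list. */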
// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}
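/* Example (from gdb; the generation chosen is illustrative):
 *     (gdb) call findSlop(generations[0].blocks)
 * reports every block in that chain wasting more than 1KB. */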
static lnat
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}
void
memInventory (rtsBool show)
{
    nat g, i;
    lnat gen_blocks[RtsFlags.GcFlags.generations];
    lnat nursery_blocks, retainer_blocks,
         arena_blocks, exec_blocks;
    lnat live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
        }
        gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
        retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
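/* Worked example: with 4096-byte blocks and 8-byte words we have
 * BLOCK_SIZE_W == 512 and (1024*1024)/sizeof(W_) == 131072, so
 * MB(n) == n*512/131072 == n/256 -- i.e. 256 blocks per megabyte. */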

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5lu blocks (%lu MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5lu blocks (%lu MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5lu blocks (%lu MB)\n",
                   live_blocks + free_blocks, MB(live_blocks+free_blocks));
        debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
                   mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }

    ASSERT(n_alloc_blocks == live_blocks);
}

#endif /* DEBUG */