1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
22 #include "sm/Storage.h"
23 #include "sm/BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32    -------------------------------------------------------------------------- */
/* Forward declarations for the mutually-recursive checkers defined below
 * (bitmap walkers and the shallow closure check used to break recursion). */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
/* Walk 'size' payload words under a one-word bitmap.  A CLEAR bit marks a
 * pointer word, which is shallow-checked as a closure; set bits (non-pointer
 * words) are skipped. */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49     for(i = 0; i < size; i++, bitmap >>= 1 ) {
        /* bit clear => payload[i] holds a closure pointer */
50 if ((bitmap & 1) == 0) {
51 checkClosureShallow((StgClosure *)payload[i]);
/* Same as checkSmallBitmap, but for a multi-word (large) bitmap: consume one
 * StgWord of bitmap bits at a time, BITS_IN(W_) bits per word, until 'size'
 * payload words have been covered.  A clear bit marks a pointer word. */
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63     for (bmp=0; i < size; bmp++) {
64 StgWord bitmap = large_bitmap->bitmap[bmp];
66 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67     if ((bitmap & 1) == 0) {
68 checkClosureShallow((StgClosure *)payload[i]);
75  * check that it looks like a valid closure - without checking its payload
76  * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
/* NOTE(review): 'q' is presumably the untagged form of 'p' (untagging
 * happens on a line elided from this view) — confirm against full source. */
86     ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
88     /* Is it a static closure? */
    /* Static closures live outside the heap and must carry the STATIC flag;
     * heap-allocated ones must not. */
89     if (!HEAP_ALLOCED(q)) {
90 ASSERT(closure_STATIC(q));
92 ASSERT(!closure_STATIC(q));
96 // check an individual stack object
/* Sanity-check one stack frame at 'c' and return its size in words, so the
 * caller (checkStackChunk) can advance to the next frame. */
98 checkStackFrame( StgPtr c )
101     const StgRetInfoTable* info;
103     info = get_ret_itbl((StgClosure *)c);
105     /* All activation records have 'bitmap' style layout info. */
106     switch (info->i.type) {
107     case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
        /* First check the fixed register save area via its liveness mask. */
116 p = (P_)(r->payload);
117 checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
118 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
120 // skip over the non-pointers
121 p += RET_DYN_NONPTRS(dyn);
123 // follow the ptr words
124 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
125     checkClosureShallow((StgClosure *)*p);
        /* Total frame size: header + bitmap area + saved regs + args. */
129 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
130     RET_DYN_NONPTR_REGS_SIZE +
131     RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
        /* Update frames additionally carry an updatee pointer to check;
         * otherwise they fall through to the small-bitmap cases below. */
135 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
136     case ATOMICALLY_FRAME:
137     case CATCH_RETRY_FRAME:
138     case CATCH_STM_FRAME:
140       // small bitmap cases (<= 32 entries)
143 size = BITMAP_SIZE(info->i.layout.bitmap);
144 checkSmallBitmap((StgPtr)c + 1,
145 BITMAP_BITS(info->i.layout.bitmap), size);
        /* RET_BCO: the BCO sits in the first payload word and supplies its
         * own (large) bitmap describing the rest of the frame. */
151 bco = (StgBCO *)*(c+1);
152 size = BCO_BITMAP_SIZE(bco);
153 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
157     case RET_BIG: // large bitmap (> 32 entries)
158 size = GET_LARGE_BITMAP(&info->i)->size;
159 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        /* RET_FUN: layout is dictated by the function closure's info table,
         * not by this frame's own bitmap. */
164 StgFunInfoTable *fun_info;
167 ret_fun = (StgRetFun *)c;
168 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
169 size = ret_fun->size;
170 switch (fun_info->f.fun_type) {
172     checkSmallBitmap((StgPtr)ret_fun->payload,
173     BITMAP_BITS(fun_info->f.b.bitmap), size);
176     checkLargeBitmap((StgPtr)ret_fun->payload,
177     GET_FUN_LARGE_BITMAP(fun_info), size);
        /* default: standard argument pattern — look the bitmap up in the
         * static stg_arg_bitmaps table, indexed by fun_type. */
180     checkSmallBitmap((StgPtr)ret_fun->payload,
181     BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
185 return sizeofW(StgRetFun) + size;
189 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
193 // check sections of stack between update frames
/* Walk frames from sp up to stack_end, using each frame's size (returned by
 * checkStackFrame) to step to the next one. */
195 checkStackChunk( StgPtr sp, StgPtr stack_end )
200     while (p < stack_end) {
201 p += checkStackFrame( p );
    /* The final-position assertion is deliberately disabled: */
203 //    ASSERT( p == stack_end ); -- HWL
/* Check the n_args payload words of a PAP/AP against the argument bitmap of
 * its function closure, and verify the closure-pointer tag on tagged_fun
 * matches the function's arity (or is 0 when the arity exceeds TAG_MASK). */
207 checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
211     StgFunInfoTable *fun_info;
213     fun = UNTAG_CLOSURE(tagged_fun);
214     ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
215     fun_info = get_fun_itbl(fun);
217     p = (StgClosure *)payload;
218     switch (fun_info->f.fun_type) {
220 checkSmallBitmap( (StgPtr)payload,
221 BITMAP_BITS(fun_info->f.b.bitmap), n_args );
224 checkLargeBitmap( (StgPtr)payload,
225 GET_FUN_LARGE_BITMAP(fun_info),
        /* NOTE(review): a second large-bitmap case follows (presumably
         * ARG_BCO using the BCO's own bitmap) — its label is elided here. */
229 checkLargeBitmap( (StgPtr)payload,
        /* default: standard argument pattern from stg_arg_bitmaps. */
234 checkSmallBitmap( (StgPtr)payload,
235 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
    /* Pointer tagging invariant: functions with small arity are tagged with
     * their arity; larger arities leave the pointer untagged. */
240     ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
241            : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
/* Sanity-check a single closure and return its size in words.  Dispatches on
 * the closure type from the info table; each case asserts that every pointer
 * field LOOKS_LIKE_CLOSURE_PTR and returns the closure's size. */
246 checkClosure( StgClosure* p )
248     const StgInfoTable *info;
250     ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
252     p = UNTAG_CLOSURE(p);
253     /* Is it a static closure (i.e. in the data segment)? */
254     if (!HEAP_ALLOCED(p)) {
255 ASSERT(closure_STATIC(p));
257 ASSERT(!closure_STATIC(p));
260     info = p->header.info;
    /* A forwarding pointer means GC left an evacuated closure behind —
     * that should never be visible here. */
262     if (IS_FORWARDING_PTR(info)) {
263       barf("checkClosure: found EVACUATED closure %d", info->type);
265     info = INFO_PTR_TO_STRUCT(info);
267     switch (info->type) {
        /* MVar: three pointer fields, fixed size. */
272 StgMVar *mvar = (StgMVar *)p;
273 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
274 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
275 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
276 return sizeofW(StgMVar);
        /* Thunk varieties: check each pointer in the payload per the
         * info table's layout. */
287     for (i = 0; i < info->layout.payload.ptrs; i++) {
288 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
290     return thunk_sizeW_fromITBL(info);
        /* Functions, constructors, INDs, statics, etc.: generic
         * pointers-first payload check. */
307     case IND_OLDGEN_PERM:
315     case CONSTR_NOCAF_STATIC:
320 for (i = 0; i < info->layout.payload.ptrs; i++) {
321     ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
323 return sizeW_fromITBL(info);
        /* BCO: check its three component pointers. */
327     StgBCO *bco = (StgBCO *)p;
328     ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
329     ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
330     ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
331     return bco_sizeW(bco);
334     case IND_STATIC: /* (1, 0) closure */
335       ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
336       return sizeW_fromITBL(info);
339       /* deal with these specially - the info table isn't
340        * representative of the actual layout.
342       { StgWeak *w = (StgWeak *)p;
343 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
344 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
345 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
347 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
349 return sizeW_fromITBL(info);
353       ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
354       return THUNK_SELECTOR_sizeW();
358 /* we don't expect to see any of these after GC
359  * but they might appear during execution
361 StgInd *ind = (StgInd *)p;
362 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
363 return sizeofW(StgInd);
        /* Stack frames must never appear as free-standing heap closures. */
373     case ATOMICALLY_FRAME:
374     case CATCH_RETRY_FRAME:
375     case CATCH_STM_FRAME:
376     barf("checkClosure: stack frame");
        /* AP: generic application — args checked like a PAP. */
380     StgAP* ap = (StgAP *)p;
381     checkPAP (ap->fun, ap->payload, ap->n_args);
387     StgPAP* pap = (StgPAP *)p;
388     checkPAP (pap->fun, pap->payload, pap->n_args);
389     return pap_sizeW(pap);
        /* AP_STACK: payload is a saved stack chunk — walk it frame by frame. */
394     StgAP_STACK *ap = (StgAP_STACK *)p;
395     ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
396     checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
397     return ap_stack_sizeW(ap);
        /* ARR_WORDS carries no pointers: size only. */
401 return arr_words_sizeW((StgArrWords *)p);
403     case MUT_ARR_PTRS_CLEAN:
404     case MUT_ARR_PTRS_DIRTY:
405     case MUT_ARR_PTRS_FROZEN:
406     case MUT_ARR_PTRS_FROZEN0:
408     StgMutArrPtrs* a = (StgMutArrPtrs *)p;
410     for (i = 0; i < a->ptrs; i++) {
411 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
413     return mut_arr_ptrs_sizeW(a);
417 checkTSO((StgTSO *)p);
418 return tso_sizeW((StgTSO *)p);
        /* TRec chunk: check the chain link plus each populated entry. */
423       StgTRecChunk *tc = (StgTRecChunk *)p;
424       ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
425       for (i = 0; i < tc -> next_entry_idx; i ++) {
426 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
427 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
428 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
430       return sizeofW(StgTRecChunk);
434 barf("checkClosure (closure type %d)", info->type);
439 /* -----------------------------------------------------------------------------
442    After garbage collection, the live heap is in a state where we can
443    run through and check that all the pointers point to the right
444    place.  This function starts at a given position and sanity-checks
445    all the objects in the remainder of the chain.
446    -------------------------------------------------------------------------- */
449 checkHeap(bdescr *bd)
453 #if defined(THREADED_RTS)
454     // heap sanity checking doesn't work with SMP, because we can't
455     // zero the slop (see Updates.h).
    /* Walk every block in the chain, checking each closure in turn and
     * enforcing the minimum closure size. */
459     for (; bd != NULL; bd = bd->link) {
461 while (p < bd->free) {
462     nat size = checkClosure((StgClosure *)p);
463     /* This is the smallest size of closure that can live in the heap */
464     ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            /* Skip over slop: words that don't look like info pointers. */
468     while (p < bd->free &&
469    (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
/* Sanity-check a contiguous run of closures in [start, end): every word at a
 * closure boundary must look like an info pointer, and each closure must meet
 * the minimum heap closure size. */
475 checkHeapChunk(StgPtr start, StgPtr end)
480   for (p=start; p<end; p+=size) {
481     ASSERT(LOOKS_LIKE_INFO_PTR(*p));
482     size = checkClosure((StgClosure *)p);
483     /* This is the smallest size of closure that can live in the heap. */
484     ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
/* Check every large object on the given block list; pinned blocks are
 * skipped because their contents are not closure-structured slop-free data. */
489 checkLargeObjects(bdescr *bd)
492     if (!(bd->flags & BF_PINNED)) {
493 checkClosure((StgClosure *)bd->start);
/* Sanity-check one TSO: follow relocations, skip killed threads (GC ignores
 * their pointers), then verify sp lies within the stack and walk the stack
 * frames. */
500 checkTSO(StgTSO *tso)
503     StgPtr stack = tso->stack;
504     StgOffset stack_size = tso->stack_size;
505     StgPtr stack_end = stack + stack_size;
    /* A relocated TSO is just a forwarding shell: check the real one. */
507     if (tso->what_next == ThreadRelocated) {
508       checkTSO(tso->_link);
512     if (tso->what_next == ThreadKilled) {
513       /* The garbage collector doesn't bother following any pointers
514        * from dead threads, so don't check sanity here.
519     ASSERT(stack <= sp && sp < stack_end);
521     checkStackChunk(sp, stack_end);
525    Check that all TSOs have been evacuated.
526    Optionally also check the sanity of the TSOs.
529 checkGlobalTSOList (rtsBool checkTSOs)
534   for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
535       for (tso=generations[g].threads; tso != END_TSO_QUEUE;
536            tso = tso->global_link) {
537           ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
538           ASSERT(get_itbl(tso)->type == TSO);
          /* Skip through any relocation chain to the live TSO. */
542           while (tso->what_next == ThreadRelocated) {
546           // If this TSO is dirty and in an old generation, it better
547           // be on the mutable list.
          /* TSO_MARKED is set by checkMutableList; consume the mark here
           * so a later pass starts clean. */
548           if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
549               ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
550               tso->flags &= ~TSO_MARKED;
556 /* -----------------------------------------------------------------------------
557    Check mutable list sanity.
558    -------------------------------------------------------------------------- */
/* Every object on generation 'gen's mutable list must actually live in that
 * generation (or be static).  TSOs found here are flagged TSO_MARKED so that
 * checkGlobalTSOList can verify dirty TSOs were on a mutable list. */
561 checkMutableList( bdescr *mut_bd, nat gen )
567     for (bd = mut_bd; bd != NULL; bd = bd->link) {
568 for (q = bd->start; q < bd->free; q++) {
569     p = (StgClosure *)*q;
570     ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
571             if (get_itbl(p)->type == TSO) {
572                 ((StgTSO *)p)->flags |= TSO_MARKED;
/* Check the global and per-capability mutable lists of every generation,
 * then check the global TSO list (optionally checking each TSO's sanity). */
579 checkMutableLists (rtsBool checkTSOs)
583     for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
584 checkMutableList(generations[g].mut_list, g);
585 for (i = 0; i < n_capabilities; i++) {
586     checkMutableList(capabilities[i].mut_lists[g], g);
    /* Must follow checkMutableList: it relies on the TSO_MARKED flags set above. */
589     checkGlobalTSOList(checkTSOs);
593    Check the static objects list.
/* Walk the static-objects linked list (threaded through each closure's
 * static-link field, selected per closure type) to END_OF_STATIC_LIST. */
596 checkStaticObjects ( StgClosure* static_objects )
598   StgClosure *p = static_objects;
601   while (p != END_OF_STATIC_LIST) {
604     switch (info->type) {
        /* IND_STATIC: check the (untagged) indirectee before following
         * the static link. */
607 StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);
609 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
610 ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
611 p = *IND_STATIC_LINK((StgClosure *)p);
616 p = *THUNK_STATIC_LINK((StgClosure *)p);
620 p = *FUN_STATIC_LINK((StgClosure *)p);
624 p = *STATIC_LINK(info,(StgClosure *)p);
      /* NOTE(review): "checkStaticObjetcs" below is a typo ("Objects") in the
       * runtime error string — fix at the call string, not in comments. */
628 barf("checkStaticObjetcs: strange closure %p (%s)",
634 /* Nursery sanity check */
/* Verify the nursery's block chain: each block's back-pointer matches its
 * predecessor, and the summed block count equals nursery->n_blocks. */
636 checkNurserySanity (nursery *nursery)
642     for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
643 ASSERT(bd->u.back == prev);
645 blocks += bd->blocks;
648     ASSERT(blocks == nursery->n_blocks);
652 /* Full heap sanity check. */
/* Top-level sanity driver: per-generation block counts, heap closures
 * (optional), large objects, nurseries, the free list, and mutable lists. */
654 checkSanity( rtsBool check_heap )
658     for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
659 ASSERT(countBlocks(generations[g].blocks)
660        == generations[g].n_blocks);
661 ASSERT(countBlocks(generations[g].large_objects)
662     == generations[g].n_large_blocks);
664     checkHeap(generations[g].blocks);
666 checkLargeObjects(generations[g].large_objects);
669     for (n = 0; n < n_capabilities; n++) {
670 checkNurserySanity(&nurseries[n]);
673     checkFreeListSanity();
675 #if defined(THREADED_RTS)
676     // always check the stacks in threaded mode, because checkHeap()
677     // does nothing in this case.
678     checkMutableLists(rtsTrue);
681     checkMutableLists(rtsFalse);
683     checkMutableLists(rtsTrue);
688 // If memInventory() calculates that we have a memory leak, this
689 // function will try to find the block(s) that are leaking by marking
690 // all the ones that we know about, and search through memory to find
691 // blocks that are not marked.  In the debugger this can help to give
692 // us a clue about what kind of block leaked.  In the future we might
693 // annotate blocks with their allocation site to give more helpful
696 findMemoryLeak (void)
/* Mark every block we can account for ... */
699     for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
700 for (i = 0; i < n_capabilities; i++) {
701     markBlocks(capabilities[i].mut_lists[g]);
703 markBlocks(generations[g].mut_list);
704 markBlocks(generations[g].blocks);
705 markBlocks(generations[g].large_objects);
708     for (i = 0; i < n_capabilities; i++) {
709 markBlocks(nurseries[i].blocks);
    /* Retainer-profiling and arena blocks are currently not marked: */
714 //    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
715 //       markRetainerBlocks();
719     // count the blocks allocated by the arena allocator
721     // markArenaBlocks();
723     // count the blocks containing executable memory
724     markBlocks(exec_block);
    /* ... then report anything left unmarked as a suspected leak. */
726     reportUnmarkedBlocks();
730 /* -----------------------------------------------------------------------------
731    Memory leak detection
733    memInventory() checks for memory leaks by counting up all the
734    blocks we know about and comparing that to the number of blocks
735    allegedly floating around in the system.
736    -------------------------------------------------------------------------- */
738 // Useful for finding partially full blocks in gdb
739 void findSlop(bdescr *bd);
/* For each block group on the chain, report (via debugBelch) groups with
 * more than 1KB of unused ("slop") space between bd->free and the end. */
740 void findSlop(bdescr *bd)
744     for (; bd != NULL; bd = bd->link) {
        /* slop, measured in words */
745 slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
746 if (slop > (1024/sizeof(W_))) {
747     debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
748        bd->start, bd, slop / (1024/sizeof(W_)));
/* Return the block count attributable to a generation (normal + old +
 * large-object blocks), after asserting the cached counters are accurate. */
754 genBlocks (generation *gen)
756     ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
757     ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
758     return gen->n_blocks + gen->n_old_blocks +
759 countAllocdBlocks(gen->large_objects);
/* Count every block the RTS knows about (generations, nurseries, retainer
 * stacks, arenas, exec memory, free list) and compare the total against the
 * number of blocks in all allocated megablocks; report a leak on mismatch.
 * When 'show' is set (or a leak is found) print the full inventory. */
763 memInventory (rtsBool show)
766   lnat gen_blocks[RtsFlags.GcFlags.generations];
767   lnat nursery_blocks, retainer_blocks,
768        arena_blocks, exec_blocks;
769   lnat live_blocks = 0, free_blocks = 0;
772   // count the blocks we current have
774   for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
776       for (i = 0; i < n_capabilities; i++) {
777   gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
779       gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
780       gen_blocks[g] += genBlocks(&generations[g]);
784   for (i = 0; i < n_capabilities; i++) {
785       ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
786       nursery_blocks += nurseries[i].n_blocks;
  /* Retainer profiling keeps its own block stacks; count them only when
   * that profiling mode is active. */
791   if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
792       retainer_blocks = retainerStackBlocks();
796   // count the blocks allocated by the arena allocator
797   arena_blocks = arenaBlocks();
799   // count the blocks containing executable memory
800   exec_blocks = countAllocdBlocks(exec_block);
802   /* count the blocks on the free list */
803   free_blocks = countFreeList();
806   for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
807       live_blocks += gen_blocks[g];
809   live_blocks += nursery_blocks +
810     + retainer_blocks + arena_blocks + exec_blocks;
  /* Convert a block count to megabytes for reporting. */
812 #define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
  /* Leak iff accounted-for blocks != blocks owned by allocated megablocks. */
814   leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
819       debugBelch("Memory leak detected:\n");
821       debugBelch("Memory inventory:\n");
823       for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
824   debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
825      gen_blocks[g], MB(gen_blocks[g]));
827       debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
828  nursery_blocks, MB(nursery_blocks));
829       debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
830  retainer_blocks, MB(retainer_blocks));
831       debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
832  arena_blocks, MB(arena_blocks));
833       debugBelch("  exec         : %5lu blocks (%lu MB)\n",
834  exec_blocks, MB(exec_blocks));
835       debugBelch("  free         : %5lu blocks (%lu MB)\n",
836  free_blocks, MB(free_blocks));
837       debugBelch("  total        : %5lu blocks (%lu MB)\n",
838  live_blocks + free_blocks, MB(live_blocks+free_blocks));
840       debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
841  mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
849   ASSERT(n_alloc_blocks == live_blocks);