/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"
/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */
static void      checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void      checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void      checkClosureShallow ( StgClosure * );
/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */
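// Note on bitmap conventions: in the layout bitmaps used below, a 0 bit
// means "this word is a pointer" and a 1 bit means "non-pointer", which
// is why a payload word is only followed when (bitmap & 1) == 0.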
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for (i = 0; i < size; i++, bitmap >>= 1 ) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}
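// A large bitmap is a size field plus an array of StgWords.  Walk the
// payload one bitmap word at a time, shallow-checking every word whose
// bit marks it as a pointer.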
static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}
/*
 * check that it looks like a valid closure - without checking its payload
 * used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */
static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}
// check an individual stack frame; returns its size in words
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
            RET_DYN_NONPTR_REGS_SIZE +
            RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        // fall through to the small-bitmap cases
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",
             c, info->i.type);
    }
}
// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}
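// checkPAP is shared by the AP and PAP cases of checkClosure() below:
// the function's argument bitmap tells us which words of the payload
// are pointers, exactly as for a RET_FUN stack frame.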
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    // pointer-tagging invariant: a function whose arity fits in the tag
    // bits must be tagged with exactly that arity, otherwise untagged
    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}
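// Check a single closure and return its size in words, i.e. the amount
// a heap traversal should advance to reach the next object.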
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl(bq->owner)->type == TSO);
        ASSERT(bq->queue == END_TSO_QUEUE || get_itbl(bq->queue)->type == TSO);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl(bq->link)->type == IND ||
               get_itbl(bq->link)->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}
/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */
void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    if (n_capabilities > 1) { return; }
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;

            /* skip over slop */
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
        }
    }
}
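// Like checkHeap(), but over an explicit address range; here every word
// we land on must be a valid info pointer, i.e. the range is assumed to
// contain no slop.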
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}
void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}
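// Note: pinned blocks are skipped above.  They are filled by
// allocatePinned() and may contain gaps that do not parse as closures.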
void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->_link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE || get_itbl(tso->_link)->type == TSO);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));

    ASSERT(stack <= sp && sp < stack_end);

    checkStackChunk(sp, stack_end);
}
/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso = generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);
            if (checkTSOs) {
                checkTSO(tso);
            }

            // follow ThreadRelocated indirections before inspecting flags
            tso = deRefTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }
        }
    }
}
/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            if (get_itbl(p)->type == TSO) {
                ((StgTSO *)p)->flags |= TSO_MARKED;
            }
        }
    }
}
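// Note: checkMutableList() sets TSO_MARKED on each TSO it finds on a
// mutable list; checkGlobalTSOList() then asserts that every dirty TSO
// carries the mark (i.e. really was on a mutable list) and clears it.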
void
checkMutableLists (rtsBool checkTSOs)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(generations[g].mut_list, g);
        for (i = 0; i < n_capabilities; i++) {
            checkMutableList(capabilities[i].mut_lists[g], g);
        }
    }
    checkGlobalTSOList(checkTSOs);
}
/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {

        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}
/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}
/* Full heap sanity check. */
void
checkSanity( rtsBool check_heap )
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        ASSERT(countBlocks(generations[g].blocks)
               == generations[g].n_blocks);
        ASSERT(countBlocks(generations[g].large_objects)
               == generations[g].n_large_blocks);
        if (check_heap) {
            checkHeap(generations[g].blocks);
        }
        checkLargeObjects(generations[g].large_objects);
    }

    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }

    checkFreeListSanity();

#if defined(THREADED_RTS)
    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    checkMutableLists(rtsTrue);
#else
    if (check_heap) {
        checkMutableLists(rtsFalse);
    } else {
        checkMutableLists(rtsTrue);
    }
#endif
}
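// These checks are normally driven by the RTS debug flags: running a
// DEBUG-compiled program with "+RTS -DS" sets the sanity debug flag,
// and the GC then invokes the checks around collections, along the
// lines of (illustrative, not the exact call site):
//
//   IF_DEBUG(sanity, checkSanity(rtsTrue));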
// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
        }
        markBlocks(generations[g].mut_list);
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}
/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */
// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}
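// For example, from gdb in a DEBUG build (the argument here is just an
// illustration; pass whichever block list you suspect):
//
//   (gdb) call findSlop(generations[0].blocks)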
static lnat
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}
void
memInventory (rtsBool show)
{
    nat g, i;
    lnat gen_blocks[RtsFlags.GcFlags.generations];
    lnat nursery_blocks, retainer_blocks,
         arena_blocks, exec_blocks;
    lnat live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
        }
        gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks
                + retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5lu blocks (%lu MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5lu blocks (%lu MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5lu blocks (%lu MB)\n",
                   live_blocks + free_blocks, MB(live_blocks + free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}

#endif /* DEBUG */