1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Sanity checking code for the heap and stack.
7 * Used when debugging: check that everything is reasonable.
9 * - All things that are supposed to be pointers look like pointers.
11 * - Objects in text space are marked as static closures, those
12 * in the heap are dynamic.
14 * ---------------------------------------------------------------------------*/
16 #include "PosixSource.h"
19 #ifdef DEBUG /* whole file */
22 #include "sm/Storage.h"
23 #include "sm/BlockAlloc.h"
30 /* -----------------------------------------------------------------------------
32 -------------------------------------------------------------------------- */
/* Forward declarations: the two bitmap walkers and the shallow closure
 * check are used mutually by the stack-frame and PAP checkers below. */
34 static void checkSmallBitmap ( StgPtr payload, StgWord bitmap, nat );
35 static void checkLargeBitmap ( StgPtr payload, StgLargeBitmap*, nat );
36 static void checkClosureShallow ( StgClosure * );
38 /* -----------------------------------------------------------------------------
40 -------------------------------------------------------------------------- */
/* checkSmallBitmap: walk 'size' words of 'payload' driven by 'bitmap'.
 * A clear (0) bit marks a pointer word, which is shallow-checked as a
 * closure; set bits are non-pointers and skipped.
 * NOTE(review): this listing elides surrounding lines (declaration of 'i',
 * return type, closing braces) — the code below is a fragment. */
43 checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
49 for(i = 0; i < size; i++, bitmap >>= 1 ) {
50 if ((bitmap & 1) == 0) {
51 checkClosureShallow((StgClosure *)payload[i]);
/* checkLargeBitmap: like checkSmallBitmap, but the liveness mask spans
 * multiple StgWords ('bmp' indexes large_bitmap->bitmap).  The inner loop
 * consumes at most BITS_IN(W_) bits per bitmap word; a clear bit marks a
 * pointer word to shallow-check.
 * NOTE(review): declarations of 'i'/'j' and closing braces are elided in
 * this listing — fragment only. */
57 checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
63 for (bmp=0; i < size; bmp++) {
64 StgWord bitmap = large_bitmap->bitmap[bmp];
66 for(; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1 ) {
67 if ((bitmap & 1) == 0) {
68 checkClosureShallow((StgClosure *)payload[i]);
75 * check that it looks like a valid closure - without checking its payload
76 * used to avoid recursion between checking PAPs and checking stack
81 checkClosureShallow( StgClosure* p )
// NOTE(review): 'q' is presumably the untagged form of 'p' (the untagging
// line is elided in this listing) — confirm against the full source.
86 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
88 /* Is it a static closure? */
89 if (!HEAP_ALLOCED(q)) {
90 ASSERT(closure_STATIC(q));
// Heap-allocated closures must NOT carry the static flag.
92 ASSERT(!closure_STATIC(q));
96 // check an individual stack object
// checkStackFrame: sanity-check one activation record at 'c' and return its
// size in words, so the caller (checkStackChunk) can advance to the next
// frame.  Dispatches on the frame's return-info-table type.
// NOTE(review): several case labels, declarations ('p', 'size', 'dyn', 'r',
// 'bco', 'ret_fun') and braces are elided in this listing — fragment only.
98 checkStackFrame( StgPtr c )
101 const StgRetInfoTable* info;
103 info = get_ret_itbl((StgClosure *)c);
105 /* All activation records have 'bitmap' style layout info. */
106 switch (info->i.type) {
107 case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
116 p = (P_)(r->payload);
117 checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
118 p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
120 // skip over the non-pointers
121 p += RET_DYN_NONPTRS(dyn);
123 // follow the ptr words
124 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
125 checkClosureShallow((StgClosure *)*p);
// Frame size = header + bitmap + saved regs + dynamic ptr/non-ptr words.
129 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
130 RET_DYN_NONPTR_REGS_SIZE +
131 RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
// Update frames: the updatee must at least look like a closure pointer.
// NOTE(review): the UPDATE_FRAME case label itself is elided here.
135 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
136 case ATOMICALLY_FRAME:
137 case CATCH_RETRY_FRAME:
138 case CATCH_STM_FRAME:
140 // small bitmap cases (<= 32 entries)
143 size = BITMAP_SIZE(info->i.layout.bitmap);
144 checkSmallBitmap((StgPtr)c + 1,
145 BITMAP_BITS(info->i.layout.bitmap), size);
// RET_BCO: the frame's second word points at the BCO, whose bitmap
// describes the rest of the frame (hence payload starts at c + 2).
151 bco = (StgBCO *)*(c+1);
152 size = BCO_BITMAP_SIZE(bco);
153 checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
157 case RET_BIG: // large bitmap (> 32 entries)
158 size = GET_LARGE_BITMAP(&info->i)->size;
159 checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
// RET_FUN: the frame records a function closure plus its pending
// arguments; layout depends on the function's calling convention.
164 StgFunInfoTable *fun_info;
167 ret_fun = (StgRetFun *)c;
168 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
169 size = ret_fun->size;
170 switch (fun_info->f.fun_type) {
// ARG_GEN (presumably — label elided): bitmap stored in the info table.
172 checkSmallBitmap((StgPtr)ret_fun->payload,
173 BITMAP_BITS(fun_info->f.b.bitmap), size);
// ARG_GEN_BIG (presumably — label elided): out-of-line large bitmap.
176 checkLargeBitmap((StgPtr)ret_fun->payload,
177 GET_FUN_LARGE_BITMAP(fun_info), size);
// Default: standard argument pattern, bitmap from the global table.
180 checkSmallBitmap((StgPtr)ret_fun->payload,
181 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
185 return sizeofW(StgRetFun) + size;
189 barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
193 // check sections of stack between update frames
// checkStackChunk: walk frames from 'sp' up to 'stack_end', letting
// checkStackFrame validate each frame and report its size.
195 checkStackChunk( StgPtr sp, StgPtr stack_end )
200 while (p < stack_end) {
201 p += checkStackFrame( p );
// The final equality check is disabled; left as-is deliberately.
203 // ASSERT( p == stack_end ); -- HWL
// checkPAP: sanity-check the function and pending-argument payload of a
// PAP/AP closure.  'tagged_fun' may carry a pointer tag; the tag must agree
// with the function's arity (checked at the bottom).
// NOTE(review): case labels inside the switch and some braces are elided in
// this listing — fragment only.
207 checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
211 StgFunInfoTable *fun_info;
213 fun = UNTAG_CLOSURE(tagged_fun);
214 ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
215 fun_info = get_fun_itbl(fun);
217 p = (StgClosure *)payload;
218 switch (fun_info->f.fun_type) {
// ARG_GEN (presumably — label elided): small bitmap from the info table.
220 checkSmallBitmap( (StgPtr)payload,
221 BITMAP_BITS(fun_info->f.b.bitmap), n_args );
// ARG_GEN_BIG (presumably — label elided): large bitmap from the info table.
224 checkLargeBitmap( (StgPtr)payload,
225 GET_FUN_LARGE_BITMAP(fun_info),
// ARG_BCO (presumably — label elided): bitmap taken from the BCO itself.
229 checkLargeBitmap( (StgPtr)payload,
// Default: standard argument pattern from the global bitmap table.
234 checkSmallBitmap( (StgPtr)payload,
235 BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
// Tag/arity invariant: functions with arity beyond what a pointer tag can
// encode must be untagged; otherwise the tag equals the arity.
240 ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
241 : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
// checkClosure: the main per-object sanity check.  Validates the closure's
// info pointer and all pointer fields (per closure type), and returns the
// object's size in words so heap walkers can step to the next object.
// NOTE(review): many case labels, declarations and braces are elided in
// this listing — fragment only; comments on elided cases are inferred from
// the visible code and should be confirmed against the full source.
246 checkClosure( StgClosure* p )
248 const StgInfoTable *info;
250 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
// Strip any pointer tag before inspecting the object.
252 p = UNTAG_CLOSURE(p);
253 /* Is it a static closure (i.e. in the data segment)? */
254 if (!HEAP_ALLOCED(p)) {
255 ASSERT(closure_STATIC(p));
257 ASSERT(!closure_STATIC(p));
260 info = p->header.info;
// A forwarding pointer means GC left an evacuated object here — that
// should never be visible to the sanity checker.
262 if (IS_FORWARDING_PTR(info)) {
263 barf("checkClosure: found EVACUATED closure %d", info->type);
265 info = INFO_PTR_TO_STRUCT(info);
267 switch (info->type) {
// MVars: head/tail/value must all look like closure pointers.
272 StgMVar *mvar = (StgMVar *)p;
273 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
274 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
275 ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
276 return sizeofW(StgMVar);
// Thunks: check each pointer field per the info-table layout.
287 for (i = 0; i < info->layout.payload.ptrs; i++) {
288 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
290 return thunk_sizeW_fromITBL(info);
307 case IND_OLDGEN_PERM:
314 case CONSTR_NOCAF_STATIC:
// Generic pointers-first layouts: check every pointer word.
319 for (i = 0; i < info->layout.payload.ptrs; i++) {
320 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
322 return sizeW_fromITBL(info);
// Byte-code objects: the three component arrays must be valid pointers.
326 StgBCO *bco = (StgBCO *)p;
327 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
328 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
329 ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
330 return bco_sizeW(bco);
333 case IND_STATIC: /* (1, 0) closure */
334 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
335 return sizeW_fromITBL(info);
338 /* deal with these specially - the info table isn't
339 * representative of the actual layout.
341 { StgWeak *w = (StgWeak *)p;
342 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
343 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
344 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
346 ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
348 return sizeW_fromITBL(info);
352 ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
353 return THUNK_SELECTOR_sizeW();
357 /* we don't expect to see any of these after GC
358 * but they might appear during execution
360 StgInd *ind = (StgInd *)p;
361 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
362 return sizeofW(StgInd);
// Stack-frame types must never appear as free-standing heap objects.
372 case ATOMICALLY_FRAME:
373 case CATCH_RETRY_FRAME:
374 case CATCH_STM_FRAME:
375 barf("checkClosure: stack frame");
// AP/PAP: delegate to checkPAP for the function + argument payload.
379 StgAP* ap = (StgAP *)p;
380 checkPAP (ap->fun, ap->payload, ap->n_args);
386 StgPAP* pap = (StgPAP *)p;
387 checkPAP (pap->fun, pap->payload, pap->n_args);
388 return pap_sizeW(pap);
// AP_STACK: the payload is a saved stack chunk — check it as such.
393 StgAP_STACK *ap = (StgAP_STACK *)p;
394 ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
395 checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
396 return ap_stack_sizeW(ap);
// ARR_WORDS carries no pointers; only its size matters.
400 return arr_words_sizeW((StgArrWords *)p);
402 case MUT_ARR_PTRS_CLEAN:
403 case MUT_ARR_PTRS_DIRTY:
404 case MUT_ARR_PTRS_FROZEN:
405 case MUT_ARR_PTRS_FROZEN0:
407 StgMutArrPtrs* a = (StgMutArrPtrs *)p;
409 for (i = 0; i < a->ptrs; i++) {
410 ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
412 return mut_arr_ptrs_sizeW(a);
// TSOs get their own dedicated checker (stack bounds + stack chunk).
416 checkTSO((StgTSO *)p);
417 return tso_sizeW((StgTSO *)p);
// STM support structures: check their doubly-linked/queue pointers.
419 case TVAR_WATCH_QUEUE:
421 StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
422 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
423 ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
424 return sizeofW(StgTVarWatchQueue);
427 case INVARIANT_CHECK_QUEUE:
429 StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
430 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
431 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
432 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
433 return sizeofW(StgInvariantCheckQueue);
436 case ATOMIC_INVARIANT:
438 StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
439 ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
440 ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
441 return sizeofW(StgAtomicInvariant);
446 StgTVar *tv = (StgTVar *)p;
447 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
448 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
449 return sizeofW(StgTVar);
// Transaction record chunks: every entry's tvar and both values.
455 StgTRecChunk *tc = (StgTRecChunk *)p;
456 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
457 for (i = 0; i < tc -> next_entry_idx; i ++) {
458 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
459 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
460 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
462 return sizeofW(StgTRecChunk);
467 StgTRecHeader *trec = (StgTRecHeader *)p;
468 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
469 ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
470 return sizeofW(StgTRecHeader);
// Any unrecognised closure type is a fatal sanity failure.
474 barf("checkClosure (closure type %d)", info->type);
479 /* -----------------------------------------------------------------------------
482 After garbage collection, the live heap is in a state where we can
483 run through and check that all the pointers point to the right
484 place. This function starts at a given position and sanity-checks
485 all the objects in the remainder of the chain.
486 -------------------------------------------------------------------------- */
// checkHeap: walk every object in the given block chain, checking each
// closure and asserting the minimum heap-object size.  Between objects it
// skips slop words that do not look like info pointers.
489 checkHeap(bdescr *bd)
493 #if defined(THREADED_RTS)
494 // heap sanity checking doesn't work with SMP, because we can't
495 // zero the slop (see Updates.h).
// NOTE(review): the early return for THREADED_RTS is elided in this listing.
499 for (; bd != NULL; bd = bd->link) {
501 while (p < bd->free) {
502 nat size = checkClosure((StgClosure *)p);
503 /* This is the smallest size of closure that can live in the heap */
504 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
// Skip over slop: words below 0x1000 or not resembling info pointers.
508 while (p < bd->free &&
509 (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
// checkHeapChunk: like checkHeap but for an explicit [start, end) region;
// assumes the region is densely packed with closures (no slop skipping).
515 checkHeapChunk(StgPtr start, StgPtr end)
520 for (p=start; p<end; p+=size) {
521 ASSERT(LOOKS_LIKE_INFO_PTR(*p));
522 size = checkClosure((StgClosure *)p);
523 /* This is the smallest size of closure that can live in the heap. */
524 ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
// checkLargeObjects: each block on a large-object list holds exactly one
// closure at bd->start; pinned blocks are skipped (their contents are
// arbitrary bytes, not closures).
529 checkLargeObjects(bdescr *bd)
532 if (!(bd->flags & BF_PINNED)) {
533 checkClosure((StgClosure *)bd->start);
// checkTSO: validate a thread's stack pointer bounds and sanity-check its
// stack contents.  Relocated TSOs are followed to their new home; killed
// threads are skipped entirely.
540 checkTSO(StgTSO *tso)
543 StgPtr stack = tso->stack;
544 StgOffset stack_size = tso->stack_size;
545 StgPtr stack_end = stack + stack_size;
547 if (tso->what_next == ThreadRelocated) {
548 checkTSO(tso->_link);
552 if (tso->what_next == ThreadKilled) {
553 /* The garbage collector doesn't bother following any pointers
554 * from dead threads, so don't check sanity here.
// sp must lie within the stack; then the live frames get checked.
559 ASSERT(stack <= sp && sp < stack_end);
561 checkStackChunk(sp, stack_end);
565 Check that all TSOs have been evacuated.
566 Optionally also check the sanity of the TSOs.
569 checkGlobalTSOList (rtsBool checkTSOs)
574 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
575 for (tso=generations[g].threads; tso != END_TSO_QUEUE;
576 tso = tso->global_link) {
577 ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
578 ASSERT(get_itbl(tso)->type == TSO);
// Follow relocation forwarding until we reach the live TSO.
582 while (tso->what_next == ThreadRelocated) {
586 // If this TSO is dirty and in an old generation, it better
587 // be on the mutable list.
588 if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
// TSO_MARKED was set by checkMutableList; consume it here so a TSO
// wrongly absent from the mutable list trips the assertion.
589 ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
590 tso->flags &= ~TSO_MARKED;
596 /* -----------------------------------------------------------------------------
597 Check mutable list sanity.
598 -------------------------------------------------------------------------- */
// checkMutableList: every entry on generation 'gen's mutable list must be
// either static or actually reside in that generation.  TSOs found here get
// TSO_MARKED set, which checkGlobalTSOList later verifies and clears.
601 checkMutableList( bdescr *mut_bd, nat gen )
607 for (bd = mut_bd; bd != NULL; bd = bd->link) {
608 for (q = bd->start; q < bd->free; q++) {
609 p = (StgClosure *)*q;
610 ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
611 if (get_itbl(p)->type == TSO) {
612 ((StgTSO *)p)->flags |= TSO_MARKED;
// checkMutableLists: check the global and per-capability mutable lists of
// every generation, then cross-check the global TSO list (optionally with
// full per-TSO sanity checks).
619 checkMutableLists (rtsBool checkTSOs)
623 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
624 checkMutableList(generations[g].mut_list, g);
625 for (i = 0; i < n_capabilities; i++) {
626 checkMutableList(capabilities[i].mut_lists[g], g);
629 checkGlobalTSOList(checkTSOs);
633 Check the static objects list.
636 checkStaticObjects ( StgClosure* static_objects )
638 StgClosure *p = static_objects;
641 while (p != END_OF_STATIC_LIST) {
644 switch (info->type) {
647 StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);
649 ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
650 ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
651 p = *IND_STATIC_LINK((StgClosure *)p);
656 p = *THUNK_STATIC_LINK((StgClosure *)p);
660 p = *FUN_STATIC_LINK((StgClosure *)p);
664 p = *STATIC_LINK(info,(StgClosure *)p);
668 barf("checkStaticObjetcs: strange closure %p (%s)",
674 /* Nursery sanity check */
// checkNurserySanity: verify the nursery block chain's back-links are
// consistent and that the counted blocks match nursery->n_blocks.
676 checkNurserySanity (nursery *nursery)
682 for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
683 ASSERT(bd->u.back == prev);
685 blocks += bd->blocks;
688 ASSERT(blocks == nursery->n_blocks);
692 /* Full heap sanity check. */
// checkSanity: top-level entry point.  Verifies per-generation block
// counts, optionally walks the heap and large objects, checks nurseries
// and the free list, then the mutable lists (always checking TSO stacks in
// THREADED_RTS, since checkHeap() is a no-op there).
694 checkSanity( rtsBool check_heap )
698 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
699 ASSERT(countBlocks(generations[g].blocks)
700 == generations[g].n_blocks);
701 ASSERT(countBlocks(generations[g].large_objects)
702 == generations[g].n_large_blocks);
704 checkHeap(generations[g].blocks);
706 checkLargeObjects(generations[g].large_objects);
709 for (n = 0; n < n_capabilities; n++) {
710 checkNurserySanity(&nurseries[n]);
713 checkFreeListSanity();
715 #if defined(THREADED_RTS)
716 // always check the stacks in threaded mode, because checkHeap()
717 // does nothing in this case.
718 checkMutableLists(rtsTrue);
// Non-threaded: only check TSO stacks when the heap walk was skipped.
// NOTE(review): the #else/#endif and conditional lines are elided here.
721 checkMutableLists(rtsFalse);
723 checkMutableLists(rtsTrue);
728 // If memInventory() calculates that we have a memory leak, this
729 // function will try to find the block(s) that are leaking by marking
730 // all the ones that we know about, and search through memory to find
731 // blocks that are not marked. In the debugger this can help to give
732 // us a clue about what kind of block leaked. In the future we might
733 // annotate blocks with their allocation site to give more helpful
736 findMemoryLeak (void)
// Mark every block reachable from the known roots; anything left unmarked
// afterwards is a leak candidate reported by reportUnmarkedBlocks().
739 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
740 for (i = 0; i < n_capabilities; i++) {
741 markBlocks(capabilities[i].mut_lists[g]);
743 markBlocks(generations[g].mut_list);
744 markBlocks(generations[g].blocks);
745 markBlocks(generations[g].large_objects);
748 for (i = 0; i < n_capabilities; i++) {
749 markBlocks(nurseries[i].blocks);
// Profiling/arena marking is disabled here; kept for reference.
754 // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
755 // markRetainerBlocks();
759 // count the blocks allocated by the arena allocator
761 // markArenaBlocks();
763 // count the blocks containing executable memory
764 markBlocks(exec_block);
766 reportUnmarkedBlocks();
770 /* -----------------------------------------------------------------------------
771 Memory leak detection
773 memInventory() checks for memory leaks by counting up all the
774 blocks we know about and comparing that to the number of blocks
775 allegedly floating around in the system.
776 -------------------------------------------------------------------------- */
778 // Useful for finding partially full blocks in gdb
779 void findSlop(bdescr *bd);
// findSlop: debugging aid — print any block group in the chain whose
// unused tail ("slop") exceeds 1KB.
780 void findSlop(bdescr *bd)
784 for (; bd != NULL; bd = bd->link) {
785 slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
786 if (slop > (1024/sizeof(W_))) {
787 debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
788 bd->start, bd, slop / (1024/sizeof(W_)));
// genBlocks: verify a generation's block counts against its lists, then
// return its total block count (ordinary + old + allocated large blocks).
794 genBlocks (generation *gen)
796 ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
797 ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
798 return gen->n_blocks + gen->n_old_blocks +
799 countAllocdBlocks(gen->large_objects);
// memInventory: memory-leak detector.  Counts every block the RTS knows
// about (generations, nurseries, retainer/arena/exec blocks, free list)
// and compares the total against mblocks_allocated.  With 'show' (or on a
// detected leak) a per-category breakdown is printed.
// NOTE(review): accumulator initialisation and several conditionals are
// elided in this listing — fragment only.
803 memInventory (rtsBool show)
// gen_blocks is a VLA sized by the runtime generation count.
806 lnat gen_blocks[RtsFlags.GcFlags.generations];
807 lnat nursery_blocks, retainer_blocks,
808 arena_blocks, exec_blocks;
809 lnat live_blocks = 0, free_blocks = 0;
812 // count the blocks we current have
814 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
816 for (i = 0; i < n_capabilities; i++) {
817 gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
819 gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
820 gen_blocks[g] += genBlocks(&generations[g]);
824 for (i = 0; i < n_capabilities; i++) {
825 ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
826 nursery_blocks += nurseries[i].n_blocks;
// Retainer profiling blocks only count when that profile mode is active.
831 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
832 retainer_blocks = retainerStackBlocks();
836 // count the blocks allocated by the arena allocator
837 arena_blocks = arenaBlocks();
839 // count the blocks containing executable memory
840 exec_blocks = countAllocdBlocks(exec_block);
842 /* count the blocks on the free list */
843 free_blocks = countFreeList();
846 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
847 live_blocks += gen_blocks[g];
849 live_blocks += nursery_blocks +
850 + retainer_blocks + arena_blocks + exec_blocks;
// MB(n): convert a block count to megabytes for the report below.
852 #define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
// Leak iff accounted blocks differ from what the megablock allocator
// says it handed out.
854 leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
859 debugBelch("Memory leak detected:\n");
861 debugBelch("Memory inventory:\n");
863 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
864 debugBelch(" gen %d blocks : %5lu blocks (%lu MB)\n", g,
865 gen_blocks[g], MB(gen_blocks[g]));
867 debugBelch(" nursery : %5lu blocks (%lu MB)\n",
868 nursery_blocks, MB(nursery_blocks));
869 debugBelch(" retainer : %5lu blocks (%lu MB)\n",
870 retainer_blocks, MB(retainer_blocks));
871 debugBelch(" arena blocks : %5lu blocks (%lu MB)\n",
872 arena_blocks, MB(arena_blocks));
873 debugBelch(" exec : %5lu blocks (%lu MB)\n",
874 exec_blocks, MB(exec_blocks));
875 debugBelch(" free : %5lu blocks (%lu MB)\n",
876 free_blocks, MB(free_blocks));
877 debugBelch(" total : %5lu blocks (%lu MB)\n",
878 live_blocks + free_blocks, MB(live_blocks+free_blocks));
880 debugBelch("\n in system : %5lu blocks (%lu MB)\n",
881 mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
889 ASSERT(n_alloc_blocks == live_blocks);