/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"

#ifdef DEBUG /* whole file */

#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void  checkClosureShallow ( StgClosure * );
/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */
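/* In GHC's layout bitmaps a 0 bit denotes a pointer word and a 1 bit a
   non-pointer, so the (bitmap & 1) == 0 tests below follow exactly the
   words that are supposed to be pointers. */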
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}
static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp, bitmap;
    nat i = 0, j;

    for (bmp = 0; i < size; bmp++) {
        bitmap = large_bitmap->bitmap[bmp];
        for (j = 0; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}
/*
 * check that it looks like a valid closure - without checking its payload
 * used to avoid recursion between checking PAPs and checking stack
 * chunks.
 */
static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}
// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    StgWord size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p, RET_DYN_LIVENESS(r->liveness), RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
               RET_DYN_NONPTR_REGS_SIZE +
               RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }
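    /* A RET_DYN frame is thus laid out as: the StgRetDyn header (carrying
       the liveness mask), RET_DYN_BITMAP_SIZE words covered by the small
       bitmap, RET_DYN_NONPTR_REGS_SIZE saved register words, then the
       remaining non-pointer words, and finally the pointer words checked
       above. */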
    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through: an update frame has small-bitmap layout too */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO:
    {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }
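    /* Note: a RET_BCO frame keeps the BCO itself in the word after the
       return address, which is why the bitmapped payload starts at c + 2
       rather than c + 1. */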
    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;
    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }
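    /* Which bitmap describes a RET_FUN frame's ret_fun->size argument
       words depends on the function's calling convention: ARG_GEN has a
       small bitmap in its info table, ARG_GEN_BIG a large one, and every
       other fun_type uses one of the standard stg_arg_bitmaps patterns. */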
    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).", c, info->i.type);
    }
}
// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgClosure *p;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    p = (StgClosure *)payload;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }
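    /* Pointer tagging invariant: if the function's arity fits in the tag
       bits (arity <= TAG_MASK), its pointer must be tagged with exactly
       that arity; larger arities leave the pointer untagged. */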
    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
                                        : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;
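    /* During GC, the info pointer of an evacuated closure is overwritten
       with a forwarding pointer, so finding one here means we have
       wandered into from-space. */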
    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }
    case THUNK:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }
    case IND_OLDGEN_PERM:
    case CONSTR_NOCAF_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }
    case BCO:
    {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }
    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);
    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }
    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();
    case IND:
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
    {
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");
    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }
    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }
    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }
    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
        return sizeofW(StgTVarWatchQueue);
    }
    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
        return sizeofW(StgInvariantCheckQueue);
    }
    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
        return sizeofW(StgAtomicInvariant);
    }
    case TVAR:
    {
        StgTVar *tv = (StgTVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
        return sizeofW(StgTVar);
    }
    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }
    case TREC_HEADER:
    {
        StgTRecHeader *trec = (StgTRecHeader *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->enclosing_trec));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec->current_chunk));
        return sizeofW(StgTRecHeader);
    }
    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}
/* -----------------------------------------------------------------------------
   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */
void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;
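            /* Skip over slop: an update can shrink a closure in place, and
               in a DEBUG build the dead words behind it are zeroed, so a
               word that is a small constant or doesn't look like an info
               pointer cannot be the start of the next closure. */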
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
        }
    }
}
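/* checkHeapChunk() below is like checkHeap(), but for a dense run of
   closures with no slop in between: every object boundary must carry a
   valid info pointer. */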
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}
void
checkLargeObjects(bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
    }
}
void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->_link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(stack <= sp && sp < stack_end);

    checkStackChunk(sp, stack_end);
}
/* -----------------------------------------------------------------------------
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
   -------------------------------------------------------------------------- */
void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat s;

    for (s = 0; s < total_steps; s++) {
        for (tso = all_steps[s].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);
            if (checkTSOs) {
                checkTSO(tso);
            }
            while (tso->what_next == ThreadRelocated) {
                tso = tso->_link;
            }

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
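            // (checkMutableList() sets TSO_MARKED on every TSO it finds on
            // a mutable list, so a dirty old-generation TSO must carry the
            // mark here; we clear it again after checking.)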
            if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }
        }
    }
}
/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */
void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            if (get_itbl(p)->type == TSO) {
                ((StgTSO *)p)->flags |= TSO_MARKED;
            }
        }
    }
}
void
checkMutableLists (rtsBool checkTSOs)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(generations[g].mut_list, g);
        for (i = 0; i < n_capabilities; i++) {
            checkMutableList(capabilities[i].mut_lists[g], g);
        }
    }
    checkGlobalTSOList(checkTSOs);
}
/* -----------------------------------------------------------------------------
   Check the static objects list.
   -------------------------------------------------------------------------- */
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    const StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {

        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;
        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}
/* Nursery sanity check */
void
checkNurserySanity( step *stp )
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = stp->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == stp->n_blocks);
    ASSERT(countBlocks(stp->large_objects) == stp->n_large_blocks);
}
/* Full heap sanity check. */
void
checkSanity( rtsBool check_heap )
{
    nat g, s;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
                continue; // g0s0 is the nursery, checked separately below
            }
            ASSERT(countBlocks(generations[g].steps[s].blocks)
                   == generations[g].steps[s].n_blocks);
            ASSERT(countBlocks(generations[g].steps[s].large_objects)
                   == generations[g].steps[s].n_large_blocks);
            if (check_heap) {
                checkHeap(generations[g].steps[s].blocks);
            }
            checkLargeObjects(generations[g].steps[s].large_objects);
        }
    }

    for (s = 0; s < n_capabilities; s++) {
        checkNurserySanity(&nurseries[s]);
    }

    checkFreeListSanity();
#if defined(THREADED_RTS)
    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    checkMutableLists(rtsTrue);
#else
    if (check_heap) {
        checkMutableLists(rtsFalse);
    } else {
        checkMutableLists(rtsTrue);
    }
#endif
}
// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, s, i;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
        }
        markBlocks(generations[g].mut_list);
        for (s = 0; s < generations[g].n_steps; s++) {
            markBlocks(generations[g].steps[s].blocks);
            markBlocks(generations[g].steps[s].large_objects);
        }
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
        markBlocks(nurseries[i].large_objects);
    }
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }

    // count the blocks allocated by the arena allocator
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}
/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */
// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
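        /* slop is in words; 1024/sizeof(W_) words make up 1KB, so the
           test below fires for more than 1KB of slop and the division
           converts words to KB for printing. */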
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}
static lnat
stepBlocks (step *stp)
{
    ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
    ASSERT(countBlocks(stp->large_objects) == stp->n_large_blocks);
    return stp->n_blocks + stp->n_old_blocks +
           countAllocdBlocks(stp->large_objects);
}
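/* A step's storage lives on three lists: its current blocks, the old
   blocks set aside during GC (counted by n_old_blocks), and its large
   objects; all three contribute to its footprint. */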
void
memInventory (rtsBool show)
{
    nat g, s, i;
    step *stp;
    lnat gen_blocks[RtsFlags.GcFlags.generations];
    lnat nursery_blocks, retainer_blocks,
         arena_blocks, exec_blocks;
    lnat live_blocks = 0, free_blocks = 0;
    rtsBool leak;
    // count the blocks we currently have
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
        }
        gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
        for (s = 0; s < generations[g].n_steps; s++) {
            stp = &generations[g].steps[s];
            gen_blocks[g] += stepBlocks(stp);
        }
    }
    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        nursery_blocks += stepBlocks(&nurseries[i]);
    }
    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);
    /* count the blocks on the free list */
    free_blocks = countFreeList();

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
                   retainer_blocks + arena_blocks + exec_blocks;
#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
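    /* Every block is either live (reachable from one of the lists counted
       above), on the free list, or leaked: each megablock contributes
       BLOCKS_PER_MBLOCK usable blocks, and a megablock is exactly 1MB, so
       live + free should account for everything ever allocated. */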
    if (show || leak) {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5lu blocks (%lu MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5lu blocks (%lu MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5lu blocks (%lu MB)\n",
                   live_blocks + free_blocks, MB(live_blocks + free_blocks));
        debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
                   mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);

        if (leak) {
            findMemoryLeak();
        }
    }
    ASSERT(n_alloc_blocks == live_blocks);
}

#endif /* DEBUG */