/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures, those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
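
/* Note: this file is compiled only when DEBUG is defined (see below).
 * In a debug RTS these checks are typically driven by the sanity flag
 * (+RTS -DS), which causes checkSanity() to be run around garbage
 * collections.
 */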

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void checkClosureShallow ( StgClosure * );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    // a clear bit in the bitmap means "this word is a pointer", so only
    // those entries are checked as closures
    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}
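
// As checkSmallBitmap, but the liveness bits live in an array of
// StgWords, so we consume at most BITS_IN(W_) payload entries per
// bitmap word.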
static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that a closure looks like a valid closure - without checking
 * its payload.  Used to avoid recursion between checking PAPs and
 * checking stack chunks.
 */
static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
            RET_DYN_NONPTR_REGS_SIZE +
            RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO:
    {
        StgBCO *bco;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgClosure *p;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    p = (StgClosure *)payload;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    // the tagging invariant: a pointer to a function with arity <= TAG_MASK
    // is tagged with the arity; larger arities leave the pointer untagged
    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}
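
// checkPAP is shared by the AP and PAP cases of checkClosure() below;
// the two closure types have identical argument layouts.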

StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl(bq->owner)->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || get_itbl(bq->queue)->type == TSO);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl(bq->link)->type == IND ||
               get_itbl(bq->link)->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO:
    {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
        {
            StgWeak *w = (StgWeak *)p;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
            if (w->link) {
                ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
            }
            return sizeW_fromITBL(info);
        }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        {
            StgInd *ind = (StgInd *)p;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
            return sizeofW(StgInd);
        }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}

/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */
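
// Typically invoked from checkSanity() below, once per generation.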
void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        if (!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}
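
// Check a contiguous range [start, end) of closures: unlike checkHeap()
// there is no slop to skip, so every object must begin with a valid
// info pointer.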
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}

void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->_link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE ||
           tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
           tso->_link->header.info == &stg_TSO_info);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));

    ASSERT(stack <= sp && sp < stack_end);

    checkStackChunk(sp, stack_end);
}

/* -----------------------------------------------------------------------------
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
   -------------------------------------------------------------------------- */

void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso = generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);
            if (checkTSOs) {
                checkTSO(tso);
            }

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }
        }
    }
}

/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */
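
// checkMutableList() marks any TSO it finds with TSO_MARKED;
// checkGlobalTSOList() above then consumes (and clears) the flag to
// verify that every dirty TSO in an old generation was on some
// mutable list.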
void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            if (get_itbl(p)->type == TSO) {
                ((StgTSO *)p)->flags |= TSO_MARKED;
            }
        }
    }
}

void
checkMutableLists (rtsBool checkTSOs)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(generations[g].mut_list, g);
        for (i = 0; i < n_capabilities; i++) {
            checkMutableList(capabilities[i].mut_lists[g], g);
        }
    }
    checkGlobalTSOList(checkTSOs);
}

/* -----------------------------------------------------------------------------
   Check the static objects list.
   -------------------------------------------------------------------------- */

void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {

        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

/* Full heap sanity check. */
void
checkSanity( rtsBool check_heap )
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        ASSERT(countBlocks(generations[g].blocks)
               == generations[g].n_blocks);
        ASSERT(countBlocks(generations[g].large_objects)
               == generations[g].n_large_blocks);
        if (check_heap) {
            checkHeap(generations[g].blocks);
        }
        checkLargeObjects(generations[g].large_objects);
    }

    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }

    checkFreeListSanity();

#if defined(THREADED_RTS)
    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    checkMutableLists(rtsTrue);
#else
    if (check_heap) {
        checkMutableLists(rtsFalse);
    } else {
        checkMutableLists(rtsTrue);
    }
#endif
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
        }
        markBlocks(generations[g].mut_list);
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
    }

    // count the blocks allocated by the retainer profiler
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }

    // count the blocks allocated by the arena allocator
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}
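
/* findMemoryLeak() is invoked automatically by memInventory() (below)
 * when it detects a discrepancy, but it can also be called by hand in
 * a DEBUG build, e.g. (hypothetical gdb session):
 *
 *   (gdb) call findMemoryLeak()
 */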

void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;

    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}

/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}
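
// For example (hypothetical gdb session):
//   (gdb) call findSlop(generations[0].blocks)
// prints every gen-0 block wasting more than 1KB.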

static lnat
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}

void
memInventory (rtsBool show)
{
  nat g, i;
  lnat gen_blocks[RtsFlags.GcFlags.generations];
  lnat nursery_blocks, retainer_blocks,
       arena_blocks, exec_blocks;
  lnat live_blocks = 0, free_blocks = 0;
  rtsBool leak;

  // count the blocks we currently have

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      gen_blocks[g] = 0;
      for (i = 0; i < n_capabilities; i++) {
          gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
      }
      gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
      gen_blocks[g] += genBlocks(&generations[g]);
  }

  nursery_blocks = 0;
  for (i = 0; i < n_capabilities; i++) {
      ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
      nursery_blocks += nurseries[i].n_blocks;
  }

  retainer_blocks = 0;
#ifdef PROFILING
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
      retainer_blocks = retainerStackBlocks();
  }
#endif

  // count the blocks allocated by the arena allocator
  arena_blocks = arenaBlocks();

  // count the blocks containing executable memory
  exec_blocks = countAllocdBlocks(exec_block);

  /* count the blocks on the free list */
  free_blocks = countFreeList();

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      live_blocks += gen_blocks[g];
  }
  live_blocks += nursery_blocks +
                 retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
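
  // MB(n) converts a block count to megabytes.  Illustrative arithmetic,
  // assuming the standard 4KB block size (BLOCK_SIZE_W * sizeof(W_) ==
  // 4096): MB(n) works out to n/256, so e.g. MB(512) == 2.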

  leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

  if (show || leak)
  {
      if (leak) {
          debugBelch("Memory leak detected:\n");
      } else {
          debugBelch("Memory inventory:\n");
      }
      for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
          debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                     gen_blocks[g], MB(gen_blocks[g]));
      }
      debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
                 nursery_blocks, MB(nursery_blocks));
      debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
                 retainer_blocks, MB(retainer_blocks));
      debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
                 arena_blocks, MB(arena_blocks));
      debugBelch("  exec         : %5lu blocks (%lu MB)\n",
                 exec_blocks, MB(exec_blocks));
      debugBelch("  free         : %5lu blocks (%lu MB)\n",
                 free_blocks, MB(free_blocks));
      debugBelch("  total        : %5lu blocks (%lu MB)\n",
                 live_blocks + free_blocks, MB(live_blocks+free_blocks));
      // an mblock is exactly 1MB, so mblocks_allocated needs no MB()
      debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
                 mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
  }

  if (leak) {
      debugBelch("\n");
      findMemoryLeak();
  }
  ASSERT(n_alloc_blocks == live_blocks);
}

#endif /* DEBUG */