/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"
/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */
static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void  checkClosureShallow ( StgClosure * );
/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            // a clear bit marks a pointer word
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}
static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp, bitmap;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        bitmap = large_bitmap->bitmap[bmp];
        for (j = 0; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}
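/* A minimal sketch (documentation only, not part of the RTS) of how the
 * bitmap walkers above interpret liveness masks.  Bit i of the bitmap
 * describes payload word i, and a *clear* bit means "this word is a
 * pointer".  The function name and the mask below are hypothetical.
 */
#if 0
static void exampleSmallBitmapWalk (StgPtr payload)
{
    StgWord bitmap = 0x6;   /* binary 0110: words 1 and 2 are non-pointers */
    nat     size   = 4;     /* a frame with four payload words             */
    nat     i;

    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            /* words 0 and 3 are pointers, so only they get checked */
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}
#endif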
/*
 * Check that a closure looks like a valid closure, without checking its
 * payload; used to avoid recursion between checking PAPs and checking
 * stack chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}
// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
            RET_DYN_NONPTR_REGS_SIZE +
            RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through to the small-bitmap check */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
      // small bitmap cases (<= 32 entries)
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO:
    {
        StgBCO *bco;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}
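/* Sketch of the frame layout the small-bitmap cases above rely on
 * (documentation only; the helper name is hypothetical).  A RET_SMALL
 * frame is one info-table word followed by `size` payload words described
 * by the info table's bitmap, which is why those cases return 1 + size:
 * that is the offset, in words, of the next frame up the stack.
 *
 *     c[0]         info pointer (identifies the frame and its bitmap)
 *     c[1..size]   payload words; pointers where the bitmap bit is 0
 */
#if 0
static StgOffset exampleSmallBitmapFrameSize (StgPtr c)
{
    const StgRetInfoTable *info = get_ret_itbl((StgClosure *)c);
    nat size = BITMAP_SIZE(info->i.layout.bitmap);

    checkSmallBitmap((StgPtr)c + 1, BITMAP_BITS(info->i.layout.bitmap), size);
    return 1 + size;    /* skip the info word plus the payload */
}
#endif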
// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}
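/* Pointer-tagging background for the ASSERT above (a sketch; the helper
 * name is hypothetical).  Closure pointers are word-aligned, so their low
 * bits (up to TAG_MASK) are free to carry a tag: for functions the tag is
 * the arity when it fits in the tag bits, and 0 otherwise.
 */
#if 0
static void exampleTagCheck (StgClosure *tagged_fun)
{
    StgWord tag = GET_CLOSURE_TAG(tagged_fun);      /* low bits of the ptr */
    StgClosure *fun = UNTAG_CLOSURE(tagged_fun);    /* tag bits masked off */
    StgFunInfoTable *fun_info = get_fun_itbl(fun);

    if (fun_info->f.arity <= TAG_MASK) {
        ASSERT(tag == fun_info->f.arity);   /* small arity: encoded in tag */
    } else {
        ASSERT(tag == 0);                   /* arity too big for the tag   */
    }
}
#endif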
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);

    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK: case THUNK_1_0: case THUNK_0_1:
    case THUNK_1_1: case THUNK_0_2: case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN: case FUN_1_0: case FUN_0_1:
    case FUN_1_1: case FUN_0_2: case FUN_2_0:
    case CONSTR: case CONSTR_1_0: case CONSTR_0_1:
    case CONSTR_1_1: case CONSTR_0_2: case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case THUNK_STATIC:
    case FUN_STATIC:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl(bq->owner)->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || get_itbl(bq->queue)->type == TSO);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl(bq->link)->type == IND ||
               get_itbl(bq->link)->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO:
    {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
    {
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return tso_sizeW((StgTSO *)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}
/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */
void
checkHeap(bdescr *bd)
{
    StgPtr p;

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).
    return;
#endif

    for (; bd != NULL; bd = bd->link) {
        p = bd->start;
        while (p < bd->free) {
            nat size = checkClosure((StgClosure *)p);
            /* This is the smallest size of closure that can live in the heap */
            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
            p += size;

            /* skip over slop */
            while (p < bd->free &&
                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
        }
    }
}
void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}
void
checkLargeObjects(bdescr *bd)
{
    while (bd != NULL) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
        bd = bd->link;
    }
}
void
checkTSO(StgTSO *tso)
{
    StgPtr sp = tso->sp;
    StgPtr stack = tso->stack;
    StgOffset stack_size = tso->stack_size;
    StgPtr stack_end = stack + stack_size;

    if (tso->what_next == ThreadRelocated) {
        checkTSO(tso->_link);
        return;
    }

    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE ||
           tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
           tso->_link->header.info == &stg_TSO_info);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));

    ASSERT(stack <= sp && sp < stack_end);

    checkStackChunk(sp, stack_end);
}
/* -----------------------------------------------------------------------------
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
   -------------------------------------------------------------------------- */

void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso=generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);
            if (checkTSOs) {
                checkTSO(tso);
            }

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }
        }
    }
}
/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            if (get_itbl(p)->type == TSO) {
                ((StgTSO *)p)->flags |= TSO_MARKED;
            }
        }
    }
}
void
checkMutableLists (rtsBool checkTSOs)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(generations[g].mut_list, g);
        for (i = 0; i < n_capabilities; i++) {
            checkMutableList(capabilities[i].mut_lists[g], g);
        }
    }
    checkGlobalTSOList(checkTSOs);
}
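/* How the TSO_MARKED handshake between the two functions above works
 * (a sketch; the fragments below restate the relevant lines).
 * checkMutableList() sets TSO_MARKED on every TSO it finds on a mutable
 * list; checkGlobalTSOList() then asserts that every dirty TSO outside
 * generation 0 carries the mark, i.e. was on some mutable list, and
 * clears it again so the flags are unchanged once the pass completes.
 */
#if 0
/* phase 1, in checkMutableList(): mark TSOs found on a mutable list */
if (get_itbl(p)->type == TSO) {
    ((StgTSO *)p)->flags |= TSO_MARKED;
}

/* phase 2, in checkGlobalTSOList(): verify and clear the mark */
if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
    ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
    tso->flags &= ~TSO_MARKED;
}
#endif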
/* -----------------------------------------------------------------------------
   Check the static objects list.
   -------------------------------------------------------------------------- */

void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}
/* Nursery sanity check */
static void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}
/* Full heap sanity check. */
void
checkSanity( rtsBool check_heap )
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        ASSERT(countBlocks(generations[g].blocks)
               == generations[g].n_blocks);
        ASSERT(countBlocks(generations[g].large_objects)
               == generations[g].n_large_blocks);
        if (check_heap) {
            checkHeap(generations[g].blocks);
        }
        checkLargeObjects(generations[g].large_objects);
    }

    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }

    checkFreeListSanity();

#if defined(THREADED_RTS)
    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    checkMutableLists(rtsTrue);
#else
    if (check_heap) {
        checkMutableLists(rtsFalse);
    } else {
        checkMutableLists(rtsTrue);
    }
#endif
}
// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
        }
        markBlocks(generations[g].mut_list);
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
    }

    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //     markRetainerBlocks();
    // }

    // count the blocks allocated by the arena allocator
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;

    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}
/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */
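/* The invariant memInventory() checks, as a worked example (figures are
 * illustrative; they assume a typical 64-bit configuration with 4KB blocks
 * and 1MB megablocks, giving BLOCKS_PER_MBLOCK = 252 usable blocks per
 * megablock once block descriptors are accounted for).  With 10 megablocks
 * allocated:
 *
 *     live_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK
 *                               == 10 * 252
 *                               == 2520 blocks
 *
 * Any block missing from the left-hand side has leaked, and
 * findMemoryLeak() above can then be used to track it down.
 */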
// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}
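/* Worked example for the threshold above (illustrative figures, assuming
 * the usual 4KB blocks): slop is measured in words, so on a 64-bit
 * platform (sizeof(W_) == 8) the cutoff 1024/sizeof(W_) is 128 words,
 * i.e. 1KB.  A 2-block group holds 2 * BLOCK_SIZE_W = 1024 words; if
 * bd->free - bd->start == 700, the slop is 324 words, and 324 / 128
 * rounds down to the reported "2KB slop".
 */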
static lnat
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
        countAllocdBlocks(gen->large_objects);
}
void
memInventory (rtsBool show)
{
  nat g, i;
  lnat gen_blocks[RtsFlags.GcFlags.generations];
  lnat nursery_blocks, retainer_blocks,
       arena_blocks, exec_blocks;
  lnat live_blocks = 0, free_blocks = 0;
  rtsBool leak;

  // count the blocks we currently have

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      gen_blocks[g] = 0;
      for (i = 0; i < n_capabilities; i++) {
          gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
      }
      gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
      gen_blocks[g] += genBlocks(&generations[g]);
  }

  nursery_blocks = 0;
  for (i = 0; i < n_capabilities; i++) {
      ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
      nursery_blocks += nurseries[i].n_blocks;
  }

  retainer_blocks = 0;
#ifdef PROFILING
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
      retainer_blocks = retainerStackBlocks();
  }
#endif

  // count the blocks allocated by the arena allocator
  arena_blocks = arenaBlocks();

  // count the blocks containing executable memory
  exec_blocks = countAllocdBlocks(exec_block);

  /* count the blocks on the free list */
  free_blocks = countFreeList();

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      live_blocks += gen_blocks[g];
  }
  live_blocks += nursery_blocks +
      retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

  leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

  if (show || leak)
  {
      if (leak) {
          debugBelch("Memory leak detected:\n");
      } else {
          debugBelch("Memory inventory:\n");
      }
      for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
          debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                     gen_blocks[g], MB(gen_blocks[g]));
      }
      debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
                 nursery_blocks, MB(nursery_blocks));
      debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
                 retainer_blocks, MB(retainer_blocks));
      debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
                 arena_blocks, MB(arena_blocks));
      debugBelch("  exec         : %5lu blocks (%lu MB)\n",
                 exec_blocks, MB(exec_blocks));
      debugBelch("  free         : %5lu blocks (%lu MB)\n",
                 free_blocks, MB(free_blocks));
      debugBelch("  total        : %5lu blocks (%lu MB)\n",
                 live_blocks + free_blocks, MB(live_blocks+free_blocks));
      if (leak) {
          debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
                     mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
      }
  }

  if (leak) {
      debugBelch("\n");
      findMemoryLeak();
  }
  ASSERT(n_alloc_blocks == live_blocks);
}

#endif /* DEBUG */