/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2006
 *
 * Sanity checking code for the heap and stack.
 *
 * Used when debugging: check that everything is reasonable.
 *
 *    - All things that are supposed to be pointers look like pointers.
 *
 *    - Objects in text space are marked as static closures; those
 *      in the heap are dynamic.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#ifdef DEBUG                                                   /* whole file */

#include "RtsUtils.h"
#include "sm/Storage.h"
#include "sm/BlockAlloc.h"
#include "GCThread.h"
#include "Sanity.h"
#include "Schedule.h"
#include "Apply.h"
#include "Printer.h"
#include "Arena.h"
#include "RetainerProfile.h"

/* -----------------------------------------------------------------------------
   Forward decls.
   -------------------------------------------------------------------------- */

static void  checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
static void  checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
static void  checkClosureShallow ( StgClosure * );
static void  checkSTACK          ( StgStack *stack );

/* -----------------------------------------------------------------------------
   Check stack sanity
   -------------------------------------------------------------------------- */

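/* Note: in GHC's layout bitmaps a 0 bit denotes a pointer word (which we
 * sanity-check) and a 1 bit denotes a non-pointer word, which is why the
 * checks below only recurse when (bitmap & 1) == 0.
 */
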
static void
checkSmallBitmap( StgPtr payload, StgWord bitmap, nat size )
{
    nat i;

    for (i = 0; i < size; i++, bitmap >>= 1) {
        if ((bitmap & 1) == 0) {
            checkClosureShallow((StgClosure *)payload[i]);
        }
    }
}

static void
checkLargeBitmap( StgPtr payload, StgLargeBitmap* large_bitmap, nat size )
{
    StgWord bmp;
    nat i, j;

    i = 0;
    for (bmp = 0; i < size; bmp++) {
        StgWord bitmap = large_bitmap->bitmap[bmp];
        j = 0;
        for (; i < size && j < BITS_IN(W_); j++, i++, bitmap >>= 1) {
            if ((bitmap & 1) == 0) {
                checkClosureShallow((StgClosure *)payload[i]);
            }
        }
    }
}

/*
 * Check that it looks like a valid closure, without checking its payload.
 * Used to avoid recursion between checking PAPs and checking stack chunks.
 */

static void
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
        ASSERT(closure_STATIC(q));
    } else {
        ASSERT(!closure_STATIC(q));
    }
}

// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
            RET_DYN_NONPTR_REGS_SIZE +
            RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

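    /* A RET_DYN frame carries its liveness information in the frame itself
     * rather than in the info table: a fixed-size register save area
     * described by the liveness mask, followed by a dynamic number of
     * non-pointer and pointer words (a reading of the layout walked above,
     * not a spec).
     */
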
    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        /* fall through: the rest of the frame is a small bitmap */
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;
        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",
             c, info->i.type);
    }
}

// check sections of stack between update frames
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p;

    p = sp;
    while (p < stack_end) {
        p += checkStackFrame( p );
    }
    // ASSERT( p == stack_end ); -- HWL
}

static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{
    StgClosure *fun;
    StgFunInfoTable *fun_info;

    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(fun_info->f.b.bitmap), n_args );
        break;
    case ARG_GEN_BIG:
        checkLargeBitmap( (StgPtr)payload,
                          GET_FUN_LARGE_BITMAP(fun_info),
                          n_args );
        break;
    case ARG_BCO:
        checkLargeBitmap( (StgPtr)payload,
                          BCO_BITMAP(fun),
                          n_args );
        break;
    default:
        checkSmallBitmap( (StgPtr)payload,
                          BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                          n_args );
        break;
    }

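    /* Pointer-tagging invariant: a function whose arity fits in the tag
     * bits (arity <= TAG_MASK) is tagged with its arity, while a function
     * whose arity doesn't fit must be left untagged.
     */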
    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}

StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
    }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
        }
        return sizeW_fromITBL(info);
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl(bq->owner)->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl(bq->link)->type == IND ||
               get_itbl(bq->link)->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
        return sizeW_fromITBL(info);

    case WEAK:
        /* deal with these specially - the info table isn't
         * representative of the actual layout.
         */
    {
        StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
    }

    case THUNK_SELECTOR:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
        return THUNK_SELECTOR_sizeW();

    case IND:
    {
        /* we don't expect to see any of these after GC
         * but they might appear during execution
         */
        StgInd *ind = (StgInd *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
        return sizeofW(StgInd);
    }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
        barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
        return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        StgMutArrPtrs* a = (StgMutArrPtrs *)p;
        nat i;
        for (i = 0; i < a->ptrs; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
        }
        return mut_arr_ptrs_sizeW(a);
    }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
    {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc->next_entry_idx; i++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
    }

    default:
        barf("checkClosure (closure type %d)", info->type);
    }
}

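/* Note that checkClosure returns the closure's size in words; this is what
 * lets the heap walkers below step from one object to the next.
 */
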
/* -----------------------------------------------------------------------------
   Check Heap Sanity

   After garbage collection, the live heap is in a state where we can
   run through and check that all the pointers point to the right
   place.  This function starts at a given position and sanity-checks
   all the objects in the remainder of the chain.
   -------------------------------------------------------------------------- */

void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if (!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop: words too small to be info pointers, or
                   that don't look like info pointers at all */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}

void
checkHeapChunk(StgPtr start, StgPtr end)
{
    StgPtr p;
    nat size;

    for (p = start; p < end; p += size) {
        ASSERT(LOOKS_LIKE_INFO_PTR(*p));
        size = checkClosure((StgClosure *)p);
        /* This is the smallest size of closure that can live in the heap. */
        ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
    }
}

void
checkLargeObjects(bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        if (!(bd->flags & BF_PINNED)) {
            checkClosure((StgClosure *)bd->start);
        }
    }
}

static void
checkSTACK (StgStack *stack)
{
    StgPtr sp = stack->sp;
    StgOffset stack_size = stack->stack_size;
    StgPtr stack_end = stack->stack + stack_size;

    ASSERT(stack->stack <= sp && sp <= stack_end);

    checkStackChunk(sp, stack_end);
}

void
checkTSO(StgTSO *tso)
{
    if (tso->what_next == ThreadKilled) {
        /* The garbage collector doesn't bother following any pointers
         * from dead threads, so don't check sanity here.
         */
        return;
    }

    ASSERT(tso->_link == END_TSO_QUEUE ||
           tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
           tso->_link->header.info == &stg_TSO_info);

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));

    // XXX are we checking the stack twice?
    checkSTACK(tso->stackobj);
}

/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/

void
checkGlobalTSOList (rtsBool checkTSOs)
{
    StgTSO *tso;
    nat g;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (tso = generations[g].threads; tso != END_TSO_QUEUE;
             tso = tso->global_link) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
            ASSERT(get_itbl(tso)->type == TSO);

            if (checkTSOs)
                checkTSO(tso);

            // If this TSO is dirty and in an old generation, it better
            // be on the mutable list.
            if (tso->dirty) {
                ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                tso->flags &= ~TSO_MARKED;
            }

            {
                StgStack *stack;
                StgUnderflowFrame *frame;

                stack = tso->stackobj;
                while (1) {
                    if (stack->dirty & 1) {
                        ASSERT(Bdescr((P_)stack)->gen_no == 0
                               || (stack->dirty & TSO_MARKED));
                        stack->dirty &= ~TSO_MARKED;
                    }
                    frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                  - sizeofW(StgUnderflowFrame));
                    if (frame->info != &stg_stack_underflow_frame_info
                        || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                    stack = frame->next_chunk;
                }
            }
        }
    }
}

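/* Stacks are chunked: the last frame of each chunk is an underflow frame
 * whose next_chunk field points at the next chunk.  The walk above follows
 * stg_stack_underflow_frame_info frames until the final chunk (marked by
 * END_TSO_QUEUE) is reached, clearing the dirty marks as it goes.
 */
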
/* -----------------------------------------------------------------------------
   Check mutable list sanity.
   -------------------------------------------------------------------------- */

static void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);

            // Mark TSOs and STACKs so that checkGlobalTSOList can verify
            // that dirty objects in old generations are on a mutable list.
            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}

static void
checkLocalMutableLists (nat cap_no)
{
    nat g;
    // generation 0 has no mutable list
    for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
        checkMutableList(capabilities[cap_no].mut_lists[g], g);
    }
}

static void
checkMutableLists (void)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        checkLocalMutableLists(i);
    }
}

/*
  Check the static objects list.
*/

void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {
        case IND_STATIC:
        {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
        }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}

/* Nursery sanity check */
void
checkNurserySanity (nursery *nursery)
{
    bdescr *bd, *prev;
    nat blocks = 0;

    prev = NULL;
    for (bd = nursery->blocks; bd != NULL; bd = bd->link) {
        ASSERT(bd->gen == g0);
        ASSERT(bd->u.back == prev);
        prev = bd;
        blocks += bd->blocks;
    }

    ASSERT(blocks == nursery->n_blocks);
}

static void checkGeneration (generation *gen,
                             rtsBool after_major_gc USED_IF_THREADS)
{
    nat n;
    gen_workspace *ws;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).  However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (n = 0; n < n_capabilities; n++) {
        ws = &gc_threads[n]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
}

/* Full heap sanity check. */
static void checkFullHeap (rtsBool after_major_gc)
{
    nat g, n;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        checkGeneration(&generations[g], after_major_gc);
    }
    for (n = 0; n < n_capabilities; n++) {
        checkNurserySanity(&nurseries[n]);
    }
}

void checkSanity (rtsBool after_gc, rtsBool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(rtsTrue);
    }
}

// If memInventory() calculates that we have a memory leak, this
// function will try to find the block(s) that are leaking by marking
// all the ones that we know about, and search through memory to find
// blocks that are not marked.  In the debugger this can help to give
// us a clue about what kind of block leaked.  In the future we might
// annotate blocks with their allocation site to give more helpful
// info.
static void
findMemoryLeak (void)
{
    nat g, i;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (i = 0; i < n_capabilities; i++) {
            markBlocks(capabilities[i].mut_lists[g]);
            markBlocks(gc_threads[i]->gens[g].part_list);
            markBlocks(gc_threads[i]->gens[g].scavd_list);
            markBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        markBlocks(generations[g].blocks);
        markBlocks(generations[g].large_objects);
    }

    for (i = 0; i < n_capabilities; i++) {
        markBlocks(nurseries[i].blocks);
    }

#ifdef PROFILING
    // TODO:
    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    //    markRetainerBlocks();
    // }
#endif

    // count the blocks allocated by the arena allocator
    // TODO:
    // markArenaBlocks();

    // count the blocks containing executable memory
    markBlocks(exec_block);

    reportUnmarkedBlocks();
}

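/* markBlocks flags every block descriptor reachable from the given chain;
 * reportUnmarkedBlocks then scans the allocated megablocks and prints any
 * block that nothing above claimed to own, which is presumably the leaked
 * storage (a sketch of the mechanism as used here, not a spec of
 * BlockAlloc's internals).
 */
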
void
checkRunQueue(Capability *cap)
{
    StgTSO *prev, *tso;
    prev = END_TSO_QUEUE;
    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE;
         prev = tso, tso = tso->_link) {
        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
        ASSERT(tso->block_info.prev == prev);
    }
    ASSERT(cap->run_queue_tl == prev);
}

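/* The run queue is doubly linked: _link points forwards and, for TSOs on a
 * run queue, block_info.prev is reused as the back pointer.  The check
 * above verifies that both directions agree and that the tail pointer
 * really is the last element.
 */
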
/* -----------------------------------------------------------------------------
   Memory leak detection

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

// Useful for finding partially full blocks in gdb
void findSlop(bdescr *bd);
void findSlop(bdescr *bd)
{
    lnat slop;

    for (; bd != NULL; bd = bd->link) {
        slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
        if (slop > (1024/sizeof(W_))) {
            debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
                       bd->start, bd, slop / (1024/sizeof(W_)));
        }
    }
}

static lnat
genBlocks (generation *gen)
{
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    return gen->n_blocks + gen->n_old_blocks +
           countAllocdBlocks(gen->large_objects);
}

void
memInventory (rtsBool show)
{
    nat g, i;
    lnat gen_blocks[RtsFlags.GcFlags.generations];
    lnat nursery_blocks, retainer_blocks,
         arena_blocks, exec_blocks;
    lnat live_blocks = 0, free_blocks = 0;
    rtsBool leak;

    // count the blocks we currently have

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen_blocks[g] = 0;
        for (i = 0; i < n_capabilities; i++) {
            gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
            gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
        }
        gen_blocks[g] += genBlocks(&generations[g]);
    }

    nursery_blocks = 0;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
        nursery_blocks += nurseries[i].n_blocks;
    }

    retainer_blocks = 0;
#ifdef PROFILING
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        retainer_blocks = retainerStackBlocks();
    }
#endif

    // count the blocks allocated by the arena allocator
    arena_blocks = arenaBlocks();

    // count the blocks containing executable memory
    exec_blocks = countAllocdBlocks(exec_block);

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        live_blocks += gen_blocks[g];
    }
    live_blocks += nursery_blocks +
                   retainer_blocks + arena_blocks + exec_blocks;

#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
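/* MB(n) converts a block count to megabytes: n blocks of BLOCK_SIZE_W
 * words, at sizeof(W_) bytes per word, divided by 2^20 bytes per megabyte.
 */
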
    leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

    if (show || leak)
    {
        if (leak) {
            debugBelch("Memory leak detected:\n");
        } else {
            debugBelch("Memory inventory:\n");
        }
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
                       gen_blocks[g], MB(gen_blocks[g]));
        }
        debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
                   nursery_blocks, MB(nursery_blocks));
        debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
                   retainer_blocks, MB(retainer_blocks));
        debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
                   arena_blocks, MB(arena_blocks));
        debugBelch("  exec         : %5lu blocks (%lu MB)\n",
                   exec_blocks, MB(exec_blocks));
        debugBelch("  free         : %5lu blocks (%lu MB)\n",
                   free_blocks, MB(free_blocks));
        debugBelch("  total        : %5lu blocks (%lu MB)\n",
                   live_blocks + free_blocks, MB(live_blocks + free_blocks));
        if (leak) {
            debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
                       mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
        }
    }

    if (leak) {
        debugBelch("\n");
        findMemoryLeak();
    }
    ASSERT(n_alloc_blocks == live_blocks);
    ASSERT(!leak);
}

#endif /* DEBUG */