/* -----------------------------------------------------------------------------
 * $Id: Storage.c,v 1.68 2002/08/16 13:20:36 simonmar Exp $
 *
 * (c) The GHC Team, 1998-1999
 *
 * Storage manager front end
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "Stats.h"
#include "BlockAlloc.h"
#include "MBlock.h"
#include "Weak.h"
#include "Sanity.h"
#include "Arena.h"
#include "OSThreads.h"
#include "StoragePriv.h"

#include "RetainerProfile.h"	// for counting memory blocks (memInventory)

#include <stdlib.h>
#include <string.h>
#ifdef darwin_TARGET_OS
#include <mach-o/getsect.h>

unsigned long macho_etext = 0;
unsigned long macho_edata = 0;

static void
macosx_get_memory_layout(void)
{
    struct segment_command *seg;

    seg = getsegbyname("__TEXT");
    macho_etext = seg->vmaddr + seg->vmsize;
    seg = getsegbyname("__DATA");
    macho_edata = seg->vmaddr + seg->vmsize;
}
#endif
StgClosure    *caf_list = NULL;

bdescr *small_alloc_list;	/* allocate()d small objects */
bdescr *pinned_object_block;    /* allocate pinned objects into this block */
nat alloc_blocks;		/* number of allocate()d blocks since GC */
nat alloc_blocks_lim;		/* approximate limit on alloc_blocks */

StgPtr alloc_Hp    = NULL;	/* next free byte in small_alloc_list */
StgPtr alloc_HpLim = NULL;	/* end of block at small_alloc_list */

generation *generations;	/* all the generations */
generation *g0;			/* generation 0, for convenience */
generation *oldest_gen;		/* oldest generation, for convenience */
step *g0s0;			/* generation 0, step 0, for convenience */

lnat total_allocated = 0;	/* total memory allocated during run */
/*
 * Storage manager mutex: protects all the above state from
 * simultaneous access by two STG threads.
 */
#ifdef SMP
Mutex sm_mutex = INIT_MUTEX_VAR;
#endif
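/* Illustrative locking discipline (a sketch, not lifted verbatim from
 * the surrounding code): the allocation entry points below bracket any
 * access to the shared state above with the OSThreads.h macros, which
 * compile away in non-SMP builds:
 *
 *     ACQUIRE_LOCK(&sm_mutex);
 *     ... read/modify small_alloc_list, alloc_Hp, alloc_HpLim ...
 *     RELEASE_LOCK(&sm_mutex);
 */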
static void *stgAllocForGMP   (size_t size_in_bytes);
static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
static void  stgDeallocForGMP (void *ptr, size_t size);
void
initStorage( void )
{
  nat g, s;
  step *stp;
  generation *gen;

#if defined(darwin_TARGET_OS)
  macosx_get_memory_layout();
#endif
  /* Sanity check to make sure we are able to make the distinction
   * between closures and infotables
   */
  if (!LOOKS_LIKE_GHC_INFO(&stg_BLACKHOLE_info)) {
    barf("LOOKS_LIKE_GHC_INFO+ is incorrectly defined");
  }
  if (LOOKS_LIKE_GHC_INFO(&stg_dummy_ret_closure)) {
    barf("LOOKS_LIKE_GHC_INFO- is incorrectly defined");
  }
  if (LOOKS_LIKE_STATIC_CLOSURE(&stg_BLACKHOLE_info)) {
    barf("LOOKS_LIKE_STATIC_CLOSURE- is incorrectly defined");
  }
  if (!LOOKS_LIKE_STATIC_CLOSURE(&stg_dummy_ret_closure)) {
    barf("LOOKS_LIKE_STATIC_CLOSURE+ is incorrectly defined");
  }
  if (RtsFlags.GcFlags.maxHeapSize != 0 &&
      RtsFlags.GcFlags.heapSizeSuggestion >
      RtsFlags.GcFlags.maxHeapSize) {
    RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
  }

  if (RtsFlags.GcFlags.maxHeapSize != 0 &&
      RtsFlags.GcFlags.minAllocAreaSize >
      RtsFlags.GcFlags.maxHeapSize) {
    prog_belch("maximum heap size (-M) is smaller than minimum alloc area size (-A)");
    exit(1);
  }
  initBlockAllocator();

#if defined(SMP)
  initMutex(&sm_mutex);
#endif
  /* allocate generation info array */
  generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
					     * sizeof(struct _generation),
					     "initStorage: gens");
  /* Initialise all generations */
  for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
    gen = &generations[g];
    gen->no = g;
    gen->mut_list = END_MUT_LIST;
    gen->mut_once_list = END_MUT_LIST;
    gen->collections = 0;
    gen->failed_promotions = 0;
    gen->max_blocks = 0;
  }
  /* A couple of convenience pointers */
  g0 = &generations[0];
  oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
  /* Allocate step structures in each generation */
  if (RtsFlags.GcFlags.generations > 1) {
    /* Only for multiple-generations */

    /* Oldest generation: one step */
    oldest_gen->n_steps = 1;
    oldest_gen->steps =
      stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");

    /* set up all except the oldest generation with the requested
     * number of steps */
    for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
      generations[g].n_steps = RtsFlags.GcFlags.steps;
      generations[g].steps =
	stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step),
			"initStorage: steps");
    }

  } else {
    /* single generation, i.e. a two-space collector */
    g0->n_steps = 1;
    g0->steps = stgMallocBytes (sizeof(struct _step), "initStorage: steps");
  }
  /* Initialise all steps */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      stp->no = s;
      stp->blocks = NULL;
      stp->n_blocks = 0;
      stp->gen = &generations[g];
      stp->gen_no = g;
      stp->hp = NULL;
      stp->hpLim = NULL;
      stp->hp_bd = NULL;
      stp->large_objects = NULL;
      stp->n_large_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      stp->is_compacted = 0;
    }
  }
  /* Set up the destination pointers in each younger gen. step */
  for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
    for (s = 0; s < generations[g].n_steps-1; s++) {
      generations[g].steps[s].to = &generations[g].steps[s+1];
    }
    /* the last step in gen g promotes into gen g+1 */
    generations[g].steps[s].to = &generations[g+1].steps[0];
  }
  /* The oldest generation has one step, which is compacted if
   * compaction was requested.
   */
  if (RtsFlags.GcFlags.compact) {
    if (RtsFlags.GcFlags.generations == 1) {
      belch("WARNING: compaction is incompatible with -G1; disabled");
    } else {
      oldest_gen->steps[0].is_compacted = 1;
    }
  }
  /* the oldest step collects into itself */
  oldest_gen->steps[0].to = &oldest_gen->steps[0];
  /* generation 0 is special: that's the nursery */
  generations[0].max_blocks = 0;
  /* G0S0: the allocation area.  Policy: keep the allocation area
   * small to begin with, even if we have a large suggested heap
   * size.  Reason: we're going to do a major collection first, and we
   * don't want it to be a big one.  This vague idea is borne out by
   * rigorous experimental evidence.
   */
  g0s0 = &generations[0].steps[0];
  allocNurseries();

  weak_ptr_list = NULL;
  caf_list = NULL;

  /* initialise the allocate() interface */
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Tell GNU multi-precision pkg about our custom alloc functions */
  mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
  IF_DEBUG(gc, statDescribeGens());
}
void
exitStorage (void)
{
    stat_exit(calcAllocated());
}
/* -----------------------------------------------------------------------------
   CAF management.

   The entry code for every CAF does the following:

      - builds a CAF_BLACKHOLE in the heap
      - pushes an update frame pointing to the CAF_BLACKHOLE
      - invokes UPD_CAF(), which:
          - calls newCaf, below
          - updates the CAF with a static indirection to the CAF_BLACKHOLE

   Why do we build a BLACKHOLE in the heap rather than just updating
   the thunk directly?  It's so that we only need one kind of update
   frame - otherwise we'd need a static version of the update frame too.

   newCaf() does the following:

      - it puts the CAF on the oldest generation's mut-once list.
        This is so that we can treat the CAF as a root when collecting
        younger generations.

   For GHCI, we have additional requirements when dealing with CAFs:

      - we must *retain* all dynamically-loaded CAFs ever entered,
        just in case we need them again.
      - we must be able to *revert* CAFs that have been evaluated, to
        their pre-evaluated form.

      To do this, we use an additional CAF list.  When newCaf() is
      called on a dynamically-loaded CAF, we add it to the CAF list
      instead of the old-generation mutable list, and save away its
      old info pointer (in caf->saved_info) for later reversion.

      To revert all the CAFs, we traverse the CAF list and reset the
      info pointer to caf->saved_info, then throw away the CAF list.
      (see GC.c:revertCAFs()).

   -------------------------------------------------------------------------- */
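/* For reference, a sketch of the reversion walk described above; the
 * real code lives in GC.c:revertCAFs(), so the details here are
 * illustrative only:
 *
 *     StgIndStatic *c;
 *     for (c = (StgIndStatic *)caf_list; c != NULL;
 *          c = (StgIndStatic *)c->static_link) {
 *         SET_INFO((StgClosure *)c, c->saved_info);
 *         c->saved_info = NULL;
 *     }
 *     caf_list = NULL;
 */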
void
newCAF(StgClosure* caf)
{
  /* Put this CAF on the mutable list for the old generation.
   * This is a HACK - the IND_STATIC closure doesn't really have
   * a mut_link field, but we pretend it has - in fact we re-use
   * the STATIC_LINK field for the time being, because when we
   * come to do a major GC we won't need the mut_link field
   * any more and can use it as a STATIC_LINK.
   */
  if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) {
      ((StgIndStatic *)caf)->saved_info  = (StgInfoTable *)caf->header.info;
      ((StgIndStatic *)caf)->static_link = caf_list;
      caf_list = caf;
  } else {
      ((StgIndStatic *)caf)->saved_info = NULL;
      ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
      oldest_gen->mut_once_list = (StgMutClosure *)caf;
  }
#if defined(PAR) || defined(DIST)
  /* If we are PAR or DIST then we never forget a CAF */
  { globalAddr *newGA;
    //belch("<##> Globalising CAF %08x %s",caf,info_type(caf));
    newGA = makeGlobal(caf,rtsTrue); /*given full weight*/
    ASSERT(newGA);
  }
#endif /* PAR */
}
/* -----------------------------------------------------------------------------
   Nursery management.
   -------------------------------------------------------------------------- */
void
allocNurseries( void )
{
#ifdef SMP
  Capability *cap;
  bdescr *bd;

  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
    cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
    cap->r.rCurrentNursery = cap->r.rNursery;
    /* Set the back links to be equal to the Capability,
     * so we can do slightly better informed locking.
     */
    for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
      bd->u.back = (bdescr *)cap;
    }
  }
#else /* !SMP */
  g0s0->blocks      = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
  g0s0->n_blocks    = RtsFlags.GcFlags.minAllocAreaSize;
  g0s0->to_blocks   = NULL;
  g0s0->n_to_blocks = 0;
  MainCapability.r.rNursery        = g0s0->blocks;
  MainCapability.r.rCurrentNursery = g0s0->blocks;
  /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
#endif
}
void
resetNurseries( void )
{
  bdescr *bd;
#ifdef SMP
  Capability *cap;

  /* All tasks must be stopped */
  ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes);

  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
    for (bd = cap->r.rNursery; bd; bd = bd->link) {
      bd->free = bd->start;
      ASSERT(bd->gen_no == 0);
      ASSERT(bd->step == g0s0);
      IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
    }
    cap->r.rCurrentNursery = cap->r.rNursery;
  }
#else /* !SMP */
  for (bd = g0s0->blocks; bd; bd = bd->link) {
    bd->free = bd->start;
    ASSERT(bd->gen_no == 0);
    ASSERT(bd->step == g0s0);
    IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
  }
  MainCapability.r.rNursery = g0s0->blocks;
  MainCapability.r.rCurrentNursery = g0s0->blocks;
#endif
}
bdescr *
allocNursery (bdescr *tail, nat blocks)
{
  bdescr *bd;
  nat i;

  // Allocate a nursery: we allocate fresh blocks one at a time and
  // cons them on to the front of the list, not forgetting to update
  // the back pointer on the tail of the list to point to the new block.
  for (i = 0; i < blocks; i++) {
    /* NOTE: processNursery() in LdvProfile.c assumes that every block
       group in the nursery contains only a single block.  So, if a block
       group is given multiple blocks, change processNursery() accordingly. */
    bd = allocBlock();
    bd->link = tail;
    // double-link the nursery: we might need to insert blocks
    if (tail != NULL) { tail->u.back = bd; }
    bd->step   = g0s0;
    bd->gen_no = 0;
    bd->flags  = 0;
    bd->free   = bd->start;
    tail = bd;
  }
  tail->u.back = NULL;
  return tail;
}
void
resizeNursery ( nat blocks )
{
  bdescr *bd;
  nat nursery_blocks;

#ifdef SMP
  barf("resizeNursery: can't resize in SMP mode");
#endif

  nursery_blocks = g0s0->n_blocks;
  if (nursery_blocks == blocks) {
      return;
  } else if (nursery_blocks < blocks) {
      IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
			   blocks));
      g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
  } else {
      bdescr *next_bd;

      IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
			   blocks));
      bd = g0s0->blocks;
      while (nursery_blocks > blocks) {
	  next_bd = bd->link;
	  next_bd->u.back = NULL;
	  nursery_blocks -= bd->blocks; // might be a large block
	  freeGroup(bd);
	  bd = next_bd;
      }
      g0s0->blocks = bd;
      // might have gone just under, by freeing a large block, so make
      // up the difference.
      if (nursery_blocks < blocks) {
	  g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
      }
  }

  g0s0->n_blocks = blocks;
  ASSERT(countBlocks(g0s0->blocks) == g0s0->n_blocks);
}
/* -----------------------------------------------------------------------------
   The allocate() interface

   allocate(n) always succeeds, and returns a chunk of memory n words
   long.  n can be larger than the size of a block if necessary, in
   which case a contiguous block group will be allocated.
   -------------------------------------------------------------------------- */
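/* Example (illustrative): a caller grabs header + payload in one go and
 * then fills in the header -- stgAllocForGMP() below uses exactly this
 * pattern:
 *
 *     StgArrWords *arr = (StgArrWords *)allocate(sizeofW(StgArrWords) + n);
 *     SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, n);
 */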
StgPtr
allocate( nat n )
{
  bdescr *bd;
  StgPtr p;

  ACQUIRE_LOCK(&sm_mutex);

  TICK_ALLOC_HEAP_NOCTR(n);
  CCS_ALLOC(CCCS,n);

  /* big allocation (>LARGE_OBJECT_THRESHOLD) */
  /* ToDo: allocate directly into generation 1 */
  if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
    nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
    bd = allocGroup(req_blocks);
    dbl_link_onto(bd, &g0s0->large_objects);
    bd->gen_no = 0;
    bd->step   = g0s0;
    bd->flags  = BF_LARGE;
    bd->free   = bd->start;
    /* these blocks do count towards alloc_blocks, even though large
     * objects are likely to remain live for quite a while (eg. running
     * threads), so garbage collecting early won't make much difference.
     */
    alloc_blocks += req_blocks;
    RELEASE_LOCK(&sm_mutex);
    return bd->start;

  /* small allocation (<LARGE_OBJECT_THRESHOLD) */
  } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
    if (small_alloc_list) {
      small_alloc_list->free = alloc_Hp;
    }
    bd = allocBlock();
    bd->link = small_alloc_list;
    small_alloc_list = bd;
    bd->gen_no = 0;
    bd->step   = g0s0;
    bd->flags  = 0;
    alloc_Hp    = bd->start;
    alloc_HpLim = bd->start + BLOCK_SIZE_W;
    alloc_blocks++;
  }

  p = alloc_Hp;
  alloc_Hp += n;
  RELEASE_LOCK(&sm_mutex);
  return p;
}
lnat
allocated_bytes( void )
{
    /* blocks handed out by allocate(), minus the slop still free
     * between alloc_Hp and alloc_HpLim in the current block */
    return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
}
/* ---------------------------------------------------------------------------
   Allocate a fixed/pinned object.

   We allocate small pinned objects into a single block, allocating a
   new block when the current one overflows.  The block is chained
   onto the large_object_list of generation 0 step 0.

   NOTE: The GC can't in general handle pinned objects.  This
   interface is only safe to use for ByteArrays, which have no
   pointers and don't require scavenging.  It works because the
   block's descriptor has the BF_LARGE flag set, so the block is
   treated as a large object and chained onto various lists, rather
   than the individual objects being copied.  However, when it comes
   to scavenge the block, the GC will only scavenge the first object.
   The reason is that the GC can't linearly scan a block of pinned
   objects at the moment (doing so would require using the
   mostly-copying techniques).  But since we're restricting ourselves
   to pinned ByteArrays, not scavenging is ok.

   This function is called by newPinnedByteArray# which immediately
   fills the allocated memory with a MutableByteArray#.
   ------------------------------------------------------------------------- */
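/* Example (illustrative): since MutableByteArray# shares the ARR_WORDS
 * representation, the caller's job looks just like the allocate()
 * example above, but with pinned memory:
 *
 *     StgArrWords *arr = (StgArrWords *)allocatePinned(sizeofW(StgArrWords) + n);
 *     SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, n);
 */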
StgPtr
allocatePinned( nat n )
{
    StgPtr p;
    bdescr *bd = pinned_object_block;

    ACQUIRE_LOCK(&sm_mutex);

    TICK_ALLOC_HEAP_NOCTR(n);
    CCS_ALLOC(CCCS,n);

    // If the request is for a large object, then allocate()
    // will give us a pinned object anyway.
    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	RELEASE_LOCK(&sm_mutex);
	return allocate(n);
    }

    // we always return 8-byte aligned memory.  bd->free must be
    // 8-byte aligned to begin with, so we just round up n to
    // the nearest multiple of 8 bytes.
    if (sizeof(StgWord) == 4) {
	n = (n+1) & ~1;
    }

    // If we don't have a block of pinned objects yet, or the current
    // one isn't large enough to hold the new object, allocate a new one.
    if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
	pinned_object_block = bd = allocBlock();
	dbl_link_onto(bd, &g0s0->large_objects);
	bd->gen_no = 0;
	bd->step   = g0s0;
	bd->flags  = BF_LARGE;
	bd->free   = bd->start;
	alloc_blocks++;
    }

    p = bd->free;
    bd->free += n;
    RELEASE_LOCK(&sm_mutex);
    return p;
}
/* -----------------------------------------------------------------------------
   Allocation functions for GMP.

   These all use the allocate() interface - we can't have any garbage
   collection going on during a gmp operation, so we use allocate()
   which always succeeds.  The gmp operations which might need to
   allocate will ask the storage manager (via doYouWantToGC()) whether
   a garbage collection is required, in case we get into a loop doing
   only allocate() style allocation.
   -------------------------------------------------------------------------- */
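/* Illustrative call pattern (a sketch; the actual check sits in the
 * compiled code that wraps each gmp primop, not here):
 *
 *     if (doYouWantToGC()) {
 *         ... perform a garbage collection before going on ...
 *     }
 *     mpz_add(...);   // may call stgAllocForGMP() via the hooks below
 */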
static void *
stgAllocForGMP (size_t size_in_bytes)
{
  StgArrWords* arr;
  nat data_size_in_words, total_size_in_words;

  /* round up to a whole number of words */
  data_size_in_words  = (size_in_bytes + sizeof(W_) - 1) / sizeof(W_);
  total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;

  /* allocate and fill it in. */
  arr = (StgArrWords *)allocate(total_size_in_words);
  SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_size_in_words);

  /* and return a ptr to the goods inside the array */
  return(BYTE_ARR_CTS(arr));
}
static void *
stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
{
    size_t i, min_size = (old_size < new_size) ? old_size : new_size;
    void *new_stuff_ptr = stgAllocForGMP(new_size);
    char *p = (char *) ptr;
    char *q = (char *) new_stuff_ptr;

    /* copy only as many bytes as both the old and the new region can
     * hold: GMP may shrink as well as grow an allocation */
    for (i = 0; i < min_size; i++, p++, q++) {
	*q = *p;
    }
    return(new_stuff_ptr);
}
static void
stgDeallocForGMP (void *ptr STG_UNUSED,
		  size_t size STG_UNUSED)
{
    /* easy for us: the garbage collector does the dealloc'n */
}
/* -----------------------------------------------------------------------------
 * Stats and stuff
 * -------------------------------------------------------------------------- */
/* -----------------------------------------------------------------------------
 * calcAllocated()
 *
 * Approximate how much we've allocated: number of blocks in the
 * nursery + blocks allocated via allocate() - unused nursery blocks.
 * This leaves a little slop at the end of each block, and doesn't
 * take into account large objects (ToDo).
 * -------------------------------------------------------------------------- */
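/* Worked example (illustrative numbers): with a 256-block nursery of
 * which 6 blocks were never reached, plus 10 blocks handed out by
 * allocate(), the estimate is roughly
 *
 *     (256 - 6 + 10) * BLOCK_SIZE_W
 *
 * words, less the unused tail of the current allocation block.
 */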
lnat
calcAllocated( void )
{
  nat allocated;
  bdescr *bd;

#ifdef SMP
  Capability *cap;

  /* All tasks must be stopped.  Can't assert that all the
     capabilities are owned by the scheduler, though: one or more
     tasks might have been stopped while they were running (non-main)
     threads. */
  /*  ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes); */

  allocated =
    n_free_capabilities * RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W
    + allocated_bytes();

  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
    for ( bd = cap->r.rCurrentNursery->link; bd != NULL; bd = bd->link ) {
      allocated -= BLOCK_SIZE_W;
    }
    if (cap->r.rCurrentNursery->free < cap->r.rCurrentNursery->start
	+ BLOCK_SIZE_W) {
      allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
	- cap->r.rCurrentNursery->free;
    }
  }

#else /* !SMP */
  bdescr *current_nursery = MainCapability.r.rCurrentNursery;

  allocated = (g0s0->n_blocks * BLOCK_SIZE_W) + allocated_bytes();
  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
    allocated -= BLOCK_SIZE_W;
  }
  if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
    allocated -= (current_nursery->start + BLOCK_SIZE_W)
      - current_nursery->free;
  }
#endif

  total_allocated += allocated;
  return allocated;
}
/* Approximate the amount of live data in the heap.  To be called just
 * after garbage collection (see GarbageCollect()).
 */
extern lnat
calcLive(void)
{
  nat g, s;
  lnat live = 0;
  step *stp;

  if (RtsFlags.GcFlags.generations == 1) {
    live = (g0s0->n_to_blocks - 1) * BLOCK_SIZE_W +
      ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
    return live;
  }

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      /* approximate amount of live data (doesn't take into account slop
       * at end of each block).
       */
      if (g == 0 && s == 0) { continue; }
      stp = &generations[g].steps[s];
      live += (stp->n_large_blocks + stp->n_blocks - 1) * BLOCK_SIZE_W;
      if (stp->hp_bd != NULL) {
	live += ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start)
	  / sizeof(W_);
      }
    }
  }
  return live;
}
/* Approximate the number of blocks that will be needed at the next
 * garbage collection.
 *
 * Assume: all data currently live will remain live.  Steps that will
 * be collected next time will therefore need twice as many blocks
 * since all the data will be copied.
 */
extern lnat
calcNeeded(void)
{
    lnat needed = 0;
    nat g, s;
    step *stp;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
	for (s = 0; s < generations[g].n_steps; s++) {
	    if (g == 0 && s == 0) { continue; }
	    stp = &generations[g].steps[s];
	    if (generations[g].steps[0].n_blocks +
		generations[g].steps[0].n_large_blocks
		> generations[g].max_blocks
		&& stp->is_compacted == 0) {
		needed += 2 * stp->n_blocks;
	    } else {
		needed += stp->n_blocks;
	    }
	}
    }
    return needed;
}
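/* Worked example (illustrative): a 30-block step in a generation that
 * has outgrown its max_blocks budget, and is not compacted, contributes
 * 2 * 30 = 60 blocks (copying GC needs both from-space and to-space);
 * any other step contributes just its current 30.
 */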
/* -----------------------------------------------------------------------------
   Debugging

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */
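/* The invariant checked at the end of memInventory(), in one line:
 *
 *     total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK
 *
 * i.e. every block of every megablock we ever allocated is either on
 * some list we know about or on the block allocator's free list.
 */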
#ifdef DEBUG

void
memInventory(void)
{
  nat g, s;
  step *stp;
  bdescr *bd;
  lnat total_blocks = 0, free_blocks = 0;
  /* count the blocks we currently have */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      total_blocks += stp->n_blocks;
      if (RtsFlags.GcFlags.generations == 1) {
	/* two-space collector has a to-space too :-) */
	total_blocks += g0s0->n_to_blocks;
      }
      for (bd = stp->large_objects; bd; bd = bd->link) {
	total_blocks += bd->blocks;
	/* hack for megablock groups: they have an extra block or two in
	   the second and subsequent megablocks where the block
	   descriptors would normally go.
	*/
	if (bd->blocks > BLOCKS_PER_MBLOCK) {
	  total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
	                  * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
	}
      }
    }
  }
  /* any blocks held by allocate() */
  for (bd = small_alloc_list; bd; bd = bd->link) {
    total_blocks += bd->blocks;
  }
#ifdef PROFILING
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    for (bd = firstStack; bd != NULL; bd = bd->link)
      total_blocks += bd->blocks;
  }
#endif
  // count the blocks allocated by the arena allocator
  total_blocks += arenaBlocks();

  /* count the blocks on the free list */
  free_blocks = countFreeList();
  if (total_blocks + free_blocks != mblocks_allocated *
      BLOCKS_PER_MBLOCK) {
    fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n",
	    total_blocks, free_blocks, total_blocks + free_blocks,
	    mblocks_allocated * BLOCKS_PER_MBLOCK);
  }

  ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
}
nat
countBlocks(bdescr *bd)
{
    nat n;
    for (n=0; bd != NULL; bd=bd->link) {
	n += bd->blocks;
    }
    return n;
}
/* Full heap sanity check. */
void
checkSanity( void )
{
    nat g, s;

    if (RtsFlags.GcFlags.generations == 1) {
	checkHeap(g0s0->to_blocks);
	checkChain(g0s0->large_objects);
    } else {
	for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
	    for (s = 0; s < generations[g].n_steps; s++) {
		ASSERT(countBlocks(generations[g].steps[s].blocks)
		       == generations[g].steps[s].n_blocks);
		ASSERT(countBlocks(generations[g].steps[s].large_objects)
		       == generations[g].steps[s].n_large_blocks);
		if (g == 0 && s == 0) { continue; }
		checkHeap(generations[g].steps[s].blocks);
		checkChain(generations[g].steps[s].large_objects);
	    }
	    checkMutableList(generations[g].mut_list, g);
	    checkMutOnceList(generations[g].mut_once_list, g);
	}
	checkFreeListSanity();
    }
}
// handy function for use in gdb, because Bdescr() is inlined.
extern bdescr *_bdescr( StgPtr p );

bdescr *
_bdescr( StgPtr p )
{
    return Bdescr(p);
}

#endif /* DEBUG */