/* -----------------------------------------------------------------------------
 * $Id: Storage.c,v 1.61 2002/04/19 12:31:07 simonmar Exp $
 *
 * (c) The GHC Team, 1998-1999
 *
 * Storage manager front end
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "BlockAlloc.h"
#include "OSThreads.h"
#include "StoragePriv.h"
#include "RetainerProfile.h"   // for counting memory blocks (memInventory)
#ifdef darwin_TARGET_OS
#include <mach-o/getsect.h>

unsigned long macho_etext = 0;
unsigned long macho_edata = 0;

static void macosx_get_memory_layout(void)
  struct segment_command *seg;

  seg = getsegbyname("__TEXT");
  macho_etext = seg->vmaddr + seg->vmsize;
  seg = getsegbyname("__DATA");
  macho_edata = seg->vmaddr + seg->vmsize;
StgClosure *caf_list = NULL;

bdescr *small_alloc_list;       /* allocate()d small objects */
bdescr *large_alloc_list;       /* allocate()d large objects */
bdescr *pinned_object_block;    /* allocate pinned objects into this block */
nat alloc_blocks;               /* number of allocate()d blocks since GC */
nat alloc_blocks_lim;           /* approximate limit on alloc_blocks */

StgPtr alloc_Hp    = NULL;      /* next free byte in small_alloc_list */
StgPtr alloc_HpLim = NULL;      /* end of block at small_alloc_list   */

generation *generations;        /* all the generations */
generation *g0;                 /* generation 0, for convenience */
generation *oldest_gen;         /* oldest generation, for convenience */
step *g0s0;                     /* generation 0, step 0, for convenience */

lnat total_allocated = 0;       /* total memory allocated during run */
/*
 * Storage manager mutex: protects all the above state from
 * simultaneous access by two STG threads.
 */
Mutex sm_mutex = INIT_MUTEX_VAR;
static void *stgAllocForGMP   (size_t size_in_bytes);
static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
static void  stgDeallocForGMP (void *ptr, size_t size);
#if defined(darwin_TARGET_OS)
  macosx_get_memory_layout();
  /* Sanity check to make sure we are able to make the distinction
   * between closures and infotables
   */
  if (!LOOKS_LIKE_GHC_INFO(&stg_BLACKHOLE_info)) {
    barf("LOOKS_LIKE_GHC_INFO+ is incorrectly defined");

  if (LOOKS_LIKE_GHC_INFO(&stg_dummy_ret_closure)) {
    barf("LOOKS_LIKE_GHC_INFO- is incorrectly defined");

  if (LOOKS_LIKE_STATIC_CLOSURE(&stg_BLACKHOLE_info)) {
    barf("LOOKS_LIKE_STATIC_CLOSURE- is incorrectly defined");

  if (!LOOKS_LIKE_STATIC_CLOSURE(&stg_dummy_ret_closure)) {
    barf("LOOKS_LIKE_STATIC_CLOSURE+ is incorrectly defined");
  if (RtsFlags.GcFlags.maxHeapSize != 0 &&
      RtsFlags.GcFlags.heapSizeSuggestion >
      RtsFlags.GcFlags.maxHeapSize) {
    RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;

  if (RtsFlags.GcFlags.maxHeapSize != 0 &&
      RtsFlags.GcFlags.minAllocAreaSize >
      RtsFlags.GcFlags.maxHeapSize) {
    prog_belch("maximum heap size (-M) is smaller than minimum alloc area size (-A)");
  initBlockAllocator();

  initCondition(&sm_mutex);
  /* allocate generation info array */
  generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
                                             * sizeof(struct _generation),
                                             "initStorage: gens");
  /* Initialise all generations */
  for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
    gen = &generations[g];
    gen->mut_list = END_MUT_LIST;
    gen->mut_once_list = END_MUT_LIST;
    gen->collections = 0;
    gen->failed_promotions = 0;
  /* A couple of convenience pointers */
  g0 = &generations[0];
  oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
  /* Allocate step structures in each generation */
  if (RtsFlags.GcFlags.generations > 1) {
    /* Only for multiple generations */

    /* Oldest generation: one step */
    oldest_gen->n_steps = 1;
      stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");

    /* set up all generations except the oldest with the requested
     * number of steps (2 by default) */
    for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
      generations[g].n_steps = RtsFlags.GcFlags.steps;
      generations[g].steps =
        stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step),
                        "initStorage: steps");
    /* single generation, i.e. a two-space collector */
    g0->steps = stgMallocBytes (sizeof(struct _step), "initStorage: steps");
  /* Initialise all steps */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      stp->gen = &generations[g];
      stp->large_objects = NULL;
      stp->n_large_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      stp->is_compacted = 0;
  /* Set up the destination pointers in each younger gen. step */
  for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
    for (s = 0; s < generations[g].n_steps-1; s++) {
      generations[g].steps[s].to = &generations[g].steps[s+1];

    generations[g].steps[s].to = &generations[g+1].steps[0];
  /* The oldest generation has one step and it is compacted. */
  if (RtsFlags.GcFlags.compact) {
    if (RtsFlags.GcFlags.generations == 1) {
      belch("WARNING: compaction is incompatible with -G1; disabled");

      oldest_gen->steps[0].is_compacted = 1;

  oldest_gen->steps[0].to = &oldest_gen->steps[0];
  /* generation 0 is special: that's the nursery */
  generations[0].max_blocks = 0;
  /* G0S0: the allocation area.  Policy: keep the allocation area
   * small to begin with, even if we have a large suggested heap
   * size.  Reason: we're going to do a major collection first, and we
   * don't want it to be a big one.  This vague idea is borne out by
   * rigorous experimental evidence.
   */
  g0s0 = &generations[0].steps[0];
  weak_ptr_list = NULL;

  /* initialise the allocate() interface */
  small_alloc_list = NULL;
  large_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Tell GNU multi-precision pkg about our custom alloc functions */
  mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
  initMutex(&sm_mutex);

  IF_DEBUG(gc, statDescribeGens());

  stat_exit(calcAllocated());
/* -----------------------------------------------------------------------------
   The entry code for every CAF does the following:

      - builds a CAF_BLACKHOLE in the heap
      - pushes an update frame pointing to the CAF_BLACKHOLE
      - invokes UPD_CAF(), which:
          - calls newCaf, below
          - updates the CAF with a static indirection to the CAF_BLACKHOLE

   Why do we build a BLACKHOLE in the heap rather than just updating
   the thunk directly?  It's so that we only need one kind of update
   frame - otherwise we'd need a static version of the update frame too.

   newCaf() does the following:

      - it puts the CAF on the oldest generation's mut-once list.
        This is so that we can treat the CAF as a root when collecting
        younger generations.

   For GHCI, we have additional requirements when dealing with CAFs:

      - we must *retain* all dynamically-loaded CAFs ever entered,
        just in case we need them again.
      - we must be able to *revert* CAFs that have been evaluated, to
        their pre-evaluated form.

   To do this, we use an additional CAF list.  When newCaf() is
   called on a dynamically-loaded CAF, we add it to the CAF list
   instead of the old-generation mutable list, and save away its
   old info pointer (in caf->saved_info) for later reversion.

   To revert all the CAFs, we traverse the CAF list and reset the
   info pointer to caf->saved_info, then throw away the CAF list.
   (see GC.c:revertCAFs()).

   -------------------------------------------------------------------------- */
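/* Illustrative sketch (not part of the original file): the reversion
 * described above is just a walk down caf_list along the static_link
 * fields, restoring each closure's saved info pointer.  Something along
 * these lines, assuming the StgIndStatic fields used by newCAF() below and
 * the usual SET_INFO macro; the real code lives in GC.c:revertCAFs().
 */
#if 0
static void revertCAFs_sketch( void )
{
  StgIndStatic *c, *next;

  for (c = (StgIndStatic *)caf_list; c != NULL; c = next) {
    next = (StgIndStatic *)c->static_link;
    SET_INFO((StgClosure *)c, c->saved_info);  /* back to pre-evaluated form */
    c->saved_info = NULL;
  }
  caf_list = NULL;                             /* throw the CAF list away */
}
#endif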
newCAF(StgClosure* caf)
  /* Put this CAF on the mutable list for the old generation.
   * This is a HACK - the IND_STATIC closure doesn't really have
   * a mut_link field, but we pretend it has - in fact we re-use
   * the STATIC_LINK field for the time being, because when we
   * come to do a major GC we won't need the mut_link field
   * any more and can use it as a STATIC_LINK.
   */
  if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) {
    ((StgIndStatic *)caf)->saved_info  = (StgInfoTable *)caf->header.info;
    ((StgIndStatic *)caf)->static_link = caf_list;

    ((StgIndStatic *)caf)->saved_info = NULL;
    ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
    oldest_gen->mut_once_list = (StgMutClosure *)caf;
  /* If we are PAR or DIST then we never forget a CAF */
  //belch("<##> Globalising CAF %08x %s",caf,info_type(caf));
  newGA=makeGlobal(caf,rtsTrue); /*given full weight*/
/* -----------------------------------------------------------------------------
   -------------------------------------------------------------------------- */
allocNurseries( void )
  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
    cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
    cap->r.rCurrentNursery = cap->r.rNursery;
    for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
      bd->u.back = (bdescr *)cap;

  /* Set the back links to be equal to the Capability,
   * so we can do slightly better informed locking.
   */

  g0s0->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
  g0s0->n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
  g0s0->to_blocks = NULL;
  g0s0->n_to_blocks = 0;
  MainCapability.r.rNursery = g0s0->blocks;
  MainCapability.r.rCurrentNursery = g0s0->blocks;
  /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
resetNurseries( void )
  /* All tasks must be stopped */
  ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes);

  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
    for (bd = cap->r.rNursery; bd; bd = bd->link) {
      bd->free = bd->start;
      ASSERT(bd->gen_no == 0);
      ASSERT(bd->step == g0s0);
      IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));

    cap->r.rCurrentNursery = cap->r.rNursery;

  for (bd = g0s0->blocks; bd; bd = bd->link) {
    bd->free = bd->start;
    ASSERT(bd->gen_no == 0);
    ASSERT(bd->step == g0s0);
    IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));

  MainCapability.r.rNursery = g0s0->blocks;
  MainCapability.r.rCurrentNursery = g0s0->blocks;
allocNursery (bdescr *tail, nat blocks)
  // Allocate a nursery: we allocate fresh blocks one at a time and
  // cons them on to the front of the list, not forgetting to update
  // the back pointer on the tail of the list to point to the new block.
  for (i=0; i < blocks; i++) {
    /*
       processNursery() in LdvProfile.c assumes that every block group in
       the nursery contains only a single block. So, if a block group is
       given multiple blocks, change processNursery() accordingly.
     */
    // double-link the nursery: we might need to insert blocks
    bd->free = bd->start;
resizeNursery ( nat blocks )
  barf("resizeNursery: can't resize in SMP mode");

  nursery_blocks = g0s0->n_blocks;
  if (nursery_blocks == blocks) {

  else if (nursery_blocks < blocks) {
    IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
    g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);

    IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",

    while (nursery_blocks > blocks) {
      next_bd->u.back = NULL;
      nursery_blocks -= bd->blocks; // might be a large block

    // might have gone just under, by freeing a large block, so make
    // up the difference.
    if (nursery_blocks < blocks) {
      g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);

  g0s0->n_blocks = blocks;
  ASSERT(countBlocks(g0s0->blocks) == g0s0->n_blocks);
/* -----------------------------------------------------------------------------
   The allocate() interface

   allocate(n) always succeeds, and returns a chunk of memory n words
   long.  n can be larger than the size of a block if necessary, in
   which case a contiguous block group will be allocated.
   -------------------------------------------------------------------------- */
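/* Quick usage illustration (not part of the original file): a caller rounds
 * its byte count up to whole words, asks allocate() for header + payload,
 * and fills in the closure header itself.  stgAllocForGMP() further down is
 * the real example of this pattern; the sketch below just spells it out,
 * reusing the same StgArrWords / SET_ARR_HDR / CCCS / BYTE_ARR_CTS names.
 */
#if 0
static void *example_allocate_bytes( nat n_bytes )
{
  nat payload_words = (n_bytes + sizeof(W_) - 1) / sizeof(W_);
  StgArrWords *arr;

  /* allocate() always succeeds; a big request gets a whole block group */
  arr = (StgArrWords *)allocate(sizeofW(StgArrWords) + payload_words);
  SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, payload_words);
  return BYTE_ARR_CTS(arr);
}
#endif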
  TICK_ALLOC_HEAP_NOCTR(n);

  /* big allocation (>LARGE_OBJECT_THRESHOLD) */
  /* ToDo: allocate directly into generation 1 */
  if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
    nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
    bd = allocGroup(req_blocks);
    dbl_link_onto(bd, &g0s0->large_objects);
    bd->flags = BF_LARGE;
    bd->free = bd->start;
    /* don't add these blocks to alloc_blocks, since we're assuming
     * that large objects are likely to remain live for quite a while
     * (eg. running threads), so garbage collecting early won't make
     * much difference.
     */
    alloc_blocks += req_blocks;
  /* small allocation (<LARGE_OBJECT_THRESHOLD) */
  } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
    if (small_alloc_list) {
      small_alloc_list->free = alloc_Hp;

    bd->link = small_alloc_list;
    small_alloc_list = bd;
    alloc_Hp = bd->start;
    alloc_HpLim = bd->start + BLOCK_SIZE_W;
allocated_bytes( void )
  return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
/* ---------------------------------------------------------------------------
   Allocate a fixed/pinned object.

   We allocate small pinned objects into a single block, allocating a
   new block when the current one overflows.  The block is chained
   onto the large_objects list of generation 0 step 0.

   NOTE: The GC can't in general handle pinned objects.  This
   interface is only safe to use for ByteArrays, which have no
   pointers and don't require scavenging.  It works because the
   block's descriptor has the BF_LARGE flag set, so the block is
   treated as a large object and chained onto various lists, rather
   than the individual objects being copied.  However, when it comes
   to scavenge the block, the GC will only scavenge the first object.
   The reason is that the GC can't linearly scan a block of pinned
   objects at the moment (doing so would require using the
   mostly-copying techniques).  But since we're restricting ourselves
   to pinned ByteArrays, not scavenging is ok.

   This function is called by newPinnedByteArray# which immediately
   fills the allocated memory with a MutableByteArray#.
   ------------------------------------------------------------------------- */
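/* Illustrative sketch (not part of the original file): how a primop in the
 * style of newPinnedByteArray# would be expected to use this interface,
 * paralleling the allocate() sketch above and reusing the same
 * StgArrWords / SET_ARR_HDR / BYTE_ARR_CTS names.  The payload address
 * stays stable because the enclosing block is BF_LARGE and never copied.
 */
#if 0
static void *example_pinned_bytes( nat n_bytes )
{
  nat words = (n_bytes + sizeof(W_) - 1) / sizeof(W_);
  StgArrWords *arr;

  arr = (StgArrWords *)allocatePinned(sizeofW(StgArrWords) + words);
  SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, words);
  return BYTE_ARR_CTS(arr);   /* address that can safely be handed to C */
}
#endif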
allocatePinned( nat n )
  bdescr *bd = pinned_object_block;

  TICK_ALLOC_HEAP_NOCTR(n);

  // If the request is for a large object, then allocate()
  // will give us a pinned object anyway.
  if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {

  // If we don't have a block of pinned objects yet, or the current
  // one isn't large enough to hold the new object, allocate a new one.
  if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
    pinned_object_block = bd = allocBlock();
    dbl_link_onto(bd, &g0s0->large_objects);
    bd->flags = BF_LARGE;
    bd->free = bd->start;
/* -----------------------------------------------------------------------------
   Allocation functions for GMP.

   These all use the allocate() interface - we can't have any garbage
   collection going on during a gmp operation, so we use allocate()
   which always succeeds.  The gmp operations which might need to
   allocate will ask the storage manager (via doYouWantToGC()) whether
   a garbage collection is required, in case we get into a loop doing
   only allocate() style allocation.
   -------------------------------------------------------------------------- */
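/* Rough sketch (not part of the original file) of the protocol the comment
 * above describes: before a GMP operation that may allocate through these
 * hooks, the caller polls the storage manager and collects first if a GC is
 * due, so that no collection can happen mid-operation.  doYouWantToGC() is
 * the predicate named above; performGC() is an assumed entry point here,
 * and the real callers may trigger the collection differently.
 */
#if 0
static void example_gmp_op( MP_INT *result, MP_INT *x, MP_INT *y )
{
  if (doYouWantToGC()) {
    /* safe point: nothing in flight yet, so collect now */
    performGC();
  }
  /* from here on, only allocate()-style allocation can occur */
  mpz_add(result, x, y);
}
#endif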
stgAllocForGMP (size_t size_in_bytes)
  nat data_size_in_words, total_size_in_words;

  /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
  ASSERT(size_in_bytes % sizeof(W_) == 0);

  data_size_in_words  = size_in_bytes / sizeof(W_);
  total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;

  /* allocate and fill it in. */
  arr = (StgArrWords *)allocate(total_size_in_words);
  SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_size_in_words);

  /* and return a ptr to the goods inside the array */
  return(BYTE_ARR_CTS(arr));
stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
  void *new_stuff_ptr = stgAllocForGMP(new_size);
  char *p = (char *) ptr;
  char *q = (char *) new_stuff_ptr;

  for (; i < old_size; i++, p++, q++) {

  return(new_stuff_ptr);
stgDeallocForGMP (void *ptr STG_UNUSED,
                  size_t size STG_UNUSED)
  /* easy for us: the garbage collector does the dealloc'n */
/* -----------------------------------------------------------------------------
 * -------------------------------------------------------------------------- */
/* -----------------------------------------------------------------------------
 * Approximate how much we've allocated: number of blocks in the
 * nursery + blocks allocated via allocate() - unused nursery blocks.
 * This leaves a little slop at the end of each block, and doesn't
 * take into account large objects (ToDo).
 * -------------------------------------------------------------------------- */
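/* Illustrative restatement (not part of the original file) of what the
 * non-SMP branch below computes, measured in words:
 *
 *   allocated ~= g0s0->n_blocks * BLOCK_SIZE_W        -- whole nursery
 *              + allocated_bytes()                    -- allocate()d words
 *              - BLOCK_SIZE_W per block after rCurrentNursery   -- untouched
 *              - slop left in the current nursery block
 *
 * The SMP branch sums the same quantity per free Capability.
 */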
calcAllocated( void )
  /* All tasks must be stopped.  Can't assert that all the
     capabilities are owned by the scheduler, though: one or more
     tasks might have been stopped while they were running (non-main)
     threads. */
  /*  ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes); */
    n_free_capabilities * RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W

  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
    for ( bd = cap->r.rCurrentNursery->link; bd != NULL; bd = bd->link ) {
      allocated -= BLOCK_SIZE_W;

    if (cap->r.rCurrentNursery->free < cap->r.rCurrentNursery->start
      allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
                   - cap->r.rCurrentNursery->free;

  bdescr *current_nursery = MainCapability.r.rCurrentNursery;

  allocated = (g0s0->n_blocks * BLOCK_SIZE_W) + allocated_bytes();
  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
    allocated -= BLOCK_SIZE_W;

  if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
    allocated -= (current_nursery->start + BLOCK_SIZE_W)
                 - current_nursery->free;

  total_allocated += allocated;
/* Approximate the amount of live data in the heap.  To be called just
 * after garbage collection (see GarbageCollect()).
 */
  if (RtsFlags.GcFlags.generations == 1) {
    live = (g0s0->n_to_blocks - 1) * BLOCK_SIZE_W +
           ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      /* approximate amount of live data (doesn't take into account slop
       * at end of each block).
       */
      if (g == 0 && s == 0) {

      stp = &generations[g].steps[s];
      live += (stp->n_large_blocks + stp->n_blocks - 1) * BLOCK_SIZE_W;
      if (stp->hp_bd != NULL) {
        live += ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start)
/* Approximate the number of blocks that will be needed at the next
 * garbage collection.
 *
 * Assume: all data currently live will remain live.  Steps that will
 * be collected next time will therefore need twice as many blocks
 * since all the data will be copied.
 */
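/* Illustrative restatement (not part of the original file) of the estimate
 * the loop below computes, ignoring generation 0 step 0:
 *
 *   needed = sum over steps stp of
 *              2 * stp->n_blocks   if the step's generation looks due for
 *                                  collection and stp is not compacted
 *              stp->n_blocks       otherwise
 *
 * where "due for collection" is the size test on step 0 of the generation
 * against generations[g].max_blocks, exactly as coded below.
 */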
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      if (g == 0 && s == 0) { continue; }
      stp = &generations[g].steps[s];
      if (generations[g].steps[0].n_blocks +
          generations[g].steps[0].n_large_blocks
          > generations[g].max_blocks
          && stp->is_compacted == 0) {
        needed += 2 * stp->n_blocks;

        needed += stp->n_blocks;
/* -----------------------------------------------------------------------------
   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */
  lnat total_blocks = 0, free_blocks = 0;

  /* count the blocks we currently have */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      total_blocks += stp->n_blocks;
      if (RtsFlags.GcFlags.generations == 1) {
        /* two-space collector has a to-space too :-) */
        total_blocks += g0s0->n_to_blocks;

      for (bd = stp->large_objects; bd; bd = bd->link) {
        total_blocks += bd->blocks;
        /* hack for megablock groups: they have an extra block or two in
           the second and subsequent megablocks where the block
           descriptors would normally go.
         */
        if (bd->blocks > BLOCKS_PER_MBLOCK) {
          total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
                          * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
  /* any blocks held by allocate() */
  for (bd = small_alloc_list; bd; bd = bd->link) {
    total_blocks += bd->blocks;

  for (bd = large_alloc_list; bd; bd = bd->link) {
    total_blocks += bd->blocks;

  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
    for (bd = firstStack; bd != NULL; bd = bd->link)
      total_blocks += bd->blocks;

  // count the blocks allocated by the arena allocator
  total_blocks += arenaBlocks();

  /* count the blocks on the free list */
  free_blocks = countFreeList();

  if (total_blocks + free_blocks != mblocks_allocated *
    fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n",
            total_blocks, free_blocks, total_blocks + free_blocks,
            mblocks_allocated * BLOCKS_PER_MBLOCK);

  ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
countBlocks(bdescr *bd)
  for (n=0; bd != NULL; bd=bd->link) {
/* Full heap sanity check. */
  if (RtsFlags.GcFlags.generations == 1) {
    checkHeap(g0s0->to_blocks);
    checkChain(g0s0->large_objects);

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      ASSERT(countBlocks(generations[g].steps[s].blocks)
             == generations[g].steps[s].n_blocks);
      ASSERT(countBlocks(generations[g].steps[s].large_objects)
             == generations[g].steps[s].n_large_blocks);
      if (g == 0 && s == 0) { continue; }
      checkHeap(generations[g].steps[s].blocks);
      checkChain(generations[g].steps[s].large_objects);

    checkMutableList(generations[g].mut_list, g);
    checkMutOnceList(generations[g].mut_once_list, g);

  checkFreeListSanity();
// handy function for use in gdb, because Bdescr() is inlined.
extern bdescr *_bdescr( StgPtr p );
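/* Presumably (illustrative sketch, not from the original file) the
 * out-of-line wrapper declared above just forwards to the inlined Bdescr()
 * macro so that it can be called from gdb:
 */
#if 0
bdescr *
_bdescr( StgPtr p )
{
  return Bdescr(p);   /* block descriptor for the block containing p */
}
#endif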