/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2004
 *
 * Storage manager front end
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "Stats.h"
#include "BlockAlloc.h"
#include "MBlock.h"
#include "Weak.h"
#include "Sanity.h"
#include "Arena.h"
#include "OSThreads.h"
#include "Capability.h"
#include "Storage.h"
#include "Schedule.h"
#include "RetainerProfile.h"	// for counting memory blocks (memInventory)
#include "StoragePriv.h"

#include <stdlib.h>
#include <string.h>
StgClosure    *caf_list            = NULL;
StgClosure    *revertible_caf_list = NULL;
rtsBool        keepCAFs;
bdescr *small_alloc_list;	/* allocate()d small objects */
bdescr *pinned_object_block;    /* allocate pinned objects into this block */
nat alloc_blocks;		/* number of allocate()d blocks since GC */
nat alloc_blocks_lim;		/* approximate limit on alloc_blocks */

StgPtr alloc_Hp    = NULL;	/* next free byte in small_alloc_list */
StgPtr alloc_HpLim = NULL;	/* end of block at small_alloc_list   */

generation *generations = NULL;	/* all the generations */
generation *g0		= NULL; /* generation 0, for convenience */
generation *oldest_gen  = NULL; /* oldest generation, for convenience */
step *g0s0		= NULL; /* generation 0, step 0, for convenience */

ullong total_allocated = 0;	/* total memory allocated during run */

nat n_nurseries  = 0;           /* == RtsFlags.ParFlags.nNodes, convenience */
step *nurseries  = NULL;        /* array of nurseries, >1 only if SMP */
/*
 * Storage manager mutex: protects all the above state from
 * simultaneous access by two STG threads.
 */
#ifdef SMP
Mutex sm_mutex = INIT_MUTEX_VAR;
#endif
static void *stgAllocForGMP   (size_t size_in_bytes);
static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
static void  stgDeallocForGMP (void *ptr, size_t size);
static void
initStep (step *stp, int g, int s)
{
    stp->no = s;
    stp->blocks = NULL;
    stp->n_blocks = 0;
    stp->gen = &generations[g];
    stp->gen_no = g;
    stp->large_objects = NULL;
    stp->n_large_blocks = 0;
    stp->new_large_objects = NULL;
    stp->scavenged_large_objects = NULL;
    stp->n_scavenged_large_blocks = 0;
    stp->is_compacted = 0;
}
void
initStorage( void )
{
  nat g, s;
  generation *gen;

  if (generations != NULL) {
      // multi-init protection
      return;
  }
  /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
   * doing something reasonable.
   */
  ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info));
  ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
  ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
  if (RtsFlags.GcFlags.maxHeapSize != 0 &&
      RtsFlags.GcFlags.heapSizeSuggestion >
      RtsFlags.GcFlags.maxHeapSize) {
      RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
  }
  if (RtsFlags.GcFlags.maxHeapSize != 0 &&
      RtsFlags.GcFlags.minAllocAreaSize >
      RtsFlags.GcFlags.maxHeapSize) {
      errorBelch("maximum heap size (-M) is smaller than minimum alloc area size (-A)");
      exit(1);
  }
  initBlockAllocator();

#if defined(SMP)
  initMutex(&sm_mutex);
#endif
  /* allocate generation info array */
  generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
					     * sizeof(struct generation_),
					     "initStorage: gens");
  /* Initialise all generations */
  for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
    gen = &generations[g];
    gen->no = g;
    gen->mut_list = allocBlock();
    gen->collections = 0;
    gen->failed_promotions = 0;
  }
  /* A couple of convenience pointers */
  g0 = &generations[0];
  oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
  /* Allocate step structures in each generation */
  if (RtsFlags.GcFlags.generations > 1) {
    /* Only for multiple-generations */

    /* Oldest generation: one step */
    oldest_gen->n_steps = 1;
    oldest_gen->steps =
      stgMallocBytes(1 * sizeof(struct step_), "initStorage: last step");

    /* set up all except the oldest generation with 2 steps */
    for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
      generations[g].n_steps = RtsFlags.GcFlags.steps;
      generations[g].steps =
	stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct step_),
			"initStorage: steps");
    }

  } else {
    /* single generation, i.e. a two-space collector */
    g0->n_steps = 1;
    g0->steps = stgMallocBytes (sizeof(struct step_), "initStorage: steps");
  }
#ifdef SMP
  n_nurseries = RtsFlags.ParFlags.nNodes;
  nurseries = stgMallocBytes (n_nurseries * sizeof(struct step_),
			      "initStorage: nurseries");
#else
  n_nurseries = 1;
  nurseries = g0->steps; // just share nurseries[0] with g0s0
#endif
  /* Initialise all steps */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      initStep(&generations[g].steps[s], g, s);
    }
  }

#ifdef SMP
  for (s = 0; s < n_nurseries; s++) {
      initStep(&nurseries[s], 0, s);
  }
#endif
  /* Set up the destination pointers in each younger gen. step */
  for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
    for (s = 0; s < generations[g].n_steps-1; s++) {
      generations[g].steps[s].to = &generations[g].steps[s+1];
    }
    generations[g].steps[s].to = &generations[g+1].steps[0];
  }
  oldest_gen->steps[0].to = &oldest_gen->steps[0];
#ifdef SMP
  for (s = 0; s < n_nurseries; s++) {
      nurseries[s].to = generations[0].steps[0].to;
  }
#endif
  /* The oldest generation has one step. */
  if (RtsFlags.GcFlags.compact) {
      if (RtsFlags.GcFlags.generations == 1) {
	  errorBelch("WARNING: compaction is incompatible with -G1; disabled");
      } else {
	  oldest_gen->steps[0].is_compacted = 1;
      }
  }
#ifdef SMP
  if (RtsFlags.GcFlags.generations == 1) {
      errorBelch("-G1 is incompatible with SMP");
      stg_exit(1);
  }
  if (RtsFlags.GcFlags.heapSizeSuggestion > 0) {
      errorBelch("-H<size> is incompatible with SMP");
      stg_exit(1);
  }
#endif
  /* generation 0 is special: that's the nursery */
  generations[0].max_blocks = 0;
  /* G0S0: the allocation area.  Policy: keep the allocation area
   * small to begin with, even if we have a large suggested heap
   * size.  Reason: we're going to do a major collection first, and we
   * don't want it to be a big one.  This vague idea is borne out by
   * rigorous experimental evidence.
   */
  g0s0 = &generations[0].steps[0];
  /* generations are initialised; now we can allocate the nursery(ies) */
  allocNurseries();

  weak_ptr_list = NULL;
  caf_list = NULL;
  revertible_caf_list = NULL;

  /* initialise the allocate() interface */
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Tell GNU multi-precision pkg about our custom alloc functions */
  mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
  IF_DEBUG(gc, statDescribeGens());
}

void
exitStorage (void)
{
    stat_exit(calcAllocated());
}
/* -----------------------------------------------------------------------------
   CAF management.

   The entry code for every CAF does the following:

      - builds a CAF_BLACKHOLE in the heap
      - pushes an update frame pointing to the CAF_BLACKHOLE
      - invokes UPD_CAF(), which:
          - calls newCaf, below
          - updates the CAF with a static indirection to the CAF_BLACKHOLE

   Why do we build a BLACKHOLE in the heap rather than just updating
   the thunk directly?  It's so that we only need one kind of update
   frame - otherwise we'd need a static version of the update frame too.

   newCaf() does the following:

      - it puts the CAF on the oldest generation's mut-once list.
        This is so that we can treat the CAF as a root when collecting
        younger generations.

   For GHCI, we have additional requirements when dealing with CAFs:

      - we must *retain* all dynamically-loaded CAFs ever entered,
        just in case we need them again.
      - we must be able to *revert* CAFs that have been evaluated, to
        their pre-evaluated form.

      To do this, we use an additional CAF list.  When newCaf() is
      called on a dynamically-loaded CAF, we add it to the CAF list
      instead of the old-generation mutable list, and save away its
      old info pointer (in caf->saved_info) for later reversion.

      To revert all the CAFs, we traverse the CAF list and reset the
      info pointer to caf->saved_info, then throw away the CAF list.
      (see GC.c:revertCAFs()).

   -------------------------------------------------------------------------- */
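
/* A minimal sketch of the reversion loop described above, in the style of
 * GC.c:revertCAFs().  The field names (static_link, saved_info) match the
 * StgIndStatic fields used by newCAF()/newDynCAF() below; the function name
 * and exact shape are illustrative, not a copy of the real revertCAFs().
 */
#if 0   /* illustration only -- the real implementation lives in GC.c */
static void
revertCAFList (StgClosure **list)
{
    StgIndStatic *c, *next;

    for (c = (StgIndStatic *)*list; c != NULL; c = next) {
        next = (StgIndStatic *)c->static_link;
        SET_INFO((StgClosure *)c, c->saved_info);  // back to pre-evaluated form
        c->saved_info = NULL;
    }
    *list = NULL;   // throw away the CAF list
}
#endif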
void
newCAF(StgClosure* caf)
{
  ACQUIRE_SM_LOCK;
  if (keepCAFs)
  {
    // If we are in GHCi _and_ we are using dynamic libraries,
    // then we can't redirect newCAF calls to newDynCAF (see below),
    // so we make newCAF behave almost like newDynCAF.
    // The dynamic libraries might be used by both the interpreted
    // program and GHCi itself, so they must not be reverted.
    // This also means that in GHCi with dynamic libraries, CAFs are not
    // garbage collected. If this turns out to be a problem, we could
    // do another hack here and do an address range test on caf to figure
    // out whether it is from a dynamic library.
    ((StgIndStatic *)caf)->saved_info  = (StgInfoTable *)caf->header.info;
    ((StgIndStatic *)caf)->static_link = caf_list;
    caf_list = caf;
  }
  else
  {
    /* Put this CAF on the mutable list for the old generation.
     * This is a HACK - the IND_STATIC closure doesn't really have
     * a mut_link field, but we pretend it has - in fact we re-use
     * the STATIC_LINK field for the time being, because when we
     * come to do a major GC we won't need the mut_link field
     * any more and can use it as a STATIC_LINK.
     */
    ((StgIndStatic *)caf)->saved_info = NULL;
    recordMutableGen(caf, oldest_gen);
  }

  RELEASE_SM_LOCK;
#ifdef PAR
  /* If we are PAR or DIST then we never forget a CAF */
  { globalAddr *newGA;
    //debugBelch("<##> Globalising CAF %08x %s",caf,info_type(caf));
    newGA = makeGlobal(caf,rtsTrue); /*given full weight*/
    ASSERT(newGA);
  }
#endif /* PAR */
}
// An alternate version of newCaf which is used for dynamically loaded
// object code in GHCi.  In this case we want to retain *all* CAFs in
// the object code, because they might be demanded at any time from an
// expression evaluated on the command line.
// Also, GHCi might want to revert CAFs, so we add these to the
// revertible_caf_list.
//
// The linker hackily arranges that references to newCaf from dynamic
// code end up pointing to newDynCAF.
void
newDynCAF(StgClosure *caf)
{
    ACQUIRE_SM_LOCK;

    ((StgIndStatic *)caf)->saved_info  = (StgInfoTable *)caf->header.info;
    ((StgIndStatic *)caf)->static_link = revertible_caf_list;
    revertible_caf_list = caf;

    RELEASE_SM_LOCK;
}
/* -----------------------------------------------------------------------------
   Nursery management.
   -------------------------------------------------------------------------- */
static bdescr *
allocNursery (step *stp, bdescr *tail, nat blocks)
{
    bdescr *bd;
    nat i;
    // Allocate a nursery: we allocate fresh blocks one at a time and
    // cons them on to the front of the list, not forgetting to update
    // the back pointer on the tail of the list to point to the new block.
    for (i = 0; i < blocks; i++) {
	/*
	   processNursery() in LdvProfile.c assumes that every block group in
	   the nursery contains only a single block. So, if a block group is
	   given multiple blocks, change processNursery() accordingly.
	*/
	bd = allocBlock();
	bd->link = tail;
	// double-link the nursery: we might need to insert blocks
	if (tail != NULL) {
	    tail->u.back = bd;
	}
	bd->step = stp;
	bd->gen_no = 0;
	bd->free = bd->start;
	tail = bd;
    }
    tail->u.back = NULL;
    return tail;
}
static void
assignNurseriesToCapabilities (void)
{
#ifdef SMP
    nat i;

    for (i = 0; i < n_nurseries; i++) {
	capabilities[i].r.rNursery        = &nurseries[i];
	capabilities[i].r.rCurrentNursery = nurseries[i].blocks;
    }
#else /* SMP */
    MainCapability.r.rNursery        = &nurseries[0];
    MainCapability.r.rCurrentNursery = nurseries[0].blocks;
#endif
}
void
allocNurseries( void )
{
    nat i;

    for (i = 0; i < n_nurseries; i++) {
	nurseries[i].blocks =
	    allocNursery(&nurseries[i], NULL,
			 RtsFlags.GcFlags.minAllocAreaSize);
	nurseries[i].n_blocks    = RtsFlags.GcFlags.minAllocAreaSize;
	nurseries[i].to_blocks   = NULL;
	nurseries[i].n_to_blocks = 0;
	/* hp, hpLim, hp_bd, to_space etc. aren't used in the nursery */
    }
    assignNurseriesToCapabilities();
}
void
resetNurseries( void )
{
    nat i;
    bdescr *bd;
    step *stp;

    for (i = 0; i < n_nurseries; i++) {
	stp = &nurseries[i];
	for (bd = stp->blocks; bd; bd = bd->link) {
	    bd->free = bd->start;
	    ASSERT(bd->gen_no == 0);
	    ASSERT(bd->step == stp);
	    IF_DEBUG(sanity, memset(bd->start, 0xaa, BLOCK_SIZE));
	}
    }
    assignNurseriesToCapabilities();
}
lnat
countNurseryBlocks (void)
{
    nat i;
    lnat blocks = 0;

    for (i = 0; i < n_nurseries; i++) {
	blocks += nurseries[i].n_blocks;
    }
    return blocks;
}
static void
resizeNursery ( step *stp, nat blocks )
{
  bdescr *bd;
  nat nursery_blocks;

  nursery_blocks = stp->n_blocks;
  if (nursery_blocks == blocks) return;

  if (nursery_blocks < blocks) {
      IF_DEBUG(gc, debugBelch("Increasing size of nursery to %d blocks\n",
			      blocks));
      stp->blocks = allocNursery(stp, stp->blocks, blocks-nursery_blocks);
  }
  else {
      bdescr *next_bd;

      IF_DEBUG(gc, debugBelch("Decreasing size of nursery to %d blocks\n",
			      blocks));

      bd = stp->blocks;
      while (nursery_blocks > blocks) {
	  next_bd = bd->link;
	  next_bd->u.back = NULL;
	  nursery_blocks -= bd->blocks; // might be a large block
	  freeGroup(bd);
	  bd = next_bd;
      }
      stp->blocks = bd;
      // might have gone just under, by freeing a large block, so make
      // up the difference.
      if (nursery_blocks < blocks) {
	  stp->blocks = allocNursery(stp, stp->blocks, blocks-nursery_blocks);
      }
  }

  stp->n_blocks = blocks;
  ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
}
// Resize each of the nurseries to the specified size.
//
void
resizeNurseries (nat blocks)
{
    nat i;
    for (i = 0; i < n_nurseries; i++) {
	resizeNursery(&nurseries[i], blocks);
    }
}
/* -----------------------------------------------------------------------------
   The allocate() interface

   allocate(n) always succeeds, and returns a chunk of memory n words
   long.  n can be larger than the size of a block if necessary, in
   which case a contiguous block group will be allocated.
   -------------------------------------------------------------------------- */
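
/* A minimal usage sketch of the contract above (illustrative only -- this
 * helper is not part of the original file).  Callers never check for NULL,
 * and a request of a block's worth of words or more comes back as one
 * contiguous block group.
 */
#if 0   /* illustration, not compiled */
static void
allocate_usage_sketch (void)
{
    StgPtr small = allocate(16);                /* 16 words, carved from small_alloc_list */
    StgPtr big   = allocate(2 * BLOCK_SIZE_W);  /* large object: gets its own block group */
    (void)small; (void)big;
}
#endif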
StgPtr
allocate( nat n )
{
    bdescr *bd;
    StgPtr p;

    ACQUIRE_SM_LOCK;

    TICK_ALLOC_HEAP_NOCTR(n);
    CCS_ALLOC(CCCS,n);

    /* big allocation (>LARGE_OBJECT_THRESHOLD) */
    /* ToDo: allocate directly into generation 1 */
    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
	bd = allocGroup(req_blocks);
	dbl_link_onto(bd, &g0s0->large_objects);
	g0s0->n_large_blocks += req_blocks;
	bd->gen_no  = 0;
	bd->step    = g0s0;
	bd->flags   = BF_LARGE;
	bd->free    = bd->start + n;
	alloc_blocks += req_blocks;
	RELEASE_SM_LOCK;
	return bd->start;

    /* small allocation (<LARGE_OBJECT_THRESHOLD) */
    } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
	if (small_alloc_list) {
	    small_alloc_list->free = alloc_Hp;
	}
	bd = allocBlock();
	bd->link = small_alloc_list;
	small_alloc_list = bd;
	bd->gen_no  = 0;
	bd->step    = g0s0;
	bd->flags   = 0;
	alloc_Hp    = bd->start;
	alloc_HpLim = bd->start + BLOCK_SIZE_W;
	alloc_blocks++;
    }

    p = alloc_Hp;
    alloc_Hp += n;
    RELEASE_SM_LOCK;
    return p;
}
lnat
allocated_bytes( void )
{
    lnat allocated;

    allocated = alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp);
    if (pinned_object_block != NULL) {
	allocated -= (pinned_object_block->start + BLOCK_SIZE_W) -
	    pinned_object_block->free;
    }

    return allocated;
}
void
tidyAllocateLists (void)
{
    if (small_alloc_list != NULL) {
	ASSERT(alloc_Hp >= small_alloc_list->start &&
	       alloc_Hp <= small_alloc_list->start + BLOCK_SIZE);
	small_alloc_list->free = alloc_Hp;
    }
}
/* ---------------------------------------------------------------------------
   Allocate a fixed/pinned object.

   We allocate small pinned objects into a single block, allocating a
   new block when the current one overflows.  The block is chained
   onto the large_object_list of generation 0 step 0.

   NOTE: The GC can't in general handle pinned objects.  This
   interface is only safe to use for ByteArrays, which have no
   pointers and don't require scavenging.  It works because the
   block's descriptor has the BF_LARGE flag set, so the block is
   treated as a large object and chained onto various lists, rather
   than the individual objects being copied.  However, when it comes
   to scavenge the block, the GC will only scavenge the first object.
   The reason is that the GC can't linearly scan a block of pinned
   objects at the moment (doing so would require using the
   mostly-copying techniques).  But since we're restricting ourselves
   to pinned ByteArrays, not scavenging is ok.

   This function is called by newPinnedByteArray# which immediately
   fills the allocated memory with a MutableByteArray#.
   ------------------------------------------------------------------------- */
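
/* Illustrative sketch (not part of the original file) of roughly how
 * newPinnedByteArray# consumes allocatePinned(): `len' is a byte count,
 * the rounding is the same trick used by stgAllocForGMP below, and the
 * header macros are the real ones used elsewhere in this file.
 */
#if 0   /* illustration, not compiled */
static StgArrWords *
pinned_bytearray_sketch (nat len)
{
    nat data = (len + sizeof(W_) - 1) / sizeof(W_);   /* round up to words */
    StgArrWords *arr =
	(StgArrWords *)allocatePinned(sizeofW(StgArrWords) + data);
    SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data);
    return arr;
}
#endif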
StgPtr
allocatePinned( nat n )
{
    StgPtr p;
    bdescr *bd = pinned_object_block;

    // If the request is for a large object, then allocate()
    // will give us a pinned object anyway.
    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	return allocate(n);
    }

    ACQUIRE_SM_LOCK;

    TICK_ALLOC_HEAP_NOCTR(n);
    CCS_ALLOC(CCCS,n);

    // we always return 8-byte aligned memory.  bd->free must be
    // 8-byte aligned to begin with, so we just round up n to
    // the nearest multiple of 8 bytes.
    if (sizeof(StgWord) == 4) {
	n = (n+1) & ~1;   // round up to an even number of 4-byte words
    }

    // If we don't have a block of pinned objects yet, or the current
    // one isn't large enough to hold the new object, allocate a new one.
    if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
	pinned_object_block = bd = allocBlock();
	dbl_link_onto(bd, &g0s0->large_objects);
	g0s0->n_large_blocks++;
	bd->gen_no = 0;
	bd->step   = g0s0;
	bd->flags  = BF_PINNED | BF_LARGE;
	bd->free   = bd->start;
	alloc_blocks++;
    }

    p = bd->free;
    bd->free += n;
    RELEASE_SM_LOCK;
    return p;
}
/* -----------------------------------------------------------------------------
   Allocation functions for GMP.

   These all use the allocate() interface - we can't have any garbage
   collection going on during a gmp operation, so we use allocate()
   which always succeeds.  The gmp operations which might need to
   allocate will ask the storage manager (via doYouWantToGC()) whether
   a garbage collection is required, in case we get into a loop doing
   only allocate() style allocation.
   -------------------------------------------------------------------------- */
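
/* For concreteness, the word-rounding done by stgAllocForGMP below, worked
 * through on a 4-byte word (illustrative numbers, not from the original
 * file): a request of 10 bytes needs (10 + 4 - 1) / 4 = 3 data words, so
 * the enclosing ARR_WORDS closure is sizeofW(StgArrWords) + 3 words total.
 */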
static void *
stgAllocForGMP (size_t size_in_bytes)
{
  StgArrWords* arr;
  nat data_size_in_words, total_size_in_words;

  /* round up to a whole number of words */
  data_size_in_words  = (size_in_bytes + sizeof(W_) - 1) / sizeof(W_);
  total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;

  /* allocate and fill it in. */
  arr = (StgArrWords *)allocate(total_size_in_words);
  SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_size_in_words);

  /* and return a ptr to the goods inside the array */
  return arr->payload;
}
static void *
stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
{
    size_t min_size;
    void *new_stuff_ptr = stgAllocForGMP(new_size);
    nat i = 0;
    char *p = (char *) ptr;
    char *q = (char *) new_stuff_ptr;

    /* copy only as many bytes as fit in the new allocation */
    min_size = old_size < new_size ? old_size : new_size;
    for (; i < min_size; i++, p++, q++) {
	*q = *p;
    }

    return(new_stuff_ptr);
}
static void
stgDeallocForGMP (void *ptr STG_UNUSED,
		  size_t size STG_UNUSED)
{
    /* easy for us: the garbage collector does the dealloc'n */
}
/* -----------------------------------------------------------------------------
 * Stats and stuff
 * -------------------------------------------------------------------------- */
/* -----------------------------------------------------------------------------
 * calcAllocated()
 *
 * Approximate how much we've allocated: number of blocks in the
 * nursery + blocks allocated via allocate() - unused nursery blocks.
 * This leaves a little slop at the end of each block, and doesn't
 * take into account large objects (ToDo).
 * -------------------------------------------------------------------------- */
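
/* Worked example (illustrative, not from the original file): with a 4-block
 * nursery in which one block is completely unused and the current block is
 * half full, the count below yields
 *     4*BLOCK_SIZE_W - 1*BLOCK_SIZE_W - BLOCK_SIZE_W/2
 * words, plus whatever allocated_bytes() reports for allocate()d memory.
 */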
lnat
calcAllocated( void )
{
  nat allocated;
  bdescr *bd;
  nat i;

  allocated = allocated_bytes();
  for (i = 0; i < n_nurseries; i++) {
      allocated += nurseries[i].n_blocks * BLOCK_SIZE_W;
  }

#ifdef SMP
  for (i = 0; i < n_nurseries; i++) {
      Capability *cap;
      for ( bd = capabilities[i].r.rCurrentNursery->link;
	    bd != NULL; bd = bd->link ) {
	  allocated -= BLOCK_SIZE_W;
      }
      cap = &capabilities[i];
      if (cap->r.rCurrentNursery->free <
	  cap->r.rCurrentNursery->start + BLOCK_SIZE_W) {
	  allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
	      - cap->r.rCurrentNursery->free;
      }
  }
#else
  {
      bdescr *current_nursery = MainCapability.r.rCurrentNursery;

      for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
	  allocated -= BLOCK_SIZE_W;
      }
      if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
	  allocated -= (current_nursery->start + BLOCK_SIZE_W)
	      - current_nursery->free;
      }
  }
#endif

  total_allocated += allocated;
  return allocated;
}
/* Approximate the amount of live data in the heap.  To be called just
 * after garbage collection (see GarbageCollect()).
 */
extern lnat
calcLive(void)
{
  nat g, s;
  lnat live = 0;
  step *stp;

  if (RtsFlags.GcFlags.generations == 1) {
      live = (g0s0->n_to_blocks - 1) * BLOCK_SIZE_W +
	  ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
      return live;
  }

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  /* approximate amount of live data (doesn't take into account slop
	   * at end of each block).
	   */
	  if (g == 0 && s == 0) {
	      continue;
	  }
	  stp = &generations[g].steps[s];
	  live += (stp->n_large_blocks + stp->n_blocks - 1) * BLOCK_SIZE_W;
	  if (stp->hp_bd != NULL) {
	      live += ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start)
		  / sizeof(W_);
	  }
      }
  }
  return live;
}
/* Approximate the number of blocks that will be needed at the next
 * garbage collection.
 *
 * Assume: all data currently live will remain live.  Steps that will
 * be collected next time will therefore need twice as many blocks
 * since all the data will be copied.
 */
extern lnat
calcNeeded(void)
{
    lnat needed = 0;
    nat g, s;
    step *stp;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
	for (s = 0; s < generations[g].n_steps; s++) {
	    if (g == 0 && s == 0) { continue; }
	    stp = &generations[g].steps[s];
	    if (generations[g].steps[0].n_blocks +
		generations[g].steps[0].n_large_blocks
		> generations[g].max_blocks
		&& stp->is_compacted == 0) {
		needed += 2 * stp->n_blocks;
	    } else {
		needed += stp->n_blocks;
	    }
	}
    }
    return needed;
}
/* -----------------------------------------------------------------------------
   Debugging

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
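
/* The invariant memInventory() checks, stated in one line (an illustrative
 * restatement, not from the original file):
 *
 *   step blocks + mut_list blocks + nursery blocks + allocate()d blocks
 *     + retainer/arena blocks + free-list blocks
 *     == mblocks_allocated * BLOCKS_PER_MBLOCK
 */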
static lnat
stepBlocks (step *stp)
{
    lnat total_blocks;
    bdescr *bd;

    total_blocks = stp->n_blocks;
    for (bd = stp->large_objects; bd; bd = bd->link) {
	total_blocks += bd->blocks;
	/* hack for megablock groups: they have an extra block or two in
	   the second and subsequent megablocks where the block
	   descriptors would normally go.
	*/
	if (bd->blocks > BLOCKS_PER_MBLOCK) {
	    total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
		* (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
	}
    }
    return total_blocks;
}
void
memInventory(void)
{
  nat g, s, i;
  step *stp;
  bdescr *bd;
  lnat total_blocks = 0, free_blocks = 0;

  /* count the blocks we currently have */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
	  total_blocks += bd->blocks;
      }
      for (s = 0; s < generations[g].n_steps; s++) {
	  if (g==0 && s==0) continue;
	  stp = &generations[g].steps[s];
	  total_blocks += stepBlocks(stp);
      }
  }

  for (i = 0; i < n_nurseries; i++) {
      total_blocks += stepBlocks(&nurseries[i]);
  }
  if (RtsFlags.GcFlags.generations == 1) {
      /* two-space collector has a to-space too :-) */
      total_blocks += g0s0->n_to_blocks;
  }

  /* any blocks held by allocate() */
  for (bd = small_alloc_list; bd; bd = bd->link) {
      total_blocks += bd->blocks;
  }
#ifdef PROFILING
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
      total_blocks += retainerStackBlocks();
  }
#endif
  // count the blocks allocated by the arena allocator
  total_blocks += arenaBlocks();

  /* count the blocks on the free list */
  free_blocks = countFreeList();
  if (total_blocks + free_blocks != mblocks_allocated *
      BLOCKS_PER_MBLOCK) {
      debugBelch("Blocks: %ld live + %ld free = %ld total (%ld around)\n",
		 total_blocks, free_blocks, total_blocks + free_blocks,
		 mblocks_allocated * BLOCKS_PER_MBLOCK);
  }

  ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
}
nat
countBlocks(bdescr *bd)
{
    nat n;
    for (n = 0; bd != NULL; bd = bd->link) {
	n += bd->blocks;
    }
    return n;
}
/* Full heap sanity check. */
void
checkSanity( void )
{
    nat g, s;

    if (RtsFlags.GcFlags.generations == 1) {
	checkHeap(g0s0->to_blocks);
	checkChain(g0s0->large_objects);
    } else {

	for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
	    for (s = 0; s < generations[g].n_steps; s++) {
		if (g == 0 && s == 0) { continue; }
		ASSERT(countBlocks(generations[g].steps[s].blocks)
		       == generations[g].steps[s].n_blocks);
		ASSERT(countBlocks(generations[g].steps[s].large_objects)
		       == generations[g].steps[s].n_large_blocks);
		checkHeap(generations[g].steps[s].blocks);
		checkChain(generations[g].steps[s].large_objects);
	    }
	    checkMutableList(generations[g].mut_list, g);
	}

	for (s = 0; s < n_nurseries; s++) {
	    ASSERT(countBlocks(nurseries[s].blocks)
		   == nurseries[s].n_blocks);
	    ASSERT(countBlocks(nurseries[s].large_objects)
		   == nurseries[s].n_large_blocks);
	}

	checkFreeListSanity();
    }
}
// handy function for use in gdb, because Bdescr() is inlined.
extern bdescr *_bdescr( StgPtr p );

bdescr *
_bdescr( StgPtr p )
{
    return Bdescr(p);
}

#endif /* DEBUG */