/* -----------------------------------------------------------------------------
 * (c) The GHC Team, 1998-2004
 *
 * Storage manager front end
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "Storage.h"
#include "BlockAlloc.h"
#include "OSThreads.h"
#include "Capability.h"
#include "RetainerProfile.h"    // for counting memory blocks (memInventory)
StgClosure *caf_list = NULL;
StgClosure *revertible_caf_list = NULL;

bdescr *small_alloc_list;       /* allocate()d small objects */
bdescr *pinned_object_block;    /* allocate pinned objects into this block */
nat alloc_blocks;               /* number of allocate()d blocks since GC */
nat alloc_blocks_lim;           /* approximate limit on alloc_blocks */

StgPtr alloc_Hp    = NULL;      /* next free byte in small_alloc_list */
StgPtr alloc_HpLim = NULL;      /* end of block at small_alloc_list */

generation *generations = NULL; /* all the generations */
generation *g0          = NULL; /* generation 0, for convenience */
generation *oldest_gen  = NULL; /* oldest generation, for convenience */
step *g0s0              = NULL; /* generation 0, step 0, for convenience */

ullong total_allocated = 0;     /* total memory allocated during run */

nat n_nurseries = 0;            /* == RtsFlags.ParFlags.nNodes, convenience */
step *nurseries = NULL;         /* array of nurseries, >1 only if SMP */
/*
 * Storage manager mutex: protects all the above state from
 * simultaneous access by two STG threads.
 */
Mutex sm_mutex = INIT_MUTEX_VAR;
static void *stgAllocForGMP   (size_t size_in_bytes);
static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
static void  stgDeallocForGMP (void *ptr, size_t size);
67 * Storage manager mutex
70 extern Mutex sm_mutex;
71 #define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex)
72 #define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex)
74 #define ACQUIRE_SM_LOCK
75 #define RELEASE_SM_LOCK
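/* Illustrative sketch (not part of the original RTS code): the macros above
 * are how code in this file serialises access to the storage-manager state
 * declared earlier.  The helper below is hypothetical and only demonstrates
 * the ACQUIRE/RELEASE bracketing; it is excluded from compilation.
 */
#if 0
static void
example_bump_alloc_limit (nat extra_blocks)
{
    ACQUIRE_SM_LOCK;                    // serialise against other STG threads
    alloc_blocks_lim += extra_blocks;   // mutate shared SM state while holding the lock
    RELEASE_SM_LOCK;
}
#endif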
static void
initStep (step *stp, int g, int s)
{
    stp->gen = &generations[g];
    stp->large_objects = NULL;
    stp->n_large_blocks = 0;
    stp->new_large_objects = NULL;
    stp->scavenged_large_objects = NULL;
    stp->n_scavenged_large_blocks = 0;
    stp->is_compacted = 0;
void
initStorage( void )
{
    if (generations != NULL) {
        // multi-init protection
        return;
    }

    /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
     * doing something reasonable.
     */
    ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info));
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
    ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
    if (RtsFlags.GcFlags.maxHeapSize != 0 &&
        RtsFlags.GcFlags.heapSizeSuggestion >
        RtsFlags.GcFlags.maxHeapSize) {
        RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
    }

    if (RtsFlags.GcFlags.maxHeapSize != 0 &&
        RtsFlags.GcFlags.minAllocAreaSize >
        RtsFlags.GcFlags.maxHeapSize) {
        errorBelch("maximum heap size (-M) is smaller than minimum alloc area size (-A)");
    }

    initBlockAllocator();

    initMutex(&sm_mutex);
    /* allocate generation info array */
    generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
                                               * sizeof(struct generation_),
                                               "initStorage: gens");
    /* Initialise all generations */
    for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
        gen = &generations[g];
        gen->mut_list = allocBlock();
        gen->collections = 0;
        gen->failed_promotions = 0;
    /* A couple of convenience pointers */
    g0 = &generations[0];
    oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
    /* Allocate step structures in each generation */
    if (RtsFlags.GcFlags.generations > 1) {
        /* Only for multiple generations */

        /* Oldest generation: one step */
        oldest_gen->n_steps = 1;
        oldest_gen->steps =
            stgMallocBytes(1 * sizeof(struct step_), "initStorage: last step");

        /* set up all except the oldest generation with the requested number
           of steps */
        for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
            generations[g].n_steps = RtsFlags.GcFlags.steps;
            generations[g].steps =
                stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct step_),
                                "initStorage: steps");
        }
    } else {
        /* single generation, i.e. a two-space collector */
        g0->steps = stgMallocBytes (sizeof(struct step_), "initStorage: steps");
#if defined(SMP)
    n_nurseries = RtsFlags.ParFlags.nNodes;
    nurseries = stgMallocBytes (n_nurseries * sizeof(struct step_),
                                "initStorage: nurseries");
#else
    nurseries = g0->steps;   // just share nurseries[0] with g0s0
#endif
    /* Initialise all steps */
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            initStep(&generations[g].steps[s], g, s);
        }
    }

    for (s = 0; s < n_nurseries; s++) {
        initStep(&nurseries[s], 0, s);
    }
    /* Set up the destination pointers in each younger gen. step */
    for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
        for (s = 0; s < generations[g].n_steps-1; s++) {
            generations[g].steps[s].to = &generations[g].steps[s+1];
        }
        generations[g].steps[s].to = &generations[g+1].steps[0];
    }
    oldest_gen->steps[0].to = &oldest_gen->steps[0];

    for (s = 0; s < n_nurseries; s++) {
        nurseries[s].to = generations[0].steps[0].to;
    }
    /* The oldest generation has one step. */
    if (RtsFlags.GcFlags.compact) {
        if (RtsFlags.GcFlags.generations == 1) {
            errorBelch("WARNING: compaction is incompatible with -G1; disabled");
        } else {
            oldest_gen->steps[0].is_compacted = 1;
        }
    }

    if (RtsFlags.GcFlags.generations == 1) {
        errorBelch("-G1 is incompatible with SMP");
    }

    if (RtsFlags.GcFlags.heapSizeSuggestion > 0) {
        errorBelch("-H<size> is incompatible with SMP");
    }
    /* generation 0 is special: that's the nursery */
    generations[0].max_blocks = 0;

    /* G0S0: the allocation area.  Policy: keep the allocation area
       small to begin with, even if we have a large suggested heap
       size.  Reason: we're going to do a major collection first, and we
       don't want it to be a big one.  This vague idea is borne out by
       rigorous experimental evidence.
     */
    g0s0 = &generations[0].steps[0];
    weak_ptr_list = NULL;
    revertible_caf_list = NULL;

    /* initialise the allocate() interface */
    small_alloc_list = NULL;
    alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

    /* Tell GNU multi-precision pkg about our custom alloc functions */
    mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
    IF_DEBUG(gc, statDescribeGens());
}

void
exitStorage (void)
{
    stat_exit(calcAllocated());
}
/* -----------------------------------------------------------------------------
   CAF management.

   The entry code for every CAF does the following:

      - builds a CAF_BLACKHOLE in the heap
      - pushes an update frame pointing to the CAF_BLACKHOLE
      - invokes UPD_CAF(), which:
          - calls newCaf, below
          - updates the CAF with a static indirection to the CAF_BLACKHOLE

   Why do we build a BLACKHOLE in the heap rather than just updating
   the thunk directly?  It's so that we only need one kind of update
   frame - otherwise we'd need a static version of the update frame too.

   newCaf() does the following:

      - it puts the CAF on the oldest generation's mut-once list.
        This is so that we can treat the CAF as a root when collecting
        younger generations.

   For GHCi, we have additional requirements when dealing with CAFs:

      - we must *retain* all dynamically-loaded CAFs ever entered,
        just in case we need them again.
      - we must be able to *revert* CAFs that have been evaluated, to
        their pre-evaluated form.

   To do this, we use an additional CAF list.  When newCaf() is
   called on a dynamically-loaded CAF, we add it to the CAF list
   instead of the old-generation mutable list, and save away its
   old info pointer (in caf->saved_info) for later reversion.

   To revert all the CAFs, we traverse the CAF list and reset the
   info pointer to caf->saved_info, then throw away the CAF list.
   (see GC.c:revertCAFs()).

   -------------------------------------------------------------------------- */
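/* Illustrative sketch (not part of the original RTS code): the reversion walk
 * described above is implemented by revertCAFs() in GC.c.  The hypothetical
 * function below only spells out the traversal: follow static_link, restore
 * each CAF's saved info pointer, then drop the list.  Excluded from compilation.
 */
#if 0
static void
example_revert_caf_list (void)
{
    StgIndStatic *c = (StgIndStatic *)caf_list;
    while (c != NULL) {
        StgIndStatic *next = (StgIndStatic *)c->static_link;
        ((StgClosure *)c)->header.info = c->saved_info;  // back to pre-evaluated form
        c->saved_info = NULL;
        c = next;
    }
    caf_list = NULL;    // throw away the CAF list
}
#endif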
void
newCAF(StgClosure* caf)
{
    // If we are in GHCi _and_ we are using dynamic libraries,
    // then we can't redirect newCAF calls to newDynCAF (see below),
    // so we make newCAF behave almost like newDynCAF.
    // The dynamic libraries might be used by both the interpreted
    // program and GHCi itself, so they must not be reverted.
    // This also means that in GHCi with dynamic libraries, CAFs are not
    // garbage collected.  If this turns out to be a problem, we could
    // do another hack here and do an address range test on caf to figure
    // out whether it is from a dynamic library.
    ((StgIndStatic *)caf)->saved_info  = (StgInfoTable *)caf->header.info;
    ((StgIndStatic *)caf)->static_link = caf_list;
    caf_list = caf;

    /* Put this CAF on the mutable list for the old generation.
     * This is a HACK - the IND_STATIC closure doesn't really have
     * a mut_link field, but we pretend it has - in fact we re-use
     * the STATIC_LINK field for the time being, because when we
     * come to do a major GC we won't need the mut_link field
     * any more and can use it as a STATIC_LINK.
     */
    ((StgIndStatic *)caf)->saved_info = NULL;
    recordMutableGen(caf, oldest_gen);
    /* If we are PAR or DIST then we never forget a CAF */
    //debugBelch("<##> Globalising CAF %08x %s",caf,info_type(caf));
    newGA = makeGlobal(caf, rtsTrue);   /* given full weight */
// An alternate version of newCaf which is used for dynamically loaded
// object code in GHCi.  In this case we want to retain *all* CAFs in
// the object code, because they might be demanded at any time from an
// expression evaluated on the command line.
// Also, GHCi might want to revert CAFs, so we add these to the
// revertible_caf_list.
//
// The linker hackily arranges that references to newCaf from dynamic
// code end up pointing to newDynCAF.
void
newDynCAF(StgClosure *caf)
{
    ((StgIndStatic *)caf)->saved_info  = (StgInfoTable *)caf->header.info;
    ((StgIndStatic *)caf)->static_link = revertible_caf_list;
    revertible_caf_list = caf;
/* -----------------------------------------------------------------------------
   Nursery management.
   -------------------------------------------------------------------------- */
static bdescr *
allocNursery (step *stp, bdescr *tail, nat blocks)
{
    // Allocate a nursery: we allocate fresh blocks one at a time and
    // cons them on to the front of the list, not forgetting to update
    // the back pointer on the tail of the list to point to the new block.
    for (i=0; i < blocks; i++) {
        /* processNursery() in LdvProfile.c assumes that every block group in
           the nursery contains only a single block.  So, if a block group is
           given multiple blocks, change processNursery() accordingly.
         */
        // double-link the nursery: we might need to insert blocks
        bd->free = bd->start;
static void
assignNurseriesToCapabilities (void)
{
#if defined(SMP)
    for (i = 0; i < n_nurseries; i++) {
        capabilities[i].r.rNursery        = &nurseries[i];
        capabilities[i].r.rCurrentNursery = nurseries[i].blocks;
    }
#else
    MainCapability.r.rNursery        = &nurseries[0];
    MainCapability.r.rCurrentNursery = nurseries[0].blocks;
#endif
void
allocNurseries( void )
{
    for (i = 0; i < n_nurseries; i++) {
        nurseries[i].blocks =
            allocNursery(&nurseries[i], NULL,
                         RtsFlags.GcFlags.minAllocAreaSize);
        nurseries[i].n_blocks    = RtsFlags.GcFlags.minAllocAreaSize;
        nurseries[i].to_blocks   = NULL;
        nurseries[i].n_to_blocks = 0;
        /* hp, hpLim, hp_bd, to_space etc. aren't used in the nursery */
    }
    assignNurseriesToCapabilities();
void
resetNurseries( void )
{
    for (i = 0; i < n_nurseries; i++) {
        stp = &nurseries[i];
        for (bd = stp->blocks; bd; bd = bd->link) {
            bd->free = bd->start;
            ASSERT(bd->gen_no == 0);
            ASSERT(bd->step == stp);
            IF_DEBUG(sanity, memset(bd->start, 0xaa, BLOCK_SIZE));
        }
    }
    assignNurseriesToCapabilities();
lnat
countNurseryBlocks (void)
{
    for (i = 0; i < n_nurseries; i++) {
        blocks += nurseries[i].n_blocks;
    }
    return blocks;
static void
resizeNursery ( step *stp, nat blocks )
{
    nursery_blocks = stp->n_blocks;
    if (nursery_blocks == blocks) return;

    if (nursery_blocks < blocks) {
        IF_DEBUG(gc, debugBelch("Increasing size of nursery to %d blocks\n",
                                blocks));
        stp->blocks = allocNursery(stp, stp->blocks, blocks-nursery_blocks);
    }
    else {
        IF_DEBUG(gc, debugBelch("Decreasing size of nursery to %d blocks\n",
                                blocks));

        while (nursery_blocks > blocks) {
            next_bd->u.back = NULL;
            nursery_blocks -= bd->blocks;   // might be a large block
        }

        // might have gone just under, by freeing a large block, so make
        // up the difference.
        if (nursery_blocks < blocks) {
            stp->blocks = allocNursery(stp, stp->blocks, blocks-nursery_blocks);
        }
    }

    stp->n_blocks = blocks;
    ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
// Resize each of the nurseries to the specified size.
void
resizeNurseries (nat blocks)
{
    for (i = 0; i < n_nurseries; i++) {
        resizeNursery(&nurseries[i], blocks);
    }
/* -----------------------------------------------------------------------------
   The allocate() interface

   allocate(n) always succeeds, and returns a chunk of memory n words
   long.  n can be larger than the size of a block if necessary, in
   which case a contiguous block group will be allocated.
   -------------------------------------------------------------------------- */
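/* Illustrative sketch (not part of the original RTS code): a typical caller
 * builds a heap object directly in the words returned by allocate(), much as
 * stgAllocForGMP() does further down this file.  The helper name below is
 * hypothetical; the snippet is excluded from compilation.
 */
#if 0
static StgArrWords *
example_alloc_byte_array (nat n_bytes)
{
    nat data_words  = (n_bytes + sizeof(W_) - 1) / sizeof(W_);  // round up to whole words
    nat total_words = sizeofW(StgArrWords) + data_words;
    StgArrWords *arr = (StgArrWords *)allocate(total_words);
    SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_words);    // payload size in words
    return arr;
}
#endif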
StgPtr
allocate( nat n )
{
    TICK_ALLOC_HEAP_NOCTR(n);

    /* big allocation (>LARGE_OBJECT_THRESHOLD) */
    /* ToDo: allocate directly into generation 1 */
    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
        bd = allocGroup(req_blocks);
        dbl_link_onto(bd, &g0s0->large_objects);
        g0s0->n_large_blocks += req_blocks;
        bd->flags = BF_LARGE;
        bd->free  = bd->start + n;
        alloc_blocks += req_blocks;

    /* small allocation (<LARGE_OBJECT_THRESHOLD) */
    } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
        if (small_alloc_list) {
            small_alloc_list->free = alloc_Hp;
        }
        bd->link = small_alloc_list;
        small_alloc_list = bd;
        alloc_Hp    = bd->start;
        alloc_HpLim = bd->start + BLOCK_SIZE_W;
lnat
allocated_bytes( void )
{
    allocated = alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp);
    if (pinned_object_block != NULL) {
        allocated -= (pinned_object_block->start + BLOCK_SIZE_W) -
                     pinned_object_block->free;
    }
    return allocated;
void
tidyAllocateLists (void)
{
    if (small_alloc_list != NULL) {
        ASSERT(alloc_Hp >= small_alloc_list->start &&
               alloc_Hp <= small_alloc_list->start + BLOCK_SIZE_W);
        small_alloc_list->free = alloc_Hp;
    }
/* ---------------------------------------------------------------------------
   Allocate a fixed/pinned object.

   We allocate small pinned objects into a single block, allocating a
   new block when the current one overflows.  The block is chained
   onto the large_object_list of generation 0 step 0.

   NOTE: The GC can't in general handle pinned objects.  This
   interface is only safe to use for ByteArrays, which have no
   pointers and don't require scavenging.  It works because the
   block's descriptor has the BF_LARGE flag set, so the block is
   treated as a large object and chained onto various lists, rather
   than the individual objects being copied.  However, when it comes
   to scavenge the block, the GC will only scavenge the first object.
   The reason is that the GC can't linearly scan a block of pinned
   objects at the moment (doing so would require using the
   mostly-copying techniques).  But since we're restricting ourselves
   to pinned ByteArrays, not scavenging is ok.

   This function is called by newPinnedByteArray# which immediately
   fills the allocated memory with a MutableByteArray#.
   ------------------------------------------------------------------------- */
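/* Illustrative sketch (not part of the original RTS code): this mirrors what
 * newPinnedByteArray# does on the Haskell side - grab pinned words and dress
 * them up as an ARR_WORDS closure whose payload the GC will never move.  The
 * helper name is hypothetical; the snippet is excluded from compilation.
 */
#if 0
static StgArrWords *
example_pinned_byte_array (nat n_bytes)
{
    nat data_words = (n_bytes + sizeof(W_) - 1) / sizeof(W_);
    StgArrWords *arr =
        (StgArrWords *)allocatePinned(sizeofW(StgArrWords) + data_words);
    SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_words);
    return arr;
}
#endif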
StgPtr
allocatePinned( nat n )
{
    bdescr *bd = pinned_object_block;

    // If the request is for a large object, then allocate()
    // will give us a pinned object anyway.
    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        return allocate(n);
    }

    TICK_ALLOC_HEAP_NOCTR(n);

    // we always return 8-byte aligned memory.  bd->free must be
    // 8-byte aligned to begin with, so we just round up n to
    // the nearest multiple of 8 bytes.
    if (sizeof(StgWord) == 4) {
        n = (n+1) & ~1;
    }

    // If we don't have a block of pinned objects yet, or the current
    // one isn't large enough to hold the new object, allocate a new one.
    if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
        pinned_object_block = bd = allocBlock();
        dbl_link_onto(bd, &g0s0->large_objects);
        bd->flags = BF_PINNED | BF_LARGE;
        bd->free  = bd->start;
    }
/* -----------------------------------------------------------------------------
   Allocation functions for GMP.

   These all use the allocate() interface - we can't have any garbage
   collection going on during a gmp operation, so we use allocate()
   which always succeeds.  The gmp operations which might need to
   allocate will ask the storage manager (via doYouWantToGC()) whether
   a garbage collection is required, in case we get into a loop doing
   only allocate() style allocation.
   -------------------------------------------------------------------------- */
static void *
stgAllocForGMP (size_t size_in_bytes)
{
    StgArrWords *arr;
    nat data_size_in_words, total_size_in_words;

    /* round up to a whole number of words */
    data_size_in_words  = (size_in_bytes + sizeof(W_) - 1) / sizeof(W_);
    total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;

    /* allocate and fill it in. */
    arr = (StgArrWords *)allocate(total_size_in_words);
    SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_size_in_words);

    /* and return a ptr to the goods inside the array */
    return arr->payload;
static void *
stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
{
    void *new_stuff_ptr = stgAllocForGMP(new_size);
    nat i = 0;
    char *p = (char *) ptr;
    char *q = (char *) new_stuff_ptr;

    for (; i < old_size; i++, p++, q++) {
        *q = *p;    // copy the old contents into the new array
    }

    return(new_stuff_ptr);
static void
stgDeallocForGMP (void *ptr STG_UNUSED,
                  size_t size STG_UNUSED)
{
    /* easy for us: the garbage collector does the dealloc'n */
}
/* -------------------------------------------------------------------------- */
/* -----------------------------------------------------------------------------
 * calcAllocated()
 *
 * Approximate how much we've allocated: number of blocks in the
 * nursery + blocks allocated via allocate() - unused nursery blocks.
 * This leaves a little slop at the end of each block, and doesn't
 * take into account large objects (ToDo).
 * -------------------------------------------------------------------------- */
lnat
calcAllocated( void )
{
    allocated = allocated_bytes();
    for (i = 0; i < n_nurseries; i++) {
        allocated += nurseries[i].n_blocks * BLOCK_SIZE_W;
    }

#if defined(SMP)
    for (i = 0; i < n_nurseries; i++) {
        for ( bd = capabilities[i].r.rCurrentNursery;
              bd != NULL; bd = bd->link ) {
            allocated -= BLOCK_SIZE_W;
        }
        cap = &capabilities[i];
        if (cap->r.rCurrentNursery->free <
            cap->r.rCurrentNursery->start + BLOCK_SIZE_W) {
            allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
                         - cap->r.rCurrentNursery->free;
        }
    }
#else
    bdescr *current_nursery = MainCapability.r.rCurrentNursery;

    for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
        allocated -= BLOCK_SIZE_W;
    }
    if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
        allocated -= (current_nursery->start + BLOCK_SIZE_W)
                     - current_nursery->free;
    }
#endif

    total_allocated += allocated;
    return allocated;
/* Approximate the amount of live data in the heap.  To be called just
 * after garbage collection (see GarbageCollect()).
 */
lnat
calcLive(void)
{
    if (RtsFlags.GcFlags.generations == 1) {
        live = (g0s0->n_to_blocks - 1) * BLOCK_SIZE_W +
            ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
        return live;
    }

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            /* approximate amount of live data (doesn't take into account slop
             * at end of each block).
             */
            if (g == 0 && s == 0) {
                continue;
            }
            stp = &generations[g].steps[s];
            live += (stp->n_large_blocks + stp->n_blocks - 1) * BLOCK_SIZE_W;
            if (stp->hp_bd != NULL) {
                live += ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start)
                        / sizeof(W_);
            }
/* Approximate the number of blocks that will be needed at the next
 * garbage collection.
 *
 * Assume: all data currently live will remain live.  Steps that will
 * be collected next time will therefore need twice as many blocks
 * since all the data will be copied.
 */
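/* Worked example of the estimate above (illustrative numbers, not from the
 * original source): if a step holds 100 blocks of live data and its
 * generation has gone over max_blocks (so it will be collected and its data
 * copied), it contributes 2 * 100 = 200 blocks to the result; a step that
 * will not be collected, or that is compacted in place, contributes just its
 * current 100 blocks.
 */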
lnat
calcNeeded(void)
{
    lnat needed = 0;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            if (g == 0 && s == 0) { continue; }
            stp = &generations[g].steps[s];
            if (generations[g].steps[0].n_blocks +
                generations[g].steps[0].n_large_blocks
                > generations[g].max_blocks
                && stp->is_compacted == 0) {
                needed += 2 * stp->n_blocks;
            } else {
                needed += stp->n_blocks;
            }
        }
    }
    return needed;
/* -----------------------------------------------------------------------------
   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */
static lnat
stepBlocks (step *stp)
{
    total_blocks = stp->n_blocks;
    for (bd = stp->large_objects; bd; bd = bd->link) {
        total_blocks += bd->blocks;
        /* hack for megablock groups: they have an extra block or two in
           the second and subsequent megablocks where the block
           descriptors would normally go.
         */
        if (bd->blocks > BLOCKS_PER_MBLOCK) {
            total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
                            * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
        }
    }
    return total_blocks;
void
memInventory(void)
{
    lnat total_blocks = 0, free_blocks = 0;

    /* count the blocks we currently have */

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
            total_blocks += bd->blocks;
        }
        for (s = 0; s < generations[g].n_steps; s++) {
            if (g==0 && s==0) continue;
            stp = &generations[g].steps[s];
            total_blocks += stepBlocks(stp);
        }
    }

    for (i = 0; i < n_nurseries; i++) {
        total_blocks += stepBlocks(&nurseries[i]);
    }

    if (RtsFlags.GcFlags.generations == 1) {
        /* two-space collector has a to-space too :-) */
        total_blocks += g0s0->n_to_blocks;
    }

    /* any blocks held by allocate() */
    for (bd = small_alloc_list; bd; bd = bd->link) {
        total_blocks += bd->blocks;
    }

    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
        total_blocks += retainerStackBlocks();
    }

    // count the blocks allocated by the arena allocator
    total_blocks += arenaBlocks();

    /* count the blocks on the free list */
    free_blocks = countFreeList();

    if (total_blocks + free_blocks != mblocks_allocated *
        BLOCKS_PER_MBLOCK) {
        debugBelch("Blocks: %ld live + %ld free = %ld total (%ld around)\n",
                   total_blocks, free_blocks, total_blocks + free_blocks,
                   mblocks_allocated * BLOCKS_PER_MBLOCK);
    }

    ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
nat
countBlocks(bdescr *bd)
{
    nat n;
    for (n=0; bd != NULL; bd=bd->link) {
        n += bd->blocks;
    }
    return n;
/* Full heap sanity check. */
void
checkSanity( void )
{
    if (RtsFlags.GcFlags.generations == 1) {
        checkHeap(g0s0->to_blocks);
        checkChain(g0s0->large_objects);
    } else {

        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            for (s = 0; s < generations[g].n_steps; s++) {
                if (g == 0 && s == 0) { continue; }
                ASSERT(countBlocks(generations[g].steps[s].blocks)
                       == generations[g].steps[s].n_blocks);
                ASSERT(countBlocks(generations[g].steps[s].large_objects)
                       == generations[g].steps[s].n_large_blocks);
                checkHeap(generations[g].steps[s].blocks);
                checkChain(generations[g].steps[s].large_objects);
                checkMutableList(generations[g].mut_list, g);
        for (s = 0; s < n_nurseries; s++) {
            ASSERT(countBlocks(nurseries[s].blocks)
                   == nurseries[s].n_blocks);
            ASSERT(countBlocks(nurseries[s].large_objects)
                   == nurseries[s].n_large_blocks);
        }

        checkFreeListSanity();
// handy function for use in gdb, because Bdescr() is inlined.
extern bdescr *_bdescr( StgPtr p );