+
+/* -----------------------------------------------------------------------------
+ * Statistics and debugging
+ * -------------------------------------------------------------------------- */
+
+/* -----------------------------------------------------------------------------
+ * calcAllocated()
+ *
+ * Approximate how much we've allocated: number of blocks in the
+ * nursery + blocks allocated via allocate() - unused nursery blocks.
+ * This leaves a little slop at the end of each block, and doesn't
+ * take into account large objects (ToDo).
+ * -------------------------------------------------------------------------- */
+
+lnat
+calcAllocated( void )
+{
+  lnat allocated;
+ bdescr *bd;
+
+#ifdef SMP
+ Capability *cap;
+
+ /* All tasks must be stopped. Can't assert that all the
+ capabilities are owned by the scheduler, though: one or more
+ tasks might have been stopped while they were running (non-main)
+ threads. */
+ /* ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes); */
+
+ allocated =
+ n_free_capabilities * RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W
+ + allocated_bytes();
+
+ for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+ for ( bd = cap->r.rCurrentNursery->link; bd != NULL; bd = bd->link ) {
+ allocated -= BLOCK_SIZE_W;
+ }
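+    /* subtract the words still unused at the end of the current,
+       partly-filled nursery block */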
+ if (cap->r.rCurrentNursery->free < cap->r.rCurrentNursery->start
+ + BLOCK_SIZE_W) {
+ allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
+ - cap->r.rCurrentNursery->free;
+ }
+ }
+
+#else /* !SMP */
+ bdescr *current_nursery = MainCapability.r.rCurrentNursery;
+
+ allocated = (g0s0->n_blocks * BLOCK_SIZE_W) + allocated_bytes();
+ for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
+ allocated -= BLOCK_SIZE_W;
+ }
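+  /* likewise, subtract the unused tail of the current nursery block */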
+ if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
+ allocated -= (current_nursery->start + BLOCK_SIZE_W)
+ - current_nursery->free;
+ }
+#endif
+
+ total_allocated += allocated;
+ return allocated;
+}
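+
+/* Note: each call also bumps total_allocated, so calcAllocated()
+ * should be sampled exactly once per accounting interval, presumably
+ * by the collector (cf. GarbageCollect()).  A hypothetical caller:
+ *
+ *     lnat alloc = calcAllocated();  // allocation since the last call
+ *     // ... report 'alloc' via the stats machinery, e.g. stat_endGC()
+ */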
+
+/* Approximate the amount of live data in the heap. To be called just
+ * after garbage collection (see GarbageCollect()).
+ */
+extern lnat
+calcLive(void)
+{
+ nat g, s;
+ lnat live = 0;
+ step *stp;
+
+ if (RtsFlags.GcFlags.generations == 1) {
+ live = (g0s0->n_to_blocks - 1) * BLOCK_SIZE_W +
+ ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
+ return live;
+ }
+
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+      /* approximate amount of live data (doesn't take into account slop
+       * at end of each block).  The block currently being allocated into
+       * (hp_bd) is counted by its used words below, hence the "- 1".
+       */
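+      /* the nursery (g0, s0) is empty just after a GC, so skip it */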
+ if (g == 0 && s == 0) {
+ continue;
+ }
+ stp = &generations[g].steps[s];
+ live += (stp->n_large_blocks + stp->n_blocks - 1) * BLOCK_SIZE_W;
+ if (stp->hp_bd != NULL) {
+ live += ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start)
+ / sizeof(W_);
+ }
+ }
+ }
+ return live;
+}
+
+/* Approximate the number of blocks that will be needed at the next
+ * garbage collection.
+ *
+ * Assume: all data currently live will remain live. Steps that will
+ * be collected next time will therefore need twice as many blocks
+ * since all the data will be copied.
+ */
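+/* A worked example, with made-up numbers: if a generation's step 0
+ * holds 120 blocks (ordinary plus large) against a max_blocks of 100,
+ * that generation is due for collection, so a non-compacted step of
+ * it with n_blocks == 50 counts as 100 blocks below; a compacted step
+ * still counts as 50.
+ */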
+extern lnat
+calcNeeded(void)
+{
+ lnat needed = 0;
+ nat g, s;
+ step *stp;
+
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ if (g == 0 && s == 0) { continue; }
+ stp = &generations[g].steps[s];
+ if (generations[g].steps[0].n_blocks +
+ generations[g].steps[0].n_large_blocks
+ > generations[g].max_blocks
+ && stp->is_compacted == 0) {
+ needed += 2 * stp->n_blocks;
+ } else {
+ needed += stp->n_blocks;
+ }
+ }
+ }
+ return needed;
+}
+
+/* -----------------------------------------------------------------------------
+ Debugging
+
+ memInventory() checks for memory leaks by counting up all the
+ blocks we know about and comparing that to the number of blocks
+ allegedly floating around in the system.
+ -------------------------------------------------------------------------- */
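+
+/* The invariant checked at the end is
+ *
+ *     total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK
+ *
+ * i.e. every block in every megablock we have taken from the OS must be
+ * accounted for: by some step, by allocate(), by the arenas (or, when
+ * profiling, the retainer stack), or by the free list.
+ */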
+
+#ifdef DEBUG
+
+void
+memInventory(void)
+{
+ nat g, s;
+ step *stp;
+ bdescr *bd;
+ lnat total_blocks = 0, free_blocks = 0;
+
+  /* count the blocks we currently have */
+
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ stp = &generations[g].steps[s];
+ total_blocks += stp->n_blocks;
+ if (RtsFlags.GcFlags.generations == 1) {
+ /* two-space collector has a to-space too :-) */
+ total_blocks += g0s0->n_to_blocks;
+ }
+ for (bd = stp->large_objects; bd; bd = bd->link) {
+ total_blocks += bd->blocks;
+ /* hack for megablock groups: they have an extra block or two in
+ the second and subsequent megablocks where the block
+ descriptors would normally go.
+ */
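+	/* For example, with 4k blocks in 1M megablocks (so
+	   MBLOCK_SIZE/BLOCK_SIZE == 256 and, say, BLOCKS_PER_MBLOCK ==
+	   252 -- illustrative numbers only), a 3-megablock group has
+	   bd->blocks == 252 + 2*256 == 764; 764/256 == 2, so we subtract
+	   (256 - 252) * 2 == 8 phantom blocks, leaving 756 == 3 *
+	   BLOCKS_PER_MBLOCK. */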
+ if (bd->blocks > BLOCKS_PER_MBLOCK) {
+ total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
+ * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
+ }
+ }
+ }
+ }
+
+ /* any blocks held by allocate() */
+ for (bd = small_alloc_list; bd; bd = bd->link) {
+ total_blocks += bd->blocks;
+ }
+
+#ifdef PROFILING
+ if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
+ total_blocks += retainerStackBlocks();
+ }
+#endif
+
+ // count the blocks allocated by the arena allocator
+ total_blocks += arenaBlocks();
+
+ /* count the blocks on the free list */
+ free_blocks = countFreeList();
+
+  if (total_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK) {
+    fprintf(stderr, "Blocks: %lu live + %lu free = %lu total (%lu around)\n",
+	    total_blocks, free_blocks, total_blocks + free_blocks,
+	    mblocks_allocated * BLOCKS_PER_MBLOCK);
+  }
+ }
+
+ ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
+}
+
+
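+/* Count the blocks in a chain of block descriptors, following the
+   link field. */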
+nat
+countBlocks(bdescr *bd)
+{
+ nat n;
+ for (n=0; bd != NULL; bd=bd->link) {
+ n += bd->blocks;
+ }
+ return n;
+}
+
+/* Full heap sanity check. */
+void
+checkSanity( void )
+{
+ nat g, s;
+
+ if (RtsFlags.GcFlags.generations == 1) {
+ checkHeap(g0s0->to_blocks);
+ checkChain(g0s0->large_objects);
+ } else {
+
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ ASSERT(countBlocks(generations[g].steps[s].blocks)
+ == generations[g].steps[s].n_blocks);
+ ASSERT(countBlocks(generations[g].steps[s].large_objects)
+ == generations[g].steps[s].n_large_blocks);
+ if (g == 0 && s == 0) { continue; }
+ checkHeap(generations[g].steps[s].blocks);
+ checkChain(generations[g].steps[s].large_objects);
+ if (g > 0) {
+ checkMutableList(generations[g].mut_list, g);
+ checkMutOnceList(generations[g].mut_once_list, g);
+ }
+ }
+ }
+ checkFreeListSanity();
+ }
+}
+
+// handy function for use in gdb, because Bdescr() is inlined.
+extern bdescr *_bdescr( StgPtr p );
+
+bdescr *
+_bdescr( StgPtr p )
+{
+ return Bdescr(p);
+}
+
+#endif