#include "RetainerProfile.h" // for counting memory blocks (memInventory)
#include "OSMem.h"
#include "Trace.h"
+#include "GC.h"
+#include "GCUtils.h"
+#include "Evac.h"
#include <stdlib.h>
#include <string.h>
generation *oldest_gen = NULL; /* oldest generation, for convenience */
step *g0s0 = NULL; /* generation 0, step 0, for convenience */
+nat total_steps = 0;
+step *all_steps = NULL; /* single array of steps */
+
ullong total_allocated = 0; /* total memory allocated during run */
nat n_nurseries = 0; /* == RtsFlags.ParFlags.nNodes, convenience */
initStep (step *stp, int g, int s)
{
stp->no = s;
+ stp->abs_no = RtsFlags.GcFlags.steps * g + s;
stp->blocks = NULL;
stp->n_blocks = 0;
stp->old_blocks = NULL;
stp->n_old_blocks = 0;
stp->gen = &generations[g];
stp->gen_no = g;
- stp->hp = NULL;
- stp->hpLim = NULL;
- stp->hp_bd = NULL;
- stp->scavd_hp = NULL;
- stp->scavd_hpLim = NULL;
- stp->scan = NULL;
- stp->scan_bd = NULL;
stp->large_objects = NULL;
stp->n_large_blocks = 0;
- stp->new_large_objects = NULL;
stp->scavenged_large_objects = NULL;
stp->n_scavenged_large_blocks = 0;
stp->is_compacted = 0;
stp->bitmap = NULL;
+#ifdef THREADED_RTS
+ initSpinLock(&stp->sync_todo);
+ initSpinLock(&stp->sync_large_objects);
+#endif
}
void
/* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
* doing something reasonable.
*/
- ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info));
+ /* We use the NOT_NULL variant, otherwise gcc warns that the test is always true */
+ ASSERT(LOOKS_LIKE_INFO_PTR_NOT_NULL(&stg_BLACKHOLE_info));
ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
* sizeof(struct generation_),
"initStorage: gens");
+ /* allocate all the steps into an array. It is important that we do
+ it this way, because we need the invariant that two step pointers
+ can be directly compared to see which is the oldest.
+ Remember that the last generation has only one step. */
+ total_steps = 1 + (RtsFlags.GcFlags.generations - 1) * RtsFlags.GcFlags.steps;
+ all_steps = stgMallocBytes(total_steps * sizeof(struct step_),
+ "initStorage: steps");
+
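+ /* A minimal sketch (the variable names are hypothetical) of the
+    invariant this buys us: because all_steps is laid out youngest
+    generation first, deciding which of two steps is older is a plain
+    pointer comparison, e.g.
+
+        if (stp1 < stp2) { ... stp1 is the younger step ... }
+
+    with no need to consult the gen_no/no fields. */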
/* Initialise all generations */
for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
gen = &generations[g];
/* Oldest generation: one step */
oldest_gen->n_steps = 1;
- oldest_gen->steps =
- stgMallocBytes(1 * sizeof(struct step_), "initStorage: last step");
+ oldest_gen->steps = all_steps + (RtsFlags.GcFlags.generations - 1)
+ * RtsFlags.GcFlags.steps;
/* set up all except the oldest generation with 2 steps */
for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
generations[g].n_steps = RtsFlags.GcFlags.steps;
- generations[g].steps =
- stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct step_),
- "initStorage: steps");
+ generations[g].steps = all_steps + g * RtsFlags.GcFlags.steps;
}
} else {
/* single generation, i.e. a two-space collector */
g0->n_steps = 1;
- g0->steps = stgMallocBytes (sizeof(struct step_), "initStorage: steps");
+ g0->steps = all_steps;
}
#ifdef THREADED_RTS
/* Tell GNU multi-precision pkg about our custom alloc functions */
mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
+#ifdef THREADED_RTS
+ initSpinLock(&gc_alloc_block_sync);
+ initSpinLock(&static_objects_sync);
+ initSpinLock(&recordMutableGen_sync);
+ whitehole_spin = 0;
+#endif
+
IF_DEBUG(gc, statDescribeGens());
RELEASE_SM_LOCK;
void
freeStorage (void)
{
- nat g;
-
- for(g = 0; g < RtsFlags.GcFlags.generations; g++)
- stgFree(generations[g].steps);
+ stgFree(g0s0); // frees all the steps
stgFree(generations);
freeAllMBlocks();
#if defined(THREADED_RTS)
if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_))
{
nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
+
+ // Attempting to allocate an object larger than maxHeapSize
+ // should definitely be disallowed. (bug #1791)
+ if (RtsFlags.GcFlags.maxHeapSize > 0 &&
+ req_blocks >= RtsFlags.GcFlags.maxHeapSize) {
+ heapOverflow();
+ }
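+      // A worked example, assuming the default 4K block size and that
+      // maxHeapSize is measured in blocks: asking for a 100MB object
+      // gives req_blocks == 25600, so under -M64m (16384 blocks) the
+      // check above calls heapOverflow() rather than letting
+      // allocGroup() below try to satisfy the request.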
+
bd = allocGroup(req_blocks);
dbl_link_onto(bd, &stp->large_objects);
stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
return allocated;
}
+// split N blocks off the start of the given bdescr, returning the
+// remainder as a new block group. We treat the remainder as if it
+// had been freshly allocated in generation 0.
+bdescr *
+splitLargeBlock (bdescr *bd, nat blocks)
+{
+ bdescr *new_bd;
+
+ // subtract the original number of blocks from the counter first
+ bd->step->n_large_blocks -= bd->blocks;
+
+ new_bd = splitBlockGroup (bd, blocks);
+
+ dbl_link_onto(new_bd, &g0s0->large_objects);
+ g0s0->n_large_blocks += new_bd->blocks;
+ new_bd->gen_no = g0s0->no;
+ new_bd->step = g0s0;
+ new_bd->flags = BF_LARGE;
+ new_bd->free = bd->free;
+
+ // add the new number of blocks to the counter.  Due to the gaps
+ // left for block descriptors, the new bd->blocks plus new_bd->blocks
+ // might not equal the original bd->blocks, which is why we do it
+ // this way (subtract the old count, then add the new one back).
+ bd->step->n_large_blocks += bd->blocks;
+
+ return new_bd;
+}
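+// A minimal usage sketch (the caller and 'n' are hypothetical): keep
+// the first n blocks of a large object and hand the tail back to
+// generation 0:
+//
+//     bdescr *tail = splitLargeBlock(bd, n);
+//     // bd now covers n blocks; tail has been linked onto
+//     // g0s0->large_objects with its counters already updated.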
+
/* -----------------------------------------------------------------------------
allocateLocal()
bd->gen_no = 0;
bd->step = cap->r.rNursery;
bd->flags = 0;
- alloc_blocks++;
+ // NO: alloc_blocks++;
+ // calcAllocated() uses the size of the nursery, and we've
+ // already bumped nursery->n_blocks above.
} else {
// we have a block in the nursery: take it and put
// it at the *front* of the nursery list, and use it
/* Approximate the amount of live data in the heap. To be called just
* after garbage collection (see GarbageCollect()).
*/
-extern lnat
-calcLive(void)
+lnat
+calcLiveBlocks(void)
{
nat g, s;
lnat live = 0;
step *stp;
if (RtsFlags.GcFlags.generations == 1) {
- return (g0s0->n_large_blocks + g0s0->n_blocks) * BLOCK_SIZE_W;
+ return g0s0->n_large_blocks + g0s0->n_blocks;
}
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
continue;
}
stp = &generations[g].steps[s];
- live += (stp->n_large_blocks + stp->n_blocks) * BLOCK_SIZE_W;
+ live += stp->n_large_blocks + stp->n_blocks;
}
}
return live;
}
+lnat
+countOccupied(bdescr *bd)
+{
+ lnat words;
+
+ words = 0;
+ for (; bd != NULL; bd = bd->link) {
+ words += bd->free - bd->start;
+ }
+ return words;
+}
+
+// Return an accurate count of the live data in the heap, excluding
+// generation 0.
+lnat
+calcLiveWords(void)
+{
+ nat g, s;
+ lnat live;
+ step *stp;
+
+ if (RtsFlags.GcFlags.generations == 1) {
+ return countOccupied(g0s0->blocks) + countOccupied(g0s0->large_objects);
+ }
+
+ live = 0;
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ if (g == 0 && s == 0) continue;
+ stp = &generations[g].steps[s];
+ live += countOccupied(stp->blocks) +
+ countOccupied(stp->large_objects);
+ }
+ }
+ return live;
+}
+
/* Approximate the number of blocks that will be needed at the next
* garbage collection.
*
for (s = 0; s < generations[g].n_steps; s++) {
if (g == 0 && s == 0) { continue; }
stp = &generations[g].steps[s];
- if (generations[g].steps[0].n_blocks +
- generations[g].steps[0].n_large_blocks
- > generations[g].max_blocks
- && stp->is_compacted == 0) {
- needed += 2 * stp->n_blocks;
+ if (g == 0 || // always collect gen 0
+ (generations[g].steps[0].n_blocks +
+ generations[g].steps[0].n_large_blocks
+ > generations[g].max_blocks
+ && stp->is_compacted == 0)) {
+ needed += 2 * stp->n_blocks + stp->n_large_blocks;
} else {
- needed += stp->n_blocks;
+ needed += stp->n_blocks + stp->n_large_blocks;
}
}
}
#ifdef DEBUG
+// Useful for finding partially full blocks in gdb
+void findSlop(bdescr *bd);
+void findSlop(bdescr *bd)
+{
+ lnat slop;
+
+ for (; bd != NULL; bd = bd->link) {
+ slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
+ if (slop > (1024/sizeof(W_))) {
+ debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
+ bd->start, bd, slop / (1024/sizeof(W_)));
+ }
+ }
+}
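+// A sketch of how this might be used from gdb (the block lists named
+// below are just examples):
+//
+//     (gdb) call findSlop(generations[1].steps[0].blocks)
+//     (gdb) call findSlop(g0s0->blocks)
+//
+// which reports every block group wasting more than 1KB.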
+
nat
countBlocks(bdescr *bd)
{
}
void
-memInventory(void)
+memInventory (rtsBool show)
{
nat g, s, i;
step *stp;
lnat nursery_blocks, retainer_blocks,
arena_blocks, exec_blocks;
lnat live_blocks = 0, free_blocks = 0;
+ rtsBool leak;
// count the blocks we current have
live_blocks += nursery_blocks +
+ retainer_blocks + arena_blocks + exec_blocks;
- if (live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK)
+#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
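+  // e.g. with the default 4K block size MB(n) is just
+  // (n * 4096) / (1024*1024), since the sizeof(W_) factors cancel;
+  // so MB(256) == 1.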
+
+ leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
+ if (show || leak)
{
- debugBelch("Memory leak detected\n");
+ if (leak) {
+ debugBelch("Memory leak detected:\n");
+ } else {
+ debugBelch("Memory inventory:\n");
+ }
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- debugBelch(" gen %d blocks : %4lu\n", g, gen_blocks[g]);
+ debugBelch(" gen %d blocks : %5lu blocks (%lu MB)\n", g,
+ gen_blocks[g], MB(gen_blocks[g]));
+ }
+ debugBelch(" nursery : %5lu blocks (%lu MB)\n",
+ nursery_blocks, MB(nursery_blocks));
+ debugBelch(" retainer : %5lu blocks (%lu MB)\n",
+ retainer_blocks, MB(retainer_blocks));
+ debugBelch(" arena blocks : %5lu blocks (%lu MB)\n",
+ arena_blocks, MB(arena_blocks));
+ debugBelch(" exec : %5lu blocks (%lu MB)\n",
+ exec_blocks, MB(exec_blocks));
+ debugBelch(" free : %5lu blocks (%lu MB)\n",
+ free_blocks, MB(free_blocks));
+ debugBelch(" total : %5lu blocks (%lu MB)\n",
+ live_blocks + free_blocks, MB(live_blocks+free_blocks));
+ if (leak) {
+ debugBelch("\n in system : %5lu blocks (%lu MB)\n",
+ mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
}
- debugBelch(" nursery : %4lu\n", nursery_blocks);
- debugBelch(" retainer : %4lu\n", retainer_blocks);
- debugBelch(" arena blocks : %4lu\n", arena_blocks);
- debugBelch(" exec : %4lu\n", exec_blocks);
- debugBelch(" free : %4lu\n", free_blocks);
- debugBelch(" total : %4lu\n\n", live_blocks + free_blocks);
- debugBelch(" in system : %4lu\n", mblocks_allocated * BLOCKS_PER_MBLOCK);
- ASSERT(0);
}
}