/* -----------------------------------------------------------------------------
*
- * (c) The GHC Team, 1998-2006
+ * (c) The GHC Team, 1998-2008
*
* Storage manager front end
*
#include "RetainerProfile.h" // for counting memory blocks (memInventory)
#include "OSMem.h"
#include "Trace.h"
+#include "GC.h"
+#include "Evac.h"
#include <stdlib.h>
#include <string.h>
+#include "ffi.h"
+
/*
* All these globals require sm_mutex to access in THREADED_RTS mode.
*/
StgClosure *revertible_caf_list = NULL;
rtsBool keepCAFs;
-bdescr *small_alloc_list; /* allocate()d small objects */
bdescr *pinned_object_block; /* allocate pinned objects into this block */
nat alloc_blocks; /* number of allocate()d blocks since GC */
nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
-StgPtr alloc_Hp = NULL; /* next free byte in small_alloc_list */
-StgPtr alloc_HpLim = NULL; /* end of block at small_alloc_list */
+static bdescr *exec_block;
generation *generations = NULL; /* all the generations */
generation *g0 = NULL; /* generation 0, for convenience */
generation *oldest_gen = NULL; /* oldest generation, for convenience */
step *g0s0 = NULL; /* generation 0, step 0, for convenience */
+nat total_steps = 0;
+step *all_steps = NULL; /* single array of steps */
+
ullong total_allocated = 0; /* total memory allocated during run */
nat n_nurseries = 0; /* == RtsFlags.ParFlags.nNodes, convenience */
initStep (step *stp, int g, int s)
{
stp->no = s;
+ stp->abs_no = RtsFlags.GcFlags.steps * g + s;
stp->blocks = NULL;
stp->n_blocks = 0;
+ stp->n_words = 0;
+ stp->live_estimate = 0;
stp->old_blocks = NULL;
stp->n_old_blocks = 0;
stp->gen = &generations[g];
stp->gen_no = g;
- stp->hp = NULL;
- stp->hpLim = NULL;
- stp->hp_bd = NULL;
- stp->scavd_hp = NULL;
- stp->scavd_hpLim = NULL;
- stp->scan = NULL;
- stp->scan_bd = NULL;
stp->large_objects = NULL;
stp->n_large_blocks = 0;
- stp->new_large_objects = NULL;
stp->scavenged_large_objects = NULL;
stp->n_scavenged_large_blocks = 0;
- stp->is_compacted = 0;
+ stp->mark = 0;
+ stp->compact = 0;
stp->bitmap = NULL;
+#ifdef THREADED_RTS
+ initSpinLock(&stp->sync_todo);
+ initSpinLock(&stp->sync_large_objects);
+#endif
+ stp->threads = END_TSO_QUEUE;
+ stp->old_threads = END_TSO_QUEUE;
}
void
return;
}
+ initMBlocks();
+
/* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
* doing something reasonable.
*/
- ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info));
+  /* We use the NOT_NULL variant, otherwise gcc warns that the test is always true */
+ ASSERT(LOOKS_LIKE_INFO_PTR_NOT_NULL((StgWord)&stg_BLACKHOLE_info));
ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
* sizeof(struct generation_),
"initStorage: gens");
+ /* allocate all the steps into an array. It is important that we do
+ it this way, because we need the invariant that two step pointers
+ can be directly compared to see which is the oldest.
+ Remember that the last generation has only one step. */
+ total_steps = 1 + (RtsFlags.GcFlags.generations - 1) * RtsFlags.GcFlags.steps;
+ all_steps = stgMallocBytes(total_steps * sizeof(struct step_),
+ "initStorage: steps");
+
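+  /* Illustrative note (a sketch, not code from this patch): in the new GC
+     code this invariant lets us write, roughly,
+
+         if (bd->step < evac_step) { ... object is in a younger step ... }
+
+     instead of comparing (gen_no, step_no) pairs, because steps at lower
+     addresses in all_steps belong to younger generations. */
+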
/* Initialise all generations */
for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
gen = &generations[g];
gen->no = g;
gen->mut_list = allocBlock();
gen->collections = 0;
+ gen->par_collections = 0;
gen->failed_promotions = 0;
gen->max_blocks = 0;
}
/* Oldest generation: one step */
oldest_gen->n_steps = 1;
- oldest_gen->steps =
- stgMallocBytes(1 * sizeof(struct step_), "initStorage: last step");
+ oldest_gen->steps = all_steps + (RtsFlags.GcFlags.generations - 1)
+ * RtsFlags.GcFlags.steps;
/* set up all except the oldest generation with 2 steps */
for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
generations[g].n_steps = RtsFlags.GcFlags.steps;
- generations[g].steps =
- stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct step_),
- "initStorage: steps");
+ generations[g].steps = all_steps + g * RtsFlags.GcFlags.steps;
}
} else {
/* single generation, i.e. a two-space collector */
g0->n_steps = 1;
- g0->steps = stgMallocBytes (sizeof(struct step_), "initStorage: steps");
+ g0->steps = all_steps;
}
#ifdef THREADED_RTS
n_nurseries = n_capabilities;
- nurseries = stgMallocBytes (n_nurseries * sizeof(struct step_),
- "initStorage: nurseries");
#else
n_nurseries = 1;
- nurseries = g0->steps; // just share nurseries[0] with g0s0
-#endif
+#endif
+ nurseries = stgMallocBytes (n_nurseries * sizeof(struct step_),
+ "initStorage: nurseries");
/* Initialise all steps */
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
}
}
-#ifdef THREADED_RTS
for (s = 0; s < n_nurseries; s++) {
initStep(&nurseries[s], 0, s);
}
-#endif
/* Set up the destination pointers in each younger gen. step */
for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
}
oldest_gen->steps[0].to = &oldest_gen->steps[0];
-#ifdef THREADED_RTS
for (s = 0; s < n_nurseries; s++) {
nurseries[s].to = generations[0].steps[0].to;
}
-#endif
/* The oldest generation has one step. */
- if (RtsFlags.GcFlags.compact) {
+ if (RtsFlags.GcFlags.compact || RtsFlags.GcFlags.sweep) {
if (RtsFlags.GcFlags.generations == 1) {
- errorBelch("WARNING: compaction is incompatible with -G1; disabled");
+ errorBelch("WARNING: compact/sweep is incompatible with -G1; disabled");
} else {
- oldest_gen->steps[0].is_compacted = 1;
+ oldest_gen->steps[0].mark = 1;
+ if (RtsFlags.GcFlags.compact)
+ oldest_gen->steps[0].compact = 1;
}
}
-#ifdef THREADED_RTS
- if (RtsFlags.GcFlags.generations == 1) {
- errorBelch("-G1 is incompatible with -threaded");
- stg_exit(EXIT_FAILURE);
- }
-#endif
-
- /* generation 0 is special: that's the nursery */
generations[0].max_blocks = 0;
+ g0s0 = &generations[0].steps[0];
- /* G0S0: the allocation area. Policy: keep the allocation area
+ /* The allocation area. Policy: keep the allocation area
* small to begin with, even if we have a large suggested heap
* size. Reason: we're going to do a major collection first, and we
* don't want it to be a big one. This vague idea is borne out by
* rigorous experimental evidence.
*/
- g0s0 = &generations[0].steps[0];
-
allocNurseries();
weak_ptr_list = NULL;
revertible_caf_list = NULL;
/* initialise the allocate() interface */
- small_alloc_list = NULL;
alloc_blocks = 0;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
+ exec_block = NULL;
+
/* Tell GNU multi-precision pkg about our custom alloc functions */
mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
+#ifdef THREADED_RTS
+ initSpinLock(&gc_alloc_block_sync);
+ whitehole_spin = 0;
+#endif
+
+ N = 0;
+
+ initGcThreads();
+
IF_DEBUG(gc, statDescribeGens());
RELEASE_SM_LOCK;
void
freeStorage (void)
{
- nat g;
-
- for(g = 0; g < RtsFlags.GcFlags.generations; g++)
- stgFree(generations[g].steps);
+    stgFree(g0s0); // frees the whole all_steps array (g0s0 points at its start)
stgFree(generations);
freeAllMBlocks();
#if defined(THREADED_RTS)
closeMutex(&sm_mutex);
closeMutex(&atomic_modify_mutvar_mutex);
#endif
+ stgFree(nurseries);
}
/* -----------------------------------------------------------------------------
* any more and can use it as a STATIC_LINK.
*/
((StgIndStatic *)caf)->saved_info = NULL;
- recordMutableGen(caf, oldest_gen);
+ recordMutableGen(caf, oldest_gen->no);
}
RELEASE_SM_LOCK;
resizeNurseriesFixed(blocks / n_nurseries);
}
+
+/* -----------------------------------------------------------------------------
+ move_TSO is called to update the TSO structure after it has been
+ moved from one place to another.
+ -------------------------------------------------------------------------- */
+
+void
+move_TSO (StgTSO *src, StgTSO *dest)
+{
+ ptrdiff_t diff;
+
+ // relocate the stack pointer...
+ diff = (StgPtr)dest - (StgPtr)src; // In *words*
+ dest->sp = (StgPtr)dest->sp + diff;
+}
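+
+/* Illustrative caller sketch (approximate; see threadStackOverflow in
+   Schedule.c): the TSO is copied bitwise into a larger allocation and then
+   fixed up:
+
+       memcpy(new_tso, tso, ...);     // bitwise copy of the old TSO
+       move_TSO(tso, new_tso);        // relocate new_tso->sp into the copy
+*/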
+
/* -----------------------------------------------------------------------------
The allocate() interface
- allocate(n) always succeeds, and returns a chunk of memory n words
- long. n can be larger than the size of a block if necessary, in
- which case a contiguous block group will be allocated.
+   The allocateInGen() function allocates memory directly into a specific
+ generation. It always succeeds, and returns a chunk of memory n
+ words long. n can be larger than the size of a block if necessary,
+ in which case a contiguous block group will be allocated.
+
+ allocate(n) is equivalent to allocateInGen(g0).
-------------------------------------------------------------------------- */
StgPtr
-allocate( nat n )
+allocateInGen (generation *g, lnat n)
{
+ step *stp;
bdescr *bd;
- StgPtr p;
+ StgPtr ret;
ACQUIRE_SM_LOCK;
-
+
TICK_ALLOC_HEAP_NOCTR(n);
CCS_ALLOC(CCCS,n);
- /* big allocation (>LARGE_OBJECT_THRESHOLD) */
- /* ToDo: allocate directly into generation 1 */
- if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
+ stp = &g->steps[0];
+
+ if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_))
+ {
+ lnat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
+
+ // Attempting to allocate an object larger than maxHeapSize
+ // should definitely be disallowed. (bug #1791)
+ if (RtsFlags.GcFlags.maxHeapSize > 0 &&
+ req_blocks >= RtsFlags.GcFlags.maxHeapSize) {
+ heapOverflow();
+ // heapOverflow() doesn't exit (see #2592), but we aren't
+ // in a position to do a clean shutdown here: we
+ // either have to allocate the memory or exit now.
+ // Allocating the memory would be bad, because the user
+ // has requested that we not exceed maxHeapSize, so we
+ // just exit.
+ stg_exit(EXIT_HEAPOVERFLOW);
+ }
+
bd = allocGroup(req_blocks);
- dbl_link_onto(bd, &g0s0->large_objects);
- g0s0->n_large_blocks += req_blocks;
- bd->gen_no = 0;
- bd->step = g0s0;
+ dbl_link_onto(bd, &stp->large_objects);
+ stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
+ alloc_blocks += bd->blocks;
+ bd->gen_no = g->no;
+ bd->step = stp;
bd->flags = BF_LARGE;
bd->free = bd->start + n;
- alloc_blocks += req_blocks;
- RELEASE_SM_LOCK;
- return bd->start;
-
- /* small allocation (<LARGE_OBJECT_THRESHOLD) */
- } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
- if (small_alloc_list) {
- small_alloc_list->free = alloc_Hp;
- }
- bd = allocBlock();
- bd->link = small_alloc_list;
- small_alloc_list = bd;
- bd->gen_no = 0;
- bd->step = g0s0;
- bd->flags = 0;
- alloc_Hp = bd->start;
- alloc_HpLim = bd->start + BLOCK_SIZE_W;
- alloc_blocks++;
+ ret = bd->start;
}
-
- p = alloc_Hp;
- alloc_Hp += n;
+ else
+ {
+        // small allocation (<LARGE_OBJECT_THRESHOLD)
+ bd = stp->blocks;
+ if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
+ bd = allocBlock();
+ bd->gen_no = g->no;
+ bd->step = stp;
+ bd->flags = 0;
+ bd->link = stp->blocks;
+ stp->blocks = bd;
+ stp->n_blocks++;
+ alloc_blocks++;
+ }
+ ret = bd->free;
+ bd->free += n;
+ }
+
RELEASE_SM_LOCK;
- return p;
+
+ return ret;
+}
+
+StgPtr
+allocate (lnat n)
+{
+ return allocateInGen(g0,n);
}
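+
+/* Illustrative usage sketch ("info" and "n" below are placeholders, not
+   names from this file).  A caller in RTS C code does roughly:
+
+       StgPtr p = allocate(n);                    // n words; always succeeds
+       SET_HDR((StgClosure *)p, info, CCS_SYSTEM);
+       // ... fill in the payload before the next GC point ...
+
+   allocateInGen(oldest_gen, n) behaves the same, but places the object
+   directly in the given generation rather than in g0. */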
lnat
{
lnat allocated;
- allocated = alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp);
+ allocated = alloc_blocks * BLOCK_SIZE_W;
if (pinned_object_block != NULL) {
allocated -= (pinned_object_block->start + BLOCK_SIZE_W) -
pinned_object_block->free;
return allocated;
}
-void
-tidyAllocateLists (void)
+// Split N blocks off the front of the given bdescr; the remainder is
+// returned as a new block group, which we treat as if it had been
+// freshly allocated in generation 0.
+bdescr *
+splitLargeBlock (bdescr *bd, nat blocks)
{
- if (small_alloc_list != NULL) {
- ASSERT(alloc_Hp >= small_alloc_list->start &&
- alloc_Hp <= small_alloc_list->start + BLOCK_SIZE);
- small_alloc_list->free = alloc_Hp;
- }
+ bdescr *new_bd;
+
+ // subtract the original number of blocks from the counter first
+ bd->step->n_large_blocks -= bd->blocks;
+
+ new_bd = splitBlockGroup (bd, blocks);
+
+ dbl_link_onto(new_bd, &g0s0->large_objects);
+ g0s0->n_large_blocks += new_bd->blocks;
+ new_bd->gen_no = g0s0->no;
+ new_bd->step = g0s0;
+ new_bd->flags = BF_LARGE;
+ new_bd->free = bd->free;
+ ASSERT(new_bd->free <= new_bd->start + new_bd->blocks * BLOCK_SIZE_W);
+
+ // add the new number of blocks to the counter. Due to the gaps
+    // for block descriptors, new_bd->blocks + bd->blocks might not be
+ // equal to the original bd->blocks, which is why we do it this way.
+ bd->step->n_large_blocks += bd->blocks;
+
+ return new_bd;
}
/* -----------------------------------------------------------------------------
-------------------------------------------------------------------------- */
StgPtr
-allocateLocal (Capability *cap, nat n)
+allocateLocal (Capability *cap, lnat n)
{
bdescr *bd;
StgPtr p;
- TICK_ALLOC_HEAP_NOCTR(n);
- CCS_ALLOC(CCCS,n);
-
- /* big allocation (>LARGE_OBJECT_THRESHOLD) */
- /* ToDo: allocate directly into generation 1 */
if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
- ACQUIRE_SM_LOCK;
- bd = allocGroup(req_blocks);
- dbl_link_onto(bd, &g0s0->large_objects);
- g0s0->n_large_blocks += req_blocks;
- bd->gen_no = 0;
- bd->step = g0s0;
- bd->flags = BF_LARGE;
- bd->free = bd->start + n;
- alloc_blocks += req_blocks;
- RELEASE_SM_LOCK;
- return bd->start;
-
- /* small allocation (<LARGE_OBJECT_THRESHOLD) */
- } else {
+ return allocateInGen(g0,n);
+ }
- bd = cap->r.rCurrentAlloc;
- if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
+ /* small allocation (<LARGE_OBJECT_THRESHOLD) */
- // The CurrentAlloc block is full, we need to find another
- // one. First, we try taking the next block from the
- // nursery:
- bd = cap->r.rCurrentNursery->link;
-
- if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
- // The nursery is empty, or the next block is already
- // full: allocate a fresh block (we can't fail here).
- ACQUIRE_SM_LOCK;
- bd = allocBlock();
- cap->r.rNursery->n_blocks++;
- RELEASE_SM_LOCK;
- bd->gen_no = 0;
- bd->step = cap->r.rNursery;
- bd->flags = 0;
- } else {
- // we have a block in the nursery: take it and put
- // it at the *front* of the nursery list, and use it
- // to allocate() from.
- cap->r.rCurrentNursery->link = bd->link;
- if (bd->link != NULL) {
- bd->link->u.back = cap->r.rCurrentNursery;
- }
- }
- dbl_link_onto(bd, &cap->r.rNursery->blocks);
- cap->r.rCurrentAlloc = bd;
- IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
- }
+ TICK_ALLOC_HEAP_NOCTR(n);
+ CCS_ALLOC(CCCS,n);
+
+ bd = cap->r.rCurrentAlloc;
+ if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
+
+ // The CurrentAlloc block is full, we need to find another
+ // one. First, we try taking the next block from the
+ // nursery:
+ bd = cap->r.rCurrentNursery->link;
+
+ if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
+ // The nursery is empty, or the next block is already
+ // full: allocate a fresh block (we can't fail here).
+ ACQUIRE_SM_LOCK;
+ bd = allocBlock();
+ cap->r.rNursery->n_blocks++;
+ RELEASE_SM_LOCK;
+ bd->gen_no = 0;
+ bd->step = cap->r.rNursery;
+ bd->flags = 0;
+ // NO: alloc_blocks++;
+ // calcAllocated() uses the size of the nursery, and we've
+        // already bumped nursery->n_blocks above.  We'll GC
+ // pretty quickly now anyway, because MAYBE_GC() will
+ // notice that CurrentNursery->link is NULL.
+ } else {
+ // we have a block in the nursery: take it and put
+ // it at the *front* of the nursery list, and use it
+ // to allocate() from.
+ cap->r.rCurrentNursery->link = bd->link;
+ if (bd->link != NULL) {
+ bd->link->u.back = cap->r.rCurrentNursery;
+ }
+ }
+ dbl_link_onto(bd, &cap->r.rNursery->blocks);
+ cap->r.rCurrentAlloc = bd;
+ IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
}
p = bd->free;
bd->free += n;
------------------------------------------------------------------------- */
StgPtr
-allocatePinned( nat n )
+allocatePinned( lnat n )
{
StgPtr p;
bdescr *bd = pinned_object_block;
}
/* -----------------------------------------------------------------------------
+ Write Barriers
+ -------------------------------------------------------------------------- */
+
+/*
This is the write barrier for MUT_VARs, a.k.a. IORefs. A
MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
and is put on the mutable list.
- -------------------------------------------------------------------------- */
-
+*/
void
dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
{
}
}
+// Setting a TSO's link field with a write barrier.
+// It is *not* necessary to call this function when
+// * setting the link field to END_TSO_QUEUE
+// * putting a TSO on the blackhole_queue
+// * setting the link field of the currently running TSO, as it
+// will already be dirty.
+void
+setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
+{
+ bdescr *bd;
+ if ((tso->flags & (TSO_DIRTY|TSO_LINK_DIRTY)) == 0) {
+ tso->flags |= TSO_LINK_DIRTY;
+ bd = Bdescr((StgPtr)tso);
+ if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
+ }
+ tso->_link = target;
+}
+
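+// Illustrative call-site sketch (approximate; the real callers are the run
+// queue operations in Schedule.h): appending a thread goes through the
+// barrier so that an old-generation TSO whose link changes lands on the
+// mutable list, roughly
+//
+//     setTSOLink(cap, cap->run_queue_tl, tso);
+//     cap->run_queue_tl = tso;
+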
+void
+dirty_TSO (Capability *cap, StgTSO *tso)
+{
+ bdescr *bd;
+ if ((tso->flags & (TSO_DIRTY|TSO_LINK_DIRTY)) == 0) {
+ bd = Bdescr((StgPtr)tso);
+ if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
+ }
+ tso->flags |= TSO_DIRTY;
+}
+
+/*
+   This is the write barrier for MVARs.  An MVAR_CLEAN object is not
+   on the mutable list; an MVAR_DIRTY is.  When written to, an
+   MVAR_CLEAN turns into an MVAR_DIRTY and is put on the mutable list.
+   The check for MVAR_CLEAN is inlined at the call site for speed;
+   this really does make a difference on concurrency-heavy benchmarks
+   such as Chameneos and cheap-concurrency.
+*/
+void
+dirty_MVAR(StgRegTable *reg, StgClosure *p)
+{
+ Capability *cap = regTableToCapability(reg);
+ bdescr *bd;
+ bd = Bdescr((StgPtr)p);
+ if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
+}
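+
+/* Illustrative call-site sketch (approximate; the real check lives in the
+   Cmm primops for MVar operations, and "cap"/"mvar" below come from the
+   caller's context):
+
+       if (GET_INFO((StgClosure *)mvar) == &stg_MVAR_CLEAN_info) {
+           dirty_MVAR(&cap->r, (StgClosure *)mvar);
+       }
+       SET_INFO((StgClosure *)mvar, &stg_MVAR_DIRTY_info);
+       // ... then update the MVAR's value/queue fields ...
+*/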
+
/* -----------------------------------------------------------------------------
Allocation functions for GMP.
static void *
stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
{
+ size_t min_size;
void *new_stuff_ptr = stgAllocForGMP(new_size);
nat i = 0;
char *p = (char *) ptr;
char *q = (char *) new_stuff_ptr;
- for (; i < old_size; i++, p++, q++) {
+ min_size = old_size < new_size ? old_size : new_size;
+ for (; i < min_size; i++, p++, q++) {
*q = *p;
}
/* Approximate the amount of live data in the heap. To be called just
* after garbage collection (see GarbageCollect()).
*/
-extern lnat
-calcLive(void)
+lnat
+calcLiveBlocks(void)
{
nat g, s;
lnat live = 0;
step *stp;
if (RtsFlags.GcFlags.generations == 1) {
- return (g0s0->n_large_blocks + g0s0->n_blocks) * BLOCK_SIZE_W;
+ return g0s0->n_large_blocks + g0s0->n_blocks;
}
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
continue;
}
stp = &generations[g].steps[s];
- live += (stp->n_large_blocks + stp->n_blocks) * BLOCK_SIZE_W;
+ live += stp->n_large_blocks + stp->n_blocks;
}
}
return live;
}
+lnat
+countOccupied(bdescr *bd)
+{
+ lnat words;
+
+ words = 0;
+ for (; bd != NULL; bd = bd->link) {
+ ASSERT(bd->free <= bd->start + bd->blocks * BLOCK_SIZE_W);
+ words += bd->free - bd->start;
+ }
+ return words;
+}
+
+// Return an accurate count of the live data in the heap, excluding
+// generation 0.
+lnat
+calcLiveWords(void)
+{
+ nat g, s;
+ lnat live;
+ step *stp;
+
+ if (RtsFlags.GcFlags.generations == 1) {
+ return g0s0->n_words + countOccupied(g0s0->large_objects);
+ }
+
+ live = 0;
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ if (g == 0 && s == 0) continue;
+ stp = &generations[g].steps[s];
+ live += stp->n_words + countOccupied(stp->large_objects);
+ }
+ }
+ return live;
+}
+
/* Approximate the number of blocks that will be needed at the next
* garbage collection.
*
for (s = 0; s < generations[g].n_steps; s++) {
if (g == 0 && s == 0) { continue; }
stp = &generations[g].steps[s];
- if (generations[g].steps[0].n_blocks +
- generations[g].steps[0].n_large_blocks
- > generations[g].max_blocks
- && stp->is_compacted == 0) {
- needed += 2 * stp->n_blocks;
- } else {
- needed += stp->n_blocks;
+
+ // we need at least this much space
+ needed += stp->n_blocks + stp->n_large_blocks;
+
+ // any additional space needed to collect this gen next time?
+ if (g == 0 || // always collect gen 0
+ (generations[g].steps[0].n_blocks +
+ generations[g].steps[0].n_large_blocks
+ > generations[g].max_blocks)) {
+ // we will collect this gen next time
+ if (stp->mark) {
+ // bitmap:
+ needed += stp->n_blocks / BITS_IN(W_);
+ // mark stack:
+ needed += stp->n_blocks / 100;
+ }
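+	    // (illustrative: on a 64-bit platform a step of 6400 blocks that
+	    //  will be marked adds 6400/64 = 100 blocks of bitmap plus
+	    //  6400/100 = 64 blocks of mark stack to the estimate)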
+ if (stp->compact) {
+ continue; // no additional space needed for compaction
+ } else {
+ needed += stp->n_blocks;
+ }
}
}
}
in the page, and when the page is emptied (all objects on the page
are free) we free the page again, not forgetting to make it
non-executable.
+
+ TODO: The inability to handle objects bigger than BLOCK_SIZE_W means that
+ the linker cannot use allocateExec for loading object code files
+ on Windows. Once allocateExec can handle larger objects, the linker
+ should be modified to use allocateExec instead of VirtualAlloc.
------------------------------------------------------------------------- */
-static bdescr *exec_block;
+#if defined(linux_HOST_OS)
+
+// On Linux we need to use libffi for allocating executable memory,
+// because it knows how to work around the restrictions put in place
+// by SELinux.
+
+void *allocateExec (nat bytes, void **exec_ret)
+{
+ void **ret, **exec;
+ ACQUIRE_SM_LOCK;
+ ret = ffi_closure_alloc (sizeof(void *) + (size_t)bytes, (void**)&exec);
+ RELEASE_SM_LOCK;
+ if (ret == NULL) return ret;
+ *ret = ret; // save the address of the writable mapping, for freeExec().
+ *exec_ret = exec + 1;
+ return (ret + 1);
+}
+
+// freeExec gets passed the executable address, not the writable address.
+void freeExec (void *addr)
+{
+ void *writable;
+ writable = *((void**)addr - 1);
+ ACQUIRE_SM_LOCK;
+ ffi_closure_free (writable);
+ RELEASE_SM_LOCK
+}
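+
+/* Illustrative usage sketch (approximate; the real client is the adjustor
+   code in Adjustor.c, and "code"/"codeSize" are placeholders):
+
+       void *exec_addr;
+       void *wr = allocateExec(codeSize, &exec_addr);
+       memcpy(wr, code, codeSize);     // write through the writable mapping
+       // ... later, call through exec_addr; when finished:
+       freeExec(exec_addr);            // pass the *executable* address
+*/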
+
+#else
-void *allocateExec (nat bytes)
+void *allocateExec (nat bytes, void **exec_ret)
{
void *ret;
nat n;
exec_block->free += n + 1;
RELEASE_SM_LOCK
+ *exec_ret = ret;
return ret;
}
bd->gen_no -= *(StgPtr)p;
*(StgPtr)p = 0;
- // Free the block if it is empty, but not if it is the block at
- // the head of the queue.
- if (bd->gen_no == 0 && bd != exec_block) {
- debugTrace(DEBUG_gc, "free exec block %p", bd->start);
- if (bd->u.back) {
- bd->u.back->link = bd->link;
- } else {
- exec_block = bd->link;
- }
- if (bd->link) {
- bd->link->u.back = bd->u.back;
- }
- setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsFalse);
- freeGroup(bd);
+ if (bd->gen_no == 0) {
+ // Free the block if it is empty, but not if it is the block at
+ // the head of the queue.
+ if (bd != exec_block) {
+ debugTrace(DEBUG_gc, "free exec block %p", bd->start);
+ dbl_link_remove(bd, &exec_block);
+ setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsFalse);
+ freeGroup(bd);
+ } else {
+ bd->free = bd->start;
+ }
}
RELEASE_SM_LOCK
}
+#endif /* linux_HOST_OS */
+
/* -----------------------------------------------------------------------------
Debugging
#ifdef DEBUG
-static lnat
-stepBlocks (step *stp)
+// Useful for finding partially full blocks in gdb
+void findSlop(bdescr *bd);
+void findSlop(bdescr *bd)
{
- lnat total_blocks;
- bdescr *bd;
+ lnat slop;
+
+ for (; bd != NULL; bd = bd->link) {
+ slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
+ if (slop > (1024/sizeof(W_))) {
+ debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
+ bd->start, bd, slop / (1024/sizeof(W_)));
+ }
+ }
+}
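+
+// Illustrative gdb usage (approximate):
+//     (gdb) call findSlop(generations[1].steps[0].blocks)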
- total_blocks = stp->n_blocks;
- total_blocks += stp->n_old_blocks;
- for (bd = stp->large_objects; bd; bd = bd->link) {
- total_blocks += bd->blocks;
- /* hack for megablock groups: they have an extra block or two in
- the second and subsequent megablocks where the block
- descriptors would normally go.
- */
+nat
+countBlocks(bdescr *bd)
+{
+ nat n;
+ for (n=0; bd != NULL; bd=bd->link) {
+ n += bd->blocks;
+ }
+ return n;
+}
+
+// (*1) Just like countBlocks, except that we adjust the count for a
+// megablock group so that it doesn't include the extra few blocks
+// that would be taken up by block descriptors in the second and
+// subsequent megablocks.  This is so that the count tallies with the
+// number of blocks allocated in the system, for memInventory().
+static nat
+countAllocdBlocks(bdescr *bd)
+{
+ nat n;
+ for (n=0; bd != NULL; bd=bd->link) {
+ n += bd->blocks;
+ // hack for megablock groups: see (*1) above
if (bd->blocks > BLOCKS_PER_MBLOCK) {
- total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
+ n -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
* (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
}
}
- return total_blocks;
+ return n;
+}
+
+static lnat
+stepBlocks (step *stp)
+{
+ ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
+ ASSERT(countBlocks(stp->large_objects) == stp->n_large_blocks);
+ return stp->n_blocks + stp->n_old_blocks +
+ countAllocdBlocks(stp->large_objects);
}
+// If memInventory() calculates that we have a memory leak, this
+// function will try to find the block(s) that are leaking by marking
+// all the ones that we know about, and search through memory to find
+// blocks that are not marked. In the debugger this can help to give
+// us a clue about what kind of block leaked. In the future we might
+// annotate blocks with their allocation site to give more helpful
+// info.
+static void
+findMemoryLeak (void)
+{
+ nat g, s, i;
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (i = 0; i < n_capabilities; i++) {
+ markBlocks(capabilities[i].mut_lists[g]);
+ }
+ markBlocks(generations[g].mut_list);
+ for (s = 0; s < generations[g].n_steps; s++) {
+ markBlocks(generations[g].steps[s].blocks);
+ markBlocks(generations[g].steps[s].large_objects);
+ }
+ }
+
+ for (i = 0; i < n_nurseries; i++) {
+ markBlocks(nurseries[i].blocks);
+ markBlocks(nurseries[i].large_objects);
+ }
+
+#ifdef PROFILING
+ // TODO:
+ // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
+ // markRetainerBlocks();
+ // }
+#endif
+
+ // count the blocks allocated by the arena allocator
+ // TODO:
+ // markArenaBlocks();
+
+ // count the blocks containing executable memory
+ markBlocks(exec_block);
+
+ reportUnmarkedBlocks();
+}
+
+
void
-memInventory(void)
+memInventory (rtsBool show)
{
nat g, s, i;
step *stp;
- bdescr *bd;
lnat gen_blocks[RtsFlags.GcFlags.generations];
- lnat nursery_blocks, allocate_blocks, retainer_blocks,
+ lnat nursery_blocks, retainer_blocks,
arena_blocks, exec_blocks;
lnat live_blocks = 0, free_blocks = 0;
+ rtsBool leak;
// count the blocks we current have
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
gen_blocks[g] = 0;
for (i = 0; i < n_capabilities; i++) {
- for (bd = capabilities[i].mut_lists[g]; bd != NULL; bd = bd->link) {
- gen_blocks[g] += bd->blocks;
- }
+ gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
}
- for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
- gen_blocks[g] += bd->blocks;
- }
+ gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
for (s = 0; s < generations[g].n_steps; s++) {
- if (g==0 && s==0) continue;
stp = &generations[g].steps[s];
gen_blocks[g] += stepBlocks(stp);
}
for (i = 0; i < n_nurseries; i++) {
nursery_blocks += stepBlocks(&nurseries[i]);
}
-#ifdef THREADED_RTS
- // We put pinned object blocks in g0s0, so better count blocks there too.
- gen_blocks[0] += stepBlocks(g0s0);
-#endif
-
- /* any blocks held by allocate() */
- allocate_blocks = 0;
- for (bd = small_alloc_list; bd; bd = bd->link) {
- allocate_blocks += bd->blocks;
- }
retainer_blocks = 0;
#ifdef PROFILING
arena_blocks = arenaBlocks();
// count the blocks containing executable memory
- exec_blocks = 0;
- for (bd = exec_block; bd; bd = bd->link) {
- exec_blocks += bd->blocks;
- }
+ exec_blocks = countAllocdBlocks(exec_block);
/* count the blocks on the free list */
free_blocks = countFreeList();
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
live_blocks += gen_blocks[g];
}
- live_blocks += nursery_blocks + allocate_blocks
+ live_blocks += nursery_blocks +
+ retainer_blocks + arena_blocks + exec_blocks;
- if (live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK)
+#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
+
+ leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
+
+ if (show || leak)
{
- debugBelch("Memory leak detected\n");
+ if (leak) {
+ debugBelch("Memory leak detected:\n");
+ } else {
+ debugBelch("Memory inventory:\n");
+ }
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- debugBelch(" gen %d blocks : %4lu\n", g, gen_blocks[g]);
+ debugBelch(" gen %d blocks : %5lu blocks (%lu MB)\n", g,
+ gen_blocks[g], MB(gen_blocks[g]));
+ }
+ debugBelch(" nursery : %5lu blocks (%lu MB)\n",
+ nursery_blocks, MB(nursery_blocks));
+ debugBelch(" retainer : %5lu blocks (%lu MB)\n",
+ retainer_blocks, MB(retainer_blocks));
+ debugBelch(" arena blocks : %5lu blocks (%lu MB)\n",
+ arena_blocks, MB(arena_blocks));
+ debugBelch(" exec : %5lu blocks (%lu MB)\n",
+ exec_blocks, MB(exec_blocks));
+ debugBelch(" free : %5lu blocks (%lu MB)\n",
+ free_blocks, MB(free_blocks));
+ debugBelch(" total : %5lu blocks (%lu MB)\n",
+ live_blocks + free_blocks, MB(live_blocks+free_blocks));
+ if (leak) {
+ debugBelch("\n in system : %5lu blocks (%lu MB)\n",
+ mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
}
- debugBelch(" nursery : %4lu\n", nursery_blocks);
- debugBelch(" allocate() : %4lu\n", allocate_blocks);
- debugBelch(" retainer : %4lu\n", retainer_blocks);
- debugBelch(" arena blocks : %4lu\n", arena_blocks);
- debugBelch(" exec : %4lu\n", exec_blocks);
- debugBelch(" free : %4lu\n", free_blocks);
- debugBelch(" total : %4lu\n\n", live_blocks + free_blocks);
- debugBelch(" in system : %4lu\n", mblocks_allocated + BLOCKS_PER_MBLOCK);
- ASSERT(0);
}
-}
-
-nat
-countBlocks(bdescr *bd)
-{
- nat n;
- for (n=0; bd != NULL; bd=bd->link) {
- n += bd->blocks;
- }
- return n;
+ if (leak) {
+ debugBelch("\n");
+ findMemoryLeak();
+ }
+ ASSERT(n_alloc_blocks == live_blocks);
+ ASSERT(!leak);
}
+
/* Full heap sanity check. */
void
checkSanity( void )
== generations[g].steps[s].n_large_blocks);
checkHeap(generations[g].steps[s].blocks);
checkChain(generations[g].steps[s].large_objects);
- if (g > 0) {
- checkMutableList(generations[g].mut_list, g);
- }
}
}
checkFreeListSanity();
}
+
+#if defined(THREADED_RTS)
+ // check the stacks too in threaded mode, because we don't do a
+ // full heap sanity check in this case (see checkHeap())
+ checkMutableLists(rtsTrue);
+#else
+ checkMutableLists(rtsFalse);
+#endif
}
/* Nursery sanity check */