#include <stdlib.h>
#include <string.h>
+#include "ffi.h"
+
/*
* All these globals must be accessed with sm_mutex held in THREADED_RTS mode.
*/
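+/*
+ * For illustration only: a hypothetical caller in a THREADED_RTS build
+ * would bracket any update to these counters with the storage-manager
+ * lock, e.g.
+ *
+ *     ACQUIRE_SM_LOCK;
+ *     alloc_blocks += 1;      // made-up update, not from this file
+ *     RELEASE_SM_LOCK;
+ */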
nat alloc_blocks; /* number of allocate()d blocks since GC */
nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
+static bdescr *exec_block;
+
generation *generations = NULL; /* all the generations */
generation *g0 = NULL; /* generation 0, for convenience */
generation *oldest_gen = NULL; /* oldest generation, for convenience */
stp->blocks = NULL;
stp->n_blocks = 0;
stp->n_words = 0;
+ stp->live_estimate = 0;
stp->old_blocks = NULL;
stp->n_old_blocks = 0;
stp->gen = &generations[g];
stp->n_large_blocks = 0;
stp->scavenged_large_objects = NULL;
stp->n_scavenged_large_blocks = 0;
- stp->is_compacted = 0;
+ stp->mark = 0;
+ stp->compact = 0;
stp->bitmap = NULL;
#ifdef THREADED_RTS
initSpinLock(&stp->sync_todo);
initSpinLock(&stp->sync_large_objects);
#endif
+ stp->threads = END_TSO_QUEUE;
+ stp->old_threads = END_TSO_QUEUE;
}
void
* doing something reasonable.
*/
/* We use the NOT_NULL variant, otherwise gcc warns that the test is always true */
- ASSERT(LOOKS_LIKE_INFO_PTR_NOT_NULL(&stg_BLACKHOLE_info));
+ ASSERT(LOOKS_LIKE_INFO_PTR_NOT_NULL((StgWord)&stg_BLACKHOLE_info));
ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
}
/* The oldest generation has one step. */
- if (RtsFlags.GcFlags.compact) {
+ if (RtsFlags.GcFlags.compact || RtsFlags.GcFlags.sweep) {
if (RtsFlags.GcFlags.generations == 1) {
- errorBelch("WARNING: compaction is incompatible with -G1; disabled");
+ errorBelch("WARNING: compact/sweep is incompatible with -G1; disabled");
} else {
- oldest_gen->steps[0].is_compacted = 1;
+ oldest_gen->steps[0].mark = 1;
+ if (RtsFlags.GcFlags.compact)
+ oldest_gen->steps[0].compact = 1;
}
}
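+ // Summary of the flag settings above: requesting compaction gives the
+ // oldest step mark=1, compact=1 (mark, then compact in place);
+ // requesting sweep alone gives mark=1, compact=0 (mark/sweep); with
+ // neither, both stay 0 and the step is collected by copying.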
alloc_blocks = 0;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
+ exec_block = NULL;
+
/* Tell GNU multi-precision pkg about our custom alloc functions */
mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
resizeNurseriesFixed(blocks / n_nurseries);
}
+
+/* -----------------------------------------------------------------------------
+ move_TSO is called to update the TSO structure after it has been
+ moved from one place to another.
+ -------------------------------------------------------------------------- */
+
+void
+move_TSO (StgTSO *src, StgTSO *dest)
+{
+ ptrdiff_t diff;
+
+ // relocate the stack pointer...
+ diff = (StgPtr)dest - (StgPtr)src; // In *words*
+ dest->sp = (StgPtr)dest->sp + diff;
+}
+
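+/*
+ * Hypothetical call site, for illustration only: after copying a TSO to
+ * a new location,
+ *
+ *     memcpy(dest, src, size_in_words * sizeof(W_));  // size made up
+ *     move_TSO(src, dest);
+ *
+ * dest->sp then points into dest's own stack instead of back into src.
+ */
+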
/* -----------------------------------------------------------------------------
The allocate() interface
-------------------------------------------------------------------------- */
StgPtr
-allocateInGen (generation *g, nat n)
+allocateInGen (generation *g, lnat n)
{
step *stp;
bdescr *bd;
if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_))
{
- nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
+ lnat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
// Attempting to allocate an object larger than maxHeapSize
// should definitely be disallowed. (bug #1791)
}
StgPtr
-allocate (nat n)
+allocate (lnat n)
{
return allocateInGen(g0,n);
}
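+
+// Example use of allocate() (a sketch only; the info table name and the
+// payload size are made up):
+//
+//     StgPtr p = allocate(sizeofW(StgHeader) + 2);
+//     SET_HDR((StgClosure *)p, &stg_EXAMPLE_info, CCS_SYSTEM);
+//
+// allocate() does not zero the memory, so the caller must initialise the
+// whole object before the next GC can run.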
return allocated;
}
-// split N blocks off the start of the given bdescr, returning the
-// remainder as a new block group. We treat the remainder as if it
+// split N blocks off the front of the given bdescr, returning the
+// new block group. We treat the remainder as if it
// had been freshly allocated in generation 0.
bdescr *
splitLargeBlock (bdescr *bd, nat blocks)
new_bd->step = g0s0;
new_bd->flags = BF_LARGE;
new_bd->free = bd->free;
+ ASSERT(new_bd->free <= new_bd->start + new_bd->blocks * BLOCK_SIZE_W);
// add the new number of blocks to the counter. Due to the gaps
// for block descriptors, new_bd->blocks + bd->blocks might not be
bd->step->n_large_blocks += bd->blocks;
return new_bd;
-}
+}
/* -----------------------------------------------------------------------------
allocateLocal()
-------------------------------------------------------------------------- */
StgPtr
-allocateLocal (Capability *cap, nat n)
+allocateLocal (Capability *cap, lnat n)
{
bdescr *bd;
StgPtr p;
------------------------------------------------------------------------- */
StgPtr
-allocatePinned( nat n )
+allocatePinned( lnat n )
{
StgPtr p;
bdescr *bd = pinned_object_block;
dirty_TSO (Capability *cap, StgTSO *tso)
{
bdescr *bd;
- if ((tso->flags & TSO_DIRTY) == 0) {
- tso->flags |= TSO_DIRTY;
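+ // If either dirty bit is already set then the TSO is already on the
+ // mutable list of its generation, so we only record it the first time
+ // it is dirtied (and never for generation 0).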
+ if ((tso->flags & (TSO_DIRTY|TSO_LINK_DIRTY)) == 0) {
bd = Bdescr((StgPtr)tso);
if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
}
+ tso->flags |= TSO_DIRTY;
}
/*
static void *
stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
{
+ size_t min_size;
void *new_stuff_ptr = stgAllocForGMP(new_size);
nat i = 0;
char *p = (char *) ptr;
char *q = (char *) new_stuff_ptr;
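+ // GMP may shrink an allocation as well as grow it, so copy only
+ // min(old_size, new_size) bytes to avoid overrunning either buffer.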
- for (; i < old_size; i++, p++, q++) {
+ min_size = old_size < new_size ? old_size : new_size;
+ for (; i < min_size; i++, p++, q++) {
*q = *p;
}
words = 0;
for (; bd != NULL; bd = bd->link) {
+ ASSERT(bd->free <= bd->start + bd->blocks * BLOCK_SIZE_W);
words += bd->free - bd->start;
}
return words;
for (s = 0; s < generations[g].n_steps; s++) {
if (g == 0 && s == 0) { continue; }
stp = &generations[g].steps[s];
+
+ // we need at least this much space
+ needed += stp->n_blocks + stp->n_large_blocks;
+
+ // any additional space needed to collect this gen next time?
if (g == 0 || // always collect gen 0
(generations[g].steps[0].n_blocks +
generations[g].steps[0].n_large_blocks
- > generations[g].max_blocks
- && stp->is_compacted == 0)) {
- needed += 2 * stp->n_blocks + stp->n_large_blocks;
- } else {
- needed += stp->n_blocks + stp->n_large_blocks;
+ > generations[g].max_blocks)) {
+ // we will collect this gen next time
+ if (stp->mark) {
+ // bitmap:
+ needed += stp->n_blocks / BITS_IN(W_);
+ // mark stack:
+ needed += stp->n_blocks / 100;
+ }
+ if (stp->compact) {
+ continue; // no additional space needed for compaction
+ } else {
+ needed += stp->n_blocks;
+ }
}
}
}
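+
+ // Worked example with made-up numbers: a 1600-block step on a 64-bit
+ // machine that will be marked needs about 1600/64 = 25 blocks for the
+ // bitmap plus 1600/100 = 16 for the mark stack; if it is not being
+ // compacted we also budget another 1600 blocks for copying.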
should be modified to use allocateExec instead of VirtualAlloc.
------------------------------------------------------------------------- */
-static bdescr *exec_block;
+#if defined(linux_HOST_OS)
+
+// On Linux we need to use libffi for allocating executable memory,
+// because it knows how to work around the restrictions put in place
+// by SELinux.
+
+void *allocateExec (nat bytes, void **exec_ret)
+{
+ void **ret, **exec;
+ ACQUIRE_SM_LOCK;
+ ret = ffi_closure_alloc (sizeof(void *) + (size_t)bytes, (void**)&exec);
+ RELEASE_SM_LOCK;
+ if (ret == NULL) return ret;
+ *ret = ret; // save the address of the writable mapping, for freeExec().
+ *exec_ret = exec + 1;
+ return (ret + 1);
+}
+
+// freeExec gets passed the executable address, not the writable address.
+void freeExec (void *addr)
+{
+ void *writable;
+ writable = *((void**)addr - 1);
+ ACQUIRE_SM_LOCK;
+ ffi_closure_free (writable);
+ RELEASE_SM_LOCK
+}
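+
+// Illustrative pairing of the two entry points above (caller-side names
+// are made up):
+//
+//     void *exec;
+//     void *wr = allocateExec(len, &exec);
+//     memcpy(wr, code, len);   // write the code via the writable alias
+//     // ... call through 'exec' ...
+//     freeExec(exec);          // free via the executable address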
+
+#else
-void *allocateExec (nat bytes)
+void *allocateExec (nat bytes, void **exec_ret)
{
void *ret;
nat n;
exec_block->free += n + 1;
RELEASE_SM_LOCK
+ *exec_ret = ret;
return ret;
}
RELEASE_SM_LOCK
}
+#endif /* linux_HOST_OS */
+
/* -----------------------------------------------------------------------------
Debugging
countAllocdBlocks(stp->large_objects);
}
+// If memInventory() calculates that we have a memory leak, this
+// function will try to find the block(s) that are leaking by marking
+// all the ones that we know about and then searching through memory for
+// blocks that are not marked. In the debugger this can help to give
+// us a clue about what kind of block leaked. In the future we might
+// annotate blocks with their allocation site to give more helpful
+// info.
+static void
+findMemoryLeak (void)
+{
+ nat g, s, i;
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (i = 0; i < n_capabilities; i++) {
+ markBlocks(capabilities[i].mut_lists[g]);
+ }
+ markBlocks(generations[g].mut_list);
+ for (s = 0; s < generations[g].n_steps; s++) {
+ markBlocks(generations[g].steps[s].blocks);
+ markBlocks(generations[g].steps[s].large_objects);
+ }
+ }
+
+ for (i = 0; i < n_nurseries; i++) {
+ markBlocks(nurseries[i].blocks);
+ markBlocks(nurseries[i].large_objects);
+ }
+
+#ifdef PROFILING
+ // TODO:
+ // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
+ // markRetainerBlocks();
+ // }
+#endif
+
+ // count the blocks allocated by the arena allocator
+ // TODO:
+ // markArenaBlocks();
+
+ // count the blocks containing executable memory
+ markBlocks(exec_block);
+
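+ // (Assumption about helpers defined elsewhere: markBlocks flags every
+ // block descriptor on the given list, and reportUnmarkedBlocks scans
+ // the allocated megablocks and prints any block left unflagged.)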
+ reportUnmarkedBlocks();
+}
+
+
void
memInventory (rtsBool show)
{
#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
+
if (show || leak)
{
if (leak) {
mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
}
}
+
+ if (leak) {
+ debugBelch("\n");
+ findMemoryLeak();
+ }
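+
+ // In a DEBUG RTS these assertions turn any discrepancy into a hard
+ // failure, so a leak is caught by the sanity checks rather than
+ // accumulating silently.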
+ ASSERT(n_alloc_blocks == live_blocks);
+ ASSERT(!leak);
}
checkFreeListSanity();
}
+
+#if defined(THREADED_RTS)
+ // check the stacks too in threaded mode, because we don't do a
+ // full heap sanity check in this case (see checkHeap())
+ checkGlobalTSOList(rtsTrue);
+#else
+ checkGlobalTSOList(rtsFalse);
+#endif
}
/* Nursery sanity check */