generation *g0 = NULL; /* generation 0, for convenience */
generation *oldest_gen = NULL; /* oldest generation, for convenience */
-nat total_steps = 0;
-step *all_steps = NULL; /* single array of steps */
-
-ullong total_allocated = 0; /* total memory allocated during run */
-
-step *nurseries = NULL; /* array of nurseries, size == n_capabilities */
+nursery *nurseries = NULL; /* array of nurseries, size == n_capabilities */
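/* For context: the per-capability nursery record that replaces the
 * old step is assumed to be a small struct along these lines (a
 * sketch; the real definition lives in the GC headers, not here): */
typedef struct nursery_ {
    bdescr       *blocks;      // chain of allocation blocks
    unsigned int  n_blocks;    // number of blocks in the chain
} nursery;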
#ifdef THREADED_RTS
/*
static void allocNurseries ( void );
static void
-initStep (step *stp, int g, int s)
+initGeneration (generation *gen, int g)
{
- stp->no = s;
- stp->abs_no = RtsFlags.GcFlags.steps * g + s;
- stp->blocks = NULL;
- stp->n_blocks = 0;
- stp->n_words = 0;
- stp->live_estimate = 0;
- stp->old_blocks = NULL;
- stp->n_old_blocks = 0;
- stp->gen = &generations[g];
- stp->gen_no = g;
- stp->large_objects = NULL;
- stp->n_large_blocks = 0;
- stp->scavenged_large_objects = NULL;
- stp->n_scavenged_large_blocks = 0;
- stp->mark = 0;
- stp->compact = 0;
- stp->bitmap = NULL;
+ gen->no = g;
+ gen->collections = 0;
+ gen->par_collections = 0;
+ gen->failed_promotions = 0;
+ gen->max_blocks = 0;
+ gen->blocks = NULL;
+ gen->n_blocks = 0;
+ gen->n_words = 0;
+ gen->live_estimate = 0;
+ gen->old_blocks = NULL;
+ gen->n_old_blocks = 0;
+ gen->large_objects = NULL;
+ gen->n_large_blocks = 0;
+ gen->n_new_large_blocks = 0;
+ gen->mut_list = allocBlock();
+ gen->scavenged_large_objects = NULL;
+ gen->n_scavenged_large_blocks = 0;
+ gen->mark = 0;
+ gen->compact = 0;
+ gen->bitmap = NULL;
#ifdef THREADED_RTS
- initSpinLock(&stp->sync_large_objects);
+ initSpinLock(&gen->sync_large_objects);
#endif
- stp->threads = END_TSO_QUEUE;
- stp->old_threads = END_TSO_QUEUE;
+ gen->threads = END_TSO_QUEUE;
+ gen->old_threads = END_TSO_QUEUE;
}
void
initStorage( void )
{
- nat g, s;
- generation *gen;
+ nat g, n;
if (generations != NULL) {
// multi-init protection
* doing something reasonable.
*/
/* We use the NOT_NULL variant or gcc warns that the test is always true */
- ASSERT(LOOKS_LIKE_INFO_PTR_NOT_NULL((StgWord)&stg_BLACKHOLE_info));
+ ASSERT(LOOKS_LIKE_INFO_PTR_NOT_NULL((StgWord)&stg_BLOCKING_QUEUE_CLEAN_info));
ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
/* Initialise all generations */
for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
- gen = &generations[g];
- gen->no = g;
- gen->mut_list = allocBlock();
- gen->collections = 0;
- gen->par_collections = 0;
- gen->failed_promotions = 0;
- gen->max_blocks = 0;
+ initGeneration(&generations[g], g);
}
/* A couple of convenience pointers */
g0 = &generations[0];
oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
- /* allocate all the steps into an array. It is important that we do
- it this way, because we need the invariant that two step pointers
- can be directly compared to see which is the oldest.
- Remember that the last generation has only one step. */
- total_steps = 1 + (RtsFlags.GcFlags.generations - 1) * RtsFlags.GcFlags.steps;
- all_steps = stgMallocBytes(total_steps * sizeof(struct step_),
- "initStorage: steps");
-
- /* Allocate step structures in each generation */
- if (RtsFlags.GcFlags.generations > 1) {
- /* Only for multiple-generations */
-
- /* Oldest generation: one step */
- oldest_gen->n_steps = 1;
- oldest_gen->steps = all_steps + (RtsFlags.GcFlags.generations - 1)
- * RtsFlags.GcFlags.steps;
-
- /* set up all except the oldest generation with 2 steps */
- for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
- generations[g].n_steps = RtsFlags.GcFlags.steps;
- generations[g].steps = all_steps + g * RtsFlags.GcFlags.steps;
- }
-
- } else {
- /* single generation, i.e. a two-space collector */
- g0->n_steps = 1;
- g0->steps = all_steps;
- }
-
- nurseries = stgMallocBytes (n_capabilities * sizeof(struct step_),
- "initStorage: nurseries");
-
- /* Initialise all steps */
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- initStep(&generations[g].steps[s], g, s);
- }
- }
-
- for (s = 0; s < n_capabilities; s++) {
- initStep(&nurseries[s], 0, s);
- }
+ nurseries = stgMallocBytes(n_capabilities * sizeof(struct nursery_),
+ "initStorage: nurseries");
/* Set up the destination pointers in each younger generation */
for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
- for (s = 0; s < generations[g].n_steps-1; s++) {
- generations[g].steps[s].to = &generations[g].steps[s+1];
- }
- generations[g].steps[s].to = &generations[g+1].steps[0];
- }
- oldest_gen->steps[0].to = &oldest_gen->steps[0];
-
- for (s = 0; s < n_capabilities; s++) {
- nurseries[s].to = generations[0].steps[0].to;
+ generations[g].to = &generations[g+1];
}
+ oldest_gen->to = oldest_gen;
/* The oldest generation may use mark-compact or mark-sweep. */
if (RtsFlags.GcFlags.compact || RtsFlags.GcFlags.sweep) {
if (RtsFlags.GcFlags.generations == 1) {
errorBelch("WARNING: compact/sweep is incompatible with -G1; disabled");
} else {
- oldest_gen->steps[0].mark = 1;
+ oldest_gen->mark = 1;
if (RtsFlags.GcFlags.compact)
- oldest_gen->steps[0].compact = 1;
+ oldest_gen->compact = 1;
}
}
allocNurseries();
weak_ptr_list = NULL;
- caf_list = NULL;
- revertible_caf_list = NULL;
+ caf_list = END_OF_STATIC_LIST;
+ revertible_caf_list = END_OF_STATIC_LIST;
/* initialise the allocate() interface */
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
N = 0;
+ // allocate a block for each mut list
+ for (n = 0; n < n_capabilities; n++) {
+ for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
+ capabilities[n].mut_lists[g] = allocBlock();
+ }
+ }
+
initGcThreads();
IF_DEBUG(gc, statDescribeGens());
void
freeStorage (void)
{
- stgFree(all_steps); // frees all the steps
stgFree(generations);
freeAllMBlocks();
#if defined(THREADED_RTS)
The entry code for every CAF does the following:
- - builds a CAF_BLACKHOLE in the heap
- - pushes an update frame pointing to the CAF_BLACKHOLE
- - invokes UPD_CAF(), which:
- - calls newCaf, below
- - updates the CAF with a static indirection to the CAF_BLACKHOLE
+ - builds a BLACKHOLE in the heap
+ - pushes an update frame pointing to the BLACKHOLE
+ - calls newCaf, below
+ - updates the CAF with a static indirection to the BLACKHOLE
Why do we build a BLACKHOLE in the heap rather than just updating
the thunk directly? It's so that we only need one kind of update
frame - otherwise we'd need a static version of the update frame too.
newCaf() does the following:
- - it puts the CAF on the oldest generation's mut-once list.
+ - it puts the CAF on the oldest generation's mutable list.
This is so that we can treat the CAF as a root when collecting
younger generations.
For GHCI, we have additional requirements when dealing with CAFs:
-------------------------------------------------------------------------- */
void
-newCAF(StgClosure* caf)
+newCAF(StgRegTable *reg, StgClosure* caf)
{
- ACQUIRE_SM_LOCK;
-
-#ifdef DYNAMIC
if(keepCAFs)
{
// HACK:
// do another hack here and do an address range test on caf to figure
// out whether it is from a dynamic library.
((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
+
+ ACQUIRE_SM_LOCK; // caf_list is global, locked by sm_mutex
((StgIndStatic *)caf)->static_link = caf_list;
caf_list = caf;
+ RELEASE_SM_LOCK;
}
else
-#endif
{
- /* Put this CAF on the mutable list for the old generation.
- * This is a HACK - the IND_STATIC closure doesn't really have
- * a mut_link field, but we pretend it has - in fact we re-use
- * the STATIC_LINK field for the time being, because when we
- * come to do a major GC we won't need the mut_link field
- * any more and can use it as a STATIC_LINK.
- */
+ // Put this CAF on the mutable list for the old generation.
((StgIndStatic *)caf)->saved_info = NULL;
- recordMutableGen(caf, oldest_gen->no);
+ if (oldest_gen->no != 0) {
+ recordMutableCap(caf, regTableToCapability(reg), oldest_gen->no);
+ }
}
-
- RELEASE_SM_LOCK;
+}
+
+// External API for setting the keepCAFs flag. See #3900.
+void
+setKeepCAFs (void)
+{
+ keepCAFs = 1;
}
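/* A minimal usage sketch (an assumption, not part of this patch): a
 * host program that will load Haskell code dynamically can call
 * setKeepCAFs() before the RTS starts, so that CAFs are kept on
 * caf_list and can later be reverted safely. */
#include "Rts.h"

int main (int argc, char *argv[])
{
    setKeepCAFs();               // must run before any CAF is entered
    hs_init(&argc, &argv);
    /* ... load and run Haskell code ... */
    hs_exit();
    return 0;
}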
// An alternate version of newCaf which is used for dynamically loaded code.
// The linker hackily arranges that references to newCaf from dynamic
// code end up pointing to newDynCAF.
void
-newDynCAF(StgClosure *caf)
+newDynCAF (StgRegTable *reg STG_UNUSED, StgClosure *caf)
{
ACQUIRE_SM_LOCK;
-------------------------------------------------------------------------- */
static bdescr *
-allocNursery (step *stp, bdescr *tail, nat blocks)
+allocNursery (bdescr *tail, nat blocks)
{
bdescr *bd;
nat i;
if (tail != NULL) {
tail->u.back = bd;
}
- initBdescr(bd, stp);
+ initBdescr(bd, g0, g0);
bd->flags = 0;
bd->free = bd->start;
tail = bd;
for (i = 0; i < n_capabilities; i++) {
nurseries[i].blocks =
- allocNursery(&nurseries[i], NULL,
- RtsFlags.GcFlags.minAllocAreaSize);
- nurseries[i].n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
- nurseries[i].old_blocks = NULL;
- nurseries[i].n_old_blocks = 0;
+ allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+ nurseries[i].n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
}
assignNurseriesToCapabilities();
}
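/* Since it is called twice above, a sketch of what
 * assignNurseriesToCapabilities() is assumed to do: point each
 * capability's allocation registers at its own nursery so that
 * allocation needs no locking. */
static void
assignNurseriesToCapabilities (void)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        capabilities[i].r.rNursery        = &nurseries[i];
        capabilities[i].r.rCurrentNursery = nurseries[i].blocks;
        capabilities[i].r.rCurrentAlloc   = NULL;
    }
}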
{
nat i;
bdescr *bd;
- step *stp;
for (i = 0; i < n_capabilities; i++) {
- stp = &nurseries[i];
- for (bd = stp->blocks; bd; bd = bd->link) {
+ for (bd = nurseries[i].blocks; bd; bd = bd->link) {
bd->free = bd->start;
ASSERT(bd->gen_no == 0);
- ASSERT(bd->step == stp);
+ ASSERT(bd->gen == g0);
IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
}
- // these large objects are dead, since we have just GC'd
- freeChain(stp->large_objects);
- stp->large_objects = NULL;
- stp->n_large_blocks = 0;
}
assignNurseriesToCapabilities();
}
for (i = 0; i < n_capabilities; i++) {
blocks += nurseries[i].n_blocks;
- blocks += nurseries[i].n_large_blocks;
}
return blocks;
}
static void
-resizeNursery ( step *stp, nat blocks )
+resizeNursery ( nursery *nursery, nat blocks )
{
bdescr *bd;
nat nursery_blocks;
- nursery_blocks = stp->n_blocks;
+ nursery_blocks = nursery->n_blocks;
if (nursery_blocks == blocks) return;
if (nursery_blocks < blocks) {
debugTrace(DEBUG_gc, "increasing size of nursery to %d blocks",
blocks);
- stp->blocks = allocNursery(stp, stp->blocks, blocks-nursery_blocks);
+ nursery->blocks = allocNursery(nursery->blocks, blocks-nursery_blocks);
}
else {
bdescr *next_bd;
debugTrace(DEBUG_gc, "decreasing size of nursery to %d blocks",
blocks);
- bd = stp->blocks;
+ bd = nursery->blocks;
while (nursery_blocks > blocks) {
next_bd = bd->link;
next_bd->u.back = NULL;
freeGroup(bd);
bd = next_bd;
}
- stp->blocks = bd;
+ nursery->blocks = bd;
// might have gone just under, by freeing a large block, so make
// up the difference.
if (nursery_blocks < blocks) {
- stp->blocks = allocNursery(stp, stp->blocks, blocks-nursery_blocks);
+ nursery->blocks = allocNursery(nursery->blocks, blocks-nursery_blocks);
}
}
- stp->n_blocks = blocks;
- ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
+ nursery->n_blocks = blocks;
+ ASSERT(countBlocks(nursery->blocks) == nursery->n_blocks);
}
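/* A sketch of how resizeNursery() is typically driven; the helper
 * name resizeNurseriesEvenly is illustrative (the RTS has a similar
 * function): split the desired total evenly over the capabilities. */
static void
resizeNurseriesEvenly (nat total_blocks)
{
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        resizeNursery(&nurseries[i], total_blocks / n_capabilities);
    }
}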
//
ACQUIRE_SM_LOCK;
- ASSERT(countBlocks(bd->step->large_objects) == bd->step->n_large_blocks);
+ ASSERT(countBlocks(bd->gen->large_objects) == bd->gen->n_large_blocks);
// subtract the original number of blocks from the counter first
- bd->step->n_large_blocks -= bd->blocks;
+ bd->gen->n_large_blocks -= bd->blocks;
new_bd = splitBlockGroup (bd, blocks);
- initBdescr(new_bd, bd->step);
+ initBdescr(new_bd, bd->gen, bd->gen->to);
new_bd->flags = BF_LARGE | (bd->flags & BF_EVACUATED);
// if new_bd is in an old generation, we have to set BF_EVACUATED
new_bd->free = bd->free;
- dbl_link_onto(new_bd, &bd->step->large_objects);
+ dbl_link_onto(new_bd, &bd->gen->large_objects);
ASSERT(new_bd->free <= new_bd->start + new_bd->blocks * BLOCK_SIZE_W);
// add the new number of blocks to the counter. Due to the gaps
// for block descriptors, new_bd->blocks + bd->blocks might not be
// equal to the original bd->blocks, which is why we do it this way.
- bd->step->n_large_blocks += bd->blocks + new_bd->blocks;
+ bd->gen->n_large_blocks += bd->blocks + new_bd->blocks;
- ASSERT(countBlocks(bd->step->large_objects) == bd->step->n_large_blocks);
+ ASSERT(countBlocks(bd->gen->large_objects) == bd->gen->n_large_blocks);
RELEASE_SM_LOCK;
{
bdescr *bd;
StgPtr p;
- step *stp;
if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
lnat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
stg_exit(EXIT_HEAPOVERFLOW);
}
- stp = &nurseries[cap->no];
-
+ ACQUIRE_SM_LOCK;
bd = allocGroup(req_blocks);
- dbl_link_onto(bd, &stp->large_objects);
- stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
- initBdescr(bd, stp);
+ dbl_link_onto(bd, &g0->large_objects);
+ g0->n_large_blocks += bd->blocks; // might be larger than req_blocks
+ g0->n_new_large_blocks += bd->blocks;
+ RELEASE_SM_LOCK;
+ initBdescr(bd, g0, g0);
bd->flags = BF_LARGE;
bd->free = bd->start + n;
return bd->start;
bd = allocBlock();
cap->r.rNursery->n_blocks++;
RELEASE_SM_LOCK;
- initBdescr(bd, cap->r.rNursery);
+ initBdescr(bd, g0, g0);
bd->flags = 0;
// If we had to allocate a new block, then we'll GC
// pretty quickly now, because MAYBE_GC() will
}
p = bd->free;
bd->free += n;
+
+ IF_DEBUG(sanity, ASSERT(*((StgWord8*)p) == 0xaa));
return p;
}
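/* An illustrative sketch (not part of this patch) of the usual
 * allocate() idiom in RTS code: carve out the space, then write the
 * closure header before anything can trigger a GC.  exampleNewMutVar
 * is a hypothetical helper, not an RTS function. */
static StgClosure *
exampleNewMutVar (Capability *cap, StgClosure *init)
{
    StgMutVar *mv = (StgMutVar *)allocate(cap, sizeofW(StgMutVar));
    SET_HDR(mv, &stg_MUT_VAR_DIRTY_info, CCS_SYSTEM);
    mv->var = init;
    return (StgClosure *)mv;
}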
We allocate small pinned objects into a single block, allocating a
new block when the current one overflows. The block is chained
- onto the large_object_list of generation 0 step 0.
+ onto the large_object_list of generation 0.
NOTE: The GC can't in general handle pinned objects. This
interface is only safe to use for ByteArrays, which have no
{
StgPtr p;
bdescr *bd;
- step *stp;
// If the request is for a large object, then allocate()
// will give us a pinned object anyway.
// If we don't have a block of pinned objects yet, or the current
// one isn't large enough to hold the new object, allocate a new one.
if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
- ACQUIRE_SM_LOCK
+ ACQUIRE_SM_LOCK;
cap->pinned_object_block = bd = allocBlock();
- RELEASE_SM_LOCK
- stp = &nurseries[cap->no];
- dbl_link_onto(bd, &stp->large_objects);
- stp->n_large_blocks++;
- initBdescr(bd, stp);
+ dbl_link_onto(bd, &g0->large_objects);
+ g0->n_large_blocks++;
+ g0->n_new_large_blocks++;
+ RELEASE_SM_LOCK;
+ initBdescr(bd, g0, g0);
bd->flags = BF_PINNED | BF_LARGE;
bd->free = bd->start;
}
dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
{
Capability *cap = regTableToCapability(reg);
- bdescr *bd;
if (p->header.info == &stg_MUT_VAR_CLEAN_info) {
p->header.info = &stg_MUT_VAR_DIRTY_info;
- bd = Bdescr((StgPtr)p);
- if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
+ recordClosureMutated(cap,p);
}
}
void
setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
{
- bdescr *bd;
if (tso->dirty == 0 && (tso->flags & TSO_LINK_DIRTY) == 0) {
tso->flags |= TSO_LINK_DIRTY;
- bd = Bdescr((StgPtr)tso);
- if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
+ recordClosureMutated(cap,(StgClosure*)tso);
}
tso->_link = target;
}
void
+setTSOPrev (Capability *cap, StgTSO *tso, StgTSO *target)
+{
+ if (tso->dirty == 0 && (tso->flags & TSO_LINK_DIRTY) == 0) {
+ tso->flags |= TSO_LINK_DIRTY;
+ recordClosureMutated(cap,(StgClosure*)tso);
+ }
+ tso->block_info.prev = target;
+}
+
+void
dirty_TSO (Capability *cap, StgTSO *tso)
{
- bdescr *bd;
if (tso->dirty == 0 && (tso->flags & TSO_LINK_DIRTY) == 0) {
- bd = Bdescr((StgPtr)tso);
- if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
+ recordClosureMutated(cap,(StgClosure*)tso);
}
tso->dirty = 1;
}
void
dirty_MVAR(StgRegTable *reg, StgClosure *p)
{
- Capability *cap = regTableToCapability(reg);
- bdescr *bd;
- bd = Bdescr((StgPtr)p);
- if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
+ recordClosureMutated(regTableToCapability(reg),p);
}
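/* For reference, the recordClosureMutated() barrier used above is
 * assumed to look roughly like this (a sketch; the real definition
 * lives with the Capability code): look up the closure's generation
 * via its block descriptor, and record it on the capability-local
 * mutable list if it lives outside generation 0. */
INLINE_HEADER void
recordClosureMutated (Capability *cap, StgClosure *p)
{
    bdescr *bd = Bdescr((StgPtr)p);
    if (bd->gen_no != 0) {
        recordMutableCap(p, cap, bd->gen_no);
    }
}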
/* -----------------------------------------------------------------------------
}
}
- total_allocated += allocated;
+ allocated += g0->n_new_large_blocks * BLOCK_SIZE_W;
+
return allocated;
}
/* Approximate the amount of live data in the heap. To be called just
* after garbage collection (see GarbageCollect()).
*/
-lnat
-calcLiveBlocks(void)
+lnat calcLiveBlocks (void)
{
- nat g, s;
+ nat g;
lnat live = 0;
- step *stp;
+ generation *gen;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
/* approximate amount of live data (doesn't take into account slop
* at end of each block).
*/
- if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
- continue;
- }
- stp = &generations[g].steps[s];
- live += stp->n_large_blocks + stp->n_blocks;
- }
+ gen = &generations[g];
+ live += gen->n_large_blocks + gen->n_blocks;
}
return live;
}
-lnat
-countOccupied(bdescr *bd)
+lnat countOccupied (bdescr *bd)
{
lnat words;
// Return an accurate count of the live data in the heap, excluding
// generation 0.
-lnat
-calcLiveWords(void)
+lnat calcLiveWords (void)
{
- nat g, s;
+ nat g;
lnat live;
- step *stp;
+ generation *gen;
live = 0;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) continue;
- stp = &generations[g].steps[s];
- live += stp->n_words + countOccupied(stp->large_objects);
- }
+ gen = &generations[g];
+ live += gen->n_words + countOccupied(gen->large_objects);
}
return live;
}
/* Approximate the number of blocks that will be needed at the next
* garbage collection.
*
- * Assume: all data currently live will remain live. Steps that will
- * be collected next time will therefore need twice as many blocks
- * since all the data will be copied.
+ * Assume: all data currently live will remain live. Generations
+ * that will be collected next time will therefore need twice as many
+ * blocks since all the data will be copied.
*/
extern lnat
calcNeeded(void)
{
lnat needed = 0;
- nat g, s;
- step *stp;
+ nat g;
+ generation *gen;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- if (g == 0 && s == 0) { continue; }
- stp = &generations[g].steps[s];
-
- // we need at least this much space
- needed += stp->n_blocks + stp->n_large_blocks;
-
- // any additional space needed to collect this gen next time?
- if (g == 0 || // always collect gen 0
- (generations[g].steps[0].n_blocks +
- generations[g].steps[0].n_large_blocks
- > generations[g].max_blocks)) {
- // we will collect this gen next time
- if (stp->mark) {
- // bitmap:
- needed += stp->n_blocks / BITS_IN(W_);
- // mark stack:
- needed += stp->n_blocks / 100;
- }
- if (stp->compact) {
- continue; // no additional space needed for compaction
- } else {
- needed += stp->n_blocks;
- }
- }
- }
+ gen = &generations[g];
+
+ // we need at least this much space
+ needed += gen->n_blocks + gen->n_large_blocks;
+
+ // any additional space needed to collect this gen next time?
+ if (g == 0 || // always collect gen 0
+ (gen->n_blocks + gen->n_large_blocks > gen->max_blocks)) {
+ // we will collect this gen next time
+ if (gen->mark) {
+ // bitmap:
+ needed += gen->n_blocks / BITS_IN(W_);
+ // mark stack:
+ needed += gen->n_blocks / 100;
+ }
+ if (gen->compact) {
+ continue; // no additional space needed for compaction
+ } else {
+ needed += gen->n_blocks;
+ }
+ }
}
return needed;
}
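/* A worked example of the estimate (illustrative numbers): a
 * generation holding 1000 blocks that will be copy-collected next
 * time needs 1000 (live data) + 1000 (to-space) = 2000 blocks.  If
 * gen->mark is set instead, it needs 1000 + 1000/BITS_IN(W_) for the
 * bitmap + 1000/100 for the mark stack, about 1025 blocks on a
 * 64-bit machine, and compaction adds nothing further. */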