Now that we use the per-capability mutable lists exclusively, the per-generation mutable list (generation->mut_list and saved_mut_list) is no longer needed and is removed.
struct_field(bdescr, link);
struct_size(generation);
- struct_field(generation, mut_list);
struct_field(generation, n_new_large_words);
struct_size(CostCentreStack);
// (for allocation stats)
unsigned int max_blocks; // max blocks
- bdescr *mut_list; // mut objects in this gen (not G0)
StgTSO * threads; // threads in this gen
// linked via global_link
unsigned int n_old_blocks; // number of blocks in from-space
unsigned int live_estimate; // for sweeping: estimate of live data
- bdescr * saved_mut_list;
-
bdescr * part_blocks; // partially-full scanned blocks
unsigned int n_part_blocks; // count of above
void
statDescribeGens(void)
{
- nat g, mut, lge;
+ nat g, mut, lge, i;
lnat live, slop;
lnat tot_live, tot_slop;
bdescr *bd;
tot_slop = 0;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
mut = 0;
- for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
- mut += (bd->free - bd->start) * sizeof(W_);
+ for (i = 0; i < n_capabilities; i++) {
+ mut += countOccupied(capabilities[i].mut_lists[g]);
}
gen = &generations[g];
bdescr *bd;
StgPtr p;
nat n;
- for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
- for (p = bd->start; p < bd->free; p++) {
- thread((StgClosure **)p);
- }
- }
for (n = 0; n < n_capabilities; n++) {
for (bd = capabilities[n].mut_lists[g];
bd != NULL; bd = bd->link) {
inc_running();
wakeup_gc_threads(n_gc_threads, gct->thread_index);
- // Mutable lists from each generation > N
- // we want to *scavenge* these roots, not evacuate them: they're not
- // going to move in this GC.
- // Also do them in reverse generation order, for the usual reason:
- // namely to reduce the likelihood of spurious old->new pointers.
- //
- for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
-#if defined(THREADED_RTS)
- if (n_gc_threads > 1) {
- scavenge_mutable_list(generations[g].saved_mut_list, &generations[g]);
- } else {
- scavenge_mutable_list1(generations[g].saved_mut_list, &generations[g]);
- }
-#else
- scavenge_mutable_list(generations[g].saved_mut_list, &generations[g]);
-#endif
- freeChain_sync(generations[g].saved_mut_list);
- generations[g].saved_mut_list = NULL;
-
- }
-
// scavenge the capability-private mutable lists. This isn't part
// of markSomeCapabilities() because markSomeCapabilities() can only
// call back into the GC via mark_root() (due to the gct register
// stats. Every mutable list is copied during every GC.
if (g > 0) {
nat mut_list_size = 0;
- for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
- mut_list_size += bd->free - bd->start;
- }
for (n = 0; n < n_capabilities; n++) {
- for (bd = capabilities[n].mut_lists[g];
- bd != NULL; bd = bd->link) {
- mut_list_size += bd->free - bd->start;
- }
+ mut_list_size += countOccupied(capabilities[n].mut_lists[g]);
}
copied += mut_list_size;
// list always has at least one block; this means we can avoid a
// check for NULL in recordMutable().
if (g != 0) {
- freeChain(generations[g].mut_list);
- generations[g].mut_list = allocBlock();
- for (i = 0; i < n_capabilities; i++) {
+ for (i = 0; i < n_capabilities; i++) {
freeChain(capabilities[i].mut_lists[g]);
capabilities[i].mut_lists[g] = allocBlock();
}
// save the current mutable lists for this generation, and
// allocate a fresh block for each one. We'll traverse these
// mutable lists as roots early on in the GC.
- generations[g].saved_mut_list = generations[g].mut_list;
- generations[g].mut_list = allocBlock();
for (n = 0; n < n_capabilities; n++) {
capabilities[n].saved_mut_lists[g] = capabilities[n].mut_lists[g];
capabilities[n].mut_lists[g] = allocBlock();
#if DEBUG
void
-printMutableList(generation *gen)
+printMutableList(bdescr *bd)
{
- bdescr *bd;
StgPtr p;
- debugBelch("mutable list %p: ", gen->mut_list);
+ debugBelch("mutable list %p: ", bd);
- for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
+ for (; bd != NULL; bd = bd->link) {
for (p = bd->start; p < bd->free; p++) {
debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));
}
#if DEBUG
-void printMutableList (generation *gen);
+void printMutableList (bdescr *bd);
#endif
// Version of recordMutableGen for use during GC. This uses the
nat g, i;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- checkMutableList(generations[g].mut_list, g);
for (i = 0; i < n_capabilities; i++) {
checkMutableList(capabilities[i].mut_lists[g], g);
}
for (i = 0; i < n_capabilities; i++) {
markBlocks(capabilities[i].mut_lists[g]);
}
- markBlocks(generations[g].mut_list);
markBlocks(generations[g].blocks);
markBlocks(generations[g].large_objects);
}
for (i = 0; i < n_capabilities; i++) {
gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
}
- gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
gen_blocks[g] += genBlocks(&generations[g]);
}
gen->large_objects = NULL;
gen->n_large_blocks = 0;
gen->n_new_large_words = 0;
- gen->mut_list = allocBlock();
gen->scavenged_large_objects = NULL;
gen->n_scavenged_large_blocks = 0;
gen->mark = 0;
/* -----------------------------------------------------------------------------
Generational garbage collection support
- recordMutable(StgPtr p) Informs the garbage collector that a
- previously immutable object has
- become (permanently) mutable. Used
- by thawArray and similar.
-
updateWithIndirection(p1,p2) Updates the object at p1 with an
indirection pointing to p2. This is
normally called for objects in an old
#define ASSERT_SM_LOCK()
#endif
-INLINE_HEADER void
-recordMutableGen(StgClosure *p, nat gen_no)
-{
- bdescr *bd;
-
- bd = generations[gen_no].mut_list;
- if (bd->free >= bd->start + BLOCK_SIZE_W) {
- bdescr *new_bd;
- new_bd = allocBlock();
- new_bd->link = bd;
- bd = new_bd;
- generations[gen_no].mut_list = bd;
- }
- *bd->free++ = (StgWord)p;
-
-}
-
-INLINE_HEADER void
-recordMutableGenLock(StgClosure *p, nat gen_no)
-{
- ACQUIRE_SM_LOCK;
- recordMutableGen(p,gen_no);
- RELEASE_SM_LOCK;
-}
-
-INLINE_HEADER void
-recordMutable(StgClosure *p)
-{
- bdescr *bd;
- ASSERT(closure_MUTABLE(p));
- bd = Bdescr((P_)p);
- if (bd->gen_no > 0) recordMutableGen(p, bd->gen_no);
-}
-
-INLINE_HEADER void
-recordMutableLock(StgClosure *p)
-{
- ACQUIRE_SM_LOCK;
- recordMutable(p);
- RELEASE_SM_LOCK;
-}
-
/* -----------------------------------------------------------------------------
The write barrier for MVARs
-------------------------------------------------------------------------- */