#include "Trace.h"
#include "RetainerProfile.h"
#include "RaiseAsync.h"
-#include "Sparks.h"
#include "Papi.h"
#include "GC.h"
bdescr *bd;
step *stp;
lnat live, allocated, max_copied, avg_copied, slop;
- lnat oldgen_saved_blocks = 0;
gc_thread *saved_gct;
nat g, s, t, n;
// Update pointers from the Task list
update_task_list();
+ // Update pointers from capabilities (probably just the spark queues)
+ updateCapabilitiesPostGC();
+
// Now see which stable names are still alive.
gcStablePtrTable();
#endif
// NO MORE EVACUATION AFTER THIS POINT!
- // Finally: compaction of the oldest generation.
- if (major_gc && oldest_gen->steps[0].is_compacted) {
- // save number of blocks for stats
- oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
- compact(gct->scavenged_static_objects);
- }
-
- IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
// Two-space collector: free the old to-space.
// g0s0->old_blocks is the old nursery
}
}
- // For each workspace, in each thread:
- // * clear the BF_EVACUATED flag from each copied block
- // * move the copied blocks to the step
+ // For each workspace, in each thread, move the copied blocks to the step
{
gc_thread *thr;
step_workspace *ws;
prev = NULL;
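+ // count the live words in the copied blocks as we walk the list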
for (bd = ws->scavd_list; bd != NULL; bd = bd->link) {
- bd->flags &= ~BF_EVACUATED; // now from-space
ws->step->n_words += bd->free - bd->start;
prev = bd;
}
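+ // the scavenged list becomes the step's new block list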
ws->step->blocks = ws->scavd_list;
}
ws->step->n_blocks += ws->n_scavd_blocks;
+ }
+ }
+
+ // Add all the partial blocks *after* we've added all the full
+ // blocks. This is so that we can grab the partial blocks back
+ // again and try to fill them up in the next GC.
+ for (t = 0; t < n_gc_threads; t++) {
+ thr = gc_threads[t];
+
+ // start at step 1 to skip the nursery, except with the
+ // one-generation (two-space) collector, where step 0 is the only step
+ if (RtsFlags.GcFlags.generations == 1) {
+ s = 0;
+ } else {
+ s = 1;
+ }
+ for (; s < total_steps; s++) {
+ ws = &thr->steps[s];
prev = NULL;
for (bd = ws->part_list; bd != NULL; bd = next) {
    next = bd->link;
    // completely empty blocks can be freed straight away
    if (bd->free == bd->start) {
        if (prev == NULL) {
            ws->part_list = next;
        } else {
            prev->link = next;
        }
        freeGroup(bd);
        ws->n_part_blocks--;
    } else {
-       bd->flags &= ~BF_EVACUATED;  // now from-space
        ws->step->n_words += bd->free - bd->start;
        prev = bd;
    }
}
}
+ // Finally: compaction of the oldest generation.
+ if (major_gc && oldest_gen->steps[0].is_compacted) {
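+ // slide the live data down in place; the list of live static
+ // objects is passed in so that pointers in them get updated too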
+ compact(gct->scavenged_static_objects);
+ }
+
+ IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
+
/* run through all the generations/steps and tidy up
*/
copied = 0;
if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
if (stp->is_compacted)
{
- // for a compacted step, just shift the new to-space
- // onto the front of the now-compacted existing blocks.
- for (bd = stp->blocks; bd != NULL; bd = bd->link) {
- bd->flags &= ~BF_EVACUATED; // now from-space
- stp->n_words += bd->free - bd->start;
- }
// tack the new blocks on the end of the existing blocks
if (stp->old_blocks != NULL) {
for (bd = stp->old_blocks; bd != NULL; bd = next) {
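+ // count the live words as we walk the old (compacted) blocks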
+ stp->n_words += bd->free - bd->start;
+
// NB. this step might not be compacted next
// time, so reset the BF_COMPACTED flags.
// They are set before GC if we're going to
// compact. (search for BF_COMPACTED above).
bd->flags &= ~BF_COMPACTED;
+
+ // between GCs, all blocks in the heap except
+ // for the nursery have the BF_EVACUATED flag set.
+ bd->flags |= BF_EVACUATED;
+
next = bd->link;
// on the last old block, chain the new to-space blocks on the end
if (next == NULL) {
    bd->link = stp->blocks;
}
- // update the count of blocks used by large objects
- for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
- bd->flags &= ~BF_EVACUATED;
- }
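+ // the live large objects are now exactly those on the scavenged list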
stp->large_objects = stp->scavenged_large_objects;
stp->n_large_blocks = stp->n_scavenged_large_blocks;
/* For older generations, we need to append the
 * scavenged_large_objects list (i.e. large objects that have been
 * promoted during this GC) to the large_objects list for that step.
 */
for (bd = stp->scavenged_large_objects; bd; bd = next) {
next = bd->link;
- bd->flags &= ~BF_EVACUATED;
dbl_link_onto(bd, &stp->large_objects);
}
// send exceptions to any threads which were about to die
RELEASE_SM_LOCK;
resurrectThreads(resurrected_threads);
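+ // and perform any throwTo operations that were deferred during GC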
+ performPendingThrowTos(exception_threads);
ACQUIRE_SM_LOCK;
// Update the stable pointer hash table.
gct->thread_index, r);
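+ // spin until all the GC threads have finished; if more work
+ // turns up in the meantime, grab it and rejoin the loop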
while (gc_running_threads != 0) {
- usleep(1);
+ // usleep(1);
if (any_work()) {
inc_running();
goto loop;
for (s = 0; s < generations[g].n_steps; s++) {
+ stp = &generations[g].steps[s];
+ ASSERT(stp->gen_no == g);
+
+ // we'll construct a new list of the threads in this step
+ // during GC; throw away the current list.
+ stp->old_threads = stp->threads;
+ stp->threads = END_TSO_QUEUE;
+
// generation 0, step 0 doesn't need to-space
if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
continue;
}
- stp = &generations[g].steps[s];
- ASSERT(stp->gen_no == g);
-
// deprecate the existing blocks
stp->old_blocks = stp->blocks;
stp->n_old_blocks = stp->n_blocks;
stp->scavenged_large_objects = NULL;
stp->n_scavenged_large_blocks = 0;
- // mark the large objects as not evacuated yet
+ // mark the small objects as from-space
+ for (bd = stp->old_blocks; bd; bd = bd->link) {
+ bd->flags &= ~BF_EVACUATED;
+ }
+
+ // mark the large objects as from-space
for (bd = stp->large_objects; bd; bd = bd->link) {
bd->flags &= ~BF_EVACUATED;
}
stp->n_scavenged_large_blocks = 0;
}
- for (t = 0; t < n_gc_threads; t++) {
- for (s = 0; s < generations[g].n_steps; s++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ stp = &generations[g].steps[s];
+
+ for (t = 0; t < n_gc_threads; t++) {
ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
- stp = ws->step;
ws->buffer_todo_bd = NULL;
ws->todo_large_objects = NULL;
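+ // allocate a fresh todo block for this workspace to copy into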
alloc_todo_block(ws,0);
}
}
+
+ // deal out any more partial blocks to the threads' part_lists
+ t = 0;
+ while (stp->blocks && isPartiallyFull(stp->blocks))
+ {
+ bd = stp->blocks;
+ stp->blocks = bd->link;
+ ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
+ bd->link = ws->part_list;
+ ws->part_list = bd;
+ ws->n_part_blocks += 1;
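+ // everything up to bd->free has already been scavenged,
+ // so start the scan pointer there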
+ bd->u.scan = bd->free;
+ stp->n_blocks -= 1;
+ stp->n_words -= bd->free - bd->start;
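+ // deal the partial blocks out round-robin across the GC threads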
+ t++;
+ if (t == n_gc_threads) t = 0;
+ }
}
+
// Move the private mutable lists from each capability onto the
// main mutable list for the generation.
for (i = 0; i < n_capabilities; i++) {