+ // for a compacted generation, we need to allocate the bitmap
+ if (gen->mark) {
+ nat bitmap_size; // in bytes
+ bdescr *bitmap_bdescr;
+ StgWord *bitmap;
+
+ bitmap_size = gen->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
+
+ if (bitmap_size > 0) {
+ bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
+ / BLOCK_SIZE);
+ gen->bitmap = bitmap_bdescr;
+ bitmap = bitmap_bdescr->start;
+
+ debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
+ bitmap_size, bitmap);
+
+ // don't forget to fill it with zeros!
+ memset(bitmap, 0, bitmap_size);
+
+ // For each block in this step, point to its bitmap from the
+ // block descriptor.
+ for (bd=gen->old_blocks; bd != NULL; bd = bd->link) {
+ bd->u.bitmap = bitmap;
+ bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
+
+ // Also at this point we set the BF_MARKED flag
+ // for this block. The invariant is that
+ // BF_MARKED is always unset, except during GC
+ // when it is set on those blocks which will be
+ // compacted.
+ if (!(bd->flags & BF_FRAGMENTED)) {
+ bd->flags |= BF_MARKED;
+ }
+
+ // BF_SWEPT should be marked only for blocks that are being
+ // collected in sweep()
+ bd->flags &= ~BF_SWEPT;
+ }
+ }
+ }
+}
+
+
+/* ----------------------------------------------------------------------------
+ Save the mutable lists in saved_mut_lists
+ ------------------------------------------------------------------------- */
+
+static void
+stash_mut_list (Capability *cap, nat gen_no)
+{
+    // Remember this capability's current mutable list for gen_no in
+    // saved_mut_lists; it will be traversed as a set of roots early
+    // on in the GC (see prepare_uncollected_gen).
+    cap->saved_mut_lists[gen_no] = cap->mut_lists[gen_no];
+    // Install a fresh, empty block so new mutable-list entries made
+    // during GC are recorded separately from the saved list.
+    cap->mut_lists[gen_no] = allocBlock_sync();
+}
+
+/* ----------------------------------------------------------------------------
+ Initialise a generation that is *not* to be collected
+ ------------------------------------------------------------------------- */
+
+static void
+prepare_uncollected_gen (generation *gen)
+{
+    nat i;
+
+
+    // Only older generations can be left uncollected; generation 0 is
+    // never prepared through this path.
+    ASSERT(gen->no > 0);
+
+    // save the current mutable lists for this generation, and
+    // allocate a fresh block for each one. We'll traverse these
+    // mutable lists as roots early on in the GC.
+    for (i = 0; i < n_capabilities; i++) {
+        stash_mut_list(&capabilities[i], gen->no);
+    }
+
+    // The scavenged-large-object accumulators for an uncollected
+    // generation must be empty at the start of a GC.
+    ASSERT(gen->scavenged_large_objects == NULL);
+    ASSERT(gen->n_scavenged_large_blocks == 0);
+}
+
+/* -----------------------------------------------------------------------------
+ Collect the completed blocks from a GC thread and attach them to
+ the generation.
+ -------------------------------------------------------------------------- */
+
+static void
+collect_gct_blocks (void)
+{
+ nat g;
+ gen_workspace *ws;
+ bdescr *bd, *prev;
+
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ ws = &gct->gens[g];
+
+ // there may still be a block attached to ws->todo_bd;
+ // leave it there to use next time.
+
+ if (ws->scavd_list != NULL) {
+ ACQUIRE_SPIN_LOCK(&ws->gen->sync);
+
+ ASSERT(gct->scan_bd == NULL);
+ ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks);
+
+ prev = NULL;
+ for (bd = ws->scavd_list; bd != NULL; bd = bd->link) {
+ ws->gen->n_words += bd->free - bd->start;
+ prev = bd;
+ }
+ if (prev != NULL) {
+ prev->link = ws->gen->blocks;
+ ws->gen->blocks = ws->scavd_list;
+ }
+ ws->gen->n_blocks += ws->n_scavd_blocks;
+
+ ws->scavd_list = NULL;
+ ws->n_scavd_blocks = 0;
+
+ RELEASE_SPIN_LOCK(&ws->gen->sync);
+ }