+static void
+init_collected_gen (nat g, nat n_threads)
+{
+    nat s, t, i;
+    step_workspace *ws;
+    step *stp;
+    bdescr *bd;
+
+    // Throw away the current mutable list. Invariant: the mutable
+    // list always has at least one block; this means we can avoid a
+    // check for NULL in recordMutable().
+    if (g != 0) {
+        freeChain(generations[g].mut_list);
+        generations[g].mut_list = allocBlock();
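+        // Each capability keeps its own mutable list for this
+        // generation; throw those away and re-initialise them too.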
+        for (i = 0; i < n_capabilities; i++) {
+            freeChain(capabilities[i].mut_lists[g]);
+            capabilities[i].mut_lists[g] = allocBlock();
+        }
+    }
+
+    for (s = 0; s < generations[g].n_steps; s++) {
+
+        stp = &generations[g].steps[s];
+        ASSERT(stp->gen_no == g);
+
+        // we'll construct a new list of threads in this step
+        // during GC, so throw away the current list.
+        stp->old_threads = stp->threads;
+        stp->threads = END_TSO_QUEUE;
+
+        // generation 0, step 0 doesn't need to-space
+        if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
+            continue;
+        }
+
+        // deprecate the existing blocks
+        stp->old_blocks = stp->blocks;
+        stp->n_old_blocks = stp->n_blocks;
+        stp->blocks = NULL;
+        stp->n_blocks = 0;
+        stp->n_words = 0;
+
+        // we don't have any to-be-scavenged blocks yet
+        stp->todos = NULL;
+        stp->todos_last = NULL;
+        stp->n_todos = 0;
+
+        // initialise the large object queues.
+        stp->scavenged_large_objects = NULL;
+        stp->n_scavenged_large_blocks = 0;
+
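+        // Clearing BF_EVACUATED marks a block as from-space: live
+        // objects found in these blocks will be copied out into
+        // to-space during this GC.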
+        // mark the small objects as from-space
+        for (bd = stp->old_blocks; bd; bd = bd->link) {
+            bd->flags &= ~BF_EVACUATED;
+        }
+
+        // mark the large objects as from-space
+        for (bd = stp->large_objects; bd; bd = bd->link) {
+            bd->flags &= ~BF_EVACUATED;
+        }
+
+        // for a compacted step, we need to allocate the bitmap
+        if (stp->is_compacted) {
+            nat bitmap_size; // in bytes
+            bdescr *bitmap_bdescr;
+            StgWord *bitmap;
+
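+            // The bitmap needs one mark bit per word of from-space:
+            // n_old_blocks blocks of BLOCK_SIZE bytes hold
+            // BLOCK_SIZE/sizeof(W_) words each, and dividing the bit
+            // count by BITS_PER_BYTE gives the size in bytes.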
+            bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
+
+            if (bitmap_size > 0) {
+                bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
+                                           / BLOCK_SIZE);
+                stp->bitmap = bitmap_bdescr;
+                bitmap = bitmap_bdescr->start;
+
+                debugTrace(DEBUG_gc, "bitmap_size: %d, bitmap: %p",
+                           bitmap_size, bitmap);
+
+                // don't forget to fill it with zeros!
+                memset(bitmap, 0, bitmap_size);
+
+                // For each block in this step, point to its bitmap from the
+                // block descriptor.
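+                // Each block accounts for BLOCK_SIZE_W bits of the
+                // bitmap, i.e. BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE)
+                // StgWords, so advance the bitmap pointer by that much
+                // per block.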
+                for (bd = stp->old_blocks; bd != NULL; bd = bd->link) {
+                    bd->u.bitmap = bitmap;
+                    bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
+
+                    // Also at this point we set the BF_COMPACTED flag
+                    // for this block. The invariant is that
+                    // BF_COMPACTED is always unset, except during GC
+                    // when it is set on those blocks which will be
+                    // compacted.
+                    bd->flags |= BF_COMPACTED;
+                }
+            }
+        }
+    }
+
+    // For each GC thread, for each step, allocate a "todo" block to
+    // store evacuated objects to be scavenged, and a block to store
+    // evacuated objects that do not need to be scavenged.
+    for (t = 0; t < n_threads; t++) {
+        for (s = 0; s < generations[g].n_steps; s++) {
+
+            // we don't copy objects into g0s0, unless there is only
+            // one generation (-G1)
+            if (g==0 && s==0 && RtsFlags.GcFlags.generations > 1) continue;
+
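+            // Each GC thread's workspaces form a flat array with
+            // RtsFlags.GcFlags.steps slots per generation, indexed by
+            // (generation, step).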
+            ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
+
+            ws->todo_large_objects = NULL;
+
+            ws->part_list = NULL;
+            ws->n_part_blocks = 0;
+
+            // allocate the first to-space block; extra blocks will be
+            // chained on as necessary.
+            ws->todo_bd = NULL;
+            ws->buffer_todo_bd = NULL;
+            alloc_todo_block(ws,0);
+
+            ws->scavd_list = NULL;
+            ws->n_scavd_blocks = 0;
+        }