X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FGC.c;h=1d6469947c1fa8b504a4004ae6ab179d356ecad5;hb=e7987f16175f88daa11f06f25d10161a95f84bc4;hp=978b7a0a20b76bd642a5f6651dbbcd979e35f4d5;hpb=49780c2e25cfbe821d585c5a31cb95aa49f41f14;p=ghc-hetmet.git

diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 978b7a0..1d64699 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -408,9 +408,7 @@ GarbageCollect ( rtsBool force_major_gc )
         }
     }
 
-    // For each workspace, in each thread:
-    //  * clear the BF_EVACUATED flag from each copied block
-    //  * move the copied blocks to the step
+    // For each workspace, in each thread, move the copied blocks to the step
     {
         gc_thread *thr;
         step_workspace *ws;
@@ -438,7 +436,6 @@ GarbageCollect ( rtsBool force_major_gc )
 
             prev = NULL;
             for (bd = ws->scavd_list; bd != NULL; bd = bd->link) {
-                bd->flags &= ~BF_EVACUATED;    // now from-space
                 ws->step->n_words += bd->free - bd->start;
                 prev = bd;
             }
@@ -447,6 +444,23 @@ GarbageCollect ( rtsBool force_major_gc )
                 ws->step->blocks = ws->scavd_list;
             }
             ws->step->n_blocks += ws->n_scavd_blocks;
+        }
+    }
+
+    // Add all the partial blocks *after* we've added all the full
+    // blocks.  This is so that we can grab the partial blocks back
+    // again and try to fill them up in the next GC.
+    for (t = 0; t < n_gc_threads; t++) {
+        thr = gc_threads[t];
+
+        // not step 0
+        if (RtsFlags.GcFlags.generations == 1) {
+            s = 0;
+        } else {
+            s = 1;
+        }
+        for (; s < total_steps; s++) {
+            ws = &thr->steps[s];
 
             prev = NULL;
             for (bd = ws->part_list; bd != NULL; bd = next) {
@@ -460,7 +474,6 @@ GarbageCollect ( rtsBool force_major_gc )
                     freeGroup(bd);
                     ws->n_part_blocks--;
                 } else {
-                    bd->flags &= ~BF_EVACUATED;    // now from-space
                     ws->step->n_words += bd->free - bd->start;
                     prev = bd;
                 }
@@ -543,7 +556,6 @@ GarbageCollect ( rtsBool force_major_gc )
             // for a compacted step, just shift the new to-space
             // onto the front of the now-compacted existing blocks.
             for (bd = stp->blocks; bd != NULL; bd = bd->link) {
-                bd->flags &= ~BF_EVACUATED;  // now from-space
                 stp->n_words += bd->free - bd->start;
             }
             // tack the new blocks on the end of the existing blocks
@@ -585,10 +597,6 @@ GarbageCollect ( rtsBool force_major_gc )
             bd = next;
         }
 
-        // update the count of blocks used by large objects
-        for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
-            bd->flags &= ~BF_EVACUATED;
-        }
         stp->large_objects = stp->scavenged_large_objects;
         stp->n_large_blocks = stp->n_scavenged_large_blocks;
 
@@ -601,7 +609,6 @@ GarbageCollect ( rtsBool force_major_gc )
      */
     for (bd = stp->scavenged_large_objects; bd; bd = next) {
         next = bd->link;
-        bd->flags &= ~BF_EVACUATED;
         dbl_link_onto(bd, &stp->large_objects);
     }
 
@@ -685,6 +692,7 @@ GarbageCollect ( rtsBool force_major_gc )
     // send exceptions to any threads which were about to die
     RELEASE_SM_LOCK;
     resurrectThreads(resurrected_threads);
+    performPendingThrowTos(exception_threads);
     ACQUIRE_SM_LOCK;
 
     // Update the stable pointer hash table.
@@ -1071,14 +1079,19 @@ init_collected_gen (nat g, nat n_threads)
 
     for (s = 0; s < generations[g].n_steps; s++) {
 
+        stp = &generations[g].steps[s];
+        ASSERT(stp->gen_no == g);
+
+        // we'll construct a new list of threads in this step during GC;
+        // throw away the current list.
+        stp->old_threads = stp->threads;
+        stp->threads = END_TSO_QUEUE;
+
         // generation 0, step 0 doesn't need to-space
         if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
             continue;
         }
 
-        stp = &generations[g].steps[s];
-        ASSERT(stp->gen_no == g);
-
         // deprecate the existing blocks
         stp->old_blocks   = stp->blocks;
         stp->n_old_blocks = stp->n_blocks;
@@ -1095,7 +1108,12 @@ init_collected_gen (nat g, nat n_threads)
         stp->scavenged_large_objects = NULL;
         stp->n_scavenged_large_blocks = 0;
 
-        // mark the large objects as not evacuated yet
+        // mark the small objects as from-space
+        for (bd = stp->old_blocks; bd; bd = bd->link) {
+            bd->flags &= ~BF_EVACUATED;
+        }
+
+        // mark the large objects as from-space
         for (bd = stp->large_objects; bd; bd = bd->link) {
             bd->flags &= ~BF_EVACUATED;
         }
@@ -1184,11 +1202,12 @@ init_uncollected_gen (nat g, nat threads)
         stp->n_scavenged_large_blocks = 0;
     }
 
-    for (t = 0; t < threads; t++) {
-        for (s = 0; s < generations[g].n_steps; s++) {
+    for (s = 0; s < generations[g].n_steps; s++) {
+        stp = &generations[g].steps[s];
+
+        for (t = 0; t < threads; t++) {
             ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
-            stp = ws->step;
 
             ws->buffer_todo_bd = NULL;
             ws->todo_large_objects = NULL;
@@ -1219,8 +1238,26 @@ init_uncollected_gen (nat g, nat threads)
                 alloc_todo_block(ws,0);
             }
         }
+
+        // deal out any more partial blocks to the threads' part_lists
+        t = 0;
+        while (stp->blocks && isPartiallyFull(stp->blocks))
+        {
+            bd = stp->blocks;
+            stp->blocks = bd->link;
+            ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
+            bd->link = ws->part_list;
+            ws->part_list = bd;
+            ws->n_part_blocks += 1;
+            bd->u.scan = bd->free;
+            stp->n_blocks -= 1;
+            stp->n_words -= bd->free - bd->start;
+            t++;
+            if (t == n_gc_threads) t = 0;
+        }
     }
+
    // Move the private mutable lists from each capability onto the
    // main mutable list for the generation.
    for (i = 0; i < n_capabilities; i++) {
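
The most intricate part of this patch is the partial-block recycling: the GarbageCollect hunks hand each step its full blocks first and its partially full blocks last, and the init_uncollected_gen hunk then strips those partial blocks back off the front of stp->blocks and deals them out round-robin to the GC threads' part_lists, so the next GC can top them up instead of allocating fresh to-space. Below is a minimal, self-contained sketch of that deal-out pattern in plain C. The types (block, workspace) and helpers (is_partially_full, deal_out_partial_blocks) are simplified stand-ins for illustration, not the RTS's real bdescr/step_workspace definitions, and bookkeeping such as bd->u.scan and the step's word count is omitted.

    /* Sketch of the round-robin partial-block deal-out; simplified
     * stand-in types, not the real RTS definitions. */

    #include <stdio.h>
    #include <stddef.h>

    typedef struct block_ {
        struct block_ *link;    /* next block in the step's list */
        size_t start, free;     /* words used so far: free - start */
    } block;

    typedef struct {
        block *part_list;       /* per-thread list of refillable blocks */
        int    n_part_blocks;
    } workspace;

    /* stand-in for isPartiallyFull(): true if the block still has room */
    static int is_partially_full(const block *bd, size_t block_words)
    {
        return (bd->free - bd->start) < block_words;
    }

    /* Pop partially full blocks off the front of the step's list and
     * push them onto the threads' part_lists in round-robin order, so
     * each GC thread gets a roughly equal share of blocks to top up. */
    static void deal_out_partial_blocks(block **step_blocks, int *n_blocks,
                                        workspace *ws, int n_threads,
                                        size_t block_words)
    {
        int t = 0;
        while (*step_blocks && is_partially_full(*step_blocks, block_words)) {
            block *bd = *step_blocks;
            *step_blocks = bd->link;       /* unlink from the step */
            bd->link = ws[t].part_list;    /* push onto thread t's list */
            ws[t].part_list = bd;
            ws[t].n_part_blocks++;
            (*n_blocks)--;
            t++;                           /* next thread, wrapping */
            if (t == n_threads) t = 0;
        }
    }

    int main(void)
    {
        enum { BLOCK_WORDS = 1024 };

        /* two partially full blocks at the front, one full block behind */
        block b3 = { NULL, 0, BLOCK_WORDS };
        block b2 = { &b3,  0, 500 };
        block b1 = { &b2,  0, 100 };

        block *step_blocks = &b1;
        int n_blocks = 3;
        workspace ws[2] = { { NULL, 0 }, { NULL, 0 } };

        deal_out_partial_blocks(&step_blocks, &n_blocks, ws, 2, BLOCK_WORDS);

        /* prints: thread 0 got 1, thread 1 got 1, 1 block(s) left */
        printf("thread 0 got %d, thread 1 got %d, %d block(s) left\n",
               ws[0].n_part_blocks, ws[1].n_part_blocks, n_blocks);
        return 0;
    }

Note that the loop only ever inspects the head of the list: it works because the earlier hunks add the partial blocks to the step last, which leaves them at the front of stp->blocks, exactly as the patch's comment ("so that we can grab the partial blocks back again") explains.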