X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FGCUtils.c;h=4432ad6ce5181f893b3ff0b2c27d422464b08201;hb=7e09fadd0a587f7555db5b966af594419800b534;hp=a8f0099e9c7cc96c7dec15553e04d103550ef17d;hpb=ba733a0f6b6b5d977e3f41d373a56f570691b1eb;p=ghc-hetmet.git diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c index a8f0099..4432ad6 100644 --- a/rts/sm/GCUtils.c +++ b/rts/sm/GCUtils.c @@ -1,6 +1,6 @@ /* ----------------------------------------------------------------------------- * - * (c) The GHC Team 1998-2006 + * (c) The GHC Team 1998-2008 * * Generational garbage collector: utilities * @@ -15,6 +15,7 @@ #include "RtsFlags.h" #include "Storage.h" #include "GC.h" +#include "GCThread.h" #include "GCUtils.h" #include "Printer.h" #include "Trace.h" @@ -33,6 +34,39 @@ allocBlock_sync(void) return bd; } + +#if 0 +static void +allocBlocks_sync(nat n, bdescr **hd, bdescr **tl, + nat gen_no, step *stp, + StgWord32 flags) +{ + bdescr *bd; + nat i; + ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync); + bd = allocGroup(n); + for (i = 0; i < n; i++) { + bd[i].blocks = 1; + bd[i].gen_no = gen_no; + bd[i].step = stp; + bd[i].flags = flags; + bd[i].link = &bd[i+1]; + bd[i].u.scan = bd[i].free = bd[i].start; + } + *hd = bd; + *tl = &bd[n-1]; + RELEASE_SPIN_LOCK(&gc_alloc_block_sync); +} +#endif + +void +freeChain_sync(bdescr *bd) +{ + ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync); + freeChain(bd); + RELEASE_SPIN_LOCK(&gc_alloc_block_sync); +} + /* ----------------------------------------------------------------------------- Workspace utilities -------------------------------------------------------------------------- */ @@ -43,7 +77,7 @@ grab_todo_block (step_workspace *ws) bdescr *bd; step *stp; - stp = ws->stp; + stp = ws->step; bd = NULL; if (ws->buffer_todo_bd) @@ -57,6 +91,9 @@ grab_todo_block (step_workspace *ws) ACQUIRE_SPIN_LOCK(&stp->sync_todo); if (stp->todos) { bd = stp->todos; + if (stp->todos == stp->todos_last) { + stp->todos_last = NULL; + } stp->todos = bd->link; stp->n_todos--; bd->link = NULL; @@ -65,85 +102,150 @@ grab_todo_block (step_workspace *ws) return bd; } -static void -push_todo_block (bdescr *bd, step *stp) +void +push_scanned_block (bdescr *bd, step_workspace *ws) { + ASSERT(bd != NULL); ASSERT(bd->link == NULL); - ACQUIRE_SPIN_LOCK(&stp->sync_todo); - bd->link = stp->todos; - stp->todos = bd; - stp->n_todos++; - trace(TRACE_gc, "step %d, n_todos: %d", stp->abs_no, stp->n_todos); - RELEASE_SPIN_LOCK(&stp->sync_todo); + ASSERT(bd->step == ws->step); + ASSERT(bd->u.scan == bd->free); + + if (bd->start + BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS) + { + // a partially full block: put it on the part_list list. + bd->link = ws->part_list; + ws->part_list = bd; + ws->n_part_blocks++; + IF_DEBUG(sanity, + ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks)); + } + else + { + // put the scan block on the ws->scavd_list. + bd->link = ws->scavd_list; + ws->scavd_list = bd; + ws->n_scavd_blocks ++; + IF_DEBUG(sanity, + ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks)); + } } -void -push_scan_block (bdescr *bd, step_workspace *ws) +StgPtr +todo_block_full (nat size, step_workspace *ws) { + bdescr *bd; + + bd = ws->todo_bd; + ASSERT(bd != NULL); ASSERT(bd->link == NULL); + ASSERT(bd->step == ws->step); + + // If the global list is not empty, or there's not much work in + // this block to push, and there's enough room in + // this block to evacuate the current object, then just increase + // the limit. 
+ if (ws->step->todos != NULL || + (ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) { + if (ws->todo_free + size < bd->start + BLOCK_SIZE_W) { + ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W, + ws->todo_lim + stg_max(WORK_UNIT_WORDS,size)); + debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim); + return ws->todo_free; + } + } + + gct->copied += ws->todo_free - bd->free; + bd->free = ws->todo_free; + + ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free); + + // If this block is not the scan block, we want to push it out and + // make room for a new todo block. + if (bd != gct->scan_bd) + { + // If this block does not have enough space to allocate the + // current object, but it also doesn't have any work to push, then + // push it on to the scanned list. It cannot be empty, because + // then there would be enough room to copy the current object. + if (bd->u.scan == bd->free) + { + ASSERT(bd->free != bd->start); + push_scanned_block(bd, ws); + } + // Otherwise, push this block out to the global list. + else + { + step *stp; + stp = ws->step; + trace(TRACE_gc|DEBUG_gc, "push todo block %p (%ld words), step %d, n_todos: %d", + bd->start, (unsigned long)(bd->free - bd->u.scan), + stp->abs_no, stp->n_todos); + // ToDo: use buffer_todo + ACQUIRE_SPIN_LOCK(&stp->sync_todo); + if (stp->todos_last == NULL) { + stp->todos_last = bd; + stp->todos = bd; + } else { + stp->todos_last->link = bd; + stp->todos_last = bd; + } + stp->n_todos++; + RELEASE_SPIN_LOCK(&stp->sync_todo); + } + } - // update stats: this is a block that has been copied & scavenged - copied += bd->free - bd->start; + ws->todo_bd = NULL; + ws->todo_free = NULL; + ws->todo_lim = NULL; - // put the scan block on the ws->scavd_list. - bd->link = ws->scavd_list; - ws->scavd_list = bd; - ws->n_scavd_blocks ++; + alloc_todo_block(ws, size); - IF_DEBUG(sanity, - ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks)); + return ws->todo_free; } StgPtr -gc_alloc_todo_block (step_workspace *ws) +alloc_todo_block (step_workspace *ws, nat size) { - bdescr *bd; + bdescr *bd/*, *hd, *tl*/; - if (ws->todo_bd != NULL) { - ws->todo_bd->free = ws->todo_free; + // Grab a part block if we have one, and it has enough room + if (ws->part_list != NULL && + ws->part_list->start + BLOCK_SIZE_W - ws->part_list->free > (int)size) + { + bd = ws->part_list; + ws->part_list = bd->link; + ws->n_part_blocks--; } + else + { + // blocks in to-space get the BF_EVACUATED flag. - // If we already have a todo block, it must be full, so we push it - // out: first to the buffer_todo_bd, then to the step. BUT, don't - // push out the block out if it is already the scan block. - if (ws->todo_bd != NULL && ws->scan_bd != ws->todo_bd) { - ASSERT(ws->todo_bd->link == NULL); - if (ws->buffer_todo_bd == NULL) { - // If the global todo list is empty, push this block - // out immediately rather than caching it in - // buffer_todo_bd, because there might be other threads - // waiting for work. 
- if (ws->stp->todos == NULL) { - push_todo_block(ws->todo_bd, ws->stp); - } else { - ws->buffer_todo_bd = ws->todo_bd; - } - } else { - ASSERT(ws->buffer_todo_bd->link == NULL); - push_todo_block(ws->buffer_todo_bd, ws->stp); - ws->buffer_todo_bd = ws->todo_bd; - } - ws->todo_bd = NULL; - } +// allocBlocks_sync(4, &hd, &tl, +// ws->step->gen_no, ws->step, BF_EVACUATED); +// +// tl->link = ws->part_list; +// ws->part_list = hd->link; +// ws->n_part_blocks += 3; +// +// bd = hd; - bd = allocBlock_sync(); + bd = allocBlock_sync(); + bd->step = ws->step; + bd->gen_no = ws->step->gen_no; + bd->flags = BF_EVACUATED; + bd->u.scan = bd->free = bd->start; + } - bd->gen_no = ws->stp->gen_no; - bd->step = ws->stp; bd->link = NULL; - // blocks in to-space in generations up to and including N - // get the BF_EVACUATED flag. - if (ws->stp->gen_no <= N) { - bd->flags = BF_EVACUATED; - } else { - bd->flags = 0; - } - ws->todo_bd = bd; - ws->todo_free = bd->start; - ws->todo_lim = bd->start + BLOCK_SIZE_W; + ws->todo_free = bd->free; + ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W, + bd->free + stg_max(WORK_UNIT_WORDS,size)); + + debugTrace(DEBUG_gc, "alloc new todo block %p for step %d", + bd->free, ws->step->abs_no); return ws->todo_free; }
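
The work-sharing policy introduced above in todo_block_full can be summarised as: keep raising todo_lim by another work unit while the global todo queue already has work, or while less than half a work unit of unscanned data has accumulated in the current block, and otherwise hand the block over so other GC threads can scavenge it. The standalone C sketch below models only that decision, not the RTS itself; Block, BLOCK_SIZE_W, WORK_UNIT_WORDS, the 8-word object size and the helper names are simplified stand-ins chosen for this example.

/* Standalone sketch (not the RTS API): models the decision made by
 * todo_block_full in the patch above -- either raise the allocation
 * limit by another work unit, or report that the block should be
 * pushed to the global todo queue.  All types and constants here are
 * illustrative stand-ins, not GHC's block descriptors. */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define BLOCK_SIZE_W    4096   /* words per block (illustrative value) */
#define WORK_UNIT_WORDS  128   /* granularity of work sharing          */

typedef struct {
    size_t scan;   /* next word to scavenge                  */
    size_t free;   /* next free word (allocation pointer)    */
    size_t lim;    /* current allocation limit in the block  */
} Block;

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }
static size_t max_sz(size_t a, size_t b) { return a > b ? a : b; }

/* Called when an allocation of `size` words bumps into blk->lim.
 * Returns true if the limit was simply raised (keep using this block),
 * false if the caller should publish the block and start a fresh one. */
static bool todo_block_full_sketch(Block *blk, size_t size,
                                   bool global_queue_nonempty)
{
    /* Keep filling the same block if the global queue already has work,
     * or if less than half a work unit has accumulated since the scan
     * point -- provided the current object still fits in the block. */
    if (global_queue_nonempty ||
        (blk->free - blk->scan < WORK_UNIT_WORDS / 2)) {
        if (blk->free + size < BLOCK_SIZE_W) {
            blk->lim = min_sz(BLOCK_SIZE_W,
                              blk->lim + max_sz(WORK_UNIT_WORDS, size));
            return true;
        }
    }
    return false;   /* push the block out; caller allocates a new one */
}

/* Copy 2000 eight-word objects and count how often the todo block
 * would be handed to the (hypothetical) global queue. */
static size_t run_sim(bool global_queue_nonempty)
{
    Block blk = { .scan = 0, .free = 0, .lim = WORK_UNIT_WORDS };
    size_t pushed = 0;

    for (int i = 0; i < 2000; i++) {
        if (blk.free + 8 > blk.lim &&
            !todo_block_full_sketch(&blk, 8, global_queue_nonempty)) {
            pushed++;                                  /* block exported  */
            blk = (Block){ .scan = 0, .free = 0,
                           .lim = WORK_UNIT_WORDS };   /* fresh block     */
        }
        blk.free += 8;   /* copy one 8-word object into the todo block */
    }
    return pushed;
}

int main(void)
{
    printf("global queue non-empty: %zu block(s) handed off\n",
           run_sim(true));
    printf("global queue empty:     %zu block(s) handed off\n",
           run_sim(false));
    return 0;
}

Built with any C99 compiler, the first run fills whole blocks before replacing them, while the second exports a block roughly every WORK_UNIT_WORDS words. That difference appears to be the point of the change: export work in small units only when the global list is empty and other GC threads may be idle, and otherwise keep copying into the local block to avoid synchronisation on stp->sync_todo.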