/* -----------------------------------------------------------------------------
*
- * (c) The GHC Team 1998-2006
+ * (c) The GHC Team 1998-2008
*
* Generational garbage collector: utilities
*
#include "RtsFlags.h"
#include "Storage.h"
#include "GC.h"
+#include "GCThread.h"
#include "GCUtils.h"
#include "Printer.h"
#include "Trace.h"
+#ifdef THREADED_RTS
+#include "WSDeque.h"
+#endif
#ifdef THREADED_RTS
SpinLock gc_alloc_block_sync;
return bd;
}
+
+#if 0
+static void
+allocBlocks_sync(nat n, bdescr **hd, bdescr **tl,
+ nat gen_no, step *stp,
+ StgWord32 flags)
+{
+ bdescr *bd;
+ nat i;
+ ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ bd = allocGroup(n);
+ for (i = 0; i < n; i++) {
+ bd[i].blocks = 1;
+ bd[i].gen_no = gen_no;
+ bd[i].step = stp;
+ bd[i].flags = flags;
+        bd[i].link = (i == n-1) ? NULL : &bd[i+1];
+ bd[i].u.scan = bd[i].free = bd[i].start;
+ }
+ *hd = bd;
+ *tl = &bd[n-1];
+ RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+}
+#endif
+
void
freeChain_sync(bdescr *bd)
{
-------------------------------------------------------------------------- */
bdescr *
-grab_todo_block (step_workspace *ws)
+grab_local_todo_block (step_workspace *ws)
{
bdescr *bd;
step *stp;
stp = ws->step;
- bd = NULL;
- if (ws->buffer_todo_bd)
+ bd = ws->todo_overflow;
+ if (bd != NULL)
+ {
+ ws->todo_overflow = bd->link;
+ bd->link = NULL;
+ ws->n_todo_overflow--;
+ return bd;
+ }
+
+ bd = popWSDeque(ws->todo_q);
+ if (bd != NULL)
{
- bd = ws->buffer_todo_bd;
ASSERT(bd->link == NULL);
- ws->buffer_todo_bd = NULL;
return bd;
}
- ACQUIRE_SPIN_LOCK(&stp->sync_todo);
- if (stp->todos) {
- bd = stp->todos;
- if (stp->todos == stp->todos_last) {
- stp->todos_last = NULL;
+ return NULL;
+}
+
+bdescr *
+steal_todo_block (nat s)
+{
+ nat n;
+ bdescr *bd;
+
+ // look for work to steal
+ for (n = 0; n < n_gc_threads; n++) {
+ if (n == gct->thread_index) continue;
+ bd = stealWSDeque(gc_threads[n]->steps[s].todo_q);
+ if (bd) {
+ return bd;
}
- stp->todos = bd->link;
- stp->n_todos--;
- bd->link = NULL;
- }
- RELEASE_SPIN_LOCK(&stp->sync_todo);
- return bd;
+ }
+ return NULL;
}
void
StgPtr
todo_block_full (nat size, step_workspace *ws)
{
+ StgPtr p;
bdescr *bd;
+ // todo_free has been pre-incremented by Evac.c:alloc_for_copy(). We
+ // are expected to leave it bumped when we've finished here.
+ ws->todo_free -= size;
+
bd = ws->todo_bd;
ASSERT(bd != NULL);
ASSERT(bd->link == NULL);
ASSERT(bd->step == ws->step);
- bd->free = ws->todo_free;
-
// If the global list is not empty, or there's not much work in
// this block to push, and there's enough room in
// this block to evacuate the current object, then just increase
// the limit.
- if (ws->step->todos != NULL ||
- (bd->free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
- if (bd->free + size < bd->start + BLOCK_SIZE_W) {
- debugTrace(DEBUG_gc, "increasing limit for %p", bd->start);
+ if (!looksEmptyWSDeque(ws->todo_q) ||
+ (ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
+ if (ws->todo_free + size < bd->start + BLOCK_SIZE_W) {
ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
- return ws->todo_free;
+ debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim);
+ p = ws->todo_free;
+ ws->todo_free += size;
+ return p;
}
}
+ gct->copied += ws->todo_free - bd->free;
+ bd->free = ws->todo_free;
+
ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free);
// If this block is not the scan block, we want to push it out and
// make room for a new todo block.
- if (bd != ws->scan_bd)
+ if (bd != gct->scan_bd)
{
// If this block does not have enough space to allocate the
// current object, but it also doesn't have any work to push, then
{
step *stp;
stp = ws->step;
- trace(TRACE_gc|DEBUG_gc, "push todo block %p (%d words), step %d, n_todos: %d",
- bd->start, bd->free - bd->u.scan, stp->abs_no, stp->n_todos);
- // ToDo: use buffer_todo
- ACQUIRE_SPIN_LOCK(&stp->sync_todo);
- if (stp->todos_last == NULL) {
- stp->todos_last = bd;
- stp->todos = bd;
- } else {
- stp->todos_last->link = bd;
- stp->todos_last = bd;
+	debugTrace(DEBUG_gc, "push todo block %p (%lu words), step %d, todo_q: %ld",
+		   bd->start, (unsigned long)(bd->free - bd->u.scan),
+		   stp->abs_no, dequeElements(ws->todo_q));
+
+ if (!pushWSDeque(ws->todo_q, bd)) {
+ bd->link = ws->todo_overflow;
+ ws->todo_overflow = bd;
+ ws->n_todo_overflow++;
}
- stp->n_todos++;
- RELEASE_SPIN_LOCK(&stp->sync_todo);
}
}
alloc_todo_block(ws, size);
- return ws->todo_free;
+ p = ws->todo_free;
+ ws->todo_free += size;
+ return p;
}
StgPtr
alloc_todo_block (step_workspace *ws, nat size)
{
- bdescr *bd;
+ bdescr *bd/*, *hd, *tl */;
// Grab a part block if we have one, and it has enough room
if (ws->part_list != NULL &&
}
else
{
+ // blocks in to-space get the BF_EVACUATED flag.
+
+// allocBlocks_sync(16, &hd, &tl,
+// ws->step->gen_no, ws->step, BF_EVACUATED);
+//
+// tl->link = ws->part_list;
+// ws->part_list = hd->link;
+// ws->n_part_blocks += 15;
+//
+// bd = hd;
+
bd = allocBlock_sync();
- bd->gen_no = ws->step->gen_no;
bd->step = ws->step;
- bd->u.scan = bd->start;
-
- // blocks in to-space in generations up to and including N
- // get the BF_EVACUATED flag.
- if (ws->step->gen_no <= N) {
- bd->flags = BF_EVACUATED;
- } else {
- bd->flags = 0;
- }
+ bd->gen_no = ws->step->gen_no;
+ bd->flags = BF_EVACUATED;
+ bd->u.scan = bd->free = bd->start;
}
bd->link = NULL;
bd->free + stg_max(WORK_UNIT_WORDS,size));
debugTrace(DEBUG_gc, "alloc new todo block %p for step %d",
- bd->start, ws->step->abs_no);
+ bd->free, ws->step->abs_no);
return ws->todo_free;
}