summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
0f38eff)
It is possible for the program to allocate a single object larger than a
block, without going through the normal large-object mechanisms that
we have for arrays, threads, and so on.
The GC was assuming that no object was larger than a block, but #3424
contains a program that breaks this assumption. This patch removes the
assumption. The objects in question will still be copied — that is,
they don't get the normal large-object treatment — but this case is
unlikely to occur often in practice.
In the future we may improve things by generating code to allocate
them as large objects in the first place.
+/* Allocate a contiguous group of n blocks, serialized on the GC's
+ * block-allocation spin lock.  Counterpart to allocBlock_sync for the
+ * multi-block case: needed now that a single object may be larger than
+ * one block (#3424), so todo blocks can span several blocks. */
+static bdescr *
+allocGroup_sync(nat n)
+{
+ bdescr *bd;
+ ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ bd = allocGroup(n);
+ RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+ return bd;
+}
+
+
ASSERT(bd->step == ws->step);
ASSERT(bd->u.scan == bd->free);
ASSERT(bd->step == ws->step);
ASSERT(bd->u.scan == bd->free);
- if (bd->start + BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS)
+ if (bd->start + bd->blocks * BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS)
{
// a partially full block: put it on the part_list list.
bd->link = ws->part_list;
ws->part_list = bd;
{
// a partially full block: put it on the part_list list.
bd->link = ws->part_list;
ws->part_list = bd;
+ ws->n_part_blocks += bd->blocks;
IF_DEBUG(sanity,
ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
}
IF_DEBUG(sanity,
ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
}
// put the scan block on the ws->scavd_list.
bd->link = ws->scavd_list;
ws->scavd_list = bd;
// put the scan block on the ws->scavd_list.
bd->link = ws->scavd_list;
ws->scavd_list = bd;
+ ws->n_scavd_blocks += bd->blocks;
IF_DEBUG(sanity,
ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
}
IF_DEBUG(sanity,
ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
}
// the limit.
if (!looksEmptyWSDeque(ws->todo_q) ||
(ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
// the limit.
if (!looksEmptyWSDeque(ws->todo_q) ||
(ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
- if (ws->todo_free + size < bd->start + BLOCK_SIZE_W) {
- ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
+ if (ws->todo_free + size < bd->start + bd->blocks * BLOCK_SIZE_W) {
+ ws->todo_lim = stg_min(bd->start + bd->blocks * BLOCK_SIZE_W,
ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim);
p = ws->todo_free;
ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim);
p = ws->todo_free;
bdescr *bd/*, *hd, *tl */;
// Grab a part block if we have one, and it has enough room
bdescr *bd/*, *hd, *tl */;
// Grab a part block if we have one, and it has enough room
- if (ws->part_list != NULL &&
- ws->part_list->start + BLOCK_SIZE_W - ws->part_list->free > (int)size)
+ bd = ws->part_list;
+ if (bd != NULL &&
+ bd->start + bd->blocks * BLOCK_SIZE_W - bd->free > (int)size)
ws->part_list = bd->link;
ws->part_list = bd->link;
+ ws->n_part_blocks -= bd->blocks;
- bd = allocBlock_sync();
+ if (size > BLOCK_SIZE_W) {
+ bd = allocGroup_sync((lnat)BLOCK_ROUND_UP(size*sizeof(W_))
+ / BLOCK_SIZE);
+ } else {
+ bd = allocBlock_sync();
+ }
bd->step = ws->step;
bd->gen_no = ws->step->gen_no;
bd->flags = BF_EVACUATED;
bd->step = ws->step;
bd->gen_no = ws->step->gen_no;
bd->flags = BF_EVACUATED;
ws->todo_bd = bd;
ws->todo_free = bd->free;
ws->todo_bd = bd;
ws->todo_free = bd->free;
- ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
+ ws->todo_lim = stg_min(bd->start + bd->blocks * BLOCK_SIZE_W,
bd->free + stg_max(WORK_UNIT_WORDS,size));
debugTrace(DEBUG_gc, "alloc new todo block %p for step %d",
bd->free + stg_max(WORK_UNIT_WORDS,size));
debugTrace(DEBUG_gc, "alloc new todo block %p for step %d",