1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: utilities
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 #include "PosixSource.h"
17 #include "BlockAlloc.h"
/* NOTE(review): this extract is fragmentary -- the leading number on each
 * line is an extraction artifact (original file line number) and many
 * interior lines are missing.  Only the visible statements are annotated. */

/* Spin lock serialising GC worker threads' access to the shared block
 * allocator; every *_sync helper below brackets its allocator call with
 * ACQUIRE/RELEASE of this lock. */
30 SpinLock gc_alloc_block_sync;

/* (fragment: presumably allocBlock_sync -- signature not visible, TODO
 * confirm) single-block allocation under gc_alloc_block_sync. */
37 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
39 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);

/* (fragment: allocGroup_sync) allocate a group of n blocks while holding
 * gc_alloc_block_sync.  Return type and body largely missing from view. */
44 allocGroup_sync(nat n)
47 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
49 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);

/* (fragment: allocBlocks_sync) allocate n blocks under the lock and
 * initialise each block descriptor: generation number, link to the next
 * descriptor in the group, and u.scan/free both reset to the block start
 * (i.e. block is empty and fully unscanned).  *hd/*tl presumably receive
 * the head and tail of the resulting chain -- assignments not visible,
 * TODO confirm against the full source. */
56 allocBlocks_sync(nat n, bdescr **hd, bdescr **tl,
57 nat gen_no, step *stp,
62 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
64 for (i = 0; i < n; i++) {
66 bd[i].gen_no = gen_no;
69 bd[i].link = &bd[i+1];
70 bd[i].u.scan = bd[i].free = bd[i].start;
74 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);

/* (fragment: freeChain_sync) return a chain of blocks to the allocator
 * while holding gc_alloc_block_sync. */
79 freeChain_sync(bdescr *bd)
81 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
83 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
86 /* -----------------------------------------------------------------------------
88 -------------------------------------------------------------------------- */
/* (fragment: grab_local_todo_block) take a todo block from this worker's
 * own workspace.  Visible order of preference: first the todo_overflow
 * list (popped like a stack, with its counter decremented), then the
 * work-stealing deque todo_q.  Control flow between the two paths is not
 * visible in this extract. */
91 grab_local_todo_block (gen_workspace *ws)
98 bd = ws->todo_overflow;
/* Unlink the head of the overflow list and account for it. */
101 ws->todo_overflow = bd->link;
103 ws->n_todo_overflow--;
/* Fall back to popping our own work-stealing deque. */
107 bd = popWSDeque(ws->todo_q);
/* Blocks handed out must not still be linked into any list. */
110 ASSERT(bd->link == NULL);
117 #if defined(THREADED_RTS)
/* (fragment: steal_todo_block) threaded RTS only: try to steal a todo
 * block for generation g from some other GC worker's deque.  Return
 * type and the handling of a successful steal are not visible here. */
119 steal_todo_block (nat g)
124 // look for work to steal
/* Scan every other GC thread's workspace for generation g, skipping
 * ourselves; stealWSDeque takes from the top (thief) end of the deque. */
125 for (n = 0; n < n_gc_threads; n++) {
126 if (n == gct->thread_index) continue;
127 bd = stealWSDeque(gc_threads[n]->gens[g].todo_q);
/* (fragment: push_scanned_block) file a fully-scanned block into the
 * workspace: blocks with more than WORK_UNIT_WORDS of free space left go
 * on ws->part_list for later refilling; otherwise onto ws->scavd_list.
 * Preconditions (asserted): block is unlinked, belongs to this
 * workspace's generation, and has been scanned up to its free pointer. */
137 push_scanned_block (bdescr *bd, gen_workspace *ws)
140 ASSERT(bd->link == NULL);
141 ASSERT(bd->gen == ws->gen);
142 ASSERT(bd->u.scan == bd->free);
/* Remaining free words in the block = (end of block group) - free ptr. */
144 if (bd->start + bd->blocks * BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS)
146 // a partially full block: put it on the part_list list.
147 bd->link = ws->part_list;
149 ws->n_part_blocks += bd->blocks;
/* NOTE(review): the apparently-unbalanced trailing ')' here presumably
 * closes an IF_DEBUG(sanity, ...) wrapper whose opening line is missing
 * from this extract -- confirm against the full source. */
151 ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
155 // put the scan block on the ws->scavd_list.
156 bd->link = ws->scavd_list;
158 ws->n_scavd_blocks += bd->blocks;
/* NOTE(review): same presumed IF_DEBUG(sanity, ...) wrapper as above. */
160 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
/* (fragment: todo_block_full) called when the current todo block cannot
 * fit an object of `size` words.  Either (a) grows the block's soft
 * limit todo_lim and retries in place, or (b) flushes the block (to the
 * scanned list, or the global todo queue / overflow list) and allocates
 * a fresh todo block.  Visible return statements are missing, but the
 * contract below says todo_free is left pre-incremented by `size`. */
165 todo_block_full (nat size, gen_workspace *ws)
170 // todo_free has been pre-incremented by Evac.c:alloc_for_copy(). We
171 // are expected to leave it bumped when we've finished here.
/* Undo the caller's speculative bump while we decide what to do. */
172 ws->todo_free -= size;
177 ASSERT(bd->link == NULL);
178 ASSERT(bd->gen == ws->gen);
180 // If the global list is not empty, or there's not much work in
181 // this block to push, and there's enough room in
182 // this block to evacuate the current object, then just increase
/* (comment truncated by extraction: ...the limit, i.e. grow todo_lim.) */
184 if (!looksEmptyWSDeque(ws->todo_q) ||
185 (ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
/* Object still fits before the hard end of the block group? */
186 if (ws->todo_free + size < bd->start + bd->blocks * BLOCK_SIZE_W) {
/* Grow the soft limit by at least one work unit, clamped to block end. */
187 ws->todo_lim = stg_min(bd->start + bd->blocks * BLOCK_SIZE_W,
188 ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
189 debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim);
/* Re-apply the caller's bump: allocation proceeds in the same block. */
191 ws->todo_free += size;
/* --- flush path: account copied words and sync bd->free with todo_free. */
196 gct->copied += ws->todo_free - bd->free;
197 bd->free = ws->todo_free;
199 ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free);
201 // If this block is not the scan block, we want to push it out and
202 // make room for a new todo block.
203 if (bd != gct->scan_bd)
205 // If this block does not have enough space to allocate the
206 // current object, but it also doesn't have any work to push, then
207 // push it on to the scanned list. It cannot be empty, because
208 // then there would be enough room to copy the current object.
209 if (bd->u.scan == bd->free)
211 ASSERT(bd->free != bd->start);
212 push_scanned_block(bd, ws);
214 // Otherwise, push this block out to the global list.
219 debugTrace(DEBUG_gc, "push todo block %p (%ld words), step %d, todo_q: %ld",
220 bd->start, (unsigned long)(bd->free - bd->u.scan),
221 gen->no, dequeElements(ws->todo_q));
/* Deque full: fall back to the unbounded per-workspace overflow list. */
223 if (!pushWSDeque(ws->todo_q, bd)) {
224 bd->link = ws->todo_overflow;
225 ws->todo_overflow = bd;
226 ws->n_todo_overflow++;
/* Detach the workspace from the old block and get a fresh todo block
 * big enough for `size`. */
232 ws->todo_free = NULL;
235 alloc_todo_block(ws, size);
/* Leave todo_free bumped by `size`, per the contract at the top. */
238 ws->todo_free += size;
/* (fragment: alloc_todo_block) install a fresh todo block in the
 * workspace, large enough to hold `size` words.  Visible strategy:
 * reuse a block from ws->part_list when it has room, otherwise allocate
 * a new block (or a rounded-up group for objects larger than one block)
 * under the allocator spin lock.  Returns the new ws->todo_free. */
243 alloc_todo_block (gen_workspace *ws, nat size)
245 bdescr *bd/*, *hd, *tl */;
247 // Grab a part block if we have one, and it has enough room
/* Free words remaining in the part block must exceed the request.
 * (The (int)size cast suggests a signed comparison -- confirm the full
 * condition against the unexcerpted source.) */
250 bd->start + bd->blocks * BLOCK_SIZE_W - bd->free > (int)size)
/* Unlink the reused block from part_list and fix the block count. */
252 ws->part_list = bd->link;
253 ws->n_part_blocks -= bd->blocks;
257 // blocks in to-space get the BF_EVACUATED flag.
/* Commented-out batch-allocation experiment, retained from the original
 * source (allocate 16 blocks at once, keep 15 spares on part_list). */
259 // allocBlocks_sync(16, &hd, &tl,
260 // ws->step->gen_no, ws->step, BF_EVACUATED);
262 // tl->link = ws->part_list;
263 // ws->part_list = hd->link;
264 // ws->n_part_blocks += 15;
/* Oversized object: allocate a whole group, rounded up to blocks. */
268 if (size > BLOCK_SIZE_W) {
269 bd = allocGroup_sync((lnat)BLOCK_ROUND_UP(size*sizeof(W_))
272 bd = allocBlock_sync();
/* New block goes to this generation's to-space, marked evacuated,
 * with scan and free pointers reset to the start (empty, unscanned). */
274 initBdescr(bd, ws->gen, ws->gen->to);
275 bd->flags = BF_EVACUATED;
276 bd->u.scan = bd->free = bd->start;
/* Point the workspace at the block: allocate from bd->free, with a
 * soft limit of at least one work unit (or `size`), clamped to the
 * hard end of the block group. */
282 ws->todo_free = bd->free;
283 ws->todo_lim = stg_min(bd->start + bd->blocks * BLOCK_SIZE_W,
284 bd->free + stg_max(WORK_UNIT_WORDS,size));
286 debugTrace(DEBUG_gc, "alloc new todo block %p for gen %d",
287 bd->free, ws->gen->no);
289 return ws->todo_free;
292 /* -----------------------------------------------------------------------------
294 * -------------------------------------------------------------------------- */
298 printMutableList(bdescr *bd)
302 debugBelch("mutable list %p: ", bd);
304 for (; bd != NULL; bd = bd->link) {
305 for (p = bd->start; p < bd->free; p++) {
306 debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));