1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: utilities
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 #include "PosixSource.h"
/* Spin lock serialising GC threads' access to the block allocator.
 * Every alloc*_sync / freeChain_sync helper below brackets its call
 * into the block allocator with ACQUIRE/RELEASE of this lock. */
SpinLock gc_alloc_block_sync;

/* NOTE(review): the function header for the following acquire/release
 * pair (presumably allocBlock_sync, by analogy with the _sync helpers
 * below) is missing from this extract — confirm against full source. */
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
/* Allocate a contiguous group of n blocks, serialised on
 * gc_alloc_block_sync so multiple GC threads can call it safely.
 * NOTE(review): the return-type line and the body between the lock
 * acquire/release (the allocGroup() call and the return) are missing
 * from this extract. */
allocGroup_sync(nat n)
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
/* Allocate n blocks under the allocator lock, initialising each block
 * descriptor to an empty to-space block and chaining consecutive
 * descriptors via bd[i].link; head and tail of the chain are returned
 * through *hd and *tl (per the parameter names).
 * NOTE(review): this extract is incomplete — the allocation call, the
 * flags assignment, the loop close, the final link fix-up for the last
 * block, and the *hd / *tl stores are missing; confirm against the
 * full source. */
allocBlocks_sync(nat n, bdescr **hd, bdescr **tl,
                 nat gen_no, step *stp,
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    for (i = 0; i < n; i++) {
        bd[i].gen_no = gen_no;              // all n blocks belong to one generation
        bd[i].link = &bd[i+1];              // chain consecutive descriptors
        bd[i].u.scan = bd[i].free = bd[i].start;  // empty block: scan == free == start
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
/* Return a chain of blocks to the block allocator, serialised on
 * gc_alloc_block_sync (the actual freeChain() call between the lock
 * operations is missing from this extract). */
freeChain_sync(bdescr *bd)
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
/* -----------------------------------------------------------------------------
   Workspace utilities
   -------------------------------------------------------------------------- */
/* Take the next todo block from this GC thread's own workspace:
 * the overflow list is tried first (keeping n_todo_overflow in sync
 * with the unlink), then the work-stealing deque via popWSDeque().
 * NOTE(review): the return-type line, local declarations, and the
 * branch structure around the two sources are missing from this
 * extract. */
grab_local_todo_block (step_workspace *ws)
    bd = ws->todo_overflow;
        ws->todo_overflow = bd->link;   // unlink head of the overflow list
        ws->n_todo_overflow--;          // keep the overflow count consistent
    bd = popWSDeque(ws->todo_q);
        ASSERT(bd->link == NULL);       // blocks on the deque must be unlinked
#if defined(THREADED_RTS)
/* Try to steal a todo block for step s from some other GC thread's
 * work-stealing deque.  Threaded RTS only.
 * NOTE(review): the return-type line, locals, and the loop body after
 * the steal attempt are missing from this extract. */
steal_todo_block (nat s)
    // look for work to steal
    for (n = 0; n < n_gc_threads; n++) {
        if (n == gct->thread_index) continue;   // never steal from ourselves
        bd = stealWSDeque(gc_threads[n]->steps[s].todo_q);
135 push_scanned_block (bdescr *bd, step_workspace *ws)
138 ASSERT(bd->link == NULL);
139 ASSERT(bd->step == ws->step);
140 ASSERT(bd->u.scan == bd->free);
142 if (bd->start + bd->blocks * BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS)
144 // a partially full block: put it on the part_list list.
145 bd->link = ws->part_list;
147 ws->n_part_blocks += bd->blocks;
149 ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
153 // put the scan block on the ws->scavd_list.
154 bd->link = ws->scavd_list;
156 ws->n_scavd_blocks += bd->blocks;
158 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
/* Called when the current todo block cannot hold the next object of
 * `size` words.  Either grows the block's soft limit (todo_lim) and
 * keeps allocating in it, or retires the block (pushing it to the
 * scanned list or the global work queue) and allocates a fresh todo
 * block via alloc_todo_block().
 * NOTE(review): the return-type line, local declarations (bd, stp),
 * and several closing braces / else branches are missing from this
 * extract — the control-flow nesting below is reconstructed only by
 * the surviving comments. */
todo_block_full (nat size, step_workspace *ws)
    // todo_free has been pre-incremented by Evac.c:alloc_for_copy().  We
    // are expected to leave it bumped when we've finished here.
    ws->todo_free -= size;          // undo the caller's pre-increment while we decide
    ASSERT(bd->link == NULL);
    ASSERT(bd->step == ws->step);
    // If the global list is not empty, or there's not much work in
    // this block to push, and there's enough room in
    // this block to evacuate the current object, then just increase
    if (!looksEmptyWSDeque(ws->todo_q) ||
        (ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
        if (ws->todo_free + size < bd->start + bd->blocks * BLOCK_SIZE_W) {
            // grow the limit by at least one work unit, capped at block end
            ws->todo_lim = stg_min(bd->start + bd->blocks * BLOCK_SIZE_W,
                                   ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
            debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim);
            ws->todo_free += size;  // re-apply the pre-increment and keep using this block
    gct->copied += ws->todo_free - bd->free;   // account words copied into this block
    bd->free = ws->todo_free;                  // publish the allocation pointer
    ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free);
    // If this block is not the scan block, we want to push it out and
    // make room for a new todo block.
    if (bd != gct->scan_bd)
        // If this block does not have enough space to allocate the
        // current object, but it also doesn't have any work to push, then
        // push it on to the scanned list.  It cannot be empty, because
        // then there would be enough room to copy the current object.
        if (bd->u.scan == bd->free)
            ASSERT(bd->free != bd->start);
            push_scanned_block(bd, ws);
        // Otherwise, push this block out to the global list.
        debugTrace(DEBUG_gc, "push todo block %p (%ld words), step %d, todo_q: %ld",
                   bd->start, (unsigned long)(bd->free - bd->u.scan),
                   stp->abs_no, dequeElements(ws->todo_q));
        if (!pushWSDeque(ws->todo_q, bd)) {
            // deque full: fall back to the unbounded overflow list
            bd->link = ws->todo_overflow;
            ws->todo_overflow = bd;
            ws->n_todo_overflow++;
    ws->todo_free = NULL;           // old block retired; force a fresh one
    alloc_todo_block(ws, size);     // sets up ws->todo_free / ws->todo_lim
    ws->todo_free += size;          // leave todo_free bumped, as the caller expects
/* Install a fresh todo block in the workspace, big enough for an
 * object of `size` words: reuse a block from part_list when it has
 * room, otherwise allocate a new block (or block group, when size
 * exceeds one block) under the allocator lock.  Sets ws->todo_free /
 * ws->todo_lim and returns the allocation pointer.
 * NOTE(review): the return-type line, the part_list head test, branch
 * braces, and the tail of the allocGroup_sync argument list are
 * missing from this extract. */
alloc_todo_block (step_workspace *ws, nat size)
    bdescr *bd/*, *hd, *tl */;
    // Grab a part block if we have one, and it has enough room
        bd->start + bd->blocks * BLOCK_SIZE_W - bd->free > (int)size)
        ws->part_list = bd->link;          // unlink the reused block
        ws->n_part_blocks -= bd->blocks;   // keep the count in sync
        // blocks in to-space get the BF_EVACUATED flag.
        // NOTE(review): commented-out batching experiment retained verbatim
        // (allocate 16 blocks and stash the spares on part_list):
//        allocBlocks_sync(16, &hd, &tl,
//                         ws->step->gen_no, ws->step, BF_EVACUATED);
//        tl->link = ws->part_list;
//        ws->part_list = hd->link;
//        ws->n_part_blocks += 15;
        if (size > BLOCK_SIZE_W) {
            // object bigger than a block: need a whole group
            bd = allocGroup_sync((lnat)BLOCK_ROUND_UP(size*sizeof(W_))
            bd = allocBlock_sync();
        bd->gen_no = ws->step->gen_no;
        bd->flags = BF_EVACUATED;          // to-space block
        bd->u.scan = bd->free = bd->start; // fresh block: nothing allocated or scanned
    ws->todo_free = bd->free;
    // soft limit: at least one work unit (or the object), capped at block end
    ws->todo_lim = stg_min(bd->start + bd->blocks * BLOCK_SIZE_W,
                           bd->free + stg_max(WORK_UNIT_WORDS,size));
    debugTrace(DEBUG_gc, "alloc new todo block %p for step %d",
               bd->free, ws->step->abs_no);
    return ws->todo_free;
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */
/* Debug helper: walk a generation's mutable list and print each entry's
 * address and closure info type to the debug output.
 * NOTE(review): the return-type line, local declarations (bd, p), and
 * the closing of the loops run past the end of this extract. */
printMutableList(generation *gen)
    debugBelch("mutable list %p: ", gen->mut_list);
    for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
        for (p = bd->start; p < bd->free; p++) {   // each word is a closure pointer
            debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));