/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: utilities
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
14 #include "PosixSource.h"
17 #include "BlockAlloc.h"
/* Spinlock serialising all GC-thread access to the global block
 * allocator (allocGroup/allocBlock/freeChain are not thread-safe by
 * themselves).  Every *_sync helper below brackets its allocator call
 * with this lock. */
29 SpinLock gc_alloc_block_sync;

/* NOTE(review): the two lines below are the lock/unlock bracket of
 * allocBlock_sync(); the function's signature, the allocBlock() call it
 * protects, and its return statement (original lines ~31-40) are elided
 * in this extract — confirm against the full source before editing. */
36 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
38 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
/* allocGroup_sync: allocate a contiguous group of n blocks from the
 * block allocator while holding gc_alloc_block_sync.
 *
 * NOTE(review): fragment — the return type, opening brace, the
 * allocGroup(n) call between the lock/unlock pair, and the return of
 * the resulting bdescr* are elided here (original lines 44-50). */
43 allocGroup_sync(nat n)
46 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
48 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
/* allocBlocks_sync: allocate n individual blocks under the allocator
 * lock and initialise each block descriptor, returning the chain via
 * the hd/tl out-parameters.  Each descriptor gets the supplied gen_no,
 * is linked to its successor (bd[i].link = &bd[i+1]), and has its scan
 * and free pointers reset to the start of the block.
 *
 * NOTE(review): fragment — the trailing parameter(s) after `step *stp,`
 * (likely a flags argument, cf. the BF_EVACUATED comment at original
 * line 258), the allocation call itself, the loop's remaining
 * initialisation statements, closing braces, and hd/tl assignment are
 * elided (original lines 57-76). */
55 allocBlocks_sync(nat n, bdescr **hd, bdescr **tl,
56 nat gen_no, step *stp,
61 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
63 for (i = 0; i < n; i++) {
65 bd[i].gen_no = gen_no;
68 bd[i].link = &bd[i+1];
69 bd[i].u.scan = bd[i].free = bd[i].start;
73 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
/* freeChain_sync: return a chain of blocks to the block allocator
 * while holding gc_alloc_block_sync.
 *
 * NOTE(review): fragment — the freeChain(bd) call between the
 * lock/unlock pair is elided (original line ~81). */
78 freeChain_sync(bdescr *bd)
80 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
82 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
85 /* -----------------------------------------------------------------------------
87 -------------------------------------------------------------------------- */
/* grab_local_todo_block: fetch the next pending to-do block for this
 * GC thread's workspace.  The local overflow list (blocks that did not
 * fit in the bounded work-stealing deque — see todo_block_full below)
 * is drained first; otherwise a block is popped from the thread's own
 * end of the work-stealing deque ws->todo_q.
 *
 * NOTE(review): fragment — the function's return type, the branch
 * structure around the two sources, and the return statements are
 * elided (original lines 91-114).  The ASSERT shows a popped block is
 * expected to be unlinked before being handed back. */
90 grab_local_todo_block (gen_workspace *ws)
97 bd = ws->todo_overflow;
100 ws->todo_overflow = bd->link;
102 ws->n_todo_overflow--;
106 bd = popWSDeque(ws->todo_q);
109 ASSERT(bd->link == NULL);
35 /* (see marker below) */
/* steal_todo_block: work-stealing — scan every other GC thread's
 * workspace for generation g and try to steal a to-do block from the
 * far end of its deque.  Only compiled in the threaded RTS.
 *
 * NOTE(review): fragment — the declarations of bd/n, the success check
 * on the stolen block, the closing of the loop, and the return value
 * are elided (original lines 117-134). */
116 #if defined(THREADED_RTS)
118 steal_todo_block (nat g)
123 // look for work to steal
124 for (n = 0; n < n_gc_threads; n++) {
125 if (n == gct->thread_index) continue;
126 bd = stealWSDeque(gc_threads[n]->gens[g].todo_q);
/* push_scanned_block: file a fully-scanned block (u.scan == free) into
 * the workspace.  If the block still has more than WORK_UNIT_WORDS of
 * unused space it goes on ws->part_list so a later alloc_todo_block
 * can reuse it; otherwise it is retired onto ws->scavd_list.  Both
 * counters track blocks (bd->blocks), not bytes.
 *
 * NOTE(review): fragment — braces, the `ws->part_list = bd;` /
 * `ws->scavd_list = bd;` list-head updates, and the `else` keyword are
 * elided (original lines 137-161).  The apparently unbalanced `));` on
 * the two ASSERT lines is NOT a typo: each closes an elided
 * `IF_DEBUG(sanity,` wrapper from the preceding (missing) line — do
 * not "fix" the parentheses in isolation. */
136 push_scanned_block (bdescr *bd, gen_workspace *ws)
139 ASSERT(bd->link == NULL);
140 ASSERT(bd->gen == ws->gen);
141 ASSERT(bd->u.scan == bd->free);
143 if (bd->start + bd->blocks * BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS)
145 // a partially full block: put it on the part_list list.
146 bd->link = ws->part_list;
148 ws->n_part_blocks += bd->blocks;
150 ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
154 // put the scan block on the ws->scavd_list.
155 bd->link = ws->scavd_list;
157 ws->n_scavd_blocks += bd->blocks;
159 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
/* todo_block_full: called from the evacuation fast path when the
 * current to-do block cannot hold an object of `size` words.  Either
 * extends the current block's soft limit (todo_lim) and keeps going,
 * or seals the block, pushes it out for scavenging, and allocates a
 * fresh to-do block.  Returns the (pre-bumped) allocation pointer —
 * the return statements themselves are elided in this extract.
 *
 * NOTE(review): fragment — the declaration/initialisation of bd
 * (presumably bd = ws->todo_bd), closing braces, a return after the
 * limit bump, the `ws->todo_bd = NULL;` companion to the todo_free
 * reset, and the final return are all elided. */
164 todo_block_full (nat size, gen_workspace *ws)
169 // todo_free has been pre-incremented by Evac.c:alloc_for_copy(). We
170 // are expected to leave it bumped when we've finished here.
/* Undo the pre-increment while we decide what to do; it is re-applied
 * below (line 190) or after allocating a new block (line 237). */
171 ws->todo_free -= size;
176 ASSERT(bd->link == NULL);
177 ASSERT(bd->gen == ws->gen);
179 // If the global list is not empty, or there's not much work in
180 // this block to push, and there's enough room in
181 // this block to evacuate the current object, then just increase
/* Fast path: keep filling this block instead of pushing a tiny work
 * unit.  The soft limit grows by at least WORK_UNIT_WORDS, capped at
 * the physical end of the block group. */
183 if (!looksEmptyWSDeque(ws->todo_q) ||
184 (ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
185 if (ws->todo_free + size < bd->start + bd->blocks * BLOCK_SIZE_W) {
186 ws->todo_lim = stg_min(bd->start + bd->blocks * BLOCK_SIZE_W,
187 ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
188 debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim);
/* Re-apply the caller's pre-increment and (in the elided line that
 * follows) return without sealing the block. */
190 ws->todo_free += size;
/* Slow path: seal the block — account the words copied into it and
 * publish the final free pointer on the descriptor. */
195 gct->copied += ws->todo_free - bd->free;
196 bd->free = ws->todo_free;
198 ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free);
200 // If this block is not the scan block, we want to push it out and
201 // make room for a new todo block.
202 if (bd != gct->scan_bd)
204 // If this block does not have enough space to allocate the
205 // current object, but it also doesn't have any work to push, then
206 // push it on to the scanned list. It cannot be empty, because
207 // then there would be enough room to copy the current object.
208 if (bd->u.scan == bd->free)
210 ASSERT(bd->free != bd->start);
211 push_scanned_block(bd, ws);
213 // Otherwise, push this block out to the global list.
218 debugTrace(DEBUG_gc, "push todo block %p (%ld words), step %d, todo_q: %ld",
219 bd->start, (unsigned long)(bd->free - bd->u.scan),
220 gen->no, dequeElements(ws->todo_q));
/* The deque is bounded: on overflow, fall back to the thread-local
 * overflow list drained by grab_local_todo_block. */
222 if (!pushWSDeque(ws->todo_q, bd)) {
223 bd->link = ws->todo_overflow;
224 ws->todo_overflow = bd;
225 ws->n_todo_overflow++;
/* Detach the workspace from the sealed block and start a new one;
 * alloc_todo_block re-establishes todo_bd/todo_free/todo_lim. */
231 ws->todo_free = NULL;
234 alloc_todo_block(ws, size);
/* Leave todo_free bumped by `size` for the caller, per the contract
 * stated at the top of the function. */
237 ws->todo_free += size;
/* alloc_todo_block: install a fresh to-do block in the workspace,
 * large enough for an object of `size` words.  Prefers recycling the
 * head of ws->part_list when it has enough room; otherwise takes a
 * block (or, for large objects, a group) from the shared allocator and
 * initialises it as to-space (BF_EVACUATED, scan == free == start).
 * Returns the new allocation pointer ws->todo_free.
 *
 * NOTE(review): fragment — the `if (ws->part_list != NULL && ...` head
 * of the recycle test, the else branch structure, the second argument
 * of the allocGroup_sync call, the `ws->todo_bd = bd;` assignment, and
 * closing braces are elided (original lines 243-287). */
242 alloc_todo_block (gen_workspace *ws, nat size)
244 bdescr *bd/*, *hd, *tl */;
246 // Grab a part block if we have one, and it has enough room
/* (elided condition head) recycle part_list head when its remaining
 * space exceeds `size`. */
249 bd->start + bd->blocks * BLOCK_SIZE_W - bd->free > (int)size)
251 ws->part_list = bd->link;
252 ws->n_part_blocks -= bd->blocks;
256 // blocks in to-space get the BF_EVACUATED flag.
/* Disabled experiment: batch-allocate 16 blocks at a time, keeping the
 * spares on part_list.  Left commented out upstream. */
258 // allocBlocks_sync(16, &hd, &tl,
259 // ws->step->gen_no, ws->step, BF_EVACUATED);
261 // tl->link = ws->part_list;
262 // ws->part_list = hd->link;
263 // ws->n_part_blocks += 15;
/* Objects larger than one block need a contiguous group, rounded up
 * to whole blocks; the group-count argument continues on an elided
 * line. */
267 if (size > BLOCK_SIZE_W) {
268 bd = allocGroup_sync((lnat)BLOCK_ROUND_UP(size*sizeof(W_))
271 bd = allocBlock_sync();
273 initBdescr(bd, ws->gen, ws->gen->to);
274 bd->flags = BF_EVACUATED;
275 bd->u.scan = bd->free = bd->start;
/* Publish the new block's bump-allocation window in the workspace.
 * todo_lim is a soft limit: at most one work unit (or `size`) past
 * free, never past the physical end of the group. */
281 ws->todo_free = bd->free;
282 ws->todo_lim = stg_min(bd->start + bd->blocks * BLOCK_SIZE_W,
283 bd->free + stg_max(WORK_UNIT_WORDS,size));
285 debugTrace(DEBUG_gc, "alloc new todo block %p for gen %d",
286 bd->free, ws->gen->no);
288 return ws->todo_free;
291 /* -----------------------------------------------------------------------------
293 * -------------------------------------------------------------------------- */
/* printMutableList: debugging aid — dump every entry on a generation's
 * mutable list.  Walks the chain of blocks and prints each stored word
 * as a closure pointer with its info-table type.
 *
 * NOTE(review): fragment — the return type, local declarations of
 * bd/p, and the function's closing lines run past / are elided from
 * this extract (original lines 292-306+). */
297 printMutableList(generation *gen)
302 debugBelch("mutable list %p: ", gen->mut_list);
304 for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
305 for (p = bd->start; p < bd->free; p++) {
306 debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));