1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: utilities
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10  * https://gitlab.haskell.org/ghc/ghc/-/wikis/commentary/rts/storage/gc
12 * ---------------------------------------------------------------------------*/
14 #include "PosixSource.h"
// Spin lock serialising GC threads' access to the block allocator; taken
// by the ACQUIRE/RELEASE pairs below and in the *_sync helpers.
28 SpinLock gc_alloc_block_sync;
// NOTE(review): the two lines below look like the lock/unlock pair of a
// small *_sync allocation wrapper whose other lines are not visible in
// this excerpt — confirm against the full file.
35     ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
37     RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
// Allocate a group of n blocks while holding gc_alloc_block_sync,
// initialising each block descriptor in turn.  *hd/*tl presumably receive
// the head and tail of the resulting chain — the lines that set them are
// not visible here; confirm against the full definition.
44 allocBlocks_sync(nat n, bdescr **hd, bdescr **tl,
45                  nat gen_no, step *stp,
50     ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
52     for (i = 0; i < n; i++) {
           // each descriptor records its generation and links to the next
54         bd[i].gen_no = gen_no;
57         bd[i].link = &bd[i+1];
           // fresh block: nothing copied into it yet, nothing scanned yet
58         bd[i].u.scan = bd[i].free = bd[i].start;
62     RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
// Return a chain of blocks to the block allocator; the spin lock makes
// this safe to call concurrently from multiple GC threads.
67 freeChain_sync(bdescr *bd)
69     ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
71     RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
74 /* -----------------------------------------------------------------------------
76 -------------------------------------------------------------------------- */
// Take a todo block from this workspace's local stash: the overflow list
// is tried first, then the local end of the work-stealing deque.
79 grab_local_todo_block (step_workspace *ws)
86     bd = ws->todo_overflow;
       // pop the head of the overflow list, keeping its count in step
89         ws->todo_overflow = bd->link;
91         ws->n_todo_overflow--;
       // otherwise try the local end of the work-stealing queue
95     bd = popWSDeque(ws->todo_q);
           // a block handed out for scavenging must not be linked anywhere
98         ASSERT(bd->link == NULL);
105 #if defined(THREADED_RTS)
// Try to steal a todo block for step s from some other GC thread's
// work-stealing deque (parallel GC only).
107 steal_todo_block (nat s)
112     // look for work to steal
113     for (n = 0; n < n_gc_threads; n++) {
            // never steal from ourselves
114         if (n == gct->thread_index) continue;
115         bd = stealWSDeque(gc_threads[n]->steps[s].todo_q);
// Retire a block whose contents have been fully scavenged: if it still has
// more than a work unit of free space, keep it on part_list for refilling
// later; otherwise move it to scavd_list.
125 push_scanned_block (bdescr *bd, step_workspace *ws)
        // the block must be unlinked, belong to this workspace's step, and
        // be completely scanned (scan pointer has caught up with free)
128     ASSERT(bd->link == NULL);
129     ASSERT(bd->step == ws->step);
130     ASSERT(bd->u.scan == bd->free);
132     if (bd->start + BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS)
134         // a partially full block: put it on the part_list list.
135         bd->link = ws->part_list;
            // FIX: removed a stray extra ')' that unbalanced this ASSERT
139         ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks);
143         // put the scan block on the ws->scavd_list.
144         bd->link = ws->scavd_list;
146         ws->n_scavd_blocks ++;
            // FIX: removed a stray extra ')' here as well
148         ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks);
// Called when the workspace's current todo block has no room for the
// object being copied.  Either bumps the block's limit a little so copying
// can continue in place, or retires the block (to the scanned list, the
// global deque, or the overflow list) and allocates a fresh todo block.
// NOTE(review): several lines of this function are missing from this
// excerpt; the comments below describe only what is visible.
153 todo_block_full (nat size, step_workspace *ws)
158     // todo_free has been pre-incremented by Evac.c:alloc_for_copy(). We
159     // are expected to leave it bumped when we've finished here.
160     ws->todo_free -= size;
165     ASSERT(bd->link == NULL);
166     ASSERT(bd->step == ws->step);
168     // If the global list is not empty, or there's not much work in
169     // this block to push, and there's enough room in
170     // this block to evacuate the current object, then just increase
172     if (!looksEmptyWSDeque(ws->todo_q) ||
173         (ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
174         if (ws->todo_free + size < bd->start + BLOCK_SIZE_W) {
                // grow the limit by at least one work unit, capped at the
                // end of the block, and carry on copying into this block
175             ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
176                                    ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
177             debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim);
                // restore the pre-incremented free pointer for the caller
179             ws->todo_free += size;
    // account the words copied into this block and publish the new free ptr
184     gct->copied += ws->todo_free - bd->free;
185     bd->free = ws->todo_free;
187     ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free);
189     // If this block is not the scan block, we want to push it out and
190     // make room for a new todo block.
191     if (bd != gct->scan_bd)
193         // If this block does not have enough space to allocate the
194         // current object, but it also doesn't have any work to push, then
195         // push it on to the scanned list. It cannot be empty, because
196         // then there would be enough room to copy the current object.
197         if (bd->u.scan == bd->free)
199             ASSERT(bd->free != bd->start);
200             push_scanned_block(bd, ws);
202         // Otherwise, push this block out to the global list.
207             debugTrace(DEBUG_gc, "push todo block %p (%ld words), step %d, todo_q: %ld",
208                        bd->start, (unsigned long)(bd->free - bd->u.scan),
209                        stp->abs_no, dequeElements(ws->todo_q));
                // if the deque is full, fall back to the per-workspace
                // overflow list instead of losing the block
211             if (!pushWSDeque(ws->todo_q, bd)) {
212                 bd->link = ws->todo_overflow;
213                 ws->todo_overflow = bd;
214                 ws->n_todo_overflow++;
    // retire the current block and set up a fresh one; todo_free is left
    // bumped by size, as promised in the comment at the top
220     ws->todo_free = NULL;
223     alloc_todo_block(ws, size);
226     ws->todo_free += size;
// Install a fresh todo block in this workspace: reuse a block from
// part_list when it has enough free room for the pending object, otherwise
// allocate a brand-new block.  Returns the new todo_free pointer.
231 alloc_todo_block (step_workspace *ws, nat size)
233     bdescr *bd/*, *hd, *tl */;
235     // Grab a part block if we have one, and it has enough room
236     if (ws->part_list != NULL &&
237         ws->part_list->start + BLOCK_SIZE_W - ws->part_list->free > (int)size)
            // unlink the reused block from part_list
240         ws->part_list = bd->link;
245         // blocks in to-space get the BF_EVACUATED flag.
        // NOTE(review): the commented-out code below is presumably an
        // abandoned experiment with allocating blocks in batches of 16;
        // left in place for reference.
247 //            allocBlocks_sync(16, &hd, &tl,
248 //                             ws->step->gen_no, ws->step, BF_EVACUATED);
250 //            tl->link = ws->part_list;
251 //            ws->part_list = hd->link;
252 //            ws->n_part_blocks += 15;
256         bd = allocBlock_sync();
258         bd->gen_no = ws->step->gen_no;
259         bd->flags = BF_EVACUATED;
260         bd->u.scan = bd->free = bd->start;
    // the initial limit is at least one work unit (or the object size)
    // beyond free, capped at the end of the block
266     ws->todo_free = bd->free;
267     ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
268                             bd->free + stg_max(WORK_UNIT_WORDS,size));
270     debugTrace(DEBUG_gc, "alloc new todo block %p for step %d",
271                bd->free, ws->step->abs_no);
273     return ws->todo_free;
276 /* -----------------------------------------------------------------------------
278 * -------------------------------------------------------------------------- */
// Debug aid: print every word on a generation's mutable list together with
// the info type of the closure it points to.
282 printMutableList(generation *gen)
287     debugBelch("mutable list %p: ", gen->mut_list);
289     for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
            // each word in a mut_list block is treated as a closure pointer
290         for (p = bd->start; p < bd->free; p++) {
291             debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));