/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: utilities
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "RtsFlags.h"
#include "Storage.h"
#include "GC.h"
#include "GCUtils.h"
#include "Printer.h"
#include "Trace.h"
#ifdef THREADED_RTS
SpinLock gc_alloc_block_sync;
#endif

bdescr *
allocBlock_sync(void)
{
    bdescr *bd;
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    bd = allocBlock();
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
    return bd;
}
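
// Note: the critical section protected by gc_alloc_block_sync is a
// single call into the block allocator, so a spin lock suffices: a GC
// thread that loses the race waits only briefly.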
static void
allocBlocks_sync(nat n, bdescr **hd, bdescr **tl,
                 nat gen_no, step *stp,
                 StgWord32 flags)
{
    bdescr *bd;
    nat i;
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    bd = allocGroup(n);
    for (i = 0; i < n; i++) {
        bd[i].blocks = 1;
        bd[i].gen_no = gen_no;
        bd[i].step = stp;
        bd[i].flags = flags;
        bd[i].link = &bd[i+1];
        bd[i].u.scan = bd[i].free = bd[i].start;
    }
    *hd = bd;
    *tl = &bd[n-1];
    bd[n-1].link = NULL;
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
}
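
// The caller gets back n fresh blocks chained through bd->link, with
// *hd pointing to the first and *tl to the last block; the chain is
// NULL-terminated so it can be spliced directly onto a workspace list.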
void
freeChain_sync(bdescr *bd)
{
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    freeChain(bd);
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
}
/* -----------------------------------------------------------------------------
   Workspace utilities
   -------------------------------------------------------------------------- */
bdescr *
grab_local_todo_block (step_workspace *ws)
{
    bdescr *bd;

    bd = ws->todo_overflow;
    if (bd != NULL)
    {
        ws->todo_overflow = bd->link;
        bd->link = NULL;
        ws->n_todo_overflow--;
        return bd;
    }

    bd = popWSDeque(ws->todo_q);
    if (bd != NULL)
    {
        ASSERT(bd->link == NULL);
        return bd;
    }

    return NULL;
}
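
// The overflow list is drained before the work-stealing deque: blocks
// land on todo_overflow only when pushWSDeque() fails because the deque
// is full (see todo_block_full() below), so it should normally be empty.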
#if defined(THREADED_RTS)
bdescr *
steal_todo_block (nat s)
{
    nat n;
    bdescr *bd;

    // look for work to steal
    for (n = 0; n < n_gc_threads; n++) {
        if (n == gct->thread_index) continue;
        bd = stealWSDeque(gc_threads[n]->steps[s].todo_q);
        if (bd) {
            return bd;
        }
    }
    return NULL;
}
#endif
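
// A simple linear scan over the other GC threads' deques for step s;
// returns NULL if no other thread has spare work to steal.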
void
push_scanned_block (bdescr *bd, step_workspace *ws)
{
    ASSERT(bd != NULL);
    ASSERT(bd->link == NULL);
    ASSERT(bd->step == ws->step);
    ASSERT(bd->u.scan == bd->free);

    if (bd->start + BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS)
    {
        // a partially full block: put it on the part_list list.
        bd->link = ws->part_list;
        ws->part_list = bd;
        ws->n_part_blocks++;
        IF_DEBUG(sanity,
                 ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
    }
    else
    {
        // a full block: put it on the ws->scavd_list.
        bd->link = ws->scavd_list;
        ws->scavd_list = bd;
        ws->n_scavd_blocks++;
        IF_DEBUG(sanity,
                 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
    }
}
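
// The WORK_UNIT_WORDS threshold above decides where a scanned block goes:
// one with plenty of free space is kept on part_list so alloc_todo_block()
// can reuse it, while a nearly-full block is retired to scavd_list.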
StgPtr
todo_block_full (nat size, step_workspace *ws)
{
    StgPtr p;
    bdescr *bd;

    // todo_free has been pre-incremented by Evac.c:alloc_for_copy().  We
    // are expected to leave it bumped when we've finished here.
    ws->todo_free -= size;

    bd = ws->todo_bd;

    ASSERT(bd != NULL);
    ASSERT(bd->link == NULL);
    ASSERT(bd->step == ws->step);

    // If the global list is not empty, or there's not much work in
    // this block to push, and there's enough room in this block to
    // evacuate the current object, then just increase the limit.
    if (!looksEmptyWSDeque(ws->todo_q) ||
        (ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
        if (ws->todo_free + size < bd->start + BLOCK_SIZE_W) {
            ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
                                   ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
            debugTrace(DEBUG_gc, "increasing limit for %p to %p",
                       bd->start, ws->todo_lim);
            p = ws->todo_free;
            ws->todo_free += size;
            return p;
        }
    }

    gct->copied += ws->todo_free - bd->free;
    bd->free = ws->todo_free;

    ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free);

    // If this block is not the scan block, we want to push it out and
    // make room for a new todo block.
    if (bd != gct->scan_bd)
    {
        // If this block does not have enough space to allocate the
        // current object, but it also doesn't have any work to push, then
        // push it on to the scanned list.  It cannot be empty, because
        // then there would be enough room to copy the current object.
        if (bd->u.scan == bd->free)
        {
            ASSERT(bd->free != bd->start);
            push_scanned_block(bd, ws);
        }
        // Otherwise, push this block out to the global list.
        else
        {
            step *stp;
            stp = ws->step;
            debugTrace(DEBUG_gc, "push todo block %p (%ld words), step %d, todo_q: %ld",
                       bd->start, (unsigned long)(bd->free - bd->u.scan),
                       stp->abs_no, dequeElements(ws->todo_q));

            if (!pushWSDeque(ws->todo_q, bd)) {
                bd->link = ws->todo_overflow;
                ws->todo_overflow = bd;
                ws->n_todo_overflow++;
            }
        }
    }

    ws->todo_bd   = NULL;
    ws->todo_free = NULL;
    ws->todo_lim  = NULL;

    alloc_todo_block(ws, size);

    p = ws->todo_free;
    ws->todo_free += size;
    return p;
}
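
/* A sketch of the expected call pattern (the real caller is
 * Evac.c:alloc_for_copy(), which pre-increments todo_free as described
 * above):
 *
 *     to = ws->todo_free;
 *     ws->todo_free += size;
 *     if (ws->todo_free > ws->todo_lim) {
 *         to = todo_block_full(size, ws);
 *     }
 *     // ...copy the object to 'to'; todo_free stays bumped by 'size'.
 */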
StgPtr
alloc_todo_block (step_workspace *ws, nat size)
{
    bdescr *bd/*, *hd, *tl */;

    // Grab a part block if we have one, and it has enough room
    if (ws->part_list != NULL &&
        ws->part_list->start + BLOCK_SIZE_W - ws->part_list->free > (int)size)
    {
        bd = ws->part_list;
        ws->part_list = bd->link;
        ws->n_part_blocks--;
    }
    else
    {
        // blocks in to-space get the BF_EVACUATED flag.

//        allocBlocks_sync(16, &hd, &tl,
//                         ws->step->gen_no, ws->step, BF_EVACUATED);
//
//        tl->link = ws->part_list;
//        ws->part_list = hd->link;
//        ws->n_part_blocks += 15;
//        bd = hd;

        bd = allocBlock_sync();
        bd->step = ws->step;
        bd->gen_no = ws->step->gen_no;
        bd->flags = BF_EVACUATED;
        bd->u.scan = bd->free = bd->start;
    }

    bd->link = NULL;

    ws->todo_bd = bd;
    ws->todo_free = bd->free;
    ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
                           bd->free + stg_max(WORK_UNIT_WORDS,size));

    debugTrace(DEBUG_gc, "alloc new todo block %p for step %d",
               bd->free, ws->step->abs_no);

    return ws->todo_free;
}
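
// Note that todo_lim may be set well below the end of the block: capping
// each allocation run at stg_max(WORK_UNIT_WORDS, size) forces
// todo_block_full() to be revisited regularly, which is what lets blocks
// be pushed out to the global list in reasonably sized units of work.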
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */
#if DEBUG
void
printMutableList(generation *gen)
{
    bdescr *bd;
    StgPtr p;

    debugBelch("mutable list %p: ", gen->mut_list);

    for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
        for (p = bd->start; p < bd->free; p++) {
            debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));
        }
    }
    debugBelch("\n");
}
#endif /* DEBUG */