1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: utilities
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
/* Global spin lock serialising GC threads' access to the shared block
 * allocator: every block alloc/free made during parallel GC must hold it. */
27 SpinLock gc_alloc_block_sync;
/* NOTE(review): the function wrapping the locked region below (presumably
 * allocBlock_sync) has been elided from this excerpt; only the lock
 * acquire/release lines survive — confirm against the full source. */
34 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
36 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
/* allocBlocks_sync: allocate a group of n blocks under the allocator lock
 * and initialise each descriptor for the given generation/step, chaining
 * the descriptors together via bd[i].link.
 * hd/tl are out-parameters — presumably set to the head and tail of the
 * resulting chain in lines elided from this excerpt (TODO confirm).
 * NOTE(review): several body lines (the allocGroup call, per-block step/
 * flags assignments, and the hd/tl stores) are missing here. */
43 allocBlocks_sync(nat n, bdescr **hd, bdescr **tl,
44 nat gen_no, step *stp,
49 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
51 for (i = 0; i < n; i++) {
53 bd[i].gen_no = gen_no;
56 bd[i].link = &bd[i+1];
57 bd[i].u.scan = bd[i].free = bd[i].start;
61 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
/* freeChain_sync: return a chain of blocks to the block allocator,
 * taking the global allocator lock around the (elided) free call. */
66 freeChain_sync(bdescr *bd)
68 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
70 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
73 /* -----------------------------------------------------------------------------
75 -------------------------------------------------------------------------- */
/* grab_local_todo_block: take the next pending to-scan block belonging to
 * this workspace.  Prefers the overflow list (blocks that did not fit on
 * the work-stealing deque), then pops from the deque itself.
 * NOTE(review): the branching/return structure between these statements
 * has been elided from this excerpt. */
78 grab_local_todo_block (step_workspace *ws)
85 bd = ws->todo_overflow;
88 ws->todo_overflow = bd->link;
90 ws->n_todo_overflow--;
94 bd = popWSDeque(ws->todo_q);
97 ASSERT(bd->link == NULL);
/* steal_todo_block: try to steal a todo block for step s from some other
 * GC thread's work-stealing deque, skipping our own thread.
 * NOTE(review): the success check / return inside the loop is elided. */
105 steal_todo_block (nat s)
110 // look for work to steal
111 for (n = 0; n < n_gc_threads; n++) {
112 if (n == gct->thread_index) continue;
113 bd = stealWSDeque(gc_threads[n]->steps[s].todo_q);
122 push_scanned_block (bdescr *bd, step_workspace *ws)
125 ASSERT(bd->link == NULL);
126 ASSERT(bd->step == ws->step);
127 ASSERT(bd->u.scan == bd->free);
129 if (bd->start + BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS)
131 // a partially full block: put it on the part_list list.
132 bd->link = ws->part_list;
136 ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
140 // put the scan block on the ws->scavd_list.
141 bd->link = ws->scavd_list;
143 ws->n_scavd_blocks ++;
145 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
/* todo_block_full: called when the current todo block has no room for an
 * object of `size` words.  Either (a) extends the block's soft limit
 * (todo_lim) when the global queue already has work or this block holds
 * too little work to be worth pushing, or (b) retires the block — to the
 * scanned list when fully scavenged, otherwise onto the work-stealing
 * deque (falling back to the local overflow list when the deque is full)
 * — and allocates a fresh todo block.  Returns the new allocation
 * pointer (ws->todo_free).
 * NOTE(review): braces/else-arms between several statements below are
 * elided in this excerpt; the surviving comments describe the intent. */
150 todo_block_full (nat size, step_workspace *ws)
157 ASSERT(bd->link == NULL);
158 ASSERT(bd->step == ws->step);
160 // If the global list is not empty, or there's not much work in
161 // this block to push, and there's enough room in
162 // this block to evacuate the current object, then just increase
164 if (!looksEmptyWSDeque(ws->todo_q) ||
165 (ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
166 if (ws->todo_free + size < bd->start + BLOCK_SIZE_W) {
// Soft limit grows by at least one work unit, capped at the block end.
167 ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
168 ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
169 debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim);
170 return ws->todo_free;
// Account the words copied into this block, then seal its free pointer.
174 gct->copied += ws->todo_free - bd->free;
175 bd->free = ws->todo_free;
177 ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free);
179 // If this block is not the scan block, we want to push it out and
180 // make room for a new todo block.
181 if (bd != gct->scan_bd)
183 // If this block does not have enough space to allocate the
184 // current object, but it also doesn't have any work to push, then
185 // push it on to the scanned list. It cannot be empty, because
186 // then there would be enough room to copy the current object.
187 if (bd->u.scan == bd->free)
189 ASSERT(bd->free != bd->start);
190 push_scanned_block(bd, ws);
192 // Otherwise, push this block out to the global list.
197 trace(TRACE_gc|DEBUG_gc, "push todo block %p (%ld words), step %d, todo_q: %ld",
198 bd->start, (unsigned long)(bd->free - bd->u.scan),
199 stp->abs_no, dequeElements(ws->todo_q));
// Deque full: keep the block on a private overflow list instead.
201 if (!pushWSDeque(ws->todo_q, bd)) {
202 bd->link = ws->todo_overflow;
203 ws->todo_overflow = bd;
204 ws->n_todo_overflow++;
210 ws->todo_free = NULL;
213 alloc_todo_block(ws, size);
215 return ws->todo_free;
/* alloc_todo_block: install a new todo block in the workspace, able to
 * hold at least `size` words.  Reuses the head of ws->part_list when it
 * has enough room, otherwise allocates a fresh block (flagged
 * BF_EVACUATED, i.e. to-space).  Sets todo_free and the soft limit
 * todo_lim (at least one WORK_UNIT_WORDS, capped at the block end) and
 * returns the new allocation pointer.
 * NOTE(review): the (int)size cast makes the free-space comparison
 * signed — presumably to avoid the pointer-difference being promoted to
 * unsigned when compared against an unsigned size; confirm `size` can
 * never exceed INT_MAX here. */
219 alloc_todo_block (step_workspace *ws, nat size)
221 bdescr *bd/*, *hd, *tl */;
223 // Grab a part block if we have one, and it has enough room
224 if (ws->part_list != NULL &&
225 ws->part_list->start + BLOCK_SIZE_W - ws->part_list->free > (int)size)
228 ws->part_list = bd->link;
233 // blocks in to-space get the BF_EVACUATED flag.
// Disabled experiment: batch-allocate 16 blocks at a time, keeping the
// 15 spares on part_list.  Kept for reference.
235 // allocBlocks_sync(16, &hd, &tl,
236 // ws->step->gen_no, ws->step, BF_EVACUATED);
238 // tl->link = ws->part_list;
239 // ws->part_list = hd->link;
240 // ws->n_part_blocks += 15;
244 bd = allocBlock_sync();
246 bd->gen_no = ws->step->gen_no;
247 bd->flags = BF_EVACUATED;
248 bd->u.scan = bd->free = bd->start;
254 ws->todo_free = bd->free;
255 ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
256 bd->free + stg_max(WORK_UNIT_WORDS,size));
258 debugTrace(DEBUG_gc, "alloc new todo block %p for step %d",
259 bd->free, ws->step->abs_no);
261 return ws->todo_free;
264 /* -----------------------------------------------------------------------------
266 * -------------------------------------------------------------------------- */
/* printMutableList: debugging aid — dump every closure pointer recorded
 * on a generation's mutable list, walking each block from start to free
 * and printing the address plus its info-table type.
 * (Definition continues past the end of this excerpt.) */
270 printMutableList(generation *gen)
275 debugBelch("mutable list %p: ", gen->mut_list);
277 for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
278 for (p = bd->start; p < bd->free; p++) {
279 debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));