1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: utilities
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
/* Spin lock serialising all GC-time block-allocator calls (allocBlock /
 * allocGroup / freeChain are not themselves thread-safe during parallel GC).
 * NOTE(review): the function enclosing the acquire/release pair below is not
 * visible in this extract (source lines are elided) -- presumably this is the
 * body of allocBlock_sync(); confirm against the full GCUtils.c. */
24 SpinLock gc_alloc_block_sync;
31 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
33 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
/* allocBlocks_sync: allocate a group of n blocks under gc_alloc_block_sync,
 * returning the chain via *hd (head) and *tl (tail).
 *
 * Each block descriptor in the group is initialised for use as a to-space
 * block: its generation number is set, consecutive descriptors are linked
 * together via bd[i].link, and both the free pointer and the scan pointer
 * are reset to the start of the block (i.e. empty, nothing scavenged yet).
 *
 * NOTE(review): several body lines are elided in this extract (the local
 * declarations, the allocGroup call, the step/flags assignments, and the
 * loop/brace closings are missing) -- do not take this fragment as the
 * complete function; see the full GCUtils.c. */
40 allocBlocks_sync(nat n, bdescr **hd, bdescr **tl,
41 nat gen_no, step *stp,
/* Serialise with every other GC thread touching the block allocator. */
46 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
48 for (i = 0; i < n; i++) {
50 bd[i].gen_no = gen_no;
/* Link each descriptor to the next; the last link is presumably
 * NULL-terminated on an elided line -- confirm. */
53 bd[i].link = &bd[i+1];
/* Empty block: nothing allocated in it and nothing scanned. */
54 bd[i].u.scan = bd[i].free = bd[i].start;
58 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
/* freeChain_sync: free a chain of blocks back to the block allocator,
 * holding gc_alloc_block_sync for the duration (the underlying freeChain
 * is not thread-safe during parallel GC).
 * NOTE(review): the freeChain(bd) call itself is on an elided line between
 * the acquire and release -- confirm against the full source. */
63 freeChain_sync(bdescr *bd)
65 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
67 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
70 /* -----------------------------------------------------------------------------
72 -------------------------------------------------------------------------- */
/* grab_todo_block: fetch the next block of to-be-scavenged objects for this
 * workspace.
 *
 * Fast path: if the workspace has a private buffered todo block
 * (ws->buffer_todo_bd), take that without any locking.
 *
 * Slow path: otherwise pop the head of the step's shared todo list
 * (stp->todos) under the stp->sync_todo spin lock, keeping todos_last
 * consistent when the list becomes empty.
 *
 * NOTE(review): the local declarations, the NULL-check on stp->todos, the
 * early returns, and the closing braces are on elided lines -- this
 * fragment is not the complete function. */
75 grab_todo_block (step_workspace *ws)
/* Private per-workspace buffer: no lock needed, it is owned by this thread. */
83 if (ws->buffer_todo_bd)
85 bd = ws->buffer_todo_bd;
/* A buffered block must already be unlinked from any list. */
86 ASSERT(bd->link == NULL);
87 ws->buffer_todo_bd = NULL;
/* Shared list: serialise with other GC threads pushing/popping todos. */
91 ACQUIRE_SPIN_LOCK(&stp->sync_todo);
/* Popping the last element: clear the tail pointer too, so pushes
 * re-initialise the list correctly. */
94 if (stp->todos == stp->todos_last) {
95 stp->todos_last = NULL;
97 stp->todos = bd->link;
101 RELEASE_SPIN_LOCK(&stp->sync_todo);
/* push_scanned_block: file a fully-scavenged block (scan == free) into the
 * right per-workspace list.
 *
 * If the block still has more than WORK_UNIT_WORDS of slop, it goes on
 * ws->part_list so a later alloc_todo_block() can reuse the free space;
 * otherwise it is retired onto ws->scavd_list.
 *
 * NOTE(review): the trailing unbalanced ')' on the two countBlocks ASSERTs
 * is not a bug in the real source -- they close IF_DEBUG(sanity, ...)
 * wrappers whose opening halves are on elided lines, along with the
 * counter increments for part_list and the closing braces. */
106 push_scanned_block (bdescr *bd, step_workspace *ws)
109 ASSERT(bd->link == NULL);
110 ASSERT(bd->step == ws->step);
/* Fully scanned: every object in the block has been scavenged. */
111 ASSERT(bd->u.scan == bd->free);
/* Enough unused space left to be worth refilling later? */
113 if (bd->start + BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS)
115 // a partially full block: put it on the part_list list.
116 bd->link = ws->part_list;
/* Sanity: list length must agree with the cached count (debug builds). */
120 ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
124 // put the scan block on the ws->scavd_list.
125 bd->link = ws->scavd_list;
127 ws->n_scavd_blocks ++;
129 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
/* todo_block_full: called when the current todo block cannot hold the next
 * object of `size` words.  Either stretch the block's soft limit so
 * allocation can continue in place, or retire the block and start a fresh
 * one; returns the new ws->todo_free allocation pointer.
 *
 * Retiring goes one of three ways: the scan block is kept (elided branch),
 * a block with no unscavenged work goes to the local scanned list, and a
 * block with pending work is published on the step's global todo list so
 * other GC threads can steal it.
 *
 * NOTE(review): local declarations, several closing braces, the
 * gct->scan_bd branch, and parts of the todos-list push are on elided
 * lines -- this fragment is not the complete function. */
134 todo_block_full (nat size, step_workspace *ws)
141 ASSERT(bd->link == NULL);
142 ASSERT(bd->step == ws->step);
144 // If the global list is not empty, or there's not much work in
145 // this block to push, and there's enough room in
146 // this block to evacuate the current object, then just increase
/* (continuation of the comment above: ...the limit and keep going.) */
148 if (ws->step->todos != NULL ||
149 (ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
/* Object still fits in the block's real capacity: stretch todo_lim
 * (soft limit) by at least one work unit, capped at the block end. */
150 if (ws->todo_free + size < bd->start + BLOCK_SIZE_W) {
151 ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
152 ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
153 debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim);
154 return ws->todo_free;
/* Retiring the block: account the words copied into it since the last
 * sync, and write the allocation pointer back into the descriptor. */
158 gct->copied += ws->todo_free - bd->free;
159 bd->free = ws->todo_free;
161 ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free);
163 // If this block is not the scan block, we want to push it out and
164 // make room for a new todo block.
165 if (bd != gct->scan_bd)
167 // If this block does not have enough space to allocate the
168 // current object, but it also doesn't have any work to push, then
169 // push it on to the scanned list. It cannot be empty, because
170 // then there would be enough room to copy the current object.
171 if (bd->u.scan == bd->free)
173 ASSERT(bd->free != bd->start);
174 push_scanned_block(bd, ws);
176 // Otherwise, push this block out to the global list.
181 trace(TRACE_gc|DEBUG_gc, "push todo block %p (%ld words), step %d, n_todos: %d",
182 bd->start, (unsigned long)(bd->free - bd->u.scan),
183 stp->abs_no, stp->n_todos);
184 // ToDo: use buffer_todo
/* Append to the step's shared todo list under its spin lock; other GC
 * threads pop from this list in grab_todo_block(). */
185 ACQUIRE_SPIN_LOCK(&stp->sync_todo);
186 if (stp->todos_last == NULL) {
/* Empty list: this block becomes both head and tail (the head
 * assignment is presumably on an elided line -- confirm). */
187 stp->todos_last = bd;
190 stp->todos_last->link = bd;
191 stp->todos_last = bd;
194 RELEASE_SPIN_LOCK(&stp->sync_todo);
/* Old block is gone from this workspace; get a fresh todo block big
 * enough for the pending object and return the new allocation pointer. */
199 ws->todo_free = NULL;
202 alloc_todo_block(ws, size);
204 return ws->todo_free;
/* alloc_todo_block: install a fresh todo block in the workspace, guaranteed
 * to have room for at least `size` words, and return the new allocation
 * pointer (ws->todo_free).
 *
 * Prefers recycling the head of ws->part_list (a partially-filled block
 * saved by push_scanned_block) when it has enough slop; otherwise allocates
 * a brand-new block from the block allocator under the global spin lock.
 *
 * NOTE(review): local declarations, the ws->todo_bd assignment, counter
 * updates for part_list, and several braces are on elided lines -- this
 * fragment is not the complete function. */
208 alloc_todo_block (step_workspace *ws, nat size)
210 bdescr *bd/*, *hd, *tl*/;
212 // Grab a part block if we have one, and it has enough room
/* NOTE(review): the (int)size cast mixes signedness in the comparison;
 * presumably safe because size << BLOCK_SIZE_W, but worth confirming. */
213 if (ws->part_list != NULL &&
214 ws->part_list->start + BLOCK_SIZE_W - ws->part_list->free > (int)size)
217 ws->part_list = bd->link;
222 // blocks in to-space get the BF_EVACUATED flag.
/* Disabled experiment: batch-allocate 4 blocks and keep 3 on part_list. */
224 // allocBlocks_sync(4, &hd, &tl,
225 // ws->step->gen_no, ws->step, BF_EVACUATED);
227 // tl->link = ws->part_list;
228 // ws->part_list = hd->link;
229 // ws->n_part_blocks += 3;
/* No reusable partial block: take a fresh one from the allocator. */
233 bd = allocBlock_sync();
235 bd->gen_no = ws->step->gen_no;
236 bd->flags = BF_EVACUATED;
/* Fresh block: nothing allocated, nothing scanned. */
237 bd->u.scan = bd->free = bd->start;
/* Allocation window: start at the block's free pointer, soft limit one
 * work unit (or `size`, if larger) ahead, never past the block end. */
243 ws->todo_free = bd->free;
244 ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
245 bd->free + stg_max(WORK_UNIT_WORDS,size));
247 debugTrace(DEBUG_gc, "alloc new todo block %p for step %d",
248 bd->free, ws->step->abs_no);
250 return ws->todo_free;
253 /* -----------------------------------------------------------------------------
255 * -------------------------------------------------------------------------- */
259 printMutableList(generation *gen)
264 debugBelch("mutable list %p: ", gen->mut_list);
266 for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
267 for (p = bd->start; p < bd->free; p++) {
268 debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));