1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2006
5 * Generational garbage collector: utilities
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
// Spin-lock serialising all GC-thread access to the block allocator:
// every allocate/free path visible below brackets its work with
// ACQUIRE/RELEASE of this lock.
23 SpinLock gc_alloc_block_sync;
// NOTE(review): the two lines below are the lock/unlock bracket of a
// single-block allocation helper whose signature and body were elided
// from this excerpt — confirm against the full file before editing.
30 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
32 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
// Allocate a group of n blocks under gc_alloc_block_sync and initialise
// each descriptor as an empty to-space block (scan == free == start).
// NOTE(review): interior lines (the allocGroup call, step/flag setup,
// loop close, and the *hd/*tl result assignments) are elided from this
// excerpt; presumably *hd/*tl receive the head and tail of the chain —
// verify against the full source.
38 allocBlocks_sync(nat n, bdescr **hd, bdescr **tl,
39 nat gen_no, step *stp,
// serialise with all other GC threads touching the block allocator
44 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
46 for (i = 0; i < n; i++) {
// stamp each block with the generation it is being allocated for
48 bd[i].gen_no = gen_no;
// chain consecutive descriptors together through their link fields
51 bd[i].link = &bd[i+1];
// fresh block: nothing allocated in it and nothing scanned yet
52 bd[i].u.scan = bd[i].free = bd[i].start;
56 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
// Return a chain of blocks to the block allocator while holding the
// allocator spin-lock.  NOTE(review): the freeChain(bd) call between the
// lock and unlock is elided from this excerpt.
60 freeChain_sync(bdescr *bd)
62 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
64 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
67 /* -----------------------------------------------------------------------------
69 -------------------------------------------------------------------------- */
// Fetch the next block of pending scavenge work for this workspace:
// prefer the workspace-local buffer_todo_bd (no locking needed),
// otherwise pop the head of the step's shared todos list under the
// step's sync_todo spin-lock.  NOTE(review): the return statement and
// the "bd = stp->todos" head-take are elided from this excerpt.
72 grab_todo_block (step_workspace *ws)
// fast path: a privately buffered todo block needs no synchronisation
80 if (ws->buffer_todo_bd)
82 bd = ws->buffer_todo_bd;
// buffered block must not still be linked into any list
83 ASSERT(bd->link == NULL);
84 ws->buffer_todo_bd = NULL;
// slow path: contend with other GC threads on the step-wide list
88 ACQUIRE_SPIN_LOCK(&stp->sync_todo);
// popping the last element empties the list, so the tail pointer
// must be reset along with the head
91 if (stp->todos == stp->todos_last) {
92 stp->todos_last = NULL;
94 stp->todos = bd->link;
98 RELEASE_SPIN_LOCK(&stp->sync_todo);
// Retire a block that has been completely scavenged (u.scan == free).
// Blocks with enough unused space left to be worth refilling go on the
// workspace's part_list; otherwise the block goes on scavd_list.
// NOTE(review): the apparent unbalanced ')' on the two ASSERT lines
// below presumably closes an elided IF_DEBUG(sanity, ...) wrapper —
// confirm against the full file.
103 push_scanned_block (bdescr *bd, step_workspace *ws)
// block must be unlinked, belong to this workspace's step, and be
// fully scanned before it can be retired
106 ASSERT(bd->link == NULL);
107 ASSERT(bd->step == ws->step);
108 ASSERT(bd->u.scan == bd->free);
// remaining free space in the block exceeds one work unit: keep it
// around for refilling rather than declaring it done
110 if (bd->start + BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS)
112 // a partially full block: put it on the part_list list.
113 bd->link = ws->part_list;
117 ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
121 // put the scan block on the ws->scavd_list.
122 bd->link = ws->scavd_list;
124 ws->n_scavd_blocks ++;
126 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
// Called when the current todo block cannot hold an object of `size`
// words.  Either (a) stretch the block's soft limit (todo_lim) when the
// global list already has work or this block has little to contribute,
// or (b) flush the block — to the scanned list if it holds no
// unscavenged work, otherwise to the step's shared todos list — and
// allocate a fresh todo block.  Returns the new allocation pointer
// (ws->todo_free).  NOTE(review): several interior lines (the `bd`
// initialisation from ws->todo_bd, closing braces, n_todos++ etc.) are
// elided from this excerpt.
131 todo_block_full (nat size, step_workspace *ws)
138 ASSERT(bd->link == NULL);
139 ASSERT(bd->step == ws->step);
141 // If the global list is not empty, or there's not much work in
142 // this block to push, and there's enough room in
143 // this block to evacuate the current object, then just increase
145 if (ws->step->todos != NULL ||
146 (ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
// object still fits below the hard end of the block: just move
// the soft limit up by at least one work unit and keep going
147 if (ws->todo_free + size < bd->start + BLOCK_SIZE_W) {
148 ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
149 ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
150 debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim);
151 return ws->todo_free;
// account the words copied into this block since bd->free was last
// synchronised, then publish the allocation pointer into the bdescr
155 gct->copied += ws->todo_free - bd->free;
156 bd->free = ws->todo_free;
158 ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free);
160 // If this block is not the scan block, we want to push it out and
161 // make room for a new todo block.
162 if (bd != gct->scan_bd)
164 // If this block does not have enough space to allocate the
165 // current object, but it also doesn't have any work to push, then
166 // push it on to the scanned list. It cannot be empty, because
167 // then there would be enough room to copy the current object.
168 if (bd->u.scan == bd->free)
170 ASSERT(bd->free != bd->start);
171 push_scanned_block(bd, ws);
173 // Otherwise, push this block out to the global list.
178 trace(TRACE_gc|DEBUG_gc, "push todo block %p (%d words), step %d, n_todos: %d",
179 bd->start, bd->free - bd->u.scan, stp->abs_no, stp->n_todos);
180 // ToDo: use buffer_todo
// append to the step's shared todo list under its spin-lock,
// maintaining the tail pointer for O(1) appends
181 ACQUIRE_SPIN_LOCK(&stp->sync_todo);
182 if (stp->todos_last == NULL) {
183 stp->todos_last = bd;
186 stp->todos_last->link = bd;
187 stp->todos_last = bd;
190 RELEASE_SPIN_LOCK(&stp->sync_todo);
// workspace no longer owns a todo block; get a fresh one big
// enough for the object that triggered this call
195 ws->todo_free = NULL;
198 alloc_todo_block(ws, size);
200 return ws->todo_free;
// Install a todo block in the workspace that can hold at least `size`
// words: reuse a block from part_list when one has enough room,
// otherwise allocate a group of 4 fresh blocks (keeping one and stashing
// the other 3 on part_list to amortise the allocator lock).  Returns the
// new allocation pointer (ws->todo_free).  NOTE(review): interior lines
// (part_list pop into bd, flags initialisation, ws->todo_bd assignment,
// closing braces) are elided from this excerpt.
204 alloc_todo_block (step_workspace *ws, nat size)
206 bdescr *bd, *hd, *tl;
209 // Grab a part block if we have one, and it has enough room
210 if (ws->part_list != NULL &&
211 ws->part_list->start + BLOCK_SIZE_W - ws->part_list->free > (int)size)
214 ws->part_list = bd->link;
219 // blocks in to-space in generations up to and including N
220 // get the BF_EVACUATED flag.
221 if (ws->step->gen_no <= N) {
222 flags = BF_EVACUATED;
// take 4 blocks in one locked call; hd is kept as the todo block
226 allocBlocks_sync(4, &hd, &tl,
227 ws->step->gen_no, ws->step, flags);
// splice the 3 spare blocks (hd->link .. tl) onto part_list
229 tl->link = ws->part_list;
230 ws->part_list = hd->link;
231 ws->n_part_blocks += 3;
// allocation starts at the block's current free pointer; the soft
// limit is one work unit (or `size`) ahead, capped at block end
239 ws->todo_free = bd->free;
240 ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
241 bd->free + stg_max(WORK_UNIT_WORDS,size));
243 debugTrace(DEBUG_gc, "alloc new todo block %p for step %d",
244 bd->free, ws->step->abs_no);
246 return ws->todo_free;
249 /* -----------------------------------------------------------------------------
251 * -------------------------------------------------------------------------- */
255 printMutableList(generation *gen)
260 debugBelch("mutable list %p: ", gen->mut_list);
262 for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
263 for (p = bd->start; p < bd->free; p++) {
264 debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));