1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2006
5 * Generational garbage collector: utilities
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * https://gitlab.haskell.org/ghc/ghc/-/wikis/commentary/rts/storage/gc
12 * ---------------------------------------------------------------------------*/
// Spin lock serialising all parallel-GC access to the shared block
// allocator (allocGroup/freeChain are not thread-safe on their own).
24 SpinLock gc_alloc_block_sync;
// NOTE(review): the enclosing function definition is elided in this
// excerpt; these two calls bracket a single shared-allocator operation.
31     ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
33     RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
// Allocate a group of n blocks from the shared block allocator while
// holding gc_alloc_block_sync, initialise each block descriptor
// (generation number, link chain, scan == free == start) and hand the
// chain back through *hd/*tl.
// NOTE(review): the return type, the bd group allocation, the loop
// close and several field initialisations are elided in this excerpt —
// confirm against the full source before relying on details.
39 allocBlocks_sync(nat n, bdescr **hd, bdescr **tl,
40 nat gen_no, step *stp,
// Serialise against every other GC thread touching the block allocator.
45 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
47 for (i = 0; i < n; i++) {
49 bd[i].gen_no = gen_no;
// Chain descriptor i to i+1; presumably the last link is NULLed in an
// elided line after the loop — TODO confirm.
52 bd[i].link = &bd[i+1];
// Fresh to-space block: nothing copied yet, nothing scanned yet.
53 bd[i].u.scan = bd[i].free = bd[i].start;
57 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
// Return a chain of blocks to the shared block allocator, taking
// gc_alloc_block_sync around the (elided) freeChain call.
61 freeChain_sync(bdescr *bd)
63 ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
65 RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
68 /* -----------------------------------------------------------------------------
70 -------------------------------------------------------------------------- */
// Take the next todo (pending-scavenge) block for this workspace.
// The thread-local buffer_todo_bd is preferred — no locking needed —
// otherwise the head of the step's shared todos list is popped under
// stp->sync_todo.  (Declarations of bd/stp and the returns are elided
// in this excerpt.)
73 grab_todo_block (step_workspace *ws)
// Fast path: a block this workspace buffered earlier.
81 if (ws->buffer_todo_bd)
83 bd = ws->buffer_todo_bd;
84 ASSERT(bd->link == NULL);
85 ws->buffer_todo_bd = NULL;
// Slow path: pop from the step's global todo list.
89 ACQUIRE_SPIN_LOCK(&stp->sync_todo);
// Popping the only element empties the list, so clear the tail pointer
// to keep todos/todos_last consistent.
92 if (stp->todos == stp->todos_last) {
93 stp->todos_last = NULL;
95 stp->todos = bd->link;
99 RELEASE_SPIN_LOCK(&stp->sync_todo);
// File a block that has been completely scavenged (scan == free).
// If it still has more than WORK_UNIT_WORDS of free space it goes on
// ws->part_list so later allocation can reuse it; otherwise it is done
// and goes on ws->scavd_list.
104 push_scanned_block (bdescr *bd, step_workspace *ws)
107 ASSERT(bd->link == NULL);
108 ASSERT(bd->step == ws->step);
// Precondition: fully scanned.
109 ASSERT(bd->u.scan == bd->free);
111 if (bd->start + BLOCK_SIZE_W - bd->free > WORK_UNIT_WORDS)
113 // a partially full block: put it on the part_list list.
114 bd->link = ws->part_list;
// NOTE(review): the apparently unbalanced ')' here (and below) suggests
// these ASSERTs are the tail of an elided IF_DEBUG(sanity, ...) wrapper
// whose opening line is missing from this excerpt — confirm against the
// full source.
118 ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
122 // put the scan block on the ws->scavd_list.
123 bd->link = ws->scavd_list;
125 ws->n_scavd_blocks ++;
127 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
// Called when the current todo block cannot fit an object of `size`
// words at ws->todo_free.  Either stretches todo_lim (when pushing the
// block would yield too little work, or the global list already has
// work), or retires the block — to the scanned list if it holds no
// unscanned work, otherwise to the step's global todo list — and
// allocates a fresh todo block.  Returns the new allocation pointer.
// (Several declarations, braces and returns are elided in this excerpt.)
132 todo_block_full (nat size, step_workspace *ws)
139 ASSERT(bd->link == NULL);
140 ASSERT(bd->step == ws->step);
142 // If the global list is not empty, or there's not much work in
143 // this block to push, and there's enough room in
144 // this block to evacuate the current object, then just increase
146 if (ws->step->todos != NULL ||
147 (ws->todo_free - bd->u.scan < WORK_UNIT_WORDS / 2)) {
// Only stretch if the object actually fits within the block.
148 if (ws->todo_free + size < bd->start + BLOCK_SIZE_W) {
// Grow the limit by at least one work unit, capped at the block end.
149 ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
150 ws->todo_lim + stg_max(WORK_UNIT_WORDS,size));
151 debugTrace(DEBUG_gc, "increasing limit for %p to %p", bd->start, ws->todo_lim);
152 return ws->todo_free;
// Retiring the block: account the words copied into it since bd->free
// was last synced, then publish the allocation pointer into bd->free.
156 gct->copied += ws->todo_free - bd->free;
157 bd->free = ws->todo_free;
159 ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free);
161 // If this block is not the scan block, we want to push it out and
162 // make room for a new todo block.
163 if (bd != gct->scan_bd)
165 // If this block does not have enough space to allocate the
166 // current object, but it also doesn't have any work to push, then
167 // push it on to the scanned list. It cannot be empty, because
168 // then there would be enough room to copy the current object.
169 if (bd->u.scan == bd->free)
171 ASSERT(bd->free != bd->start);
172 push_scanned_block(bd, ws);
174 // Otherwise, push this block out to the global list.
179 trace(TRACE_gc|DEBUG_gc, "push todo block %p (%d words), step %d, n_todos: %d",
180 bd->start, bd->free - bd->u.scan, stp->abs_no, stp->n_todos);
181 // ToDo: use buffer_todo
// Append to the step's global todo list under its spin lock,
// maintaining the todos_last tail pointer.
182 ACQUIRE_SPIN_LOCK(&stp->sync_todo);
183 if (stp->todos_last == NULL) {
184 stp->todos_last = bd;
187 stp->todos_last->link = bd;
188 stp->todos_last = bd;
191 RELEASE_SPIN_LOCK(&stp->sync_todo);
// Workspace no longer owns a todo block; get a fresh one.
196 ws->todo_free = NULL;
199 alloc_todo_block(ws, size);
201 return ws->todo_free;
// Install a new todo block in the workspace, big enough for `size`
// words: reuse a block from ws->part_list if it has room, otherwise
// allocate a group of 4 fresh blocks, keeping one and stashing the
// other 3 on part_list.  Returns the new ws->todo_free.
// (Several statements, braces and the assignment of bd from hd are
// elided in this excerpt.)
205 alloc_todo_block (step_workspace *ws, nat size)
207 bdescr *bd, *hd, *tl;
210 // Grab a part block if we have one, and it has enough room
// NOTE(review): mixed signed/unsigned comparison via the (int)size
// cast — presumably safe for block-sized values, but worth confirming.
211 if (ws->part_list != NULL &&
212 ws->part_list->start + BLOCK_SIZE_W - ws->part_list->free > (int)size)
215 ws->part_list = bd->link;
220 // blocks in to-space in generations up to and including N
221 // get the BF_EVACUATED flag.
222 if (ws->step->gen_no <= N) {
223 flags = BF_EVACUATED;
// Allocate 4 at a time to amortise the gc_alloc_block_sync lock.
227 allocBlocks_sync(4, &hd, &tl,
228 ws->step->gen_no, ws->step, flags);
// Keep the head block as the todo block; splice the remaining 3 onto
// part_list for later use.
230 tl->link = ws->part_list;
231 ws->part_list = hd->link;
232 ws->n_part_blocks += 3;
240 ws->todo_free = bd->free;
// Initial limit: at least one work unit, capped at the block end —
// mirrors the stretch logic in todo_block_full.
241 ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
242 bd->free + stg_max(WORK_UNIT_WORDS,size));
244 debugTrace(DEBUG_gc, "alloc new todo block %p for step %d",
245 bd->free, ws->step->abs_no);
247 return ws->todo_free;
250 /* -----------------------------------------------------------------------------
252 * -------------------------------------------------------------------------- */
256 printMutableList(generation *gen)
261 debugBelch("mutable list %p: ", gen->mut_list);
263 for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
264 for (p = bd->start; p < bd->free; p++) {
265 debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));