1 /* -----------------------------------------------------------------------*-c-*-
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 // This file is #included into Scav.c, twice: firstly with PARALLEL_GC
15 // defined, the second time without.
// When compiled THREADED but in the non-PARALLEL_GC pass, alias the
// worker functions to their single-threaded "...1" variants so the same
// body can be #included twice (see the note at the top of this file).
// NOTE(review): this excerpt is line-sampled -- the matching #else/#endif
// and several #define/#undef lines of this conditional are not visible
// here; confirm against the full file before editing this region.
17 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
18 # define scavenge_block(a) scavenge_block1(a)
19 # define evacuate(a) evacuate1(a)
20 # define recordMutableGen_GC(a,b) recordMutableGen(a,b)
22 # undef scavenge_block
24 # undef recordMutableGen_GC
25 # if !defined(THREADED_RTS)
26 # define scavenge_block1(a) scavenge_block(a)
// Forward declaration: file-scope (static) because this file is #included
// into Scav.c rather than compiled as its own translation unit.
31 static void scavenge_block (bdescr *bd);
33 /* -----------------------------------------------------------------------------
34 Scavenge a block from the given scan pointer up to bd->free.
36 evac_step is set by the caller to be either zero (for a step in a
37 generation < N) or G where G is the generation of the step being
40 We sometimes temporarily change evac_step back to zero if we're
41 scavenging a mutable object where eager promotion isn't such a good
43 -------------------------------------------------------------------------- */
// NOTE(review): this excerpt is line-sampled from the original file. The
// return-type line, most `case` labels, `break`s, closing braces, and the
// declarations of p, q, info, ws, end, next and i are NOT visible here.
// The comments below describe only what the visible lines establish; do
// not treat this fragment as compilable on its own.
46 scavenge_block (bdescr *bd)
// Saved copies of per-GC-thread (gct) state, restored around the mutable
// cases below.
50 step *saved_evac_step;
51 rtsBool saved_eager_promotion;
54 debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
55 bd->start, bd->gen_no, bd->step->no, bd->u.scan);
// Objects scavenged here are evacuated into this block's own step.
58 gct->evac_step = bd->step;
59 saved_evac_step = gct->evac_step;
60 saved_eager_promotion = gct->eager_promotion;
61 gct->failed_to_evac = rtsFalse;
// Workspace for this step on this GC thread.
63 ws = &gct->steps[bd->step->abs_no];
67 // we might be evacuating into the very object that we're
68 // scavenging, so we have to check the real bd->free pointer each
69 // time around the loop.
70 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
72 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
73 info = get_itbl((StgClosure *)p);
75 ASSERT(gct->thunk_selector_depth == 0);
// --- MVAR (presumably MVAR_CLEAN/MVAR_DIRTY; labels not visible) ---
// Evacuate head/tail/value with eager promotion off, then mark the
// MVar dirty iff anything failed to evacuate (i.e. it still points
// into a younger generation).
83 StgMVar *mvar = ((StgMVar *)p);
84 gct->eager_promotion = rtsFalse;
85 evacuate((StgClosure **)&mvar->head);
86 evacuate((StgClosure **)&mvar->tail);
87 evacuate((StgClosure **)&mvar->value);
88 gct->eager_promotion = saved_eager_promotion;
90 if (gct->failed_to_evac) {
91 mvar->header.info = &stg_MVAR_DIRTY_info;
93 mvar->header.info = &stg_MVAR_CLEAN_info;
95 p += sizeofW(StgMVar);
// --- Fixed-layout FUN/THUNK/CONSTR cases (labels not visible). Each
// scavenges the SRT where applicable, evacuates the pointer fields,
// and advances p by header + payload size in words. Payload pointers
// are evacuated in reverse order ([1] then [0]) where two exist.
97 
100 scavenge_fun_srt(info);
101 evacuate(&((StgClosure *)p)->payload[1]);
102 evacuate(&((StgClosure *)p)->payload[0]);
103 p += sizeofW(StgHeader) + 2;
107 scavenge_thunk_srt(info);
108 evacuate(&((StgThunk *)p)->payload[1]);
109 evacuate(&((StgThunk *)p)->payload[0]);
110 p += sizeofW(StgThunk) + 2;
114 evacuate(&((StgClosure *)p)->payload[1]);
115 evacuate(&((StgClosure *)p)->payload[0]);
116 p += sizeofW(StgHeader) + 2;
120 scavenge_thunk_srt(info);
121 evacuate(&((StgThunk *)p)->payload[0]);
122 p += sizeofW(StgThunk) + 1;
126 scavenge_fun_srt(info);
128 evacuate(&((StgClosure *)p)->payload[0]);
129 p += sizeofW(StgHeader) + 1;
133 scavenge_thunk_srt(info);
134 p += sizeofW(StgThunk) + 1;
138 scavenge_fun_srt(info);
140 p += sizeofW(StgHeader) + 1;
144 scavenge_thunk_srt(info);
145 p += sizeofW(StgThunk) + 2;
149 scavenge_fun_srt(info);
151 p += sizeofW(StgHeader) + 2;
155 scavenge_thunk_srt(info);
156 evacuate(&((StgThunk *)p)->payload[0]);
157 p += sizeofW(StgThunk) + 2;
161 scavenge_fun_srt(info);
163 evacuate(&((StgClosure *)p)->payload[0]);
164 p += sizeofW(StgHeader) + 2;
168 scavenge_fun_srt(info);
// --- Generic pointers-first layout (THUNK variant): evacuate each of
// the layout.payload.ptrs pointer words, then skip the nptrs words.
175 scavenge_thunk_srt(info);
176 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
177 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
178 evacuate((StgClosure **)p);
180 p += info->layout.payload.nptrs;
// --- Generic pointers-first layout (non-thunk closures): same scheme.
191 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
192 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
193 evacuate((StgClosure **)p);
195 p += info->layout.payload.nptrs;
// --- BCO: evacuate the instruction array, literals and pointer table.
200 StgBCO *bco = (StgBCO *)p;
201 evacuate((StgClosure **)&bco->instrs);
202 evacuate((StgClosure **)&bco->literals);
203 evacuate((StgClosure **)&bco->ptrs);
// --- IND_PERM (label not visible): in an old generation, retag the
// closure as IND_OLDGEN_PERM; the LDV calls keep heap-profiling
// censuses consistent across the info-table change.
209 if (bd->gen_no != 0) {
212 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
213 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
214 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
217 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
219 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
221 // We pretend that p has just been created.
222 LDV_RECORD_CREATE((StgClosure *)p);
// fallthrough (presumably) into the shared indirection handling:
225 case IND_OLDGEN_PERM:
226 evacuate(&((StgInd *)p)->indirectee);
227 p += sizeofW(StgInd);
// --- MUT_VAR_CLEAN/MUT_VAR_DIRTY (labels not visible): like MVAR,
// evacuate the contents non-eagerly and set the dirty/clean header
// according to failed_to_evac. q is presumably the start of the
// closure (saved before p advanced) -- declaration not visible.
232 gct->eager_promotion = rtsFalse;
233 evacuate(&((StgMutVar *)p)->var);
234 gct->eager_promotion = saved_eager_promotion;
236 if (gct->failed_to_evac) {
237 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
239 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
241 p += sizeofW(StgMutVar);
// --- Blackholes: nothing to scavenge, just skip the closure.
245 case SE_CAF_BLACKHOLE:
248 p += BLACKHOLE_sizeW();
// --- THUNK_SELECTOR: only the selectee is a pointer field.
253 StgSelector *s = (StgSelector *)p;
254 evacuate(&s->selectee);
255 p += THUNK_SELECTOR_sizeW();
259 // A chunk of stack saved in a heap object
262 StgAP_STACK *ap = (StgAP_STACK *)p;
265 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
266 p = (StgPtr)ap->payload + ap->size;
// --- PAP / AP: variable-sized; helpers return the address just past
// the closure.
271 p = scavenge_PAP((StgPAP *)p);
275 p = scavenge_AP((StgAP *)p);
// --- ARR_WORDS: no pointers, skip.
280 p += arr_words_sizeW((StgArrWords *)p);
283 case MUT_ARR_PTRS_CLEAN:
284 case MUT_ARR_PTRS_DIRTY:
289 // We don't eagerly promote objects pointed to by a mutable
290 // array, but if we find the array only points to objects in
291 // the same or an older generation, we mark it "clean" and
292 // avoid traversing it during minor GCs.
293 gct->eager_promotion = rtsFalse;
294 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
295 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
296 evacuate((StgClosure **)p);
298 gct->eager_promotion = saved_eager_promotion;
300 if (gct->failed_to_evac) {
301 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
303 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
306 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
310 case MUT_ARR_PTRS_FROZEN:
311 case MUT_ARR_PTRS_FROZEN0:
// Frozen arrays are immutable: scavenge the elements, but only touch
// the mutable list (via FROZEN0) if some element failed to evacuate.
316 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
317 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
318 evacuate((StgClosure **)p);
321 // If we're going to put this object on the mutable list, then
322 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
323 if (gct->failed_to_evac) {
324 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
326 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
// NOTE(review): the line above as sampled reads FROZEN_info in the
// original (orig line 326); the TSO case body (orig ~333+) is also
// mostly missing from this excerpt.
333 StgTSO *tso = (StgTSO *)p;
// --- STM closure types. Each one: evacuate its pointer fields,
// restore evac_step (it is presumably zeroed by code not visible in
// this excerpt -- confirm against the full file), and force
// failed_to_evac so the object always lands on the mutable list.
339 case TVAR_WATCH_QUEUE:
341 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
343 evacuate((StgClosure **)&wq->closure);
344 evacuate((StgClosure **)&wq->next_queue_entry);
345 evacuate((StgClosure **)&wq->prev_queue_entry);
346 gct->evac_step = saved_evac_step;
347 gct->failed_to_evac = rtsTrue; // mutable
348 p += sizeofW(StgTVarWatchQueue);
354 StgTVar *tvar = ((StgTVar *) p);
356 evacuate((StgClosure **)&tvar->current_value);
357 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
358 gct->evac_step = saved_evac_step;
359 gct->failed_to_evac = rtsTrue; // mutable
360 p += sizeofW(StgTVar);
366 StgTRecHeader *trec = ((StgTRecHeader *) p);
368 evacuate((StgClosure **)&trec->enclosing_trec);
369 evacuate((StgClosure **)&trec->current_chunk);
370 evacuate((StgClosure **)&trec->invariants_to_check);
371 gct->evac_step = saved_evac_step;
372 gct->failed_to_evac = rtsTrue; // mutable
373 p += sizeofW(StgTRecHeader);
380 StgTRecChunk *tc = ((StgTRecChunk *) p);
381 TRecEntry *e = &(tc -> entries[0]);
383 evacuate((StgClosure **)&tc->prev_chunk);
// Only entries up to next_entry_idx are live; the rest of the chunk
// is uninitialized.
384 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
385 evacuate((StgClosure **)&e->tvar);
386 evacuate((StgClosure **)&e->expected_value);
387 evacuate((StgClosure **)&e->new_value);
389 gct->evac_step = saved_evac_step;
390 gct->failed_to_evac = rtsTrue; // mutable
391 p += sizeofW(StgTRecChunk);
395 case ATOMIC_INVARIANT:
397 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
399 evacuate(&invariant->code);
400 evacuate((StgClosure **)&invariant->last_execution);
401 gct->evac_step = saved_evac_step;
402 gct->failed_to_evac = rtsTrue; // mutable
403 p += sizeofW(StgAtomicInvariant);
407 case INVARIANT_CHECK_QUEUE:
409 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
411 evacuate((StgClosure **)&queue->invariant);
412 evacuate((StgClosure **)&queue->my_execution);
413 evacuate((StgClosure **)&queue->next_queue_entry);
414 gct->evac_step = saved_evac_step;
415 gct->failed_to_evac = rtsTrue; // mutable
416 p += sizeofW(StgInvariantCheckQueue);
// default: unknown closure type is a fatal RTS error.
421 barf("scavenge: unimplemented/strange closure type %d @ %p",
426 * We need to record the current object on the mutable list if
427 * (a) It is actually mutable, or
428 * (b) It contains pointers to a younger generation.
429 * Case (b) arises if we didn't manage to promote everything that
430 * the current object points to into the current generation.
432 if (gct->failed_to_evac) {
433 gct->failed_to_evac = rtsFalse;
434 if (bd->gen_no > 0) {
435 recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
// End of the scan loop: account for words copied into this block and
// words scanned, then advance the scan pointer to bd->free.
441 gct->copied += ws->todo_free - bd->free;
445 debugTrace(DEBUG_gc, " scavenged %ld bytes",
446 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
448 // update stats: this is a block that has been scavenged
449 gct->scanned += bd->free - bd->u.scan;
450 bd->u.scan = bd->free;
452 if (bd != ws->todo_bd) {
453 // we're not going to evac any more objects into
454 // this block, so push it now.
455 push_scanned_block(bd, ws);
// Undo the per-pass macro aliases so the second #include of this file
// (see the note at the top) can redefine them cleanly.
461 #undef scavenge_block
463 #undef recordMutableGen_GC