1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2006
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * https://gitlab.haskell.org/ghc/ghc/-/wikis/commentary/rts/storage/gc
12 * ---------------------------------------------------------------------------*/
14 // This file is #included into Scav.c, twice: firstly with MINOR_GC
15 // defined, the second time without.
// Rename the entry points so the second inclusion compiles a distinct
// copy: calls to scavenge_block/evacuate in this inclusion resolve to the
// "...0" variants.  NOTE(review): presumably the "...0" names are the
// non-MINOR_GC instantiation — confirm against the enclosing Scav.c.
18 #define scavenge_block(a,b) scavenge_block0(a,b)
19 #define evacuate(a) evacuate0(a)
// Forward declaration; the definition follows below in this same file.
25 static void scavenge_block (bdescr *bd, StgPtr scan);
27 /* -----------------------------------------------------------------------------
28 Scavenge a block from the given scan pointer up to bd->free.
30 evac_step is set by the caller to be either zero (for a step in a
31 generation < N) or G where G is the generation of the step being scavenged.
34 We sometimes temporarily change evac_step back to zero if we're
35 scavenging a mutable object where eager promotion isn't such a good idea.
37 -------------------------------------------------------------------------- */
// NOTE(review): this is an elided, line-numbered listing — each line begins
// with its original source line number, and many lines (the return type,
// switch header, case labels, closing braces, and the declarations of p, q,
// info, end, next, i) are missing.  The comments below annotate only what
// is visible; do not treat this fragment as compilable C.
//
// Scavenge every closure in block 'bd' starting at 'scan' and stopping at
// bd->free: for each closure, evacuate all pointer fields it contains,
// then advance p by the closure's size.  Mutable objects additionally get
// their dirty/clean info tables maintained and are recorded on the
// mutable list when they still point into a younger generation.
40 scavenge_block (bdescr *bd, StgPtr scan)
44 step *saved_evac_step;
48 debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
49 bd->start, bd->gen_no, bd->step->no, scan);
// Objects evacuated from this block go to this block's own step; remember
// it so the STM cases below can restore it after temporarily changing it.
51 gct->evac_step = bd->step;
52 saved_evac_step = gct->evac_step;
53 gct->failed_to_evac = rtsFalse;
55 // we might be evacuating into the very object that we're
56 // scavenging, so we have to check the real bd->free pointer each
57 // time around the loop.
58 while (p < bd->free) {
60 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
61 info = get_itbl((StgClosure *)p);
63 ASSERT(gct->thunk_selector_depth == 0);
// --- MVAR (elided case label): evacuate head/tail/value with eager
// promotion off, then flip the info table to DIRTY if anything stayed in
// a younger generation, CLEAN otherwise.
71 rtsBool saved_eager_promotion = gct->eager_promotion;
73 StgMVar *mvar = ((StgMVar *)p);
74 gct->eager_promotion = rtsFalse;
75 evacuate((StgClosure **)&mvar->head);
76 evacuate((StgClosure **)&mvar->tail);
77 evacuate((StgClosure **)&mvar->value);
78 gct->eager_promotion = saved_eager_promotion;
80 if (gct->failed_to_evac) {
81 mvar->header.info = &stg_MVAR_DIRTY_info;
83 mvar->header.info = &stg_MVAR_CLEAN_info;
85 p += sizeofW(StgMVar);
// --- Fixed-layout cases (case labels elided).  Each evacuates the known
// pointer fields and bumps p by header + payload words.  Fields are
// evacuated in reverse ([1] before [0]) here.
// FUN with 2 pointers, 0 non-pointers:
91 scavenge_fun_srt(info);
93 evacuate(&((StgClosure *)p)->payload[1]);
94 evacuate(&((StgClosure *)p)->payload[0]);
95 p += sizeofW(StgHeader) + 2;
// THUNK with 2 pointers:
100 scavenge_thunk_srt(info);
102 evacuate(&((StgThunk *)p)->payload[1]);
103 evacuate(&((StgThunk *)p)->payload[0]);
104 p += sizeofW(StgThunk) + 2;
// CONSTR with 2 pointers:
108 evacuate(&((StgClosure *)p)->payload[1]);
109 evacuate(&((StgClosure *)p)->payload[0]);
110 p += sizeofW(StgHeader) + 2;
// THUNK with 1 pointer:
115 scavenge_thunk_srt(info);
117 evacuate(&((StgThunk *)p)->payload[0]);
118 p += sizeofW(StgThunk) + 1;
// FUN/CONSTR with 1 pointer:
123 scavenge_fun_srt(info);
126 evacuate(&((StgClosure *)p)->payload[0]);
127 p += sizeofW(StgHeader) + 1;
// THUNK with 0 pointers, 1 non-pointer (nothing to evacuate):
132 scavenge_thunk_srt(info);
134 p += sizeofW(StgThunk) + 1;
// FUN/CONSTR with 0 pointers, 1 non-pointer:
139 scavenge_fun_srt(info);
142 p += sizeofW(StgHeader) + 1;
// THUNK with 0 pointers, 2 non-pointers:
147 scavenge_thunk_srt(info);
149 p += sizeofW(StgThunk) + 2;
// FUN/CONSTR with 0 pointers, 2 non-pointers:
154 scavenge_fun_srt(info);
157 p += sizeofW(StgHeader) + 2;
// THUNK with 1 pointer, 1 non-pointer:
162 scavenge_thunk_srt(info);
164 evacuate(&((StgThunk *)p)->payload[0]);
165 p += sizeofW(StgThunk) + 2;
// FUN/CONSTR with 1 pointer, 1 non-pointer:
170 scavenge_fun_srt(info);
173 evacuate(&((StgClosure *)p)->payload[0]);
174 p += sizeofW(StgHeader) + 2;
// Generic FUN (layout read from the info table; loop elided):
179 scavenge_fun_srt(info);
// Generic THUNK: evacuate the ptrs-prefix of the payload, then skip the
// non-pointer words.
188 scavenge_thunk_srt(info);
190 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
191 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
192 evacuate((StgClosure **)p);
194 p += info->layout.payload.nptrs;
// Generic CONSTR and friends: same pattern, StgClosure payload.
205 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
206 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
207 evacuate((StgClosure **)p);
209 p += info->layout.payload.nptrs;
// --- BCO: a bytecode object; evacuate its three sub-objects.
214 StgBCO *bco = (StgBCO *)p;
215 evacuate((StgClosure **)&bco->instrs);
216 evacuate((StgClosure **)&bco->literals);
217 evacuate((StgClosure **)&bco->ptrs);
// --- IND_PERM (case label elided): in an old generation, retag the
// closure as IND_OLDGEN_PERM and keep the LDV profiler's books straight,
// then fall through to the indirectee evacuation below.
223 if (bd->gen_no != 0) {
226 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
227 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
228 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
231 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
233 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
235 // We pretend that p has just been created.
236 LDV_RECORD_CREATE((StgClosure *)p);
239 case IND_OLDGEN_PERM:
240 evacuate(&((StgInd *)p)->indirectee);
241 p += sizeofW(StgInd);
// --- MUT_VAR: evacuate the cell with eager promotion off and flip the
// CLEAN/DIRTY info table accordingly (q is the elided saved copy of p).
245 case MUT_VAR_DIRTY: {
246 rtsBool saved_eager_promotion = gct->eager_promotion;
248 gct->eager_promotion = rtsFalse;
249 evacuate(&((StgMutVar *)p)->var);
250 gct->eager_promotion = saved_eager_promotion;
252 if (gct->failed_to_evac) {
253 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
255 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
257 p += sizeofW(StgMutVar);
// --- Black holes contain no pointers to scavenge; just skip them.
262 case SE_CAF_BLACKHOLE:
265 p += BLACKHOLE_sizeW();
// --- THUNK_SELECTOR: one pointer field, the selectee.
270 StgSelector *s = (StgSelector *)p;
271 evacuate(&s->selectee);
272 p += THUNK_SELECTOR_sizeW();
// --- AP_STACK:
276 // A chunk of stack saved in a heap object
279 StgAP_STACK *ap = (StgAP_STACK *)p;
282 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
283 p = (StgPtr)ap->payload + ap->size;
// --- PAP / AP: helpers return the address just past the object.
288 p = scavenge_PAP((StgPAP *)p);
292 p = scavenge_AP((StgAP *)p);
// --- ARR_WORDS: raw (non-pointer) data, nothing to evacuate.
297 p += arr_words_sizeW((StgArrWords *)p);
// --- Mutable pointer arrays:
300 case MUT_ARR_PTRS_CLEAN:
301 case MUT_ARR_PTRS_DIRTY:
307 // We don't eagerly promote objects pointed to by a mutable
308 // array, but if we find the array only points to objects in
309 // the same or an older generation, we mark it "clean" and
310 // avoid traversing it during minor GCs.
311 saved_eager = gct->eager_promotion;
312 gct->eager_promotion = rtsFalse;
313 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
314 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
315 evacuate((StgClosure **)p);
317 gct->eager_promotion = saved_eager;
319 if (gct->failed_to_evac) {
320 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
322 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
325 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
// --- Frozen (immutable) pointer arrays: scavenge like a constructor,
// but demote to FROZEN0 if they still point into a younger generation.
329 case MUT_ARR_PTRS_FROZEN:
330 case MUT_ARR_PTRS_FROZEN0:
335 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
336 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
337 evacuate((StgClosure **)p);
340 // If we're going to put this object on the mutable list, then
341 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
342 if (gct->failed_to_evac) {
343 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
345 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
// --- TSO (thread state object; the scavenge of its fields is elided):
// maintain the TSO_DIRTY flag, and always keep it on the mutable list.
352 StgTSO *tso = (StgTSO *)p;
353 rtsBool saved_eager = gct->eager_promotion;
355 gct->eager_promotion = rtsFalse;
357 gct->eager_promotion = saved_eager;
359 if (gct->failed_to_evac) {
360 tso->flags |= TSO_DIRTY;
362 tso->flags &= ~TSO_DIRTY;
365 gct->failed_to_evac = rtsTrue; // always on the mutable list
// --- STM objects.  Each evacuates its pointer fields (with evac_step
// temporarily changed — the change itself is elided here; only the
// restore to saved_evac_step is visible), then forces failed_to_evac so
// the object always lands on the mutable list.
370 case TVAR_WATCH_QUEUE:
372 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
374 evacuate((StgClosure **)&wq->closure);
375 evacuate((StgClosure **)&wq->next_queue_entry);
376 evacuate((StgClosure **)&wq->prev_queue_entry);
377 gct->evac_step = saved_evac_step;
378 gct->failed_to_evac = rtsTrue; // mutable
379 p += sizeofW(StgTVarWatchQueue);
// TVAR:
385 StgTVar *tvar = ((StgTVar *) p);
387 evacuate((StgClosure **)&tvar->current_value);
388 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
389 gct->evac_step = saved_evac_step;
390 gct->failed_to_evac = rtsTrue; // mutable
391 p += sizeofW(StgTVar);
// TREC_HEADER:
397 StgTRecHeader *trec = ((StgTRecHeader *) p);
399 evacuate((StgClosure **)&trec->enclosing_trec);
400 evacuate((StgClosure **)&trec->current_chunk);
401 evacuate((StgClosure **)&trec->invariants_to_check);
402 gct->evac_step = saved_evac_step;
403 gct->failed_to_evac = rtsTrue; // mutable
404 p += sizeofW(StgTRecHeader);
// TREC_CHUNK: walk the in-use entries and evacuate each entry's three
// pointer fields.
411 StgTRecChunk *tc = ((StgTRecChunk *) p);
412 TRecEntry *e = &(tc -> entries[0]);
414 evacuate((StgClosure **)&tc->prev_chunk);
415 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
416 evacuate((StgClosure **)&e->tvar);
417 evacuate((StgClosure **)&e->expected_value);
418 evacuate((StgClosure **)&e->new_value);
420 gct->evac_step = saved_evac_step;
421 gct->failed_to_evac = rtsTrue; // mutable
422 p += sizeofW(StgTRecChunk);
// ATOMIC_INVARIANT:
426 case ATOMIC_INVARIANT:
428 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
430 evacuate(&invariant->code);
431 evacuate((StgClosure **)&invariant->last_execution);
432 gct->evac_step = saved_evac_step;
433 gct->failed_to_evac = rtsTrue; // mutable
434 p += sizeofW(StgAtomicInvariant);
// INVARIANT_CHECK_QUEUE:
438 case INVARIANT_CHECK_QUEUE:
440 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
442 evacuate((StgClosure **)&queue->invariant);
443 evacuate((StgClosure **)&queue->my_execution);
444 evacuate((StgClosure **)&queue->next_queue_entry);
445 gct->evac_step = saved_evac_step;
446 gct->failed_to_evac = rtsTrue; // mutable
447 p += sizeofW(StgInvariantCheckQueue);
// Unknown closure type is a fatal RTS bug (argument list elided).
452 barf("scavenge: unimplemented/strange closure type %d @ %p",
457 * We need to record the current object on the mutable list if
458 * (a) It is actually mutable, or
459 * (b) It contains pointers to a younger generation.
460 * Case (b) arises if we didn't manage to promote everything that
461 * the current object points to into the current generation.
463 if (gct->failed_to_evac) {
464 gct->failed_to_evac = rtsFalse;
// Generation 0 has no mutable list — only record in older generations.
465 if (bd->gen_no > 0) {
466 recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
// End of the scan loop (closing braces elided); report work done.
471 debugTrace(DEBUG_gc, " scavenged %ld bytes", (bd->free - scan) * sizeof(W_));