/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
// This file is #included into Scav.c, twice: firstly with MINOR_GC
// defined, the second time without.

#define scavenge_block(a,b) scavenge_block0(a,b)
#define evacuate(a) evacuate0(a)

static void scavenge_block (bdescr *bd, StgPtr scan);

/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_step is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_step back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
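
// The loop below walks the closures in the block linearly: each case
// evacuates the pointer fields of the closure at p and then advances p
// past that closure, so every closure in the block is visited exactly once.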

static void
scavenge_block (bdescr *bd, StgPtr scan)
{
  StgPtr p, q;
  StgInfoTable *info;
  step *saved_evac_step;
  rtsBool saved_eager_promotion;
  step_workspace *ws;

  p = scan;

  debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
             bd->start, bd->gen_no, bd->step->no, scan);

  gct->evac_step = bd->step;
  saved_evac_step = gct->evac_step;
  saved_eager_promotion = gct->eager_promotion;
  gct->failed_to_evac = rtsFalse;

  ws = &gct->steps[bd->step->abs_no];
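
  // gct is this GC thread's gc_thread state: evac_step, eager_promotion and
  // failed_to_evac are per-thread flags consulted and set by evacuate(),
  // and ws is the thread's workspace for the step that bd belongs to.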

  // we might be evacuating into the very object that we're
  // scavenging, so we have to check the real bd->free pointer each
  // time around the loop.
  while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    ASSERT(gct->thunk_selector_depth == 0);

    q = p;
    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        p += sizeofW(StgMVar);
        break;
    }
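
    // The fixed-layout cases below (FUN_m_n, THUNK_m_n, CONSTR_m_n) are
    // closures with exactly m pointer words and n non-pointer words in the
    // payload, so they can be scavenged without consulting the layout
    // information in the info table.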

    case FUN_2_0:
        scavenge_fun_srt(info);
        evacuate(&((StgClosure *)p)->payload[1]);
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_2_0:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[1]);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case CONSTR_2_0:
        evacuate(&((StgClosure *)p)->payload[1]);
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_0:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_1_0:
        scavenge_fun_srt(info);
        // fall through
    case CONSTR_1_0:
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_1:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_0_1:
        scavenge_fun_srt(info);
        // fall through
    case CONSTR_0_1:
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_2:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_0_2:
        scavenge_fun_srt(info);
        // fall through
    case CONSTR_0_2:
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_1:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_1_1:
        scavenge_fun_srt(info);
        // fall through
    case CONSTR_1_1:
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;
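
    // The remaining FUN/THUNK/CONSTR cases have no size encoded in the
    // closure type, so the pointers/non-pointers counts are taken from
    // info->layout.payload instead.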

    case FUN:
        scavenge_fun_srt(info);
        goto gen_obj;

    case THUNK:
    {
        StgPtr end;

        scavenge_thunk_srt(info);
        end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;
        break;
    }

    gen_obj:
    case CONSTR:
    case WEAK:
    case STABLE_NAME:
    {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;
        break;
    }

    case BCO:
    {
        StgBCO *bco = (StgBCO *)p;
        evacuate((StgClosure **)&bco->instrs);
        evacuate((StgClosure **)&bco->literals);
        evacuate((StgClosure **)&bco->ptrs);
        p += bco_sizeW(bco);
        break;
    }

    case IND_PERM:
        if (bd->gen_no != 0) {
#ifdef PROFILING
            // @LDV profiling
            // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
            // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
            LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
            // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
            SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

            // We pretend that p has just been created.
            LDV_RECORD_CREATE((StgClosure *)p);
        }
        // fall through
    case IND_OLDGEN_PERM:
        evacuate(&((StgInd *)p)->indirectee);
        p += sizeofW(StgInd);
        break;
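
    // MUT_VARs use the same clean/dirty scheme as MVars above: if the
    // variable still points into a younger generation after scavenging,
    // it is marked dirty so that it stays on the mutable list.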

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        p += sizeofW(StgMutVar);
        break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        p += BLACKHOLE_sizeW();
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
    }

    // A chunk of stack saved in a heap object
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        p += arr_words_sizeW((StgArrWords *)p);
        break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
        // follow everything
    {
        StgPtr next;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
        break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // follow everything
    {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }

    case TSO:
    {
        StgTSO *tso = (StgTSO *)p;

        gct->eager_promotion = rtsFalse;
        scavengeTSO(tso);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            tso->flags |= TSO_DIRTY;
        } else {
            tso->flags &= ~TSO_DIRTY;
        }

        gct->failed_to_evac = rtsTrue; // always on the mutable list
        p += tso_sizeW(tso);
        break;
    }
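
    // The STM closure types below are always treated as mutable: evac_step
    // is temporarily set to zero while their fields are evacuated (eager
    // promotion isn't worthwhile for them, as noted in the header comment),
    // and failed_to_evac is then forced to rtsTrue so that the object is
    // kept on the mutable list.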

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTVarWatchQueue);
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTVar);
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTRecHeader);
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc->entries[0]);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc->next_entry_idx; i++, e++) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTRecChunk);
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_step = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgAtomicInvariant);
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgInvariantCheckQueue);
        break;
    }

    default:
        barf("scavenge: unimplemented/strange closure type %d @ %p",
             info->type, p);
    }

    /*
     * We need to record the current object on the mutable list if
     *  (a) It is actually mutable, or
     *  (b) It contains pointers to a younger generation.
     * Case (b) arises if we didn't manage to promote everything that
     * the current object points to into the current generation.
     */
    if (gct->failed_to_evac) {
        gct->failed_to_evac = rtsFalse;
        if (bd->gen_no > 0) {
            recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
        }
    }
  }

  debugTrace(DEBUG_gc, "  scavenged %ld bytes",
             (unsigned long)((bd->free - scan) * sizeof(W_)));
}