1 /* -----------------------------------------------------------------------*-c-*-
3 * (c) The GHC Team 1998-2006
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 // This file is #included into Scav.c, twice: firstly with MINOR_GC
15 // defined, the second time without.
18 #define scavenge_block(a,b) scavenge_block0(a,b)
19 #define evacuate(a) evacuate0(a)
23 #undef recordMutableGen_GC
26 static void scavenge_block (bdescr *bd, StgPtr scan);
28 /* -----------------------------------------------------------------------------
29 Scavenge a block from the given scan pointer up to bd->free.
31 evac_step is set by the caller to be either zero (for a step in a
32 generation < N) or G where G is the generation of the step being scavenged.
35 We sometimes temporarily change evac_step back to zero if we're
36 scavenging a mutable object where eager promotion isn't such a good idea.
38 -------------------------------------------------------------------------- */
41 scavenge_block (bdescr *bd, StgPtr scan)
// Scavenge one heap block: walk every closure from `scan` up to bd->free,
// dispatching on the closure's info table and evacuating each pointer field.
// This file is #included into Scav.c twice (with/without MINOR_GC), hence the
// macro renaming of scavenge_block/evacuate above.
//
// NOTE(review): this extract is elided -- the switch statement's case labels,
// most braces, and the declarations of p, q, info, ws, end, next and i are
// not visible here.  Comments below describe only the visible statements;
// anything depending on elided code is marked as an assumption.
45 step *saved_evac_step;
46 rtsBool saved_eager_promotion;
// saved_* snapshot per-GC-thread (gct) state that the mutable-object cases
// temporarily override and then restore.
51 debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
52 bd->start, bd->gen_no, bd->step->no, scan);
// Objects evacuated while scavenging this block default to this block's step.
54 gct->evac_step = bd->step;
55 saved_evac_step = gct->evac_step;
56 saved_eager_promotion = gct->eager_promotion;
57 gct->failed_to_evac = rtsFalse;
// Workspace for this step; its todo block may be the very block we scan.
59 ws = &gct->steps[bd->step->abs_no];
61 // we might be evacuating into the very object that we're
62 // scavenging, so we have to check the real bd->free pointer each
63 // time around the loop.
64 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
66 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
67 info = get_itbl((StgClosure *)p);
69 ASSERT(gct->thunk_selector_depth == 0);
// --- MVAR (case label elided): scavenge head/tail/value with eager
// promotion off, then mark the MVar DIRTY if any field still points to a
// younger generation, CLEAN otherwise.
77 StgMVar *mvar = ((StgMVar *)p);
78 gct->eager_promotion = rtsFalse;
79 evacuate((StgClosure **)&mvar->head);
80 evacuate((StgClosure **)&mvar->tail);
81 evacuate((StgClosure **)&mvar->value);
82 gct->eager_promotion = saved_eager_promotion;
84 if (gct->failed_to_evac) {
85 mvar->header.info = &stg_MVAR_DIRTY_info;
// else-branch brace elided:
87 mvar->header.info = &stg_MVAR_CLEAN_info;
89 p += sizeofW(StgMVar);
// --- FUN_2_0 (label elided): two-pointer payload; note the fields are
// evacuated in reverse order (payload[1] first).
95 scavenge_fun_srt(info);
97 evacuate(&((StgClosure *)p)->payload[1]);
98 evacuate(&((StgClosure *)p)->payload[0]);
99 p += sizeofW(StgHeader) + 2;
// --- THUNK_2_0 (label elided): thunks use StgThunk (extra SMP header word).
104 scavenge_thunk_srt(info);
106 evacuate(&((StgThunk *)p)->payload[1]);
107 evacuate(&((StgThunk *)p)->payload[0]);
108 p += sizeofW(StgThunk) + 2;
// --- CONSTR_2_0 (label elided): no SRT to scavenge for constructors.
112 evacuate(&((StgClosure *)p)->payload[1]);
113 evacuate(&((StgClosure *)p)->payload[0]);
114 p += sizeofW(StgHeader) + 2;
// --- THUNK_1_0 (label elided): one pointer, no non-pointers.
119 scavenge_thunk_srt(info);
121 evacuate(&((StgThunk *)p)->payload[0]);
122 p += sizeofW(StgThunk) + 1;
// --- FUN_1_0 / CONSTR_1_0 (labels elided).
127 scavenge_fun_srt(info);
130 evacuate(&((StgClosure *)p)->payload[0]);
131 p += sizeofW(StgHeader) + 1;
// --- THUNK_0_1 (label elided): no pointer fields, just skip the payload.
136 scavenge_thunk_srt(info);
138 p += sizeofW(StgThunk) + 1;
// --- FUN_0_1 / CONSTR_0_1 (labels elided).
143 scavenge_fun_srt(info);
146 p += sizeofW(StgHeader) + 1;
// --- THUNK_0_2 (label elided): two non-pointer words.
151 scavenge_thunk_srt(info);
153 p += sizeofW(StgThunk) + 2;
// --- FUN_0_2 / CONSTR_0_2 (labels elided).
158 scavenge_fun_srt(info);
161 p += sizeofW(StgHeader) + 2;
// --- THUNK_1_1 (label elided): one pointer then one non-pointer word.
166 scavenge_thunk_srt(info);
168 evacuate(&((StgThunk *)p)->payload[0]);
169 p += sizeofW(StgThunk) + 2;
// --- FUN_1_1 / CONSTR_1_1 (labels elided).
174 scavenge_fun_srt(info);
177 evacuate(&((StgClosure *)p)->payload[0]);
178 p += sizeofW(StgHeader) + 2;
// --- FUN (generic, label elided; fallthrough body elided).
183 scavenge_fun_srt(info);
// --- THUNK (generic, label elided): evacuate layout.payload.ptrs pointer
// words, then skip layout.payload.nptrs non-pointer words.
192 scavenge_thunk_srt(info);
194 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
195 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
196 evacuate((StgClosure **)p);
198 p += info->layout.payload.nptrs;
// --- CONSTR / WEAK / STABLE_NAME etc. (generic case, labels elided): same
// ptrs-then-nptrs layout walk as THUNK but via StgClosure.
209 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
210 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
211 evacuate((StgClosure **)p);
213 p += info->layout.payload.nptrs;
// --- BCO (label elided): bytecode object; instrs/literals/ptrs are heap
// pointers (the size bump via bco_sizeW is elided).
218 StgBCO *bco = (StgBCO *)p;
219 evacuate((StgClosure **)&bco->instrs);
220 evacuate((StgClosure **)&bco->literals);
221 evacuate((StgClosure **)&bco->ptrs);
// --- IND_PERM (label elided): in an old generation, retag the closure as
// IND_OLDGEN_PERM; the LDV calls keep the lag/drag/void profiler's census
// consistent across the info-table change (PROFILING guards elided).
227 if (bd->gen_no != 0) {
230 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
231 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
232 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
235 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
237 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
239 // We pretend that p has just been created.
240 LDV_RECORD_CREATE((StgClosure *)p);
// fall through to the IND_OLDGEN_PERM handling:
243 case IND_OLDGEN_PERM:
244 evacuate(&((StgInd *)p)->indirectee);
245 p += sizeofW(StgInd);
// --- MUT_VAR_CLEAN/DIRTY (labels elided): dirty/clean retagging mirrors the
// MVAR case.  NOTE(review): `q` is declared in elided code -- presumably it
// holds the start address of the closure being scavenged; confirm upstream.
250 gct->eager_promotion = rtsFalse;
251 evacuate(&((StgMutVar *)p)->var);
252 gct->eager_promotion = saved_eager_promotion;
254 if (gct->failed_to_evac) {
255 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
257 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
259 p += sizeofW(StgMutVar);
// --- BLACKHOLE family: no pointer fields to scavenge, just skip.
263 case SE_CAF_BLACKHOLE:
266 p += BLACKHOLE_sizeW();
// --- THUNK_SELECTOR (label elided): only the selectee is a pointer.
271 StgSelector *s = (StgSelector *)p;
272 evacuate(&s->selectee);
273 p += THUNK_SELECTOR_sizeW();
// --- AP_STACK (label elided):
277 // A chunk of stack saved in a heap object
280 StgAP_STACK *ap = (StgAP_STACK *)p;
// (evacuation of ap->fun elided) then scavenge the saved stack chunk in situ.
283 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
284 p = (StgPtr)ap->payload + ap->size;
// --- PAP / AP (labels elided): helpers return the address past the object.
289 p = scavenge_PAP((StgPAP *)p);
293 p = scavenge_AP((StgAP *)p);
// --- ARR_WORDS (label elided): raw bytes, nothing to evacuate.
298 p += arr_words_sizeW((StgArrWords *)p);
301 case MUT_ARR_PTRS_CLEAN:
302 case MUT_ARR_PTRS_DIRTY:
307 // We don't eagerly promote objects pointed to by a mutable
308 // array, but if we find the array only points to objects in
309 // the same or an older generation, we mark it "clean" and
310 // avoid traversing it during minor GCs.
311 gct->eager_promotion = rtsFalse;
312 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
313 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
314 evacuate((StgClosure **)p);
316 gct->eager_promotion = saved_eager_promotion;
318 if (gct->failed_to_evac) {
319 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
321 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
// Forced true regardless of the clean/dirty outcome:
324 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
// Frozen arrays are immutable; they go on the mutable list only if they
// still point to a younger generation (FROZEN0 tag records that).
328 case MUT_ARR_PTRS_FROZEN:
329 case MUT_ARR_PTRS_FROZEN0:
334 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
335 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
336 evacuate((StgClosure **)p);
339 // If we're going to put this object on the mutable list, then
340 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
341 if (gct->failed_to_evac) {
342 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
344 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
// --- TSO (label elided): the scavenge_TSO call between the two
// eager_promotion assignments is elided here.
351 StgTSO *tso = (StgTSO *)p;
353 gct->eager_promotion = rtsFalse;
355 gct->eager_promotion = saved_eager_promotion;
357 if (gct->failed_to_evac) {
358 tso->flags |= TSO_DIRTY;
360 tso->flags &= ~TSO_DIRTY;
// TSOs always live on the mutable list.
363 gct->failed_to_evac = rtsTrue; // always on the mutable list
// --- STM closures.  In each case `gct->evac_step = saved_evac_step`
// restores the step -- NOTE(review): it is presumably set to 0 in an elided
// line first (to suppress eager promotion for mutable STM objects); confirm
// against the unelided source.
368 case TVAR_WATCH_QUEUE:
370 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
372 evacuate((StgClosure **)&wq->closure);
373 evacuate((StgClosure **)&wq->next_queue_entry);
374 evacuate((StgClosure **)&wq->prev_queue_entry);
375 gct->evac_step = saved_evac_step;
376 gct->failed_to_evac = rtsTrue; // mutable
377 p += sizeofW(StgTVarWatchQueue);
// --- TVAR (label elided):
383 StgTVar *tvar = ((StgTVar *) p);
385 evacuate((StgClosure **)&tvar->current_value);
386 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
387 gct->evac_step = saved_evac_step;
388 gct->failed_to_evac = rtsTrue; // mutable
389 p += sizeofW(StgTVar);
// --- TREC_HEADER (label elided):
395 StgTRecHeader *trec = ((StgTRecHeader *) p);
397 evacuate((StgClosure **)&trec->enclosing_trec);
398 evacuate((StgClosure **)&trec->current_chunk);
399 evacuate((StgClosure **)&trec->invariants_to_check);
400 gct->evac_step = saved_evac_step;
401 gct->failed_to_evac = rtsTrue; // mutable
402 p += sizeofW(StgTRecHeader);
// --- TREC_CHUNK (label elided): walk the occupied transaction-record
// entries, evacuating each entry's tvar / expected / new values.
409 StgTRecChunk *tc = ((StgTRecChunk *) p);
410 TRecEntry *e = &(tc -> entries[0]);
412 evacuate((StgClosure **)&tc->prev_chunk);
413 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
414 evacuate((StgClosure **)&e->tvar);
415 evacuate((StgClosure **)&e->expected_value);
416 evacuate((StgClosure **)&e->new_value);
418 gct->evac_step = saved_evac_step;
419 gct->failed_to_evac = rtsTrue; // mutable
420 p += sizeofW(StgTRecChunk);
424 case ATOMIC_INVARIANT:
426 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
428 evacuate(&invariant->code);
429 evacuate((StgClosure **)&invariant->last_execution);
430 gct->evac_step = saved_evac_step;
431 gct->failed_to_evac = rtsTrue; // mutable
432 p += sizeofW(StgAtomicInvariant);
436 case INVARIANT_CHECK_QUEUE:
438 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
440 evacuate((StgClosure **)&queue->invariant);
441 evacuate((StgClosure **)&queue->my_execution);
442 evacuate((StgClosure **)&queue->next_queue_entry);
443 gct->evac_step = saved_evac_step;
444 gct->failed_to_evac = rtsTrue; // mutable
445 p += sizeofW(StgInvariantCheckQueue);
// default case (label elided): unknown closure type is a fatal RTS error.
450 barf("scavenge: unimplemented/strange closure type %d @ %p",
// Post-switch bookkeeping (comment opener elided):
455 * We need to record the current object on the mutable list if
456 * (a) It is actually mutable, or
457 * (b) It contains pointers to a younger generation.
458 * Case (b) arises if we didn't manage to promote everything that
459 * the current object points to into the current generation.
461 if (gct->failed_to_evac) {
462 gct->failed_to_evac = rtsFalse;
// Generation 0 has no mutable list to record into.
463 if (bd->gen_no > 0) {
464 recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
// End of scan loop (closing braces elided); report how much was scavenged.
473 debugTrace(DEBUG_gc, " scavenged %ld bytes",
474 (unsigned long)((bd->free - scan) * sizeof(W_)));