1 /* -----------------------------------------------------------------------*-c-*-
3 * (c) The GHC Team 1998-2006
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 // This file is #included into Scav.c, twice: firstly with PARALLEL_GC
15 // defined, the second time without.
18 #define scavenge_block(a,b) scavenge_block1(a,b)
19 #define evacuate(a) evacuate1(a)
20 #define recordMutableGen_GC(a,b) recordMutableGen(a,b)
24 #undef recordMutableGen_GC
27 static void scavenge_block (bdescr *bd, StgPtr scan);
29 /* -----------------------------------------------------------------------------
30 Scavenge a block from the given scan pointer up to bd->free.
32 evac_step is set by the caller to be either zero (for a step in a
33 generation < N) or G where G is the generation of the step being scavenged.
36 We sometimes temporarily change evac_step back to zero if we're
37 scavenging a mutable object where eager promotion isn't such a good idea.
39 -------------------------------------------------------------------------- */
42 scavenge_block (bdescr *bd, StgPtr scan)
// NOTE(review): this chunk is a lossy extraction -- each line carries its
// original source line number, and many lines are missing (the "static
// void" header, braces, most case labels, declarations of p/q/ws/info/
// next/i, etc.).  Comments added below describe only what the visible
// code shows; anything inferred from missing context is marked as such.
46 step *saved_evac_step;
47 rtsBool saved_eager_promotion;
// Trace the block being scavenged: start address, generation, step, and
// the scan pointer we begin from.
52 debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
53 bd->start, bd->gen_no, bd->step->no, scan);
// By default, objects reachable from this block are evacuated into the
// block's own step; save the current evac/eager-promotion settings so
// the mutable cases below can restore them.
55 gct->evac_step = bd->step;
56 saved_evac_step = gct->evac_step;
57 saved_eager_promotion = gct->eager_promotion;
58 gct->failed_to_evac = rtsFalse;
// This GC thread's workspace for the step we are scavenging.
60 ws = &gct->steps[bd->step->abs_no];
62 // we might be evacuating into the very object that we're
63 // scavenging, so we have to check the real bd->free pointer each
64 // time around the loop.
65 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
67 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
68 info = get_itbl((StgClosure *)p);
70 ASSERT(gct->thunk_selector_depth == 0);
// --- MVar handling (case labels lost in extraction; presumably
// MVAR_CLEAN / MVAR_DIRTY): evacuate head, tail and value with eager
// promotion off, then mark the MVar dirty iff anything failed to
// evacuate.
78 StgMVar *mvar = ((StgMVar *)p);
79 gct->eager_promotion = rtsFalse;
80 evacuate((StgClosure **)&mvar->head);
81 evacuate((StgClosure **)&mvar->tail);
82 evacuate((StgClosure **)&mvar->value);
83 gct->eager_promotion = saved_eager_promotion;
85 if (gct->failed_to_evac) {
86 mvar->header.info = &stg_MVAR_DIRTY_info;
88 mvar->header.info = &stg_MVAR_CLEAN_info;
90 p += sizeofW(StgMVar);
// --- Arity-specialised FUN/THUNK/CONSTR cases (labels lost in
// extraction).  Each scavenges the SRT where applicable, evacuates a
// fixed number of pointer fields, and advances p past the closure.
95 scavenge_fun_srt(info);
96 evacuate(&((StgClosure *)p)->payload[1]);
97 evacuate(&((StgClosure *)p)->payload[0]);
98 p += sizeofW(StgHeader) + 2;
102 scavenge_thunk_srt(info);
103 evacuate(&((StgThunk *)p)->payload[1]);
104 evacuate(&((StgThunk *)p)->payload[0]);
105 p += sizeofW(StgThunk) + 2;
109 evacuate(&((StgClosure *)p)->payload[1]);
110 evacuate(&((StgClosure *)p)->payload[0]);
111 p += sizeofW(StgHeader) + 2;
115 scavenge_thunk_srt(info);
116 evacuate(&((StgThunk *)p)->payload[0]);
117 p += sizeofW(StgThunk) + 1;
121 scavenge_fun_srt(info);
123 evacuate(&((StgClosure *)p)->payload[0]);
124 p += sizeofW(StgHeader) + 1;
128 scavenge_thunk_srt(info);
129 p += sizeofW(StgThunk) + 1;
133 scavenge_fun_srt(info);
135 p += sizeofW(StgHeader) + 1;
139 scavenge_thunk_srt(info);
140 p += sizeofW(StgThunk) + 2;
144 scavenge_fun_srt(info);
146 p += sizeofW(StgHeader) + 2;
150 scavenge_thunk_srt(info);
151 evacuate(&((StgThunk *)p)->payload[0]);
152 p += sizeofW(StgThunk) + 2;
156 scavenge_fun_srt(info);
158 evacuate(&((StgClosure *)p)->payload[0]);
159 p += sizeofW(StgHeader) + 2;
163 scavenge_fun_srt(info);
// --- Generic THUNK layout: evacuate all pointer words of the payload,
// then step over the non-pointer words.
170 scavenge_thunk_srt(info);
171 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
172 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
173 evacuate((StgClosure **)p);
175 p += info->layout.payload.nptrs;
// --- Generic FUN/CONSTR-style layout: same pattern, plain StgClosure
// payload.
186 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
187 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
188 evacuate((StgClosure **)p);
190 p += info->layout.payload.nptrs;
// --- BCO: evacuate the instruction, literal and pointer arrays.
195 StgBCO *bco = (StgBCO *)p;
196 evacuate((StgClosure **)&bco->instrs);
197 evacuate((StgClosure **)&bco->literals);
198 evacuate((StgClosure **)&bco->ptrs);
// --- Presumably the IND_PERM case (label lost in extraction): in an
// old-generation block the closure is retagged as IND_OLDGEN_PERM, with
// LDV profiling told the old closure died and a new one was created.
204 if (bd->gen_no != 0) {
207 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
208 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
209 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
212 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
214 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
216 // We pretend that p has just been created.
217 LDV_RECORD_CREATE((StgClosure *)p);
// Shared tail with the case above: evacuate the indirectee.
220 case IND_OLDGEN_PERM:
221 evacuate(&((StgInd *)p)->indirectee);
222 p += sizeofW(StgInd);
// --- MUT_VAR (labels lost in extraction): evacuate contents with eager
// promotion off; mark clean/dirty from failed_to_evac.  NOTE(review):
// the assignment of q (presumably q = p) is among the missing lines.
227 gct->eager_promotion = rtsFalse;
228 evacuate(&((StgMutVar *)p)->var);
229 gct->eager_promotion = saved_eager_promotion;
231 if (gct->failed_to_evac) {
232 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
234 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
236 p += sizeofW(StgMutVar);
// --- Blackholes carry nothing to scavenge; just skip the closure.
240 case SE_CAF_BLACKHOLE:
243 p += BLACKHOLE_sizeW();
// --- THUNK_SELECTOR: only the selectee needs evacuating.
248 StgSelector *s = (StgSelector *)p;
249 evacuate(&s->selectee);
250 p += THUNK_SELECTOR_sizeW();
254 // A chunk of stack saved in a heap object
257 StgAP_STACK *ap = (StgAP_STACK *)p;
260 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
261 p = (StgPtr)ap->payload + ap->size;
// --- PAP / AP: delegated to helpers that return the next scan address.
266 p = scavenge_PAP((StgPAP *)p);
270 p = scavenge_AP((StgAP *)p);
// --- ARR_WORDS holds no pointers; skip it wholesale.
275 p += arr_words_sizeW((StgArrWords *)p);
278 case MUT_ARR_PTRS_CLEAN:
279 case MUT_ARR_PTRS_DIRTY:
284 // We don't eagerly promote objects pointed to by a mutable
285 // array, but if we find the array only points to objects in
286 // the same or an older generation, we mark it "clean" and
287 // avoid traversing it during minor GCs.
288 gct->eager_promotion = rtsFalse;
289 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
290 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
291 evacuate((StgClosure **)p);
293 gct->eager_promotion = saved_eager_promotion;
295 if (gct->failed_to_evac) {
296 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
298 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
301 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
305 case MUT_ARR_PTRS_FROZEN:
306 case MUT_ARR_PTRS_FROZEN0:
// Frozen arrays: scavenge every element; only keep on the mutable list
// (as FROZEN0) if something failed to evacuate.
311 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
312 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
313 evacuate((StgClosure **)p);
316 // If we're going to put this object on the mutable list, then
317 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
318 if (gct->failed_to_evac) {
319 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
321 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
// --- TSO (label lost in extraction): the call that scavenges the TSO
// body appears to be among the missing lines; the dirty flag tracks
// failed_to_evac, and TSOs always stay on the mutable list.
328 StgTSO *tso = (StgTSO *)p;
330 gct->eager_promotion = rtsFalse;
332 gct->eager_promotion = saved_eager_promotion;
334 if (gct->failed_to_evac) {
335 tso->flags |= TSO_DIRTY;
337 tso->flags &= ~TSO_DIRTY;
340 gct->failed_to_evac = rtsTrue; // always on the mutable list
// --- STM objects: each case restores evac_step afterwards (the line
// that presumably zeroed it beforehand is missing from this extraction)
// and is always treated as mutable.
345 case TVAR_WATCH_QUEUE:
347 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
349 evacuate((StgClosure **)&wq->closure);
350 evacuate((StgClosure **)&wq->next_queue_entry);
351 evacuate((StgClosure **)&wq->prev_queue_entry);
352 gct->evac_step = saved_evac_step;
353 gct->failed_to_evac = rtsTrue; // mutable
354 p += sizeofW(StgTVarWatchQueue);
360 StgTVar *tvar = ((StgTVar *) p);
362 evacuate((StgClosure **)&tvar->current_value);
363 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
364 gct->evac_step = saved_evac_step;
365 gct->failed_to_evac = rtsTrue; // mutable
366 p += sizeofW(StgTVar);
372 StgTRecHeader *trec = ((StgTRecHeader *) p);
374 evacuate((StgClosure **)&trec->enclosing_trec);
375 evacuate((StgClosure **)&trec->current_chunk);
376 evacuate((StgClosure **)&trec->invariants_to_check);
377 gct->evac_step = saved_evac_step;
378 gct->failed_to_evac = rtsTrue; // mutable
379 p += sizeofW(StgTRecHeader);
// TRec chunk: evacuate the chain pointer, then the tvar / expected /
// new-value fields of each in-use entry.
386 StgTRecChunk *tc = ((StgTRecChunk *) p);
387 TRecEntry *e = &(tc -> entries[0]);
389 evacuate((StgClosure **)&tc->prev_chunk);
390 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
391 evacuate((StgClosure **)&e->tvar);
392 evacuate((StgClosure **)&e->expected_value);
393 evacuate((StgClosure **)&e->new_value);
395 gct->evac_step = saved_evac_step;
396 gct->failed_to_evac = rtsTrue; // mutable
397 p += sizeofW(StgTRecChunk);
401 case ATOMIC_INVARIANT:
403 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
405 evacuate(&invariant->code);
406 evacuate((StgClosure **)&invariant->last_execution);
407 gct->evac_step = saved_evac_step;
408 gct->failed_to_evac = rtsTrue; // mutable
409 p += sizeofW(StgAtomicInvariant);
413 case INVARIANT_CHECK_QUEUE:
415 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
417 evacuate((StgClosure **)&queue->invariant);
418 evacuate((StgClosure **)&queue->my_execution);
419 evacuate((StgClosure **)&queue->next_queue_entry);
420 gct->evac_step = saved_evac_step;
421 gct->failed_to_evac = rtsTrue; // mutable
422 p += sizeofW(StgInvariantCheckQueue);
// --- default: any closure type not handled above is a GC bug.
427 barf("scavenge: unimplemented/strange closure type %d @ %p",
432 * We need to record the current object on the mutable list if
433 * (a) It is actually mutable, or
434 * (b) It contains pointers to a younger generation.
435 * Case (b) arises if we didn't manage to promote everything that
436 * the current object points to into the current generation.
438 if (gct->failed_to_evac) {
439 gct->failed_to_evac = rtsFalse;
440 if (bd->gen_no > 0) {
441 recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
// Report how much of the block was scavenged on this call.
450 debugTrace(DEBUG_gc, " scavenged %ld bytes",
451 (unsigned long)((bd->free - scan) * sizeof(W_)));
454 #undef scavenge_block
456 #undef recordMutableGen_GC