/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector: evacuation functions
 *
 * ---------------------------------------------------------------------------*/
// We have two versions of evacuate(): one for minor GC, and one for
// non-minor, parallel, GC.  This file contains the code for both,
// controlled by the CPP symbol MINOR_GC.  (An illustrative sketch of
// the intended double inclusion follows the #defines below.)
#define copy(a,b,c,d)       copy0(a,b,c,d)
#define copy_tag(a,b,c,d,e) copy_tag0(a,b,c,d,e)
#define copyPart(a,b,c,d,e) copyPart0(a,b,c,d,e)
#define evacuate(a)         evacuate0(a)
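
/* Illustrative sketch (editorial, not from the original source): the two
 * versions are generated by textually including this file twice from the
 * GC proper, along the lines of
 *
 *     #define MINOR_GC
 *     #include "Evac.c-inc"   // minor-GC versions: copy0(), evacuate0(), ...
 *     #undef  MINOR_GC
 *     #include "Evac.c-inc"   // parallel, non-minor versions
 *
 * The name of the included file is an assumption; the #defines above give
 * the minor-GC copies distinct '0'-suffixed linker names so that both
 * versions can coexist in the RTS.
 */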

STATIC_INLINE void
copy_tag(StgClosure **p, StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, tagged_to, from;
    nat i;
    StgWord info;

#if !defined(MINOR_GC) && defined(THREADED_RTS)
    // claim the closure: atomically swap in stg_WHITEHOLE_info, and
    // spin while another thread holds the closure as a WHITEHOLE.
    do {
        info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    } while (info == (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_EVACUATED_info) {
        // another thread evacuated this closure first
        src->header.info = (const StgInfoTable *)info;
        return evacuate(p); // does the failed_to_evac stuff
    }
#else
    ASSERT(n_gc_threads == 1);
    info = (W_)src->header.info;
    src->header.info = &stg_EVACUATED_info;
#endif

    to = alloc_for_copy(size,stp);
    tagged_to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
    *p = (StgClosure *)tagged_to;

    TICK_GC_WORDS_COPIED(size);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

    ((StgEvacuated*)from)->evacuee = (StgClosure *)tagged_to;
#if !defined(MINOR_GC) && defined(THREADED_RTS)
    write_barrier(); // publish the evacuee before flipping to EVACUATED
    ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.  (See the illustrative
 * example after this function.)
 */
STATIC_INLINE void
copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
    StgPtr to, from;
    nat i;
    StgWord info;

#if !defined(MINOR_GC) && defined(THREADED_RTS)
    do {
        info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    } while (info == (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_EVACUATED_info) {
        src->header.info = (const StgInfoTable *)info;
        return evacuate(p); // does the failed_to_evac stuff
    }
#else
    info = (W_)src->header.info;
    src->header.info = &stg_EVACUATED_info;
#endif

    to = alloc_for_copy(size_to_reserve, stp);
    *p = (StgClosure *)to;

    TICK_GC_WORDS_COPIED(size_to_copy);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

    ((StgEvacuated*)from)->evacuee = (StgClosure *)to;
#if !defined(MINOR_GC) && defined(THREADED_RTS)
    write_barrier();
    ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
#endif
}
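
/* Example (editorial): evacuating a BLACKHOLE.  Only the header needs to be
 * copied, but the closure's full size must be reserved in to-space so that
 * it can later be updated in place.  The call in evacuate() below is
 *
 *     copyPart(p, q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 *
 * i.e. reserve BLACKHOLE_sizeW() words but copy only the StgHeader.
 */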

/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE void
copy(StgClosure **p, StgClosure *src, nat size, step *stp)
{
    copy_tag(p,src,size,stp,0);
}

/* ----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   gct->evac_step thread-local variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= gct->evac_step
           if  M > N     do nothing
           else          evac to step->to

   if  M < gct->evac_step      evac to gct->evac_step, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= gct->evac_step     do nothing
   if  M <  gct->evac_step     set gct->failed_to_evac flag to indicate that we
                               didn't manage to evacuate this object into
                               gct->evac_step.

   (An illustrative sketch of these rules follows this comment.)

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   ------------------------------------------------------------------------- */
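
/* Illustrative sketch of the rules above (editorial; the real logic is
 * inlined in evacuate() below, and where_is() is a hypothetical helper
 * standing for the Bdescr()/step lookup):
 *
 *     step *M = where_is(q);              // step the object resides in
 *     if (M >= gct->evac_step) {
 *         if (M->gen_no > N) return;      // older than the collected gens
 *         copy(q, ..., M->to);            // normal aging within a step
 *     } else {
 *         copy(q, ..., gct->evac_step);   // promote to the requested step
 *     }
 */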

REGPARM1 static void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  step *stp;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);
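  // Example: with pointer tagging, a pointer to the second constructor of a
  // small data type carries tag 2 in its low bits (2 tag bits on 32-bit
  // platforms, 3 on 64-bit), so we strip the tag before following q and
  // re-attach it to the forwarding address when we are done.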

  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

  if (!HEAP_ALLOCED(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0 &&
              *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
                  *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0 &&
              *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*FUN_STATIC_LINK((StgClosure *)q) == NULL) {
                  *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (((StgIndStatic *)q)->saved_info == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
                  *IND_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;

      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              // re-test, after acquiring lock
              if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
                  *STATIC_LINK(info,(StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          /* I am assuming that static_objects pointers are not
           * written to other objects, and thus, no need to retag. */
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in gct->evac_step or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->step < gct->evac_step) {
          gct->failed_to_evac = rtsTrue;
          TICK_GC_FAILED_PROMOTION();
      }
      return;
  }

  if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {

      /* pointer into to-space: just return it.  This normally
       * shouldn't happen, but allowing it makes certain things
       * slightly easier (eg. the mutable list can contain the same
       * object twice, for example).
       */
      if (bd->flags & BF_EVACUATED) {
          if (bd->step < gct->evac_step) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          info = get_itbl(q);
          if (info->type == TSO &&
              ((StgTSO *)q)->what_next == ThreadRelocated) {
              /* the TSO has been relocated: follow its link field */
              q = (StgClosure *)((StgTSO *)q)->link;
              *p = q;
              goto loop;
          }
          evacuate_large((P_)q);
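          // Note (editorial): evacuate_large() does not copy the object;
          // it unlinks the object's block descriptor from its current
          // step's large-object list and links it onto the destination
          // step's list, marking the block as evacuated.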
          return;
      }

      /* If the object is in a step that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (bd->flags & BF_COMPACTED) {
          if (!is_marked((P_)q,bd)) {
              mark((P_)q,bd);
              if (mark_stack_full()) {
                  mark_stack_overflowed = rtsTrue;
                  reset_mark_stack();
              }
              push_mark_stack((P_)q);
          }
          return;
      }
  }

  stp = bd->step->to;

  info = get_itbl(q);

  switch (info->type) {

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR:
      copy(p,q,sizeW_fromITBL(info),stp);
      return;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                          );
      }
      else if (q->header.info == Izh_con_info &&
               (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                          );
      }
      else {
          copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
      }
      return;
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,q,sizeofW(StgThunk)+1,stp);
      return;

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
          bd->step->no != 0 &&
          bd->step->no == generations[bd->gen_no].n_steps-1) {
          stp = bd->step;
      }
#endif
      copy(p,q,sizeofW(StgThunk)+2,stp);
      return;

  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2_0:
      copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
      return;

  case CONSTR_0_2:
      copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
      return;

  case THUNK:
      copy(p,q,thunk_sizeW_fromITBL(info),stp);
      return;

  case FUN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case STABLE_NAME:
  case CONSTR:
      copy_tag(p,q,sizeW_fromITBL(info),stp,tag);
      return;

  case BCO:
      copy(p,q,bco_sizeW((StgBCO *)q),stp);
      return;

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
      return;

  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
  case IND_OLDGEN:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      *p = q;
      goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);

  case PAP:
      copy(p,q,pap_sizeW((StgPAP*)q),stp);
      return;

  case AP:
      copy(p,q,ap_sizeW((StgAP*)q),stp);
      return;

  case AP_STACK:
      copy(p,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
      return;

  case EVACUATED:
      /* Already evacuated, just return the forwarding address.
       * HOWEVER: if the requested destination generation (gct->evac_step) is
       * older than the actual generation (because the object was
       * already evacuated to a younger generation) then we have to
       * set the gct->failed_to_evac flag to indicate that we couldn't
       * manage to promote the object to the desired generation.
       *
       * Optimisation: the check is fairly expensive, but we can often
       * shortcut it if either the required generation is 0, or the
       * current object (the EVACUATED) is in a high enough generation.
       * We know that an EVACUATED always points to an object in the
       * same or an older generation.  stp is the lowest step that the
       * current object would be evacuated to, so we only do the full
       * check if stp is too low.
       */
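      /* Worked example (editorial): if gct->evac_step is the first step of
       * generation 0, no step compares below it, so "stp < gct->evac_step"
       * is false and we return the forwarding address without touching
       * Bdescr((P_)e).  The full check runs only when stp, the step this
       * object would have been evacuated to, is younger than the step the
       * caller asked for.
       */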
  {
      StgClosure *e = ((StgEvacuated*)q)->evacuee;
      *p = e;
      if (stp < gct->evac_step) {  // optimisation
          if (Bdescr((P_)e)->step < gct->evac_step) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return;
  }

  case ARR_WORDS:
      // just copy the block
      copy(p,q,arr_words_sizeW((StgArrWords *)q),stp);
      return;

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
      return;

  case TSO:
  {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
          q = (StgClosure *)tso->link;
          *p = q;
          goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
          StgTSO *new_tso;
          StgPtr r, s;

          copyPart(p,(StgClosure *)tso, tso_sizeW(tso), sizeofW(StgTSO), stp);
          new_tso = (StgTSO *)*p;
          move_TSO(tso, new_tso);
          for (r = tso->sp, s = new_tso->sp;
               r < tso->stack+tso->stack_size;) {
              *s++ = *r++;
          }
          return;
      }
  }

  case TREC_HEADER:
      copy(p,q,sizeofW(StgTRecHeader),stp);
      return;

  case TVAR_WATCH_QUEUE:
      copy(p,q,sizeofW(StgTVarWatchQueue),stp);
      return;

  case TVAR:
      copy(p,q,sizeofW(StgTVar),stp);
      return;

  case TREC_CHUNK:
      copy(p,q,sizeofW(StgTRecChunk),stp);
      return;

  case ATOMIC_INVARIANT:
      copy(p,q,sizeofW(StgAtomicInvariant),stp);
      return;

  case INVARIANT_CHECK_QUEUE:
      copy(p,q,sizeofW(StgInvariantCheckQueue),stp);
      return;

  default:
      barf("evacuate: strange closure type %d", (int)(info->type));
  }
}