/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector: evacuation functions
 *
 * ---------------------------------------------------------------------------*/
// We have two versions of evacuate(): one for minor GC, and one for
// non-minor, parallel, GC.  This file contains the code for both,
// controlled by the CPP symbol MINOR_GC.
#ifdef MINOR_GC
#define copy(a,b,c,d) copy0(a,b,c,d)
#define copy_tag(a,b,c,d,e) copy_tag0(a,b,c,d,e)
#define copyPart(a,b,c,d,e) copyPart0(a,b,c,d,e)
#define evacuate(a) evacuate0(a)
#else
#undef copy
#undef copy_tag
#undef copyPart
#undef evacuate
#endif
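/* A minimal sketch of the intended usage (assumed, not shown in this file):
 * the GC driver includes this file twice, once with MINOR_GC defined to
 * generate the evacuate0()/copy0() family for minor collections, and once
 * without to generate the parallel-GC versions, e.g.:
 *
 *   #define MINOR_GC
 *   #include "Evac.c-inc"
 *   #undef MINOR_GC
 *   #include "Evac.c-inc"
 *
 * The file name "Evac.c-inc" here is illustrative.
 */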
STATIC_INLINE void
copy_tag(StgClosure **p, StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, tagged_to, from;
    nat i;
    StgWord info;
#if !defined(MINOR_GC) && defined(THREADED_RTS)
    do {
        info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    } while (info == (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_EVACUATED_info) {
        src->header.info = (const StgInfoTable *)info;
        return evacuate(p); // does the failed_to_evac stuff
    }
#else
    info = (W_)src->header.info;
    src->header.info = &stg_EVACUATED_info;
#endif
    to = alloc_for_copy(size,stp);
    tagged_to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
    *p = (StgClosure *)tagged_to;

    TICK_GC_WORDS_COPIED(size);
    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }
    ((StgEvacuated*)from)->evacuee = (StgClosure *)tagged_to;
#if !defined(MINOR_GC) && defined(THREADED_RTS)
    // make sure the copied payload is visible before we overwrite the
    // info pointer and unlock the closure
    write_barrier();
    ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
#endif
#ifdef PROFILING
    // We store the size of the just-evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
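/* For example, the BLACKHOLE case in evacuate() below calls
 *
 *   copyPart(p, q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 *
 * which copies just the header and reserves the rest of the BLACKHOLE's
 * space in to-space without copying its payload.
 */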
STATIC_INLINE void
copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
    StgPtr to, from;
    nat i;
    StgWord info;
#if !defined(MINOR_GC) && defined(THREADED_RTS)
    do {
        info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    } while (info == (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_EVACUATED_info) {
        src->header.info = (const StgInfoTable *)info;
        return evacuate(p); // does the failed_to_evac stuff
    }
#else
    info = (W_)src->header.info;
    src->header.info = &stg_EVACUATED_info;
#endif
    to = alloc_for_copy(size_to_reserve, stp);
    *p = (StgClosure *)to;

    TICK_GC_WORDS_COPIED(size_to_copy);
    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

    ((StgEvacuated*)from)->evacuee = (StgClosure *)to;
#if !defined(MINOR_GC) && defined(THREADED_RTS)
    write_barrier();
    ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
#endif
#ifdef PROFILING
    // We store the size of the just-evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
#endif
}
/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE void
copy(StgClosure **p, StgClosure *src, nat size, step *stp)
{
    copy_tag(p,src,size,stp,0);
}
/* ----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller of evacuate() specifies a desired generation in the
   gct->evac_step thread-local variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N:

   if  M >= gct->evac_step
           if  M > N     do nothing
           else          evac to step->to

   if  M < gct->evac_step      evac to gct->evac_step, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= gct->evac_step     do nothing
   if  M <  gct->evac_step     set gct->failed_to_evac flag to indicate that we
                               didn't manage to evacuate this object into
                               gct->evac_step.
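
   As a compact sketch of the policy above (illustrative pseudocode only,
   not the control flow used below):

       if (M >= gct->evac_step) {
           if (M > N) return;               // too old to be collected now
           else       copy to bd->step->to; // evacuate to the step's to-space
       } else {
           copy to gct->evac_step, step 0;  // promote to the requested step
       }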

   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (e.g. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   ------------------------------------------------------------------------- */
REGPARM1 static void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  step *stp;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);
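  /* For instance, with dynamic pointer tagging a pointer to an evaluated
   * constructor carries a small tag in the low bits of the address
   * (illustrative): GET_CLOSURE_TAG extracts those bits, and
   * UNTAG_CLOSURE masks them off to recover the real closure address.
   */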
  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
  if (!HEAP_ALLOCED(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {
      case THUNK_STATIC:
          if (info->srt_bitmap != 0 &&
              *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
                  *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;
      case FUN_STATIC:
          if (info->srt_bitmap != 0 &&
              *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*FUN_STATIC_LINK((StgClosure *)q) == NULL) {
                  *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;
      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (((StgIndStatic *)q)->saved_info == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
                  *IND_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;
      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              // re-test, after acquiring lock
              if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
                  *STATIC_LINK(info,(StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          /* I am assuming that static_objects pointers are not
           * written to other objects, and thus, no need to retag. */
          return;
      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }
  bd = Bdescr((P_)q);

  if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in gct->evac_step or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->step < gct->evac_step) {
          // nope
          gct->failed_to_evac = rtsTrue;
          TICK_GC_FAILED_PROMOTION();
      }
      return;
  }
  if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {

      /* pointer into to-space: just return it.  This normally
       * shouldn't happen, but allowing it makes certain things
       * slightly easier (e.g. the mutable list can contain the same
       * object twice, for example).
       */
      if (bd->flags & BF_EVACUATED) {
          if (bd->step < gct->evac_step) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }
      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          info = get_itbl(q);
          if (info->type == TSO &&
              ((StgTSO *)q)->what_next == ThreadRelocated) {
              q = (StgClosure *)((StgTSO *)q)->link;
              *p = q;
              goto loop;
          }
          evacuate_large((P_)q);
          return;
      }
      /* If the object is in a step that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (bd->flags & BF_COMPACTED) {
          if (!is_marked((P_)q,bd)) {
              mark((P_)q,bd);
              if (mark_stack_full()) {
                  mark_stack_overflowed = rtsTrue;
                  reset_mark_stack();
              }
              push_mark_stack((P_)q);
          }
          return;
      }
  }

  stp = bd->step->to;

  info = get_itbl(q);

  switch (info->type) {
  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR:
      copy(p,q,sizeW_fromITBL(info),stp);
      return;
  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                          );
      }
      else if (q->header.info == Izh_con_info &&
               (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                          );
      }
      else {
          copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
      }
      return;
  }
  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
      return;
  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,q,sizeofW(StgThunk)+1,stp);
      return;
  case THUNK_1_1:
  case THUNK_2:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
          bd->step->no != 0 &&
          bd->step->no == generations[bd->gen_no].n_steps-1) {
          stp = bd->step;
      }
#endif
      copy(p,q,sizeofW(StgThunk)+2,stp);
      return;
  case FUN_1_1:
  case FUN_2:
  case FUN_0_2:
      copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
      return;

  case CONSTR_1_1:
  case CONSTR_2:
  case CONSTR_0_2:
      copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
      return;
  case THUNK:
      copy(p,q,thunk_sizeW_fromITBL(info),stp);
      return;
  case FUN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case STABLE_NAME:
  case CONSTR:
      copy_tag(p,q,sizeW_fromITBL(info),stp,tag);
      return;
  case BCO:
      copy(p,q,bco_sizeW((StgBCO *)q),stp);
      return;
  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
      return;
  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;
  case IND:
  case IND_OLDGEN:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      *p = q;
      goto loop;
  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);
  case PAP:
      copy(p,q,pap_sizeW((StgPAP*)q),stp);
      return;
  case AP:
      copy(p,q,ap_sizeW((StgAP*)q),stp);
      return;
  case AP_STACK:
      copy(p,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
      return;
  case EVACUATED:
      /* Already evacuated, just return the forwarding address.
       * HOWEVER: if the requested destination generation (gct->evac_step) is
       * older than the actual generation (because the object was
       * already evacuated to a younger generation) then we have to
       * set the gct->failed_to_evac flag to indicate that we couldn't
       * manage to promote the object to the desired generation.
       *
       * Optimisation: the check is fairly expensive, but we can often
       * shortcut it if either the required generation is 0, or the
       * current object (the EVACUATED) is in a high enough generation.
       * We know that an EVACUATED always points to an object in the
       * same or an older generation.  stp is the lowest step that the
       * current object would be evacuated to, so we only do the full
       * check if stp is too low.
       */
      {
          StgClosure *e = ((StgEvacuated*)q)->evacuee;
          *p = e;
          if (stp < gct->evac_step) {  // optimisation
              if (Bdescr((P_)e)->step < gct->evac_step) {
                  gct->failed_to_evac = rtsTrue;
                  TICK_GC_FAILED_PROMOTION();
              }
          }
          return;
      }
  case ARR_WORDS:
      // just copy the block
      copy(p,q,arr_words_sizeW((StgArrWords *)q),stp);
      return;
  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
      return;
  case TSO:
  {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
          q = (StgClosure *)tso->link;
          *p = q;
          goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
          StgTSO *new_tso;
          StgPtr r, s;

          copyPart(p,(StgClosure *)tso, tso_sizeW(tso), sizeofW(StgTSO), stp);
          new_tso = (StgTSO *)*p;
          move_TSO(tso, new_tso);
          for (r = tso->sp, s = new_tso->sp;
               r < tso->stack+tso->stack_size;) {
              *s++ = *r++;
          }
          return;
      }
  }
  case TREC_HEADER:
      copy(p,q,sizeofW(StgTRecHeader),stp);
      return;
  case TVAR_WATCH_QUEUE:
      copy(p,q,sizeofW(StgTVarWatchQueue),stp);
      return;
  case TVAR:
      copy(p,q,sizeofW(StgTVar),stp);
      return;
  case TREC_CHUNK:
      copy(p,q,sizeofW(StgTRecChunk),stp);
      return;
  case ATOMIC_INVARIANT:
      copy(p,q,sizeofW(StgAtomicInvariant),stp);
      return;
  case INVARIANT_CHECK_QUEUE:
      copy(p,q,sizeofW(StgInvariantCheckQueue),stp);
      return;
  default:
      barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}