/* -----------------------------------------------------------------------*-c-*-
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector: evacuation functions
 *
 * ---------------------------------------------------------------------------*/
// We have two versions of evacuate(): one for minor GC, and one for
// non-minor, parallel GC.  This file contains the code for both,
// controlled by the CPP symbol MINOR_GC.
#ifdef MINOR_GC
#define copy(a,b,c,d) copy0(a,b,c,d)
#define copy_tag(a,b,c,d,e) copy_tag0(a,b,c,d,e)
#define copyPart(a,b,c,d,e) copyPart0(a,b,c,d,e)
#define evacuate(a) evacuate0(a)
#endif
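
// When MINOR_GC is defined, each function defined here is renamed with a
// "0" suffix (evacuate() becomes evacuate0(), and so on), so that the two
// compiled versions of this code can coexist in the same RTS.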

STATIC_INLINE void
copy_tag(StgClosure **p, StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, tagged_to, from;
    nat i;
    StgWord info;
#if !defined(MINOR_GC) && defined(THREADED_RTS)
spin:
    info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_WHITEHOLE_info) {
        // another thread is already evacuating this closure: wait for it
        goto spin;
    }
    if (info == (W_)&stg_EVACUATED_info || info == (W_)&stg_IND_info) {
        // NB. a closure might be updated with an IND by
        // unchain_selector_thunks(), hence the test above.
        src->header.info = (const StgInfoTable *)info;
        return evacuate(p); // does the failed_to_evac stuff
    }
#else
    ASSERT(n_gc_threads == 1);
    info = (W_)src->header.info;
    src->header.info = &stg_EVACUATED_info;
#endif
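
    // At this point we "own" src: info holds its original header word, and
    // in the threaded case any other GC thread trying to evacuate src will
    // spin on the WHITEHOLE until we publish the forwarding pointer below.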
    to = alloc_for_copy(size,stp);
    tagged_to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
    *p = (StgClosure *)tagged_to;

    TICK_GC_WORDS_COPIED(size);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }
//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }
    ((StgEvacuated*)from)->evacuee = (StgClosure *)tagged_to;
#if !defined(MINOR_GC) && defined(THREADED_RTS)
    write_barrier();
    ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
#endif
#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
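/* For example, evacuate() below copies a BLACKHOLE with
 *     copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
 * reserving the closure's full size in to-space but copying only its
 * header.
 */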
static void
copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
    StgPtr to, from;
    nat i;
    StgWord info;
#if !defined(MINOR_GC) && defined(THREADED_RTS)
spin:
    info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_WHITEHOLE_info) {
        goto spin;
    }
    if (info == (W_)&stg_EVACUATED_info) {
        src->header.info = (const StgInfoTable *)info;
        evacuate(p); // does the failed_to_evac stuff
        return;
    }
#else
    info = (W_)src->header.info;
    src->header.info = &stg_EVACUATED_info;
#endif
    to = alloc_for_copy(size_to_reserve, stp);
    *p = (StgClosure *)to;

    TICK_GC_WORDS_COPIED(size_to_copy);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }
    ((StgEvacuated*)from)->evacuee = (StgClosure *)to;
#if !defined(MINOR_GC) && defined(THREADED_RTS)
    write_barrier();
    ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
#endif
#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
#endif
}

/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE void
copy(StgClosure **p, StgClosure *src, nat size, step *stp)
{
    copy_tag(p,src,size,stp,0);
}
/* ----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller of evacuate() specifies a desired generation in the
   gct->evac_step thread-local variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N:

   if  M >= gct->evac_step
           if  M > N     do nothing
           else          evac to step->to

   if  M < gct->evac_step      evac to gct->evac_step, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= gct->evac_step     do nothing
   if  M <  gct->evac_step     set gct->failed_to_evac flag to indicate that we
                               didn't manage to evacuate this object into gct->evac_step.
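
   For example: suppose we are collecting generations 0 and 1 (N == 1) and
   gct->evac_step is generation 1, step 0.  A live object in generation 0
   (M < gct->evac_step) is promoted to generation 1, step 0; an object
   already in generation 1 (M >= gct->evac_step and M <= N) is copied to
   its step's to-space (step->to); and an object in generation 2 (M > N)
   is left where it is.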

   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   ------------------------------------------------------------------------- */

REGPARM1 static void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  step *stp;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
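  // (A tagged pointer carries information in its low bits, e.g. the tag of
  // a constructor, so the bits must be masked off before the closure is
  // inspected, and reapplied to the forwarding pointer we store back.)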
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

  if (!HEAP_ALLOCED(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0 &&
              *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
                  *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0 &&
              *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*FUN_STATIC_LINK((StgClosure *)q) == NULL) {
                  *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (((StgIndStatic *)q)->saved_info == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
                  *IND_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;

      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              // re-test, after acquiring lock
              if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
                  *STATIC_LINK(info,(StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          /* I am assuming that static_objects pointers are not
           * written to other objects, and thus, no need to retag. */
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in gct->evac_step or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->step < gct->evac_step) {
          // nope
          gct->failed_to_evac = rtsTrue;
          TICK_GC_FAILED_PROMOTION();
      }
      return;
  }

  if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {

      /* pointer into to-space: just return it.  This normally
       * shouldn't happen, but allowing it makes certain things
       * slightly easier (eg. the mutable list can contain the same
       * object twice).
       */
      if (bd->flags & BF_EVACUATED) {
          if (bd->step < gct->evac_step) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          info = get_itbl(q);
          if (info->type == TSO &&
              ((StgTSO *)q)->what_next == ThreadRelocated) {
              q = (StgClosure *)((StgTSO *)q)->link;
              *p = q;
              goto loop;
          }
          evacuate_large((P_)q);
          return;
      }

      /* If the object is in a step that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (bd->flags & BF_COMPACTED) {
          if (!is_marked((P_)q,bd)) {
              mark((P_)q,bd);
              if (mark_stack_full()) {
                  mark_stack_overflowed = rtsTrue;
                  reset_mark_stack();
              }
              push_mark_stack((P_)q);
          }
          return;
      }
  }

  stp = bd->step->to;

  info = get_itbl(q);

  switch (info->type) {

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR:
      copy(p,q,sizeW_fromITBL(info),stp);
      return;
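
  // Optimisation: for small Char and Int constructors, instead of copying
  // the closure we return a pointer to the RTS's statically allocated
  // CHARLIKE/INTLIKE closure, so small characters and integers are shared
  // and never copied.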
  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                          );
      }
      else if (q->header.info == Izh_con_info &&
               (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                          );
      }
      else {
          copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
      }
      return;
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,q,sizeofW(StgThunk)+1,stp);
      return;

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
          bd->step->no != 0 &&
          bd->step->no == generations[bd->gen_no].n_steps-1) {
          stp = bd->step;
      }
#endif
      copy(p,q,sizeofW(StgThunk)+2,stp);
      return;

  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2_0:
      copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
      return;

  case CONSTR_0_2:
      copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
      return;

  case THUNK:
      copy(p,q,thunk_sizeW_fromITBL(info),stp);
      return;

  case FUN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case STABLE_NAME:
  case CONSTR:
      copy_tag(p,q,sizeW_fromITBL(info),stp,tag);
      return;

  case BCO:
      copy(p,q,bco_sizeW((StgBCO *)q),stp);
      return;

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
      return;
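
  // Selector thunks are not copied like ordinary thunks: eval_thunk_selector()
  // tries to perform the selection right away, which avoids the space leak
  // of a thunk such as (fst x) keeping the whole of x alive.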
  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
  case IND_OLDGEN:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      *p = q;
      goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);

  case PAP:
      copy(p,q,pap_sizeW((StgPAP*)q),stp);
      return;

  case AP:
      copy(p,q,ap_sizeW((StgAP*)q),stp);
      return;

  case AP_STACK:
      copy(p,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
      return;

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (gct->evac_step) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the gct->failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     *
     * Optimisation: the check is fairly expensive, but we can often
     * shortcut it if either the required generation is 0, or the
     * current object (the EVACUATED) is in a high enough generation.
     * We know that an EVACUATED always points to an object in the
     * same or an older generation.  stp is the lowest step that the
     * current object would be evacuated to, so we only do the full
     * check if stp is too low.
     */
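    /* Concretely: the evacuee's step is at least stp, so if
     * stp >= gct->evac_step the check must succeed and can be skipped.
     */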
    {
        StgClosure *e = ((StgEvacuated*)q)->evacuee;
        *p = e;
        if (stp < gct->evac_step) {  // optimisation
            if (Bdescr((P_)e)->step < gct->evac_step) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
        }
        return;
    }

  case ARR_WORDS:
      // just copy the block
      copy(p,q,arr_words_sizeW((StgArrWords *)q),stp);
      return;

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
      return;

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
          q = (StgClosure *)tso->link;
          *p = q;
          goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
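      /* Note: copyPart() reserves room for the whole TSO (tso_sizeW(tso)
       * words) but copies only the TSO header (sizeofW(StgTSO) words); the
       * live part of the stack, from tso->sp upwards, is copied by hand
       * below, so the dead space below sp is never copied.
       */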
      {
          StgTSO *new_tso;
          StgPtr r, s;

          copyPart(p,(StgClosure *)tso, tso_sizeW(tso), sizeofW(StgTSO), stp);
          new_tso = (StgTSO *)*p;
          move_TSO(tso, new_tso);
          for (r = tso->sp, s = new_tso->sp;
               r < tso->stack+tso->stack_size;) {
              *s++ = *r++;
          }
          return;
      }
    }

  case TREC_HEADER:
      copy(p,q,sizeofW(StgTRecHeader),stp);
      return;

  case TVAR_WATCH_QUEUE:
      copy(p,q,sizeofW(StgTVarWatchQueue),stp);
      return;

  case TVAR:
      copy(p,q,sizeofW(StgTVar),stp);
      return;

  case TREC_CHUNK:
      copy(p,q,sizeofW(StgTRecChunk),stp);
      return;

  case ATOMIC_INVARIANT:
      copy(p,q,sizeofW(StgAtomicInvariant),stp);
      return;

  case INVARIANT_CHECK_QUEUE:
      copy(p,q,sizeofW(StgInvariantCheckQueue),stp);
      return;

  default:
      barf("evacuate: strange closure type %d", (int)(info->type));
  }
}