/* -----------------------------------------------------------------------*-c-*-
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: evacuation functions
 *
 * ---------------------------------------------------------------------------*/
// We have two versions of evacuate(): one for minor GC, and one for
// non-minor, parallel, GC. This file contains the code for both,
// controlled by the CPP symbol MINOR_GC.
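//
// A sketch of the intended usage (an assumed arrangement inferred from the
// comment above, not quoted from the build system): the enclosing .c file
// includes this fragment twice, once per configuration, e.g.:
//
//   #define MINOR_GC        // sequential minor-GC version (evacuate1() etc.)
//   #include "Evac.c-inc"
//   #undef  MINOR_GC        // parallel version (evacuate() etc.)
//   #include "Evac.c-inc"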
#if defined(THREADED_RTS)
# if !defined(PARALLEL_GC)
#  define copy(a,b,c,d,e)               copy1(a,b,c,d,e)
#  define copy_tag(a,b,c,d,e,f)         copy_tag1(a,b,c,d,e,f)
#  define copy_tag_nolock(a,b,c,d,e,f)  copy_tag1(a,b,c,d,e,f)
#  define copyPart(a,b,c,d,e)           copyPart1(a,b,c,d,e)
#  define evacuate(a)                   evacuate1(a)
# endif
#else
# define copy_tag_nolock(a,b,c,d,e,f)   copy_tag(a,b,c,d,e,f)
#endif
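// With these aliases in place: a non-threaded RTS needs no locking, so
// copy_tag_nolock() is just copy_tag(); a threaded RTS doing a sequential
// (minor) GC routes everything to the *1 entry points; and only a fully
// parallel GC compiles the distinct locked/unlocked variants defined below.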
STATIC_INLINE void
copy_tag(StgClosure **p, const StgInfoTable *info,
         StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,stp);

    TICK_GC_WORDS_COPIED(size);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#if defined(PARALLEL_GC)
    {
        const StgInfoTable *new_info;
        // Atomically claim the object by overwriting its info pointer with
        // a forwarding pointer; if another GC thread won the race, retry
        // via evacuate(), which will find the forwarding pointer.
        new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
        if (new_info != info) {
            return evacuate(p); // does the failed_to_evac stuff
        } else {
            *p = TAG_CLOSURE(tag,(StgClosure*)to);
        }
    }
#else
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
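// A note on the forwarding-pointer encoding (the macros are defined in the
// RTS headers; the following is a sketch of the idea, not their verbatim
// definitions): the to-space address is stored over the info pointer with
// its low bit set, so a single bit test distinguishes "already copied"
// from a genuine info table:
//
//   MK_FORWARDING_PTR(p)   is roughly  ((StgWord)(p) | 1)
//   IS_FORWARDING_PTR(p)   is roughly  (((StgWord)(p) & 1) != 0)
//   UN_FORWARDING_PTR(p)   is roughly  ((StgWord)(p) & ~1)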
#if defined(PARALLEL_GC)
STATIC_INLINE void
copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
                StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,stp);
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);

    TICK_GC_WORDS_COPIED(size);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
#endif
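// Why is no CAS needed here? This variant is only used for closure types
// where it is harmless for two GC threads to race and both copy the same
// object: since such closures are immutable, the mutator cannot tell the
// two copies apart, and the loser's copy merely wastes a little space.
// Anything updatable must go through the CAS in copy_tag() above.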
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static void
copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
    StgPtr to, from;
    nat i;
    StgWord info;

#if defined(PARALLEL_GC)
spin:
    info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_WHITEHOLE_info) {
#ifdef PROF_SPIN
        whitehole_spin++;
#endif
        goto spin;
    }
    if (IS_FORWARDING_PTR(info)) {
        src->header.info = (const StgInfoTable *)info;
        evacuate(p); // does the failed_to_evac stuff
        return;
    }
#else
    info = (W_)src->header.info;
#endif

    to = alloc_for_copy(size_to_reserve, stp);
    *p = (StgClosure *)to;

    TICK_GC_WORDS_COPIED(size_to_copy);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

#if defined(PARALLEL_GC)
    write_barrier();
#endif
    src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
#endif
}
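// For example, the BLACKHOLE cases in evacuate() below call
//     copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
// copying just the header word while reserving the blackhole's full size
// in to-space, so the closure can still be updated in place later.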
/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE void
copy(StgClosure **p, const StgInfoTable *info,
     StgClosure *src, nat size, step *stp)
{
    copy_tag(p,info,src,size,stp,0);
}
/* ----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   gct->evac_step thread-local variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= gct->evac_step
           if  M > N     do nothing
           else          evac to step->to

   if  M < gct->evac_step      evac to gct->evac_step, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= gct->evac_step     do nothing
   if  M < gct->evac_step      set gct->failed_to_evac flag to indicate that we
                               didn't manage to evacuate this object into gct->evac_step.

   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   ------------------------------------------------------------------------- */
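// An illustrative call pattern (a sketch of how the scavenging code drives
// this function, not code from this file): the caller passes the address of
// a field holding a closure pointer, and evacuate() overwrites that field
// in place with the new, possibly tagged, to-space address:
//
//     evacuate(&((StgMutArrPtrs *)p)->payload[i]);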
REGPARM1 static void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  step *stp;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing.
   * (The tag lives in the low bits of the pointer; see GET_CLOSURE_TAG
   * and UNTAG_CLOSURE.) */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

  if (!HEAP_ALLOCED(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {
      case THUNK_STATIC:
          if (info->srt_bitmap != 0) {
              if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
                  *THUNK_STATIC_LINK((StgClosure *)q) = gct->static_objects;
                  gct->static_objects = (StgClosure *)q;
#else
                  StgPtr link;
                  link = (StgPtr)cas((StgPtr)THUNK_STATIC_LINK((StgClosure *)q),
                                     (StgWord)NULL,
                                     (StgWord)gct->static_objects);
                  if (link == NULL) {
                      gct->static_objects = (StgClosure *)q;
                  }
#endif
              }
          }
          return;
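          // In the threaded RTS, the cas() above atomically claims the
          // static object by writing to its static-link field; only the
          // winning thread (the one that saw NULL) pushes the object onto
          // its static_objects list, so each static object is scavenged
          // at most once even when several GC threads reach it.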
      case FUN_STATIC:
          if (info->srt_bitmap != 0 &&
              *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
              *FUN_STATIC_LINK((StgClosure *)q) = gct->static_objects;
              gct->static_objects = (StgClosure *)q;
#else
              StgPtr link;
              link = (StgPtr)cas((StgPtr)FUN_STATIC_LINK((StgClosure *)q),
                                 (StgWord)NULL,
                                 (StgWord)gct->static_objects);
              if (link == NULL) {
                  gct->static_objects = (StgClosure *)q;
              }
#endif
          }
          return;
      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (((StgIndStatic *)q)->saved_info == NULL) {
              if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
                  *IND_STATIC_LINK((StgClosure *)q) = gct->static_objects;
                  gct->static_objects = (StgClosure *)q;
#else
                  StgPtr link;
                  link = (StgPtr)cas((StgPtr)IND_STATIC_LINK((StgClosure *)q),
                                     (StgWord)NULL,
                                     (StgWord)gct->static_objects);
                  if (link == NULL) {
                      gct->static_objects = (StgClosure *)q;
                  }
#endif
              }
          }
          return;
      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
              *STATIC_LINK(info,(StgClosure *)q) = gct->static_objects;
              gct->static_objects = (StgClosure *)q;
#else
              StgPtr link;
              link = (StgPtr)cas((StgPtr)STATIC_LINK(info,(StgClosure *)q),
                                 (StgWord)NULL,
                                 (StgWord)gct->static_objects);
              if (link == NULL) {
                  gct->static_objects = (StgClosure *)q;
              }
#endif
          }
          /* I am assuming that static_objects pointers are not
           * written to other objects, and thus, no need to retag. */
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);
  if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {

      // pointer into to-space: just return it.  It might be a pointer
      // into a generation that we aren't collecting (> N), or it
      // might just be a pointer into to-space.  The latter doesn't
      // happen often, but allowing it makes certain things a bit
      // easier; e.g. scavenging an object is idempotent, so it's OK to
      // have an object on the mutable list multiple times.
      if (bd->flags & BF_EVACUATED) {
          // We aren't copying this object, so we have to check
          // whether it is already in the target generation.  (this is
          // the write barrier).
          if (bd->step < gct->evac_step) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          info = get_itbl(q);
          if (info->type == TSO &&
              ((StgTSO *)q)->what_next == ThreadRelocated) {
              q = (StgClosure *)((StgTSO *)q)->_link;
              *p = q;
              goto loop;
          }
          evacuate_large((P_)q);
          return;
      }

      /* If the object is in a step that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (bd->flags & BF_COMPACTED) {
          if (!is_marked((P_)q,bd)) {
              mark((P_)q,bd);
              if (mark_stack_full()) {
                  mark_stack_overflowed = rtsTrue;
                  reset_mark_stack();
              }
              push_mark_stack((P_)q);
          }
          return;
      }
  }

  stp = bd->step->to;
  info = q->header.info;
  if (IS_FORWARDING_PTR(info))
  {
      /* Already evacuated, just return the forwarding address.
       * HOWEVER: if the requested destination generation (gct->evac_step) is
       * older than the actual generation (because the object was
       * already evacuated to a younger generation) then we have to
       * set the gct->failed_to_evac flag to indicate that we couldn't
       * manage to promote the object to the desired generation.
       *
       * Optimisation: the check is fairly expensive, but we can often
       * shortcut it if either the required generation is 0, or the
       * current object (the EVACUATED) is in a high enough generation.
       * We know that an EVACUATED always points to an object in the
       * same or an older generation.  stp is the lowest step that the
       * current object would be evacuated to, so we only do the full
       * check if stp is too low.
       */
      StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
      *p = TAG_CLOSURE(tag,e);
      if (stp < gct->evac_step) {  // optimisation
          if (Bdescr((P_)e)->step < gct->evac_step) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return;
  }
  switch (INFO_PTR_TO_STRUCT(info)->type) {

  case WHITEHOLE:
      goto loop;

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR_CLEAN:
  case MVAR_DIRTY:
      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp);
      return;
  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (info == Czh_con_info &&
          // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                          );
      }
      else if (info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                          );
      }
      else {
          copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag);
      }
      return;
  }
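  // The two special cases above implement small-value sharing: an in-range
  // Char# or Int# constructor is not copied at all; the pointer is simply
  // redirected to a pre-allocated closure in the RTS's CHARLIKE/INTLIKE
  // tables, so every occurrence of e.g. (C# 'a') shares one object.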
  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,info,q,sizeofW(StgThunk)+1,stp);
      return;
  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    copy(p,info,q,sizeofW(StgThunk)+2,stp);
    return;
  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag);
      return;

  case CONSTR_0_2:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag);
      return;
  case THUNK:
      copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp);
      return;

  case FUN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case CONSTR:
      copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag);
      return;

  case WEAK:
  case STABLE_NAME:
      copy_tag(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag);
      return;

  case BCO:
      copy(p,info,q,bco_sizeW((StgBCO *)q),stp);
      return;
  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
      return;
  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;
  case IND:
  case IND_OLDGEN:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      *p = q;
      goto loop;
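  // Short-circuiting here is what removes indirections from the heap:
  // the field we are evacuating ends up pointing directly at the
  // indirectee, and the IND closure itself is never copied.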
  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);
  case PAP:
      copy(p,info,q,pap_sizeW((StgPAP*)q),stp);
      return;

  case AP:
      copy(p,info,q,ap_sizeW((StgAP*)q),stp);
      return;

  case AP_STACK:
      copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
      return;

  case ARR_WORDS:
      // just copy the block
      copy(p,info,q,arr_words_sizeW((StgArrWords *)q),stp);
      return;
  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
      return;
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
          q = (StgClosure *)tso->_link;
          *p = q;
          goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
          StgTSO *new_tso;
          StgPtr r, s;

          copyPart(p,(StgClosure *)tso, tso_sizeW(tso), sizeofW(StgTSO), stp);
          new_tso = (StgTSO *)*p;
          move_TSO(tso, new_tso);
          for (r = tso->sp, s = new_tso->sp;
               r < tso->stack+tso->stack_size;) {
              *s++ = *r++;
          }
          return;
      }
    }
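      // copyPart() copied only the TSO header and reserved the full size;
      // move_TSO() relocates the stack pointer of the new TSO, and the
      // loop above copies the live part of the stack (everything from
      // tso->sp to the end of the stack), which copyPart left out.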
  case TREC_HEADER:
      copy(p,info,q,sizeofW(StgTRecHeader),stp);
      return;

  case TVAR_WATCH_QUEUE:
      copy(p,info,q,sizeofW(StgTVarWatchQueue),stp);
      return;

  case TVAR:
      copy(p,info,q,sizeofW(StgTVar),stp);
      return;

  case TREC_CHUNK:
      copy(p,info,q,sizeofW(StgTRecChunk),stp);
      return;

  case ATOMIC_INVARIANT:
      copy(p,info,q,sizeofW(StgAtomicInvariant),stp);
      return;

  case INVARIANT_CHECK_QUEUE:
      copy(p,info,q,sizeofW(StgInvariantCheckQueue),stp);
      return;
  default:
      barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
  }

  barf("evacuate");
}
#undef copy
#undef copy_tag
#undef copy_tag_nolock
#undef copyPart
#undef evacuate