X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=bf5d612549e8b65f9409a30ab014795ebd3ea991;hb=91b07216be1cb09230b7d1b417899ddea8620ff3;hp=f2d9437551fa312c6576acc2d4ebf3a0f5ef2b9d;hpb=de910f06ebec544c71cd5c41dbb11937812c7a1a;p=ghc-hetmet.git

diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c
index f2d9437..bf5d612 100644
--- a/ghc/rts/GC.c
+++ b/ghc/rts/GC.c
@@ -104,6 +104,10 @@ static rtsBool major_gc;
  */
 static nat evac_gen;
 
+/* Whether to do eager promotion or not.
+ */
+static rtsBool eager_promotion;
+
 /* Weak pointers
  */
 StgWeak *old_weak_ptr_list; // also pending finaliser list
@@ -585,6 +589,8 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
       mark_stack_bdescr = NULL;
   }
 
+  eager_promotion = rtsTrue; // for now
+
   /* -----------------------------------------------------------------------
    * follow all the roots that we know about:
    *   - mutable lists from each generation > N
@@ -1567,11 +1573,11 @@ copy(StgClosure *src, nat size, step *stp)
    * by evacuate()).
    */
   if (stp->gen_no < evac_gen) {
-#ifdef NO_EAGER_PROMOTION
-    failed_to_evac = rtsTrue;
-#else
-    stp = &generations[evac_gen].steps[0];
-#endif
+      if (eager_promotion) {
+          stp = &generations[evac_gen].steps[0];
+      } else {
+          failed_to_evac = rtsTrue;
+      }
   }
 
   /* chain a new block onto the to-space for the destination step if
@@ -1617,11 +1623,11 @@ copy_noscav(StgClosure *src, nat size, step *stp)
    * by evacuate()).
    */
   if (stp->gen_no < evac_gen) {
-#ifdef NO_EAGER_PROMOTION
-    failed_to_evac = rtsTrue;
-#else
-    stp = &generations[evac_gen].steps[0];
-#endif
+      if (eager_promotion) {
+          stp = &generations[evac_gen].steps[0];
+      } else {
+          failed_to_evac = rtsTrue;
+      }
   }
 
   /* chain a new block onto the to-space for the destination step if
@@ -1664,11 +1670,11 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
   TICK_GC_WORDS_COPIED(size_to_copy);
 
   if (stp->gen_no < evac_gen) {
-#ifdef NO_EAGER_PROMOTION
-    failed_to_evac = rtsTrue;
-#else
-    stp = &generations[evac_gen].steps[0];
-#endif
+      if (eager_promotion) {
+          stp = &generations[evac_gen].steps[0];
+      } else {
+          failed_to_evac = rtsTrue;
+      }
   }
 
   if (stp->hp + size_to_reserve >= stp->hpLim) {
@@ -1745,11 +1751,11 @@ evacuate_large(StgPtr p)
    */
   stp = bd->step->to;
   if (stp->gen_no < evac_gen) {
-#ifdef NO_EAGER_PROMOTION
-    failed_to_evac = rtsTrue;
-#else
-    stp = &generations[evac_gen].steps[0];
-#endif
+      if (eager_promotion) {
+          stp = &generations[evac_gen].steps[0];
+      } else {
+          failed_to_evac = rtsTrue;
+      }
   }
 
   bd->step = stp;
@@ -1935,7 +1941,8 @@ loop:
 
   switch (info->type) {
 
-  case MUT_VAR:
+  case MUT_VAR_CLEAN:
+  case MUT_VAR_DIRTY:
   case MVAR:
       return copy(q,sizeW_fromITBL(info),stp);
 
@@ -2105,7 +2112,8 @@ loop:
       // just copy the block
       return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);
 
-  case MUT_ARR_PTRS:
+  case MUT_ARR_PTRS_CLEAN:
+  case MUT_ARR_PTRS_DIRTY:
   case MUT_ARR_PTRS_FROZEN:
   case MUT_ARR_PTRS_FROZEN0:
       // just copy the block
@@ -2887,13 +2895,22 @@ scavenge(step *stp)
         p += sizeofW(StgInd);
         break;
 
-    case MUT_VAR:
-        evac_gen = 0;
+    case MUT_VAR_CLEAN:
+    case MUT_VAR_DIRTY: {
+        rtsBool saved_eager_promotion = eager_promotion;
+
+        eager_promotion = rtsFalse;
         ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
-        evac_gen = saved_evac_gen;
-        failed_to_evac = rtsTrue; // mutable anyhow
+        eager_promotion = saved_eager_promotion;
+
+        if (failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
+        }
         p += sizeofW(StgMutVar);
         break;
+    }
 
     case CAF_BLACKHOLE:
     case SE_CAF_BLACKHOLE:
@@ -2934,18 +2951,32 @@ scavenge(step *stp)
         p += arr_words_sizeW((StgArrWords *)p);
         break;
 
-    case MUT_ARR_PTRS:
+    case MUT_ARR_PTRS_CLEAN:
+    case MUT_ARR_PTRS_DIRTY:
         // follow everything
     {
         StgPtr next;
-
-        evac_gen = 0;           // repeatedly mutable
+        rtsBool saved_eager;
+
+        // We don't eagerly promote objects pointed to by a mutable
+        // array, but if we find the array only points to objects in
+        // the same or an older generation, we mark it "clean" and
+        // avoid traversing it during minor GCs.
+        saved_eager = eager_promotion;
+        eager_promotion = rtsFalse;
         next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
         for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
             *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
         }
-        evac_gen = saved_evac_gen;
-        failed_to_evac = rtsTrue; // mutable anyhow.
+        eager_promotion = saved_eager;
+
+        if (failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+        }
+
+        failed_to_evac = rtsTrue; // always put it on the mutable list.
         break;
     }
 
@@ -3256,12 +3287,21 @@ linear_scan:
         evacuate(((StgInd *)p)->indirectee);
         break;
 
-    case MUT_VAR:
-        evac_gen = 0;
+    case MUT_VAR_CLEAN:
+    case MUT_VAR_DIRTY: {
+        rtsBool saved_eager_promotion = eager_promotion;
+
+        eager_promotion = rtsFalse;
         ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
-        evac_gen = saved_evac_gen;
-        failed_to_evac = rtsTrue;
+        eager_promotion = saved_eager_promotion;
+
+        if (failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
+        }
         break;
+    }
 
     case CAF_BLACKHOLE:
     case SE_CAF_BLACKHOLE:
@@ -3295,17 +3335,31 @@ linear_scan:
         scavenge_AP((StgAP *)p);
         break;
 
-    case MUT_ARR_PTRS:
+    case MUT_ARR_PTRS_CLEAN:
+    case MUT_ARR_PTRS_DIRTY:
        // follow everything
    {
        StgPtr next;
-
-        evac_gen = 0;           // repeatedly mutable
+        rtsBool saved_eager;
+
+        // We don't eagerly promote objects pointed to by a mutable
+        // array, but if we find the array only points to objects in
+        // the same or an older generation, we mark it "clean" and
+        // avoid traversing it during minor GCs.
+        saved_eager = eager_promotion;
+        eager_promotion = rtsFalse;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }
-        evac_gen = saved_evac_gen;
+        eager_promotion = saved_eager;
+
+        if (failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+        }
+
        failed_to_evac = rtsTrue; // mutable anyhow.
        break;
    }
@@ -3572,12 +3626,22 @@ scavenge_one(StgPtr p)
        break;
    }
 
-    case MUT_VAR:
-        evac_gen = 0;
+    case MUT_VAR_CLEAN:
+    case MUT_VAR_DIRTY: {
+        StgPtr q = p;
+        rtsBool saved_eager_promotion = eager_promotion;
+
+        eager_promotion = rtsFalse;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
-        evac_gen = saved_evac_gen;
-        failed_to_evac = rtsTrue; // mutable anyhow
+        eager_promotion = saved_eager_promotion;
+
+        if (failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
+        }
        break;
+    }
 
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
@@ -3614,17 +3678,31 @@ scavenge_one(StgPtr p)
        // nothing to follow
        break;
 
-    case MUT_ARR_PTRS:
+    case MUT_ARR_PTRS_CLEAN:
+    case MUT_ARR_PTRS_DIRTY:
    {
-        // follow everything
-        StgPtr next;
-
-        evac_gen = 0;           // repeatedly mutable
+        StgPtr next, q;
+        rtsBool saved_eager;
+
+        // We don't eagerly promote objects pointed to by a mutable
+        // array, but if we find the array only points to objects in
+        // the same or an older generation, we mark it "clean" and
+        // avoid traversing it during minor GCs.
+        saved_eager = eager_promotion;
+        eager_promotion = rtsFalse;
+        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }
-        evac_gen = saved_evac_gen;
+        eager_promotion = saved_eager;
+
+        if (failed_to_evac) {
+            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+        } else {
+            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+        }
+
        failed_to_evac = rtsTrue;
        break;
    }
@@ -3843,9 +3921,12 @@ scavenge_mutable_list(generation *gen)
 
 #ifdef DEBUG
        switch (get_itbl((StgClosure *)p)->type) {
-        case MUT_VAR:
+        case MUT_VAR_CLEAN:
+            barf("MUT_VAR_CLEAN on mutable list");
+        case MUT_VAR_DIRTY:
            mutlist_MUTVARS++; break;
-        case MUT_ARR_PTRS:
+        case MUT_ARR_PTRS_CLEAN:
+        case MUT_ARR_PTRS_DIRTY:
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            mutlist_MUTARRS++; break;
@@ -3854,6 +3935,13 @@ scavenge_mutable_list(generation *gen)
        }
 #endif
 
+        // We don't need to scavenge clean arrays.  This is the
+        // Whole Point of MUT_ARR_PTRS_CLEAN.
+        if (get_itbl((StgClosure *)p)->type == MUT_ARR_PTRS_CLEAN) {
+            recordMutableGen((StgClosure *)p,gen);
+            continue;
+        }
+
        if (scavenge_one(p)) {
            /* didn't manage to promote everything, so put the
             * object back on the list.
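
Every hunk above applies the same pattern: instead of choosing between promotion and failure at compile time via NO_EAGER_PROMOTION, or forcing a rescan by setting evac_gen = 0, the scavenger saves and clears the new runtime flag eager_promotion, walks the object's pointers, and then rewrites the object's info pointer to the _CLEAN or _DIRTY variant depending on whether failed_to_evac was set. A clean mutable array points only at its own or older generations, so scavenge_mutable_list can put it straight back on the mutable list without scavenging it on the next minor GC. The following standalone sketch models only that marking decision; it is not RTS code, and all of its names (ToyClosure, ToyArray, ToyKind, toy_evacuate, scavenge_toy_array) and its generation fields are invented for illustration.

/* Toy model of the CLEAN/DIRTY marking pattern used in the patch above.
 * Only the control flow mirrors the RTS change: save the flag, disable
 * eager promotion, walk the payload, then pick CLEAN or DIRTY depending
 * on whether any pointee stayed in a younger generation.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { int gen_no; } ToyClosure;          /* gen 0 = youngest */

typedef enum { TOY_ARR_CLEAN, TOY_ARR_DIRTY } ToyKind;

typedef struct {
    ToyKind     kind;        /* stands in for the CLEAN/DIRTY info pointer */
    int         gen_no;      /* generation the array itself lives in       */
    int         n;
    ToyClosure *payload[8];
} ToyArray;

static bool eager_promotion = true;    /* mirrors the new global flag      */
static bool failed_to_evac  = false;   /* set when a pointee stays younger */

/* Stand-in for evacuate(): it only checks whether the pointee is already
 * in the array's generation or an older one.  With eager promotion off, a
 * younger pointee is left where it is and the failure is recorded; a real
 * evacuate would instead promote it when eager promotion is on. */
static ToyClosure *toy_evacuate(ToyClosure *c, int target_gen)
{
    if (c->gen_no < target_gen && !eager_promotion) {
        failed_to_evac = true;
    }
    return c;
}

/* Mirrors the MUT_ARR_PTRS_* case in scavenge(): disable eager promotion,
 * walk the payload, restore the flag, then mark the array clean or dirty. */
static void scavenge_toy_array(ToyArray *arr)
{
    bool saved_eager = eager_promotion;
    int  i;

    eager_promotion = false;
    failed_to_evac  = false;
    for (i = 0; i < arr->n; i++) {
        arr->payload[i] = toy_evacuate(arr->payload[i], arr->gen_no);
    }
    eager_promotion = saved_eager;

    /* Clean arrays can be skipped by the next mutable-list scan;
     * dirty ones must be scavenged again. */
    arr->kind = failed_to_evac ? TOY_ARR_DIRTY : TOY_ARR_CLEAN;
}

int main(void)
{
    ToyClosure old_obj   = { 1 };
    ToyClosure young_obj = { 0 };

    ToyArray a = { TOY_ARR_DIRTY, 1, 2, { &old_obj, &young_obj } };

    scavenge_toy_array(&a);                 /* one young pointee -> dirty  */
    printf("array is %s\n", a.kind == TOY_ARR_CLEAN ? "clean" : "dirty");

    a.payload[1] = &old_obj;                /* now only old pointees       */
    scavenge_toy_array(&a);                 /* -> clean                    */
    printf("array is %s\n", a.kind == TOY_ARR_CLEAN ? "clean" : "dirty");
    return 0;
}

Under these assumptions the second call reports the array as clean because everything it points at is already in its own generation, which is exactly the case the patch lets scavenge_mutable_list skip: the array is re-recorded with recordMutableGen and the scavenge is avoided until something dirties it again.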