X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=a13cd33afadd346ed79e20ac72235ea4e62887eb;hb=a1b4e3b88a6987deed7bb7f1bd870b30eef1b475;hp=bf5d612549e8b65f9409a30ab014795ebd3ea991;hpb=91b07216be1cb09230b7d1b417899ddea8620ff3;p=ghc-hetmet.git

diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c
index bf5d612..a13cd33 100644
--- a/ghc/rts/GC.c
+++ b/ghc/rts/GC.c
@@ -818,7 +818,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
         }
         copied += mut_list_size;
-        IF_DEBUG(gc, debugBelch("mut_list_size: %d (%d vars, %d arrays, %d others)\n", mut_list_size * sizeof(W_), mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS));
+        IF_DEBUG(gc, debugBelch("mut_list_size: %ld (%d vars, %d arrays, %d others)\n", mut_list_size * sizeof(W_), mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS));
     }

     for (s = 0; s < generations[g].n_steps; s++) {
@@ -1154,7 +1154,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
     ACQUIRE_SM_LOCK;

     // send exceptions to any threads which were about to die
+    RELEASE_SM_LOCK;
     resurrectThreads(resurrected_threads);
+    ACQUIRE_SM_LOCK;

     // Update the stable pointer hash table.
     updateStablePtrTable(major_gc);
@@ -1696,7 +1698,7 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
     SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
     // fill the slop
     if (size_to_reserve - size_to_copy_org > 0)
-        FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
+        LDV_FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
 #endif
     return (StgClosure *)dest;
 }
@@ -2016,11 +2018,15 @@ loop:
   case THUNK_SELECTOR:
     {
         StgClosure *p;
+        const StgInfoTable *info_ptr;

         if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
             return copy(q,THUNK_SELECTOR_sizeW(),stp);
         }

+        // stashed away for LDV profiling, see below
+        info_ptr = q->header.info;
+
         p = eval_thunk_selector(info->layout.selector_offset,
                                 (StgSelector *)q);
@@ -2033,6 +2039,13 @@ loop:
         val = evacuate(p);
         thunk_selector_depth--;

+#ifdef PROFILING
+        // For the purposes of LDV profiling, we have destroyed
+        // the original selector thunk.
+        SET_INFO(q, info_ptr);
+        LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(q);
+#endif
+
         // Update the THUNK_SELECTOR with an indirection to the
         // EVACUATED closure now at p.  Why do this rather than
         // upd_evacuee(q,p)?  Because we have an invariant that an
@@ -2042,12 +2055,10 @@ loop:
         SET_INFO(q, &stg_IND_info);
         ((StgInd *)q)->indirectee = p;

-#ifdef PROFILING
-        // We store the size of the just evacuated object in the
-        // LDV word so that the profiler can guess the position of
-        // the next object later.
-        SET_EVACUAEE_FOR_LDV(q, THUNK_SELECTOR_sizeW());
-#endif
+        // For the purposes of LDV profiling, we have created an
+        // indirection.
+        LDV_RECORD_CREATE(q);
+
         return val;
     }
 }
@@ -2164,7 +2175,7 @@ loop:
     }

   case BLOCKED_FETCH:
-    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
+    ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
     to = copy(q,sizeofW(StgBlockedFetch),stp);
     IF_DEBUG(gc,
              debugBelch("@@ evacuate: %p (%s) to %p (%s)",
@@ -2175,7 +2186,7 @@ loop:
   case REMOTE_REF:
 # endif
   case FETCH_ME:
-    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
+    ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
     to = copy(q,sizeofW(StgFetchMe),stp);
     IF_DEBUG(gc,
              debugBelch("@@ evacuate: %p (%s) to %p (%s)",
@@ -2183,7 +2194,7 @@ loop:
     return to;

   case FETCH_ME_BQ:
-    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
+    ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
     to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
     IF_DEBUG(gc,
              debugBelch("@@ evacuate: %p (%s) to %p (%s)",
@@ -3004,10 +3015,19 @@ scavenge(step *stp)
     case TSO:
     {
         StgTSO *tso = (StgTSO *)p;
-        evac_gen = 0;
+        rtsBool saved_eager = eager_promotion;
+
+        eager_promotion = rtsFalse;
         scavengeTSO(tso);
-        evac_gen = saved_evac_gen;
-        failed_to_evac = rtsTrue; // mutable anyhow.
+        eager_promotion = saved_eager;
+
+        if (failed_to_evac) {
+            tso->flags |= TSO_DIRTY;
+        } else {
+            tso->flags &= ~TSO_DIRTY;
+        }
+
+        failed_to_evac = rtsTrue; // always on the mutable list
         p += tso_sizeW(tso);
         break;
     }
@@ -3388,10 +3408,19 @@ linear_scan:
     case TSO:
     {
         StgTSO *tso = (StgTSO *)p;
-        evac_gen = 0;
+        rtsBool saved_eager = eager_promotion;
+
+        eager_promotion = rtsFalse;
         scavengeTSO(tso);
-        evac_gen = saved_evac_gen;
-        failed_to_evac = rtsTrue;
+        eager_promotion = saved_eager;
+
+        if (failed_to_evac) {
+            tso->flags |= TSO_DIRTY;
+        } else {
+            tso->flags &= ~TSO_DIRTY;
+        }
+
+        failed_to_evac = rtsTrue; // always on the mutable list
         break;
     }
@@ -3537,12 +3566,12 @@ linear_scan:

         // already scavenged?
         if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
-            oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
+            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
             goto loop;
         }

         push_mark_stack(oldgen_scan);
         // ToDo: bump the linear scan by the actual size of the object
-        oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
+        oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
         goto linear_scan;
     }
@@ -3731,11 +3760,19 @@ scavenge_one(StgPtr p)
     case TSO:
     {
         StgTSO *tso = (StgTSO *)p;
-
-        evac_gen = 0;           // repeatedly mutable
+        rtsBool saved_eager = eager_promotion;
+
+        eager_promotion = rtsFalse;
         scavengeTSO(tso);
-        evac_gen = saved_evac_gen;
-        failed_to_evac = rtsTrue;
+        eager_promotion = saved_eager;
+
+        if (failed_to_evac) {
+            tso->flags |= TSO_DIRTY;
+        } else {
+            tso->flags &= ~TSO_DIRTY;
+        }
+
+        failed_to_evac = rtsTrue; // always on the mutable list
         break;
     }
@@ -3935,17 +3972,38 @@ scavenge_mutable_list(generation *gen)
             }
 #endif

-            // We don't need to scavenge clean arrays.  This is the
-            // Whole Point of MUT_ARR_PTRS_CLEAN.
-            if (get_itbl((StgClosure *)p)->type == MUT_ARR_PTRS_CLEAN) {
+            // Check whether this object is "clean", that is it
+            // definitely doesn't point into a young generation.
+            // Clean objects don't need to be scavenged.  Some clean
+            // objects (MUT_VAR_CLEAN) are not kept on the mutable
+            // list at all; others, such as MUT_ARR_PTRS_CLEAN and
+            // TSO, are always on the mutable list.
+            //
+            switch (get_itbl((StgClosure *)p)->type) {
+            case MUT_ARR_PTRS_CLEAN:
                 recordMutableGen((StgClosure *)p,gen);
                 continue;
+            case TSO: {
+                StgTSO *tso = (StgTSO *)p;
+                if ((tso->flags & TSO_DIRTY) == 0) {
+                    // A clean TSO: we don't have to traverse its
+                    // stack.  However, we *do* follow the link field:
+                    // we don't want to have to mark a TSO dirty just
+                    // because we put it on a different queue.
+                    if (tso->why_blocked != BlockedOnBlackHole) {
+                        tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
+                    }
+                    recordMutableGen((StgClosure *)p,gen);
+                    continue;
+                }
+            }
+            default:
+                ;
             }

             if (scavenge_one(p)) {
-                /* didn't manage to promote everything, so put the
-                 * object back on the list.
-                 */
+                // didn't manage to promote everything, so put the
+                // object back on the list.
                 recordMutableGen((StgClosure *)p,gen);
             }
         }
@@ -4632,8 +4690,8 @@ end:
     // Should we squeeze or not?  Arbitrary heuristic: we squeeze if
    // the number of words we have to shift down is less than the
    // number of stack words we squeeze away by doing so.
-    if (1 /*RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue &&
-            weight < words_to_squeeze*/) {
+    if (RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue &&
+        weight < words_to_squeeze) {
        stackSqueeze(tso, (StgPtr)frame);
     }
 }
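Note: the recurring change in scavenge(), scavenge_one() and the mark-compact scan above is a single dirty-TSO pattern: scavenge a thread with eager promotion turned off, record in a per-TSO flag whether any pointer failed to evacuate, and keep the TSO on the mutable list unconditionally. The following is a minimal standalone sketch of that control flow only; the types, mock_scavengeTSO() and the flag bit are mocked stand-ins, not the actual RTS definitions.

/* Sketch (mocked types, NOT the actual RTS) of the dirty-TSO pattern:
   scavenge with eager promotion off, set or clear a per-TSO dirty flag
   from failed_to_evac, then keep the TSO on the mutable list. */
#include <stdbool.h>
#include <stdio.h>

#define TSO_DIRTY 0x1            /* stand-in for the RTS flag bit */

typedef struct { unsigned flags; } MockTSO;

static bool eager_promotion;     /* GC-wide mode, as in the patch */
static bool failed_to_evac;      /* set when a pointer could not be promoted */

/* Stand-in for scavengeTSO(): pretend one pointer failed to evacuate. */
static void mock_scavengeTSO(MockTSO *tso)
{
    (void)tso;
    failed_to_evac = true;
}

static void scavenge_tso(MockTSO *tso)
{
    bool saved_eager = eager_promotion;

    eager_promotion = false;       /* don't eagerly promote what the TSO points to */
    mock_scavengeTSO(tso);
    eager_promotion = saved_eager;

    if (failed_to_evac) {
        tso->flags |= TSO_DIRTY;   /* stack must be re-traversed next GC */
    } else {
        tso->flags &= ~TSO_DIRTY;  /* clean: next GC may skip the stack */
    }

    failed_to_evac = true;         /* TSOs always stay on the mutable list */
}

int main(void)
{
    MockTSO tso = { 0 };
    scavenge_tso(&tso);
    printf("dirty: %d\n", (tso.flags & TSO_DIRTY) != 0);
    return 0;
}

Compiled with any C99 compiler, this prints "dirty: 1", because the mocked scavenge reports a failed evacuation. The payoff appears in the scavenge_mutable_list hunk: a TSO whose flag stayed clear can skip the stack traversal entirely on the next GC.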
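The last hunk re-enables the stack-squeeze heuristic that had been hard-wired to 1: squeeze only when squeezing is enabled and the number of words to shift down (weight) is smaller than the number of stack words reclaimed (words_to_squeeze). A sketch of just that predicate follows; the two names come from the surrounding GC.c, while the numbers in main() are invented for illustration (the real values are computed earlier in the same function).

/* Sketch of the squeeze decision re-enabled in the last hunk. */
#include <stdbool.h>
#include <stdio.h>

static bool should_squeeze(bool squeezeUpdFrames,
                           unsigned weight,            /* words to shift down */
                           unsigned words_to_squeeze)  /* stack words reclaimed */
{
    /* Squeeze only when enabled and when the words reclaimed outweigh
       the words that must be shifted down to close the gap. */
    return squeezeUpdFrames && weight < words_to_squeeze;
}

int main(void)
{
    printf("%d\n", should_squeeze(true, 10, 50));  /* 1: cheap shift, big win */
    printf("%d\n", should_squeeze(true, 50, 10));  /* 0: not worth it */
    return 0;
}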