X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=791cc4cfc377d2b6dd012fc2a4812bc4f1c1a5dd;hb=9f3fe695451887ade350abc1a2b2a72c75adc402;hp=3d6d649181cc96dfc6c85ea305a6b3f8bab8d66c;hpb=0f3205e6c40575910d50bc2cc42020ccf55e07ba;p=ghc-hetmet.git

diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c
index 3d6d649..791cc4c 100644
--- a/ghc/rts/GC.c
+++ b/ghc/rts/GC.c
@@ -739,7 +739,12 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
 	if (stp->is_compacted) {
 	    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
 	} else {
-	    collected += stp->n_blocks * BLOCK_SIZE_W;
+	    if (g == 0 && s == 0) {
+		collected += countNurseryBlocks() * BLOCK_SIZE_W;
+		collected += alloc_blocks;
+	    } else {
+		collected += stp->n_blocks * BLOCK_SIZE_W;
+	    }
 	}
 
       /* free old memory and shift to-space into from-space for all
@@ -1029,7 +1034,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
 	} else {
 	    // we might have added extra large blocks to the nursery, so
 	    // resize back to minAllocAreaSize again.
-	    resizeNurseries(RtsFlags.GcFlags.minAllocAreaSize);
+	    resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
 	}
     }
 
@@ -1266,6 +1271,16 @@ traverse_weak_ptr_list(void)
 	      ;
 	  }
 
+	  // Threads blocked on black holes: if the black hole
+	  // is alive, then the thread is alive too.
+	  if (tmp == NULL && t->why_blocked == BlockedOnBlackHole) {
+	      if (isAlive(t->block_info.closure)) {
+		  t = (StgTSO *)evacuate((StgClosure *)t);
+		  tmp = t;
+		  flag = rtsTrue;
+	      }
+	  }
+
 	  if (tmp == NULL) {
 	      // not alive (yet): leave this thread on the
 	      // old_all_threads list.
@@ -1282,6 +1297,10 @@ traverse_weak_ptr_list(void)
 	  }
       }
 
+      /* If we evacuated any threads, we need to go back to the scavenger.
+       */
+      if (flag) return rtsTrue;
+
       /* And resurrect any threads which were about to become garbage.
        */
       {
@@ -1294,6 +1313,21 @@ traverse_weak_ptr_list(void)
 	  }
       }
 
+      /* Finally, we can update the blackhole_queue.  This queue
+       * simply strings together TSOs blocked on black holes, it is
+       * not intended to keep anything alive.  Hence, we do not follow
+       * pointers on the blackhole_queue until now, when we have
+       * determined which TSOs are otherwise reachable.  We know at
+       * this point that all TSOs have been evacuated, however.
+       */
+      {
+	  StgTSO **pt;
+	  for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
+	      *pt = (StgTSO *)isAlive((StgClosure *)*pt);
+	      ASSERT(*pt != NULL);
+	  }
+      }
+
       weak_stage = WeakDone;  // *now* we're done,
       return rtsTrue;         // but one more round of scavenging, please
 
@@ -1747,8 +1781,10 @@ loop:
   case CONSTR_2_0:
     return copy(q,sizeofW(StgHeader)+2,stp);
 
-  case FUN:
   case THUNK:
+    return copy(q,thunk_sizeW_fromITBL(info),stp);
+
+  case FUN:
   case CONSTR:
   case IND_PERM:
   case IND_OLDGEN_PERM:
@@ -2354,8 +2390,6 @@ scavenge_fun_srt(const StgInfoTable *info)
 static void
 scavengeTSO (StgTSO *tso)
 {
-    // chase the link field for any TSOs on the same queue
-    tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
     if (   tso->why_blocked == BlockedOnMVar
 	|| tso->why_blocked == BlockedOnBlackHole
 	|| tso->why_blocked == BlockedOnException
@@ -2371,6 +2405,13 @@ scavengeTSO (StgTSO *tso)
 	    (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
     }
 
+    // We don't always chase the link field: TSOs on the blackhole
+    // queue are not automatically alive, so the link field is a
+    // "weak" pointer in that case.
+    if (tso->why_blocked != BlockedOnBlackHole) {
+	tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
+    }
+
     // scavange current transaction record
     tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
 
@@ -3366,11 +3407,11 @@ scavenge_one(StgPtr p)
     }
 
     case PAP:
-	p = scavenge_AP((StgAP *)p);
+	p = scavenge_PAP((StgPAP *)p);
 	break;
 
     case AP:
-	p = scavenge_PAP((StgPAP *)p);
+	p = scavenge_AP((StgAP *)p);
 	break;
 
     case ARR_WORDS:
@@ -4167,27 +4208,13 @@ threadSqueezeStack(StgTSO *tso)
 		  debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh);
 #endif
 #ifdef DEBUG
-		  /* zero out the slop so that the sanity checker can tell
-		   * where the next closure is.
-		   */
-		  {
-		      StgInfoTable *bh_info = get_itbl(bh);
-		      nat np = bh_info->layout.payload.ptrs,
-			  nw = bh_info->layout.payload.nptrs, i;
-		      /* don't zero out slop for a THUNK_SELECTOR,
-		       * because its layout info is used for a
-		       * different purpose, and it's exactly the
-		       * same size as a BLACKHOLE in any case.
-		       */
-		      if (bh_info->type != THUNK_SELECTOR) {
-			  for (i = 0; i < np + nw; i++) {
-			      ((StgClosure *)bh)->payload[i] = INVALID_OBJECT;
-			  }
-		      }
-		  }
+		  // zero out the slop so that the sanity checker can tell
+		  // where the next closure is.
+		  DEBUG_FILL_SLOP(bh);
 #endif
 #ifdef PROFILING
 		  // We pretend that bh is now dead.
+		  // ToDo: is the slop filling the same as DEBUG_FILL_SLOP?
 		  LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
 #endif
 		  // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
@@ -4306,20 +4333,4 @@ printMutableList(generation *gen)
     }
     debugBelch("\n");
 }
-
-STATIC_INLINE rtsBool
-maybeLarge(StgClosure *closure)
-{
-  StgInfoTable *info = get_itbl(closure);
-
-  /* closure types that may be found on the new_large_objects list;
-     see scavenge_large */
-  return (info->type == MUT_ARR_PTRS ||
-	  info->type == MUT_ARR_PTRS_FROZEN ||
-	  info->type == MUT_ARR_PTRS_FROZEN0 ||
-	  info->type == TSO ||
-	  info->type == ARR_WORDS);
-}
-
-
 #endif /* DEBUG */
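
Note: the blackhole_queue hunk in traverse_weak_ptr_list above walks the queue
with a pointer-to-pointer, so each link field can be overwritten in place with
the object's post-GC address (isAlive returns the forwarded pointer of a live
closure, and the patch asserts every queued TSO is live because all TSOs have
already been evacuated).  Below is a minimal standalone sketch of that idiom,
not GHC code: the names node, forward, and update_queue are hypothetical, and
forward stands in for isAlive under the patch's all-entries-survive assumption.

    #include <assert.h>
    #include <stdio.h>

    typedef struct node {
        int          id;
        struct node *next;
    } node;

    /* Stand-in for isAlive(): returns the (possibly new) address of a
     * live object.  Objects don't move in this sketch, so the address
     * is unchanged; in the patch it would be the evacuated copy. */
    static node *forward(node *n)
    {
        return n;
    }

    static void update_queue(node **queue)
    {
        node **pp;
        /* Iterate over the *address of* each link field, so the entry it
         * points at can be replaced in place with the forwarded address. */
        for (pp = queue; *pp != NULL; pp = &((*pp)->next)) {
            *pp = forward(*pp);
            assert(*pp != NULL);   /* mirrors the ASSERT in the patch */
        }
    }

    int main(void)
    {
        node  c = { 3, NULL };
        node  b = { 2, &c };
        node  a = { 1, &b };
        node *queue = &a;
        node *n;

        update_queue(&queue);
        for (n = queue; n != NULL; n = n->next)
            printf("node %d still on queue\n", n->id);
        return 0;
    }

The pointer-to-pointer traversal is what lets the patch update the head of
blackhole_queue and every interior link with one uniform loop, with no special
case for the first element.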