#include "RtsUtils.h"
#include "Apply.h"
#include "OSThreads.h"
-#include "Storage.h"
-#include "Stable.h"
#include "LdvProfile.h"
#include "Updates.h"
#include "Stats.h"
*/
rtsBool failed_to_evac;
-/* Saved nursery (used for 2-space collector only)
- */
-static bdescr *saved_nursery;
-static nat saved_n_blocks;
-
/* Data used for allocation area sizing.
*/
lnat new_blocks; // blocks allocated during this GC
#ifdef DEBUG
nat mutlist_MUTVARS,
mutlist_MUTARRS,
+ mutlist_MVARS,
mutlist_OTHERS;
#endif
lnat oldgen_saved_blocks = 0;
nat g, s, i;
- ACQUIRE_SM_LOCK;
-
#ifdef PROFILING
CostCentreStack *prev_CCS;
#endif
+ ACQUIRE_SM_LOCK;
+
debugTrace(DEBUG_gc, "starting GC");
#if defined(RTS_USER_SIGNALS)
- // block signals
- blockUserSignals();
+ if (RtsFlags.MiscFlags.install_signal_handlers) {
+ // block signals
+ blockUserSignals();
+ }
#endif
// tell the STM to discard any cached closures it's hoping to re-use
static_objects = END_OF_STATIC_LIST;
scavenged_static_objects = END_OF_STATIC_LIST;
- /* Save the nursery if we're doing a two-space collection.
- * g0s0->blocks will be used for to-space, so we need to get the
- * nursery out of the way.
- */
- if (RtsFlags.GcFlags.generations == 1) {
- saved_nursery = g0s0->blocks;
- saved_n_blocks = g0s0->n_blocks;
- g0s0->blocks = NULL;
- g0s0->n_blocks = 0;
- }
-
/* Keep a count of how many new blocks we allocated during this GC
* (used for resizing the allocation area, later).
*/
*/
markStablePtrTable(mark_root);
- /* Mark the root pointer table.
- */
- markRootPtrTable(mark_root);
-
/* -------------------------------------------------------------------------
* Repeatedly scavenge all the areas we know about until there's no
* more scavenging to be done.
copied += mut_list_size;
debugTrace(DEBUG_gc,
- "mut_list_size: %lu (%d vars, %d arrays, %d others)",
+ "mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d others)",
(unsigned long)(mut_list_size * sizeof(W_)),
- mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
+ mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS);
}
for (s = 0; s < generations[g].n_steps; s++) {
if (g <= N) {
copied -= stp->hp_bd->start + BLOCK_SIZE_W -
stp->hp_bd->free;
- scavd_copied -= (P_)(BLOCK_ROUND_UP(stp->scavd_hp)) - stp->scavd_hp;
+ scavd_copied -= stp->scavd_hpLim - stp->scavd_hp;
}
}
* the collected steps (except the allocation area). These
 * freed blocks will probably be quickly recycled.
*/
- if (!(g == 0 && s == 0)) {
+ if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
if (stp->is_compacted) {
// for a compacted step, just shift the new to-space
// onto the front of the now-compacted existing blocks.
/* Free the small objects allocated via allocate(), since this will
* all have been copied into G0S1 now.
*/
- if (small_alloc_list != NULL) {
- freeChain(small_alloc_list);
+ if (RtsFlags.GcFlags.generations > 1) {
+ if (g0s0->blocks != NULL) {
+ freeChain(g0s0->blocks);
+ g0s0->blocks = NULL;
+ }
+ g0s0->n_blocks = 0;
}
- small_alloc_list = NULL;
alloc_blocks = 0;
- alloc_Hp = NULL;
- alloc_HpLim = NULL;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
// Start a new pinned_object_block
if (RtsFlags.GcFlags.generations == 1) {
nat blocks;
- if (g0s0->old_blocks != NULL) {
- freeChain(g0s0->old_blocks);
- }
- for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
- bd->flags = 0; // now from-space
- }
- g0s0->old_blocks = g0s0->blocks;
- g0s0->n_old_blocks = g0s0->n_blocks;
- g0s0->blocks = saved_nursery;
- g0s0->n_blocks = saved_n_blocks;
-
/* For a two-space collector, we need to resize the nursery. */
/* set up a new nursery. Allocate a nursery size based on a
* performance we get from 3L bytes, reducing to the same
* performance at 2L bytes.
*/
- blocks = g0s0->n_old_blocks;
+ blocks = g0s0->n_blocks;
if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
stat_endGC(allocated, live, copied, scavd_copied, N);
#if defined(RTS_USER_SIGNALS)
- // unblock signals again
- unblockUserSignals();
+ if (RtsFlags.MiscFlags.install_signal_handlers) {
+ // unblock signals again
+ unblockUserSignals();
+ }
#endif
RELEASE_SM_LOCK;
closure if it is alive, or NULL otherwise.
NOTE: Use it before compaction only!
+ It untags and (if needed) retags pointers to closures.
-------------------------------------------------------------------------- */
{
const StgInfoTable *info;
bdescr *bd;
+ StgWord tag;
+ StgClosure *q;
while (1) {
+ /* The tag and the pointer are split, to be merged later when needed. */
+ tag = GET_CLOSURE_TAG(p);
+ q = UNTAG_CLOSURE(p);
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
- info = get_itbl(p);
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
+ info = get_itbl(q);
// ignore static closures
//
// Problem here is that we sometimes don't set the link field, eg.
// for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
//
- if (!HEAP_ALLOCED(p)) {
+ if (!HEAP_ALLOCED(q)) {
return p;
}
// ignore closures in generations that we're not collecting.
- bd = Bdescr((P_)p);
+ bd = Bdescr((P_)q);
if (bd->gen_no > N) {
return p;
}
}
// check the mark bit for compacted steps
- if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
+ if ((bd->flags & BF_COMPACTED) && is_marked((P_)q,bd)) {
return p;
}
case IND_OLDGEN: // rely on compatible layout with StgInd
case IND_OLDGEN_PERM:
// follow indirections
- p = ((StgInd *)p)->indirectee;
+ p = ((StgInd *)q)->indirectee;
continue;
case EVACUATED:
// alive!
- return ((StgEvacuated *)p)->evacuee;
+ return ((StgEvacuated *)q)->evacuee;
case TSO:
- if (((StgTSO *)p)->what_next == ThreadRelocated) {
- p = (StgClosure *)((StgTSO *)p)->link;
+ if (((StgTSO *)q)->what_next == ThreadRelocated) {
+ p = (StgClosure *)((StgTSO *)q)->link;
continue;
}
return NULL;