***************************************************************************

                        APPEL'S GARBAGE COLLECTION

Global heap requirements as for 1s and 2s collectors.
    ++ All closures in the old generation that are updated must be
       updated with indirections and placed on the linked list of
       updated old generation closures.

***************************************************************************

\begin{code}
#if defined(GCap)

#define SCAV_REG_MAP
#include "SMinternal.h"
#include "SMcopying.h"
#include "SMcompacting.h"
#include "SMextn.h"

REGDUMP(ScavRegDump);

appelData appelInfo = {0, 0, 0, 0, 0,
                       0, 0, 0, 0, 0,
                       0, 0, 0, 0, 0,
                       {{0, 0}, {0, 0}}
                      };

P_ heap_space = 0;      /* Address of first word of slab of memory allocated for heap */

P_ hp_start;            /* Value of Hp when reduction was resumed */

static I_ allocd_since_last_major_GC = 0;
        /* words alloced since last major GC; used when forcing GC */

#if defined(DEBUG)
void
debug_look_for(start, stop, villain)
  P_ start, stop, villain;
{
    P_ i;
    for (i = start; i <= stop; i++) {
        if ( (P_) *i == villain ) {
            fprintf(stderr, "* %x : %x\n", i, villain);
        }
    }
}
#endif

rtsBool
initHeap(smInfo * sm)
{
    if (heap_space == 0) { /* allocates if it doesn't already exist */

        /* Allocate the roots space */
        sm->roots = (P_ *) stgMallocWords(SM_MAXROOTS, "initHeap (roots)");

        /* Allocate the heap */
        heap_space = (P_) stgMallocWords(RTSflags.GcFlags.heapSize + EXTRA_HEAP_WORDS,
                                         "initHeap (heap)");

        /* ToDo (ADR): trash entire heap contents */

        if (RTSflags.GcFlags.force2s) {
            stat_init("TWOSPACE(APPEL)",
                      " No of Roots Caf Caf Astk Bstk",
                      "Astk Bstk Reg No bytes bytes bytes");
        } else {
            stat_init("APPEL",
                      " No of Roots Caf Mut- Old Collec Resid",
                      "Astk Bstk Reg No able Gen tion %heap");
        }
    }
    sm->hardHpOverflowSize = 0;

    if (RTSflags.GcFlags.force2s) {
        I_ semi_space_words = RTSflags.GcFlags.heapSize / 2;
        appelInfo.space[0].base = HEAP_FRAME_BASE(heap_space, semi_space_words);
        appelInfo.space[1].base = HEAP_FRAME_BASE(heap_space + semi_space_words, semi_space_words);
        appelInfo.space[0].lim = HEAP_FRAME_LIMIT(heap_space, semi_space_words);
        appelInfo.space[1].lim = HEAP_FRAME_LIMIT(heap_space + semi_space_words, semi_space_words);
        appelInfo.semi_space = 0;
        appelInfo.oldlim = heap_space - 1;  /* Never in old generation */

        sm->hp = hp_start = appelInfo.space[appelInfo.semi_space].base - 1;

        if (! RTSflags.GcFlags.allocAreaSizeGiven) {
            sm->hplim = appelInfo.space[appelInfo.semi_space].lim;
        } else {
            sm->hplim = sm->hp + RTSflags.GcFlags.allocAreaSize;

            RTSflags.GcFlags.minAllocAreaSize = 0; /* specified size takes precedence */

            if (sm->hplim > appelInfo.space[appelInfo.semi_space].lim) {
                fprintf(stderr, "Not enough heap for requested alloc size\n");
                return rtsFalse;
            }
        }

        if (RTSflags.GcFlags.forceGC) {
            if (sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
                sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
            } else {
                /* no point in forcing GC, as the semi-space is smaller
                   than forcingInterval */
                RTSflags.GcFlags.forceGC = rtsFalse;
            }
        }

        sm->OldLim = appelInfo.oldlim;
        sm->CAFlist = NULL;

#ifndef PAR
        initExtensions( sm );
#endif

        if (RTSflags.GcFlags.trace) {
            fprintf(stderr, "APPEL(2s) Heap: 0x%lx .. 0x%lx\n",
                    (W_) heap_space,
                    (W_) (heap_space - 1 + RTSflags.GcFlags.heapSize));
            fprintf(stderr, "Initial: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
                    appelInfo.semi_space,
                    (W_) appelInfo.space[appelInfo.semi_space].base,
                    (W_) appelInfo.space[appelInfo.semi_space].lim,
                    (W_) sm->hp, (W_) sm->hplim,
                    (W_) (sm->hplim - sm->hp) * sizeof(W_));
        }
        return rtsTrue;
    }
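    /* A rough sketch of the single-slab layout set up below for the
       generational collector proper (illustrative only): the old
       generation starts at the bottom of the slab (oldbase = heap_space)
       and grows upwards to oldlim; the new (allocation) area is carved
       out of the free space above it, between newbase and newlim; and the
       mark bit vector used by major collections sits at the very top,
       just below newlim.  oldthresh is the occupancy at which a major
       collection is triggered, and oldmax is the hard ceiling on
       old-generation growth. */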
    /* So not forced 2s */

    appelInfo.newlim  = heap_space + RTSflags.GcFlags.heapSize - 1;
    if (RTSflags.GcFlags.allocAreaSizeGiven) {
        appelInfo.newfixed = RTSflags.GcFlags.allocAreaSize;
        appelInfo.newmin   = RTSflags.GcFlags.allocAreaSize;
        appelInfo.newbase  = heap_space + RTSflags.GcFlags.heapSize - appelInfo.newfixed;
    } else {
        appelInfo.newfixed = 0;
        appelInfo.newmin   = RTSflags.GcFlags.minAllocAreaSize;
        appelInfo.newbase  = heap_space + (RTSflags.GcFlags.heapSize / 2);
    }

    appelInfo.oldbase = heap_space;
    appelInfo.oldlim  = heap_space - 1;
    appelInfo.oldlast = heap_space - 1;
    appelInfo.oldmax  = heap_space - 1 + RTSflags.GcFlags.heapSize - 2*appelInfo.newmin;

    if (appelInfo.oldbase > appelInfo.oldmax) {
        fprintf(stderr, "Not enough heap for requested/minimum allocation area\n");
        fprintf(stderr, "heap_space=%ld\n", (W_) heap_space);
        fprintf(stderr, "heapSize=%ld\n", RTSflags.GcFlags.heapSize);
        fprintf(stderr, "newmin=%ld\n", appelInfo.newmin);
        return rtsFalse;
    }

    appelInfo.bit_words = (RTSflags.GcFlags.heapSize + BITS_IN(BitWord) - 1) / BITS_IN(BitWord);
    appelInfo.bits      = (BitWord *)(appelInfo.newlim) - appelInfo.bit_words;
    if (appelInfo.bit_words > appelInfo.newmin)
        appelInfo.oldmax = heap_space - 1 + RTSflags.GcFlags.heapSize - appelInfo.bit_words - appelInfo.newmin;

    if (RTSflags.GcFlags.specifiedOldGenSize) {
        appelInfo.oldthresh = heap_space - 1 + RTSflags.GcFlags.specifiedOldGenSize;
        if (appelInfo.oldthresh > appelInfo.oldmax) {
            fprintf(stderr, "Not enough heap for requested major resid size\n");
            return rtsFalse;
        }
    } else {
        appelInfo.oldthresh = heap_space + RTSflags.GcFlags.heapSize * 2 / 3; /* Initial threshold -- 2/3rds */
        if (appelInfo.oldthresh > appelInfo.oldmax)
            appelInfo.oldthresh = appelInfo.oldmax;
    }

    sm->hp = hp_start = appelInfo.newbase - 1;
    sm->hplim = appelInfo.newlim;

    if (RTSflags.GcFlags.forceGC
     && sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
        sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
    }

    sm->OldLim = appelInfo.oldlim;

    sm->CAFlist = NULL;
    appelInfo.OldCAFlist = NULL;
    appelInfo.OldCAFno = 0;

#ifndef PAR
    initExtensions( sm );
#endif

    appelInfo.PromMutables = 0;

    if (RTSflags.GcFlags.trace) {
        fprintf(stderr, "APPEL Heap: 0x%lx .. 0x%lx\n",
                (W_) heap_space,
                (W_) (heap_space - 1 + RTSflags.GcFlags.heapSize));
        fprintf(stderr, "Initial: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx\n",
                (W_) appelInfo.newbase, (W_) appelInfo.newlim,
                (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
                (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
                (W_) sm->hp, (W_) sm->hplim);
    }

    return rtsTrue; /* OK */
}
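#if 0
/* Illustrative sketch only (kept under #if 0, never compiled): after each
   collection below, the free space above the old generation is re-split so
   that the new allocation area takes the upper half, leaving the lower half
   as promotion headroom for the next minor collection.  The helper name is
   made up; the expression mirrors the one used in collectHeap. */
static P_
split_new_generation(P_ oldlim, P_ newlim)
{
    return oldlim + 1 + (((newlim - oldlim) + 1) / 2);
}
#endif

/* collect2s: the simple two-space copying collector, used only when the
   heap has been forced into two-space mode (RTSflags.GcFlags.force2s).
   It flips to the other semi-space, evacuates every root (CAFs, the
   registered roots in sm->roots -- which now include the stable pointer
   table -- and the A and B stacks), scavenges the to-space, and then
   decides whether enough free space remains for the request. */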
static I_
collect2s(W_ reqsize, smInfo *sm)
{
    I_ free_space,      /* No of words of free space following GC */
       alloc,           /* Number of words allocated since last GC */
       resident,        /* Number of words remaining after GC */
       extra_caf_words, /* Extra words referenced from CAFs */
       caf_roots,       /* Number of CAFs */
       bstk_roots;      /* Number of update frames in B stack */

    SAVE_REGS(&ScavRegDump);        /* Save registers */

#if defined(PROFILING)
    if (interval_expired) { heap_profile_setup(); }
#endif  /* PROFILING */

    if (RTSflags.GcFlags.trace)
        fprintf(stderr, "Start: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, req %lu\n",
                appelInfo.semi_space,
                (W_) appelInfo.space[appelInfo.semi_space].base,
                (W_) appelInfo.space[appelInfo.semi_space].lim,
                (W_) sm->hp, (W_) sm->hplim, (W_) (reqsize * sizeof(W_)));

    alloc = sm->hp - hp_start;
    stat_startGC(alloc);

    appelInfo.semi_space = NEXT_SEMI_SPACE(appelInfo.semi_space);
    ToHp = appelInfo.space[appelInfo.semi_space].base - 1;
    Scav = appelInfo.space[appelInfo.semi_space].base;
    OldGen = sm->OldLim; /* always evac ! */

    SetCAFInfoTables( sm->CAFlist );
#ifdef PAR
    EvacuateLocalGAs(rtsTrue);
#else
    /* evacSPTable( sm ); StablePointerTable now accessible in sm->roots SOF 4/96 */
#endif /* PAR */
    EvacuateRoots( sm->roots, sm->rootno );
#if defined(GRAN)
    EvacuateEvents();
#endif
#if defined(CONCURRENT)
    EvacuateSparks();
#endif
#if !defined(PAR)
    EvacuateAStack( MAIN_SpA, stackInfo.botA );
    EvacuateBStack( MAIN_SuB, stackInfo.botB, &bstk_roots );
#endif /* !PAR */

    Scavenge();

    EvacAndScavengeCAFs( sm->CAFlist, &extra_caf_words, &caf_roots );

#ifdef PAR
    RebuildGAtables(rtsTrue);
#else
    reportDeadForeignObjs( sm->ForeignObjList, NULL, &(sm->ForeignObjList) );
#endif /* PAR */

    /* TIDY UP AND RETURN */

    sm->hp = hp_start = ToHp;  /* Last allocated word */

    resident = sm->hp - (appelInfo.space[appelInfo.semi_space].base - 1);
    DO_MAX_RESIDENCY(resident); /* stats only */

    if (! RTSflags.GcFlags.allocAreaSizeGiven) {
        sm->hplim = appelInfo.space[appelInfo.semi_space].lim;
        free_space = sm->hplim - sm->hp;
    } else {
        sm->hplim = sm->hp + RTSflags.GcFlags.allocAreaSize;
        if (sm->hplim > appelInfo.space[appelInfo.semi_space].lim) {
            free_space = 0;
        } else {
            free_space = RTSflags.GcFlags.allocAreaSize;
        }
    }

    if (RTSflags.GcFlags.forceGC
     && sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
        sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
    }

    if (RTSflags.GcFlags.giveStats) {
        char comment_str[BIG_STRING_LEN];
#if !defined(PAR)
        sprintf(comment_str, "%4lu %4ld %3ld %3ld %6lu %6lu %6lu 2s",
                (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
                bstk_roots, sm->rootno, caf_roots,
                extra_caf_words*sizeof(W_),
                (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1)*sizeof(W_),
                (W_) (SUBTRACT_B_STK(MAIN_SpB, stackInfo.botB) + 1)*sizeof(W_));
#else
        /* ToDo: come up with some interesting statistics for the parallel world */
        sprintf(comment_str, "%4u %4ld %3ld %3ld %6lu %6lu %6lu 2s",
                0, 0L, sm->rootno, caf_roots, extra_caf_words*sizeof(W_), 0L, 0L);
#endif
#if defined(PROFILING)
        if (interval_expired) { strcat(comment_str, " prof"); }
#endif
        stat_endGC(alloc, RTSflags.GcFlags.heapSize, resident, comment_str);
    } else {
        stat_endGC(alloc, RTSflags.GcFlags.heapSize, resident, "");
    }

#if defined(PROFILING) || defined(PAR)
    if (interval_expired) {
# if defined(PROFILING)
        heap_profile_done();
# endif
        report_cc_profiling(0 /*partial*/);
    }
#endif /* PROFILING */

    if (RTSflags.GcFlags.trace)
        fprintf(stderr, "Done: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
                appelInfo.semi_space,
                (W_) appelInfo.space[appelInfo.semi_space].base,
                (W_) appelInfo.space[appelInfo.semi_space].lim,
                (W_) sm->hp, (W_) sm->hplim, (W_) (free_space * sizeof(W_)));

#ifdef DEBUG
    /* To help flush out bugs, we trash the part of the heap from which
       we're about to start allocating, and all of the space we just
       came from. */
    {
        I_ old_space = NEXT_SEMI_SPACE(appelInfo.semi_space);

        TrashMem(appelInfo.space[old_space].base, appelInfo.space[old_space].lim);
        TrashMem(sm->hp+1, sm->hplim);
    }
#endif /* DEBUG */

    RESTORE_REGS(&ScavRegDump);     /* Restore Registers */

    if (free_space < RTSflags.GcFlags.minAllocAreaSize || free_space < reqsize)
        return( GC_HARD_LIMIT_EXCEEDED );       /* Heap absolutely exhausted */
    else {
        if (reqsize + sm->hardHpOverflowSize > free_space) {
            return( GC_SOFT_LIMIT_EXCEEDED );   /* Heap nearly exhausted */
        } else {
            return( GC_SUCCESS );               /* Heap OK */
        }
    }
}
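/* collectHeap is the entry point for Appel's generational collector.
   Every call begins with a minor collection that promotes the entire new
   generation onto the end of the old generation, driving the copy from
   the old generation's mutable list, the CAF list and the usual roots.
   Only if the old generation has grown past oldthresh, a full collection
   is demanded, or the request does not fit in the new area does it go on
   to the major mark-and-compact phase further down.

   Both collect2s and collectHeap report their outcome the same way:
     GC_SUCCESS              enough free space for the request;
     GC_SOFT_LIMIT_EXCEEDED  the request fits, but not the additional
                             hardHpOverflowSize headroom;
     GC_HARD_LIMIT_EXCEEDED  the heap is effectively exhausted. */

#if 0
/* Illustrative sketch only (never compiled): the test that decides, at the
   end of every minor collection, whether the major phase can be skipped.
   The helper name is made up; the body mirrors the condition used in
   collectHeap below. */
static rtsBool
minor_collection_suffices(W_ reqsize, smInfo *sm, rtsBool do_full_collection)
{
    return (appelInfo.oldlim < appelInfo.oldthresh)        /* old gen below threshold */
        && (reqsize + sm->hardHpOverflowSize
              <= appelInfo.newlim - appelInfo.newbase)     /* request fits in new gen */
        && (! do_full_collection);                         /* no full GC demanded */
}
#endif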
I_
collectHeap(reqsize, sm, do_full_collection)
    W_ reqsize;
    smInfo *sm;
    rtsBool do_full_collection; /* do a major collection regardless? */
{
    I_ bstk_roots, caf_roots, mutable, old_words;
    P_ old_start, mutptr, prevmut;
    P_ CAFptr, prevCAF;

    I_ alloc,           /* Number of words allocated since last GC */
       resident;        /* Number of words remaining after GC */

    fflush(stdout);     /* Flush stdout at start of GC */

    if (RTSflags.GcFlags.force2s) {
        return collect2s(reqsize, sm);
    }

    SAVE_REGS(&ScavRegDump); /* Save registers */

    if (RTSflags.GcFlags.trace)
        fprintf(stderr, "Start: newbase 0x%lx, newlim 0x%lx\n hp 0x%lx, hplim 0x%lx, req %lu\n",
                (W_) appelInfo.newbase, (W_) appelInfo.newlim,
                (W_) sm->hp, (W_) sm->hplim, reqsize * sizeof(W_));

    alloc = sm->hp - hp_start;
    stat_startGC(alloc);

    allocd_since_last_major_GC += sm->hplim - hp_start;
    /* this is indeed supposed to be less precise than alloc above */

    /* COPYING COLLECTION */

    /* Set ToHp to end of old gen */
    ToHp = appelInfo.oldlim;

    /* Set OldGen register so we only evacuate new gen closures */
    OldGen = appelInfo.oldlim;

    /* FIRST: Evacuate and Scavenge CAFs and roots in the old generation */

    old_start = ToHp;

    SetCAFInfoTables( sm->CAFlist );

    DEBUG_STRING("Evacuate CAFs:");
    caf_roots = 0;
    CAFptr = sm->CAFlist;
    prevCAF = ((P_)(&sm->CAFlist)) - FIXED_HS; /* see IND_CLOSURE_LINK */
    while (CAFptr) {
        EVACUATE_CLOSURE(CAFptr); /* evac & upd OR return */
        caf_roots++;
        prevCAF = CAFptr;
        CAFptr = (P_) IND_CLOSURE_LINK(CAFptr);
    }
    IND_CLOSURE_LINK(prevCAF) = (W_) appelInfo.OldCAFlist;
    appelInfo.OldCAFlist = sm->CAFlist;
    appelInfo.OldCAFno += caf_roots;
    sm->CAFlist = NULL;
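    /* The old generation's mutable list is the collector's record of
       possible old-to-new pointers (see the header comment above: updated
       old-generation closures become indirections and are linked onto this
       list).  Each entry is scavenged as a root so that anything it points
       to in the new generation gets promoted; entries that are no longer
       mutable cannot acquire new-generation pointers again, so they are
       dropped from the list here. */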
    DEBUG_STRING("Evacuate Mutable Roots:");
    mutable = 0;
    mutptr = sm->OldMutables;

    /* Clever, but completely illegal: */
    prevmut = ((P_)&sm->OldMutables) - FIXED_HS;        /* See MUT_LINK */
    while ( mutptr ) {

        /* Scavenge the OldMutable */
        P_ info = (P_) INFO_PTR(mutptr);
        StgScavPtr scav_code = SCAV_CODE(info);
        Scav = mutptr;
        (scav_code)();

        /* Remove from OldMutables if no longer mutable */
        if (!IS_MUTABLE(info)) {
            P_ tmp = mutptr;
            MUT_LINK(prevmut) = MUT_LINK(mutptr);
            mutptr = (P_) MUT_LINK(mutptr);
            MUT_LINK(tmp) = MUT_NOT_LINKED;
        } else {
            prevmut = mutptr;
            mutptr = (P_) MUT_LINK(mutptr);
        }
        mutable++;
    }

#if 0 && defined(GRAN)
    {
        extern ex_RBH_q;
        closq prev_ptr, clos_ptr;

        DEBUG_STRING("Evacuate reverted RBHs:");

        clos_ptr = ex_RBH_q;
        while ( clos_ptr ) {
            /* Scavenge the OldMutable */
            P_ info = (P_) INFO_PTR(CLOS_CLOSURE(clos_ptr));
            StgScavPtr scav_code = SCAV_CODE(info);
            Scav = CLOS_CLOSURE(clos_ptr);
            (scav_code)();
            /* No mutable closures are put on the ex_RBH_q */
            /* ASSERT(IS_MUTABLE(info)); */
            prev_ptr = clos_ptr;
            clos_ptr = CLOS_NEXT(clos_ptr);
            free(prev_ptr);
        }
        ex_RBH_q = NULL;
    }
#endif /* GRAN */

#ifdef PAR
    EvacuateLocalGAs(rtsFalse);
#else
    /* evacSPTable( sm ); SP table is now in sm->roots */
#endif /* PAR */

    DEBUG_STRING("Scavenge evacuated old generation roots:");

    Scav = appelInfo.oldlim + 1; /* Point to (info field of) first closure */
    Scavenge();

    old_words = ToHp - old_start;

    /* PROMOTE closures rooted in the old generation and reset list of
       old gen roots */

    appelInfo.oldlim = ToHp;

    /* SECOND: Evacuate and scavenge remaining roots
               These may already have been evacuated -- just get new address */

    EvacuateRoots( sm->roots, sm->rootno );

#if defined(GRAN)
    EvacuateEvents();
#endif
#if defined(CONCURRENT)
    EvacuateSparks();
#endif
#if !defined(PAR)
    EvacuateAStack( MAIN_SpA, stackInfo.botA );
    EvacuateBStack( MAIN_SuB, stackInfo.botB, &bstk_roots );
    /* ToDo: Optimisation which squeezes out garbage update frames */
#endif /* PAR */

    Scav = appelInfo.oldlim + 1; /* Point to (info field of) first closure */
    Scavenge();

    appelInfo.oldlim = ToHp;
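    /* Closures promoted into the old generation during this collection
       that are still mutable have been accumulated on
       appelInfo.PromMutables; splicing that list onto prevmut (the last
       entry kept on sm->OldMutables above) makes them old-generation
       mutable roots for the next minor collection. */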
    /* record newly promoted mutuple roots */
    MUT_LINK(prevmut) = (W_) appelInfo.PromMutables;
    appelInfo.PromMutables = 0;

    /* set new generation base, if not fixed */
    if (! appelInfo.newfixed) {
        appelInfo.newbase = appelInfo.oldlim + 1 + (((appelInfo.newlim - appelInfo.oldlim) + 1) / 2);
    }

#ifdef PAR
    RebuildGAtables(rtsFalse);
#else
    reportDeadForeignObjs(sm->ForeignObjList,
                          sm->OldForeignObjList,
                          &(sm->OldForeignObjList));
    sm->ForeignObjList = NULL;   /* all (new) ForeignObjs have been promoted */
#endif /* PAR */

    resident = appelInfo.oldlim - sm->OldLim;
    /* DONT_DO_MAX_RESIDENCY -- it is just a minor collection */

    if (RTSflags.GcFlags.giveStats) {
        char minor_str[BIG_STRING_LEN];
#if !defined(PAR)
        sprintf(minor_str, "%4lu %4ld %3ld %3ld %4ld Minor",
                (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
                bstk_roots, sm->rootno,
                caf_roots, mutable); /* oldnew_roots, old_words */
#else
        /* ToDo: come up with some interesting statistics for the parallel world */
        sprintf(minor_str, "%4u %4ld %3ld %3ld %4ld Minor",
                0, 0L, sm->rootno, caf_roots, mutable);
#endif
        stat_endGC(alloc, alloc, resident, minor_str);
    } else {
        stat_endGC(alloc, alloc, resident, "");
    }

    /* Note: if do_full_collection we want to force a full collection. [ADR] */

    if (RTSflags.GcFlags.forceGC
     && allocd_since_last_major_GC >= RTSflags.GcFlags.forcingInterval) {
        do_full_collection = 1;
    }

    if ((appelInfo.oldlim < appelInfo.oldthresh) &&
        (reqsize + sm->hardHpOverflowSize <= appelInfo.newlim - appelInfo.newbase) &&
        (! do_full_collection) ) {

        sm->hp = hp_start = appelInfo.newbase - 1;
        sm->hplim = appelInfo.newlim;

        if (RTSflags.GcFlags.forceGC
         && (allocd_since_last_major_GC + (sm->hplim - hp_start) > RTSflags.GcFlags.forcingInterval)) {
            sm->hplim = sm->hp + (RTSflags.GcFlags.forcingInterval - allocd_since_last_major_GC);
        }

        sm->OldLim = appelInfo.oldlim;

        if (RTSflags.GcFlags.trace) {
            fprintf(stderr, "Minor: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
                    (W_) appelInfo.newbase, (W_) appelInfo.newlim,
                    (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
                    (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
                    (W_) sm->hp, (W_) sm->hplim,
                    (W_) (sm->hplim - sm->hp) * sizeof(W_));
        }

#ifdef DEBUG
        /* To help flush out bugs, we trash the part of the heap from
           which we're about to start allocating. */
        TrashMem(sm->hp+1, sm->hplim);
#endif /* DEBUG */

        RESTORE_REGS(&ScavRegDump);     /* Restore Registers */

        return GC_SUCCESS;              /* Heap OK -- Enough space to continue */
    }
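    /* From here on we are committed to a major collection: mark the live
       part of the old generation using a bit vector placed at the top of
       the heap (one mark bit per old-generation word), link every root
       into the marked graph, and then compact the old generation in place
       with Inplace_Compaction.  The bit-vector size just below is a
       round-up division: for example, with 32-bit BitWords an old
       generation of 100 words needs (100 + 31) / 32 = 4 BitWords. */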
    DEBUG_STRING("Major Collection Required");

    allocd_since_last_major_GC = 0;

    stat_startGC(0);

    alloc = (appelInfo.oldlim - appelInfo.oldbase) + 1;

    appelInfo.bit_words = (alloc + BITS_IN(BitWord) - 1) / BITS_IN(BitWord);
    appelInfo.bits      = (BitWord *)(appelInfo.newlim) - appelInfo.bit_words;
    /* For some reason, this doesn't seem to use the last allocatable
       word at appelInfo.newlim */

    if (appelInfo.bits <= appelInfo.oldlim) {
        fprintf(stderr, "APPEL Major: Not enough space for bit vector\n");
        return GC_HARD_LIMIT_EXCEEDED;
    }

    /* Zero bit vector for marking phase of major collection */
    {
        BitWord *ptr = appelInfo.bits,
                *end = appelInfo.bits + appelInfo.bit_words;
        while (ptr < end) { *(ptr++) = 0; };
    }

#ifdef HAVE_VADVISE
    vadvise(VA_ANOM);
#endif

    /* bracket use of MARK_REG_MAP with RESTORE/SAVE of SCAV_REG_MAP */
    RESTORE_REGS(&ScavRegDump);

    markHeapRoots(sm, appelInfo.OldCAFlist, NULL,
                  appelInfo.oldbase,
                  appelInfo.oldlim,
                  appelInfo.bits);

    SAVE_REGS(&ScavRegDump);
    /* end of bracket */

#ifndef PAR
    sweepUpDeadForeignObjs(sm->OldForeignObjList,
                           appelInfo.oldbase,
                           appelInfo.bits );
#endif /* !PAR */

    /* Reset OldMutables -- this will be reconstructed during scan */
    sm->OldMutables = 0;

    LinkCAFs(appelInfo.OldCAFlist);

    LinkRoots( sm->roots, sm->rootno );
#if defined(GRAN)
    LinkEvents();
#endif
#if defined(CONCURRENT)
    LinkSparks();
#endif
#ifdef PAR
    LinkLiveGAs(appelInfo.oldbase, appelInfo.bits);
#else
    /* stable pointers now included in sm->roots -- SOF
       DEBUG_STRING("Linking Stable Pointer Table:");
       LINK_LOCATION_TO_CLOSURE(&sm->StablePointerTable);
    */
    LinkAStack( MAIN_SpA, stackInfo.botA );
    LinkBStack( MAIN_SuB, stackInfo.botB );
#endif

    /* Do Inplace Compaction */
    /* Returns start of next closure, -1 gives last allocated word */

    appelInfo.oldlim = Inplace_Compaction(appelInfo.oldbase,
                                          appelInfo.oldlim,
                                          0, 0,
                                          appelInfo.bits,
                                          appelInfo.bit_words
#ifndef PAR
                                          ,&(sm->OldForeignObjList)
#endif
                                         ) - 1;

    appelInfo.oldlast = appelInfo.oldlim;
    resident = (appelInfo.oldlim - appelInfo.oldbase) + 1;
    DO_MAX_RESIDENCY(resident); /* stats only */

    /* set new generation base, if not fixed */
    if (! appelInfo.newfixed) {
        appelInfo.newbase = appelInfo.oldlim + 1 + (((appelInfo.newlim - appelInfo.oldlim) + 1) / 2);
    }

    /* set major threshold, if not fixed */
    /* next major collection when old gen occupies 2/3rds of the free
       space or exceeds oldmax */
    if (! RTSflags.GcFlags.specifiedOldGenSize) {
        appelInfo.oldthresh = appelInfo.oldlim + (appelInfo.newlim - appelInfo.oldlim) * 2 / 3;
        if (appelInfo.oldthresh > appelInfo.oldmax)
            appelInfo.oldthresh = appelInfo.oldmax;
    }
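    /* Resume allocation in a fresh new generation carved from the space
       above the compacted old generation.  The forced-GC clock was reset
       at the start of the major collection, so if a forcing interval is in
       effect (RTSflags.GcFlags.forceGC) hplim is capped again below. */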
    sm->hp = hp_start = appelInfo.newbase - 1;
    sm->hplim = appelInfo.newlim;

    if (RTSflags.GcFlags.forceGC
     && sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
        sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
    }

    sm->OldLim = appelInfo.oldlim;

#ifdef HAVE_VADVISE
    vadvise(VA_NORM);
#endif

    if (RTSflags.GcFlags.giveStats) {
        char major_str[BIG_STRING_LEN];
#if !defined(PAR)
        sprintf(major_str, "%4lu %4ld %3ld %3ld %4d %4d *Major* %4.1f%%",
                (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
                bstk_roots, sm->rootno, appelInfo.OldCAFno, 0, 0,
                resident / (StgDouble) RTSflags.GcFlags.heapSize * 100);
#else
        /* ToDo: come up with some interesting statistics for the parallel world */
        sprintf(major_str, "%4u %4ld %3ld %3ld %4d %4d *Major* %4.1f%%",
                0, 0L, sm->rootno, appelInfo.OldCAFno, 0, 0,
                resident / (StgDouble) RTSflags.GcFlags.heapSize * 100);
#endif
        stat_endGC(0, alloc, resident, major_str);
    } else {
        stat_endGC(0, alloc, resident, "");
    }

    if (RTSflags.GcFlags.trace) {
        fprintf(stderr, "Major: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
                (W_) appelInfo.newbase, (W_) appelInfo.newlim,
                (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
                (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
                (W_) sm->hp, (W_) sm->hplim,
                (W_) (sm->hplim - sm->hp) * sizeof(W_));
    }

#ifdef DEBUG
    /* To help flush out bugs, we trash the part of the heap from which
       we're about to start allocating. */
    TrashMem(sm->hp+1, sm->hplim);
#endif /* DEBUG */

    RESTORE_REGS(&ScavRegDump);     /* Restore Registers */

    if ((appelInfo.oldlim > appelInfo.oldmax)
     || (reqsize > sm->hplim - sm->hp) ) {
        return( GC_HARD_LIMIT_EXCEEDED );       /* Heap absolutely exhausted */
    } else if (reqsize + sm->hardHpOverflowSize > sm->hplim - sm->hp) {
        return( GC_SOFT_LIMIT_EXCEEDED );       /* Heap nearly exhausted */
    } else {
        return( GC_SUCCESS );                   /* Heap OK */
        /* linked = IS_MUTABLE(INFO_PTR(closure)) && MUT_LINK(closure) != MUT_NOT_LINKED; */
    }
}

#endif /* GCap */
\end{code}