1 ***************************************************************************
3 APPEL'S GARBAGE COLLECTION
5 Global heap requirements as for 1s and 2s collectors.
6 ++ All closures in the old generation that are updated must be
7 updated with indirections and placed on the linked list of
8 updated old generation closures.
10 ***************************************************************************
16 #include "SMinternal.h"
17 #include "SMcopying.h"
18 #include "SMcompacting.h"
/* Global state of the Appel (generational) storage manager.
   NOTE(review): the leading integer on every line below is a line number
   left over from the original listing; gaps in the numbering mean source
   lines are elided from this view (e.g. the appelData initializer is
   visibly truncated — its closing brace is missing here). */
23 appelData appelInfo = {0, 0, 0, 0, 0,
24 0, 0, 0, 0, 0, 0, 0, 0, 0,
28 P_ heap_space = 0; /* Address of first word of slab
29 of memory allocated for heap */
31 P_ hp_start; /* Value of Hp when reduction was resumed */
33 static I_ allocd_since_last_major_GC = 0;
34 /* words alloced since last major GC; used when forcing GC */
/* debug_look_for: heap-debugging aid.
   Scans the words from `start` to `stop` (inclusive) and prints, on
   stderr, the address of every word whose contents equal `villain`.
   K&R-style definition; return type, the declaration of `i`, and the
   closing braces are elided from this view.
   NOTE(review): "%x" is used to print pointers — on an LP64 platform
   that truncates; "%p" (or a W_ cast with "%lx", as the rest of this
   file does) would be correct.  Confirm before relying on the output. */
38 debug_look_for (start, stop, villain)
39 P_ start, stop, villain;
42 for (i = start; i <= stop; i++) {
43 if ( (P_) *i == villain ) {
44 fprintf(stderr, "* %x : %x\n", i, villain);
/* Interior of initHeap (signature elided from this view; the name is
   taken from the "initHeap (roots)" allocation tag below).
   Allocates the root table and the heap slab on first call, then lays
   the heap out in one of two modes:
     - force2s: two equal semi-spaces (plain two-space copying GC), or
     - normal Appel mode: an old generation growing up from heap_space
       and a new-generation allocation area at the top of the slab.
   Returns rtsTrue on success.  Many lines (else-arms, error returns,
   closing braces) are elided — comments below are hedged accordingly. */
53 if (heap_space == 0) { /* allocates if it doesn't already exist */
55 /* Allocate the roots space */
56 sm->roots = (P_ *) stgMallocWords(SM_MAXROOTS, "initHeap (roots)");
58 /* Allocate the heap */
59 heap_space = (P_) stgMallocWords(RTSflags.GcFlags.heapSize + EXTRA_HEAP_WORDS,
62 /* ToDo (ADR): trash entire heap contents */
64 if (RTSflags.GcFlags.force2s) {
65 stat_init("TWOSPACE(APPEL)",
66 " No of Roots Caf Caf Astk Bstk",
67 "Astk Bstk Reg No bytes bytes bytes");
70 " No of Roots Caf Mut- Old Collec Resid",
71 "Astk Bstk Reg No able Gen tion %heap");
74 sm->hardHpOverflowSize = 0;
/* --- Forced two-space layout: split the slab into two semi-spaces,
       allocate from space 0; oldlim below heap_space means "no old
       generation ever". --- */
76 if (RTSflags.GcFlags.force2s) {
77 I_ semi_space_words = RTSflags.GcFlags.heapSize / 2;
78 appelInfo.space[0].base = HEAP_FRAME_BASE(heap_space, semi_space_words);
79 appelInfo.space[1].base = HEAP_FRAME_BASE(heap_space + semi_space_words, semi_space_words);
80 appelInfo.space[0].lim = HEAP_FRAME_LIMIT(heap_space, semi_space_words);
81 appelInfo.space[1].lim = HEAP_FRAME_LIMIT(heap_space + semi_space_words, semi_space_words);
82 appelInfo.semi_space = 0;
83 appelInfo.oldlim = heap_space - 1; /* Never in old generation */
/* hp points at the word BEFORE the first free word (allocation
   pre-increments), hence base - 1. */
85 sm->hp = hp_start = appelInfo.space[appelInfo.semi_space].base - 1;
87 if (! RTSflags.GcFlags.allocAreaSizeGiven) {
88 sm->hplim = appelInfo.space[appelInfo.semi_space].lim;
90 sm->hplim = sm->hp + RTSflags.GcFlags.allocAreaSize;
92 RTSflags.GcFlags.minAllocAreaSize = 0; /* specified size takes precedence */
94 if (sm->hplim > appelInfo.space[appelInfo.semi_space].lim) {
95 fprintf(stderr, "Not enough heap for requested alloc size\n");
/* Forced-GC interval: clip hplim so a heap check fires after
   forcingInterval words, or disable forcing if the space is smaller. */
100 if (RTSflags.GcFlags.forceGC) {
101 if (sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
102 sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
104 /* no point in forcing GC,
105 as the semi-space is smaller than forcingInterval */
106 RTSflags.GcFlags.forceGC = rtsFalse;
110 sm->OldLim = appelInfo.oldlim;
114 initExtensions( sm );
117 if (RTSflags.GcFlags.trace) {
118 fprintf(stderr, "APPEL(2s) Heap: 0x%lx .. 0x%lx\n",
119 (W_) heap_space, (W_) (heap_space - 1 + RTSflags.GcFlags.heapSize));
120 fprintf(stderr, "Initial: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
121 appelInfo.semi_space,
122 (W_) appelInfo.space[appelInfo.semi_space].base,
123 (W_) appelInfo.space[appelInfo.semi_space].lim,
124 (W_) sm->hp, (W_) sm->hplim, (W_) (sm->hplim - sm->hp) * sizeof(W_));
/* --- Normal Appel layout: old gen grows from heap_space upward,
       new gen (allocation area) sits at the top of the slab. --- */
130 /* So not forced 2s */
132 appelInfo.newlim = heap_space + RTSflags.GcFlags.heapSize - 1;
133 if (RTSflags.GcFlags.allocAreaSizeGiven) {
134 appelInfo.newfixed = RTSflags.GcFlags.allocAreaSize;
135 appelInfo.newmin = RTSflags.GcFlags.allocAreaSize;
136 appelInfo.newbase = heap_space + RTSflags.GcFlags.heapSize - appelInfo.newfixed;
138 appelInfo.newfixed = 0;
139 appelInfo.newmin = RTSflags.GcFlags.minAllocAreaSize;
140 appelInfo.newbase = heap_space + (RTSflags.GcFlags.heapSize / 2);
/* Old generation initially empty: oldlim/oldlast one word below base.
   oldmax leaves room for two minimum-sized allocation areas. */
143 appelInfo.oldbase = heap_space;
144 appelInfo.oldlim = heap_space - 1;
145 appelInfo.oldlast = heap_space - 1;
146 appelInfo.oldmax = heap_space - 1 + RTSflags.GcFlags.heapSize - 2*appelInfo.newmin;
148 if (appelInfo.oldbase > appelInfo.oldmax) {
149 fprintf(stderr, "Not enough heap for requested/minimum allocation area\n");
150 fprintf(stderr, "heap_space=%ld\n", (W_) heap_space);
151 fprintf(stderr, "heapSize=%ld\n", RTSflags.GcFlags.heapSize);
152 fprintf(stderr, "newmin=%ld\n", appelInfo.newmin);
/* Mark-bit vector for major (compacting) collections lives at the top
   of the heap, just below newlim; shrink oldmax if it is large. */
156 appelInfo.bit_words = (RTSflags.GcFlags.heapSize + BITS_IN(BitWord) - 1) / BITS_IN(BitWord);
157 appelInfo.bits = (BitWord *)(appelInfo.newlim) - appelInfo.bit_words;
159 if (appelInfo.bit_words > appelInfo.newmin)
160 appelInfo.oldmax = heap_space - 1 + RTSflags.GcFlags.heapSize - appelInfo.bit_words - appelInfo.newmin;
162 if (RTSflags.GcFlags.specifiedOldGenSize) {
163 appelInfo.oldthresh = heap_space -1 + RTSflags.GcFlags.specifiedOldGenSize;
164 if (appelInfo.oldthresh > appelInfo.oldmax) {
165 fprintf(stderr, "Not enough heap for requested major resid size\n");
169 appelInfo.oldthresh = heap_space + RTSflags.GcFlags.heapSize * 2 / 3; /* Initial threshold -- 2/3rds */
170 if (appelInfo.oldthresh > appelInfo.oldmax)
171 appelInfo.oldthresh = appelInfo.oldmax;
174 sm->hp = hp_start = appelInfo.newbase - 1;
175 sm->hplim = appelInfo.newlim;
177 if (RTSflags.GcFlags.forceGC
178 && sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
179 sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
182 sm->OldLim = appelInfo.oldlim;
185 appelInfo.OldCAFlist = NULL;
186 appelInfo.OldCAFno = 0;
189 initExtensions( sm );
192 appelInfo.PromMutables = 0;
194 if (RTSflags.GcFlags.trace) {
195 fprintf(stderr, "APPEL Heap: 0x%lx .. 0x%lx\n",
196 (W_) heap_space, (W_) (heap_space - 1 + RTSflags.GcFlags.heapSize));
197 fprintf(stderr, "Initial: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx\n",
198 (W_) appelInfo.newbase, (W_) appelInfo.newlim,
199 (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
200 (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
201 (W_) sm->hp, (W_) sm->hplim);
204 return rtsTrue; /* OK */
/* collect2s: plain two-space copying collection, used when the user
   forces 2s mode (-F2s).  Flips semi-spaces, evacuates+scavenges all
   roots (CAFs, GAs, sm->roots, the A and B stacks), then decides
   whether the space freed satisfies `reqsize`.
   Returns GC_SUCCESS, GC_SOFT_LIMIT_EXCEEDED (soft heap-overflow
   headroom exceeded) or GC_HARD_LIMIT_EXCEEDED (truly out of space).
   Return type and several lines (else-arms, #endif's, closing braces)
   are elided from this view. */
208 collect2s(W_ reqsize, smInfo *sm)
210 I_ free_space, /* No of words of free space following GC */
211 alloc, /* Number of words allocated since last GC */
212 resident, /* Number of words remaining after GC */
213 extra_caf_words,/* Extra words referenced from CAFs */
214 caf_roots, /* Number of CAFs */
215 bstk_roots; /* Number of update frames in B stack */
217 SAVE_REGS(&ScavRegDump); /* Save registers */
219 #if defined(PROFILING)
220 if (interval_expired) { heap_profile_setup(); }
221 #endif /* PROFILING */
223 if (RTSflags.GcFlags.trace)
224 fprintf(stderr, "Start: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, req %lu\n",
225 appelInfo.semi_space,
226 (W_) appelInfo.space[appelInfo.semi_space].base,
227 (W_) appelInfo.space[appelInfo.semi_space].lim,
228 (W_) sm->hp, (W_) sm->hplim, (W_) (reqsize * sizeof(W_)));
/* Words allocated since the last GC = distance hp has moved. */
230 alloc = sm->hp - hp_start;
/* Flip: copy everything live into the other semi-space.
   OldGen is set to OldLim so that EVERY closure is evacuated. */
233 appelInfo.semi_space = NEXT_SEMI_SPACE(appelInfo.semi_space);
234 ToHp = appelInfo.space[appelInfo.semi_space].base - 1;
235 Scav = appelInfo.space[appelInfo.semi_space].base;
236 OldGen = sm->OldLim; /* always evac ! */
238 SetCAFInfoTables( sm->CAFlist );
240 EvacuateLocalGAs(rtsTrue);
242 /* evacSPTable( sm ); StablePointerTable now accessable in sm->roots SOF 4/96 */
244 EvacuateRoots( sm->roots, sm->rootno );
248 #if defined(CONCURRENT)
252 EvacuateAStack( MAIN_SpA, stackInfo.botA );
253 EvacuateBStack( MAIN_SuB, stackInfo.botB, &bstk_roots );
258 EvacAndScavengeCAFs( sm->CAFlist, &extra_caf_words, &caf_roots );
261 RebuildGAtables(rtsTrue);
263 reportDeadForeignObjs( sm->ForeignObjList, NULL, &(sm->ForeignObjList));
266 /* TIDY UP AND RETURN */
268 sm->hp = hp_start = ToHp; /* Last allocated word */
270 resident = sm->hp - (appelInfo.space[appelInfo.semi_space].base - 1);
271 DO_MAX_RESIDENCY(resident); /* stats only */
273 if (! RTSflags.GcFlags.allocAreaSizeGiven) {
274 sm->hplim = appelInfo.space[appelInfo.semi_space].lim;
275 free_space = sm->hplim - sm->hp;
277 sm->hplim = sm->hp + RTSflags.GcFlags.allocAreaSize;
278 if (sm->hplim > appelInfo.space[appelInfo.semi_space].lim) {
281 free_space = RTSflags.GcFlags.allocAreaSize;
/* Re-clip hplim for forced-GC intervals, as in initHeap. */
285 if (RTSflags.GcFlags.forceGC
286 && sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
287 sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
290 if (RTSflags.GcFlags.giveStats) {
291 char comment_str[BIG_STRING_LEN];
293 sprintf(comment_str, "%4lu %4ld %3ld %3ld %6lu %6lu %6lu 2s",
294 (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
295 bstk_roots, sm->rootno,
296 caf_roots, extra_caf_words*sizeof(W_),
297 (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1)*sizeof(W_),
298 (W_) (SUBTRACT_B_STK(MAIN_SpB, stackInfo.botB) + 1)*sizeof(W_));
300 /* ToDo: come up with some interesting statistics for the parallel world */
301 sprintf(comment_str, "%4u %4ld %3ld %3ld %6lu %6lu %6lu 2s",
302 0, 0L, sm->rootno, caf_roots, extra_caf_words*sizeof(W_), 0L, 0L);
306 #if defined(PROFILING)
307 if (interval_expired) { strcat(comment_str, " prof"); }
310 stat_endGC(alloc, RTSflags.GcFlags.heapSize, resident, comment_str);
312 stat_endGC(alloc, RTSflags.GcFlags.heapSize, resident, "");
315 #if defined(PROFILING) || defined(PAR)
316 if (interval_expired) {
317 # if defined(PROFILING)
320 report_cc_profiling(0 /*partial*/);
322 #endif /* PROFILING */
324 if (RTSflags.GcFlags.trace)
325 fprintf(stderr, "Done: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
326 appelInfo.semi_space,
327 (W_) appelInfo.space[appelInfo.semi_space].base,
328 (W_) appelInfo.space[appelInfo.semi_space].lim,
329 (W_) sm->hp, (W_) sm->hplim, (W_) (free_space * sizeof(W_)));
332 /* To help flush out bugs, we trash the part of the heap from
333 which we're about to start allocating, and all of the space
334 we just came from. */
336 I_ old_space = NEXT_SEMI_SPACE(appelInfo.semi_space);
338 TrashMem(appelInfo.space[old_space].base, appelInfo.space[old_space].lim);
339 TrashMem(sm->hp+1, sm->hplim);
343 RESTORE_REGS(&ScavRegDump); /* Restore Registers */
/* Outcome: hard failure if even the raw request doesn't fit;
   soft failure if it only fits by eating the overflow headroom. */
345 if (free_space < RTSflags.GcFlags.minAllocAreaSize || free_space < reqsize)
346 return( GC_HARD_LIMIT_EXCEEDED ); /* Heap absolutely exhausted */
348 if (reqsize + sm->hardHpOverflowSize > free_space) {
349 return( GC_SOFT_LIMIT_EXCEEDED ); /* Heap nearly exhausted */
351 return( GC_SUCCESS ); /* Heap OK */
/* collectHeap: main entry point for garbage collection in Appel mode.
   Always performs a minor (copying) collection that promotes the live
   new generation onto the end of the old generation; then, if the old
   generation has passed oldthresh, the new area is too small for
   `reqsize`, `do_full_collection` is set, or a forced-GC interval has
   expired, follows it with a major mark/link/compact collection of the
   old generation.  Delegates entirely to collect2s under -F2s.
   Returns GC_SUCCESS / GC_SOFT_LIMIT_EXCEEDED / GC_HARD_LIMIT_EXCEEDED.
   K&R definition; return type, some parameter declarations and many
   body lines are elided from this view — comments hedged accordingly. */
358 collectHeap(reqsize, sm, do_full_collection)
361 rtsBool do_full_collection; /* do a major collection regardless? */
363 I_ bstk_roots, caf_roots, mutable, old_words;
364 P_ old_start, mutptr, prevmut;
367 I_ alloc, /* Number of words allocated since last GC */
368 resident; /* Number of words remaining after GC */
370 fflush(stdout); /* Flush stdout at start of GC */
372 if (RTSflags.GcFlags.force2s) {
373 return collect2s(reqsize, sm);
376 SAVE_REGS(&ScavRegDump); /* Save registers */
378 if (RTSflags.GcFlags.trace)
379 fprintf(stderr, "Start: newbase 0x%lx, newlim 0x%lx\n hp 0x%lx, hplim 0x%lx, req %lu\n",
380 (W_) appelInfo.newbase, (W_) appelInfo.newlim, (W_) sm->hp, (W_) sm->hplim, reqsize * sizeof(W_));
382 alloc = sm->hp - hp_start;
385 allocd_since_last_major_GC += sm->hplim - hp_start;
386 /* this is indeed supposed to be less precise than alloc above */
388 /* COPYING COLLECTION */
390 /* Set ToHp to end of old gen */
391 ToHp = appelInfo.oldlim;
393 /* Set OldGen register so we only evacuate new gen closures */
394 OldGen = appelInfo.oldlim;
396 /* FIRST: Evacuate and Scavenge CAFs and roots in the old generation */
399 SetCAFInfoTables( sm->CAFlist );
401 DEBUG_STRING("Evacuate CAFs:");
/* Walk the CAF list, evacuating each and splicing the whole list onto
   OldCAFlist.  prevCAF is biased by -FIXED_HS so IND_CLOSURE_LINK on
   it addresses the list head field — deliberate (if illegal) trick. */
403 CAFptr = sm->CAFlist;
404 prevCAF = ((P_)(&sm->CAFlist)) - FIXED_HS; /* see IND_CLOSURE_LINK */
406 EVACUATE_CLOSURE(CAFptr); /* evac & upd OR return */
409 CAFptr = (P_) IND_CLOSURE_LINK(CAFptr);
411 IND_CLOSURE_LINK(prevCAF) = (W_) appelInfo.OldCAFlist;
412 appelInfo.OldCAFlist = sm->CAFlist;
413 appelInfo.OldCAFno += caf_roots;
416 DEBUG_STRING("Evacuate Mutable Roots:");
418 mutptr = sm->OldMutables;
419 /* Clever, but completely illegal: */
420 prevmut = ((P_)&sm->OldMutables) - FIXED_HS;
424 /* Scavenge the OldMutable */
425 P_ info = (P_) INFO_PTR(mutptr);
426 StgScavPtr scav_code = SCAV_CODE(info);
430 /* Remove from OldMutables if no longer mutable */
431 if (!IS_MUTABLE(info)) {
433 MUT_LINK(prevmut) = MUT_LINK(mutptr);
434 mutptr = (P_) MUT_LINK(mutptr);
435 MUT_LINK(tmp) = MUT_NOT_LINKED;
438 mutptr = (P_) MUT_LINK(mutptr);
444 #if 0 && defined(GRAN)
447 closq prev_ptr, clos_ptr;
449 DEBUG_STRING("Evacuate reverted RBHs:");
453 /* Scavenge the OldMutable */
454 P_ info = (P_) INFO_PTR(CLOS_CLOSURE(clos_ptr));
455 StgScavPtr scav_code = SCAV_CODE(info);
456 Scav = CLOS_CLOSURE(clos_ptr);
459 /* No mutable closure are put on the ex_RBH_q */
460 /* ASSERT(IS_MUTABLE(info)); */
462 clos_ptr = CLOS_NEXT(clos_ptr);
470 EvacuateLocalGAs(rtsFalse);
472 /* evacSPTable( sm ); SP table is now in sm->roots*/
475 DEBUG_STRING("Scavenge evacuated old generation roots:");
477 Scav = appelInfo.oldlim + 1; /* Point to (info field of) first closure */
481 old_words = ToHp - old_start;
483 /* PROMOTE closures rooted in the old generation and reset list of old gen roots */
485 appelInfo.oldlim = ToHp;
487 /* SECOND: Evacuate and scavenge remaining roots
488 These may already have been evacuated -- just get new address
491 EvacuateRoots( sm->roots, sm->rootno );
496 #if defined(CONCURRENT)
500 EvacuateAStack( MAIN_SpA, stackInfo.botA );
501 EvacuateBStack( MAIN_SuB, stackInfo.botB, &bstk_roots );
502 /* ToDo: Optimisation which squeezes out garbage update frames */
505 Scav = appelInfo.oldlim + 1; /* Point to (info field of) first closure */
509 appelInfo.oldlim = ToHp;
511 /* record newly promoted mutuple roots */
512 MUT_LINK(prevmut) = (W_) appelInfo.PromMutables;
513 appelInfo.PromMutables = 0;
515 /* set new generation base, if not fixed */
516 if (! appelInfo.newfixed) {
517 appelInfo.newbase = appelInfo.oldlim + 1 + (((appelInfo.newlim - appelInfo.oldlim) + 1) / 2);
521 RebuildGAtables(rtsFalse);
523 reportDeadForeignObjs(sm->ForeignObjList,
524 sm->OldForeignObjList,
525 &(sm->OldForeignObjList));
526 sm->ForeignObjList = NULL; /* all (new) ForeignObjs have been promoted */
529 resident = appelInfo.oldlim - sm->OldLim;
530 /* DONT_DO_MAX_RESIDENCY -- it is just a minor collection */
532 if (RTSflags.GcFlags.giveStats) {
533 char minor_str[BIG_STRING_LEN];
535 sprintf(minor_str, "%4lu %4ld %3ld %3ld %4ld Minor",
536 (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
537 bstk_roots, sm->rootno, caf_roots, mutable); /* oldnew_roots, old_words */
539 /* ToDo: come up with some interesting statistics for the parallel world */
540 sprintf(minor_str, "%4u %4ld %3ld %3ld %4ld Minor",
541 0, 0L, sm->rootno, caf_roots, mutable);
543 stat_endGC(alloc, alloc, resident, minor_str);
545 stat_endGC(alloc, alloc, resident, "");
548 /* Note: if do_full_collection we want to force a full collection. [ADR] */
550 if (RTSflags.GcFlags.forceGC
551 && allocd_since_last_major_GC >= RTSflags.GcFlags.forcingInterval) {
552 do_full_collection = 1;
/* If the old gen is still below threshold, the (re-split) new area can
   satisfy the request, and nobody demanded a major GC: stop here. */
555 if ((appelInfo.oldlim < appelInfo.oldthresh) &&
556 (reqsize + sm->hardHpOverflowSize <= appelInfo.newlim - appelInfo.newbase) &&
557 (! do_full_collection) ) {
559 sm->hp = hp_start = appelInfo.newbase - 1;
560 sm->hplim = appelInfo.newlim;
562 if (RTSflags.GcFlags.forceGC
563 && (allocd_since_last_major_GC + (sm->hplim - hp_start) > RTSflags.GcFlags.forcingInterval)) {
564 sm->hplim = sm->hp + (RTSflags.GcFlags.forcingInterval - allocd_since_last_major_GC);
567 sm->OldLim = appelInfo.oldlim;
569 if (RTSflags.GcFlags.trace) {
570 fprintf(stderr, "Minor: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
571 (W_) appelInfo.newbase, (W_) appelInfo.newlim,
572 (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
573 (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
574 (W_) sm->hp, (W_) sm->hplim, (W_) (sm->hplim - sm->hp) * sizeof(W_));
578 /* To help flush out bugs, we trash the part of the heap from
579 which we're about to start allocating. */
580 TrashMem(sm->hp+1, sm->hplim);
583 RESTORE_REGS(&ScavRegDump); /* Restore Registers */
585 return GC_SUCCESS; /* Heap OK -- Enough space to continue */
/* --- MAJOR COLLECTION: mark the old generation via a bit vector,
       link all roots, then compact in place. --- */
588 DEBUG_STRING("Major Collection Required");
590 allocd_since_last_major_GC = 0;
594 alloc = (appelInfo.oldlim - appelInfo.oldbase) + 1;
596 appelInfo.bit_words = (alloc + BITS_IN(BitWord) - 1) / BITS_IN(BitWord);
597 appelInfo.bits = (BitWord *)(appelInfo.newlim) - appelInfo.bit_words;
598 /* For some reason, this doesn't seem to use the last
599 allocatable word at appelInfo.newlim */
601 if (appelInfo.bits <= appelInfo.oldlim) {
602 fprintf(stderr, "APPEL Major: Not enough space for bit vector\n");
603 return GC_HARD_LIMIT_EXCEEDED;
606 /* Zero bit vector for marking phase of major collection */
607 { BitWord *ptr = appelInfo.bits,
608 *end = appelInfo.bits + appelInfo.bit_words;
609 while (ptr < end) { *(ptr++) = 0; };
616 /* bracket use of MARK_REG_MAP with RESTORE/SAVE of SCAV_REG_MAP */
617 RESTORE_REGS(&ScavRegDump);
620 appelInfo.OldCAFlist,
626 SAVE_REGS(&ScavRegDump);
630 sweepUpDeadForeignObjs(sm->OldForeignObjList,
636 /* Reset OldMutables -- this will be reconstructed during scan */
639 LinkCAFs(appelInfo.OldCAFlist);
641 LinkRoots( sm->roots, sm->rootno );
645 #if defined(CONCURRENT)
649 LinkLiveGAs(appelInfo.oldbase, appelInfo.bits);
651 /* stable pointers now included in sm->roots -- SOF
652 DEBUG_STRING("Linking Stable Pointer Table:");
653 LINK_LOCATION_TO_CLOSURE(&sm->StablePointerTable);
655 LinkAStack( MAIN_SpA, stackInfo.botA );
656 LinkBStack( MAIN_SuB, stackInfo.botB );
659 /* Do Inplace Compaction */
660 /* Returns start of next closure, -1 gives last allocated word */
662 appelInfo.oldlim = Inplace_Compaction(appelInfo.oldbase,
668 ,&(sm->OldForeignObjList)
672 appelInfo.oldlast = appelInfo.oldlim;
673 resident = (appelInfo.oldlim - appelInfo.oldbase) + 1;
674 DO_MAX_RESIDENCY(resident); /* stats only */
676 /* set new generation base, if not fixed */
677 if (! appelInfo.newfixed) {
678 appelInfo.newbase = appelInfo.oldlim + 1 + (((appelInfo.newlim - appelInfo.oldlim) + 1) / 2);
681 /* set major threshold, if not fixed */
682 /* next major collection when old gen occupies 2/3rds of the free space or exceeds oldmax */
683 if (! RTSflags.GcFlags.specifiedOldGenSize) {
684 appelInfo.oldthresh = appelInfo.oldlim + (appelInfo.newlim - appelInfo.oldlim) * 2 / 3;
685 if (appelInfo.oldthresh > appelInfo.oldmax)
686 appelInfo.oldthresh = appelInfo.oldmax;
689 sm->hp = hp_start = appelInfo.newbase - 1;
690 sm->hplim = appelInfo.newlim;
692 if (RTSflags.GcFlags.forceGC
693 && sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
694 sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
697 sm->OldLim = appelInfo.oldlim;
703 if (RTSflags.GcFlags.giveStats) {
704 char major_str[BIG_STRING_LEN];
706 sprintf(major_str, "%4lu %4ld %3ld %3ld %4d %4d *Major* %4.1f%%",
707 (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
708 bstk_roots, sm->rootno, appelInfo.OldCAFno,
709 0, 0, resident / (StgDouble) RTSflags.GcFlags.heapSize * 100);
711 /* ToDo: come up with some interesting statistics for the parallel world */
712 sprintf(major_str, "%4u %4ld %3ld %3ld %4d %4d *Major* %4.1f%%",
713 0, 0L, sm->rootno, appelInfo.OldCAFno, 0, 0,
714 resident / (StgDouble) RTSflags.GcFlags.heapSize * 100);
717 stat_endGC(0, alloc, resident, major_str);
719 stat_endGC(0, alloc, resident, "");
722 if (RTSflags.GcFlags.trace) {
723 fprintf(stderr, "Major: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
724 (W_) appelInfo.newbase, (W_) appelInfo.newlim,
725 (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
726 (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
727 (W_) sm->hp, (W_) sm->hplim, (W_) (sm->hplim - sm->hp) * sizeof(W_));
731 /* To help flush out bugs, we trash the part of the heap from
732 which we're about to start allocating. */
733 TrashMem(sm->hp+1, sm->hplim);
736 RESTORE_REGS(&ScavRegDump); /* Restore Registers */
/* Outcome as in collect2s: hard if old gen overflowed oldmax or the
   raw request doesn't fit; soft if only the headroom is gone. */
738 if ((appelInfo.oldlim > appelInfo.oldmax)
739 || (reqsize > sm->hplim - sm->hp) ) {
740 return( GC_HARD_LIMIT_EXCEEDED ); /* Heap absolutely exhausted */
741 } else if (reqsize + sm->hardHpOverflowSize > sm->hplim - sm->hp) {
742 return( GC_SOFT_LIMIT_EXCEEDED ); /* Heap nearly exhausted */
744 return( GC_SUCCESS ); /* Heap OK */
745 /* linked = IS_MUTABLE(INFO_PTR(closure)) && MUT_LINK(closure) !=