/****************************************************************************

   APPEL'S GARBAGE COLLECTION

   Global heap requirements are as for the 1s and 2s collectors, plus:

   ++ Every closure in the old generation that is updated must be
      updated with an indirection and placed on the linked list of
      updated old-generation closures.

****************************************************************************/
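
/* A hedged sketch of the update discipline described above -- not the code
 * the compiler actually emits; UPD_IND here is illustrative:
 *
 *    if (updatee <= appelInfo.oldlim) {          // updatee is in the old gen
 *        UPD_IND(updatee, result);               // overwrite with an indirection
 *        MUT_LINK(updatee) = sm->OldMutables;    // chain onto the list of
 *        sm->OldMutables = updatee;              // updated old-gen closures
 *    }
 *
 * The minor collector scavenges that list as part of its root set, so
 * old-to-new pointers created by updates are never missed.
 */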
#include "SMinternal.h"
#include "SMcopying.h"
#include "SMcompacting.h"

appelData appelInfo = {0, 0, 0, 0, 0,
                       0, 0, 0, 0, 0, 0, 0, 0, 0};

P_ heap_space = 0;      /* Address of first word of slab
                           of memory allocated for heap */

P_ hp_start;            /* Value of Hp when reduction was resumed */

static I_ allocd_since_last_major_GC = 0;
    /* words allocated since the last major GC; used when forcing GC */
void
debug_look_for (start, stop, villain)
  P_ start, stop, villain;
{
    P_ i;
    for (i = start; i <= stop; i++) {
        if ( (P_) *i == villain )
            fprintf(stderr, "* %lx : %lx\n", (W_) i, (W_) villain);
    }
}
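
/* Typical debugging use: scan the heap for words that point at a suspect
 * closure (`suspect' being whatever address is under investigation):
 *
 *    debug_look_for(heap_space, sm->hp, suspect);
 */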
rtsBool
initHeap( sm )
    smInfo *sm;
{
    if (heap_space == 0) { /* allocate only on the first call */
        /* Allocate the roots space */
        sm->roots = (P_ *) stgMallocWords(SM_MAXROOTS, "initHeap (roots)");

        /* Allocate the heap */
        heap_space = (P_) stgMallocWords(RTSflags.GcFlags.heapSize + EXTRA_HEAP_WORDS,
                                         "initHeap (heap)");

        /* ToDo (ADR): trash entire heap contents */

        if (RTSflags.GcFlags.force2s) {
            stat_init("TWOSPACE(APPEL)",
                      " No of Roots Caf Caf Astk Bstk",
                      "Astk Bstk Reg No bytes bytes bytes");
        } else {
            stat_init("APPEL",
                      " No of Roots Caf Mut- Old Collec Resid",
                      "Astk Bstk Reg No able Gen tion %heap");
        }
    }

    sm->hardHpOverflowSize = 0;
    if (RTSflags.GcFlags.force2s) {
        I_ semi_space_words = RTSflags.GcFlags.heapSize / 2;
        appelInfo.space[0].base = HEAP_FRAME_BASE(heap_space, semi_space_words);
        appelInfo.space[1].base = HEAP_FRAME_BASE(heap_space + semi_space_words, semi_space_words);
        appelInfo.space[0].lim = HEAP_FRAME_LIMIT(heap_space, semi_space_words);
        appelInfo.space[1].lim = HEAP_FRAME_LIMIT(heap_space + semi_space_words, semi_space_words);
        appelInfo.semi_space = 0;
        appelInfo.oldlim = heap_space - 1;  /* never in the old generation */

        sm->hp = hp_start = appelInfo.space[appelInfo.semi_space].base - 1;

        if (! RTSflags.GcFlags.allocAreaSizeGiven) {
            sm->hplim = appelInfo.space[appelInfo.semi_space].lim;
        } else {
            sm->hplim = sm->hp + RTSflags.GcFlags.allocAreaSize;
            RTSflags.GcFlags.minAllocAreaSize = 0; /* the specified size takes precedence */

            if (sm->hplim > appelInfo.space[appelInfo.semi_space].lim) {
                fprintf(stderr, "Not enough heap for requested alloc size\n");
            }
        }

        if (RTSflags.GcFlags.forceGC) {
            if (sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
                sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
            } else {
                /* no point in forcing GC, as the semi-space
                   is smaller than forcingInterval */
                RTSflags.GcFlags.forceGC = rtsFalse;
            }
        }
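
        /* Worked example of the clamp above: with a 1M-word semi-space and
           forcingInterval = 100k words, hplim is pulled down from base+1M to
           hp+100k, so a collection fires after every 100k words of allocation
           even though most of the semi-space is still free. With
           forcingInterval = 2M words the clamp can never bite, so forceGC is
           simply switched off. */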
        sm->OldLim = appelInfo.oldlim;

        initExtensions( sm );

        if (RTSflags.GcFlags.trace) {
            fprintf(stderr, "APPEL(2s) Heap: 0x%lx .. 0x%lx\n",
                    (W_) heap_space, (W_) (heap_space - 1 + RTSflags.GcFlags.heapSize));
            fprintf(stderr, "Initial: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
                    appelInfo.semi_space,
                    (W_) appelInfo.space[appelInfo.semi_space].base,
                    (W_) appelInfo.space[appelInfo.semi_space].lim,
                    (W_) sm->hp, (W_) sm->hplim, (W_) (sm->hplim - sm->hp) * sizeof(W_));
        }
    /* Otherwise: not forced 2s -- set up the Appel generational heap */

    appelInfo.newlim = heap_space + RTSflags.GcFlags.heapSize - 1;
    if (RTSflags.GcFlags.allocAreaSizeGiven) {
        appelInfo.newfixed = RTSflags.GcFlags.allocAreaSize;
        appelInfo.newmin   = RTSflags.GcFlags.allocAreaSize;
        appelInfo.newbase  = heap_space + RTSflags.GcFlags.heapSize - appelInfo.newfixed;
    } else {
        appelInfo.newfixed = 0;
        appelInfo.newmin   = RTSflags.GcFlags.minAllocAreaSize;
        appelInfo.newbase  = heap_space + (RTSflags.GcFlags.heapSize / 2);
    }

    appelInfo.oldbase = heap_space;
    appelInfo.oldlim  = heap_space - 1;
    appelInfo.oldlast = heap_space - 1;
    appelInfo.oldmax  = heap_space - 1 + RTSflags.GcFlags.heapSize - 2*appelInfo.newmin;

    if (appelInfo.oldbase > appelInfo.oldmax) {
        fprintf(stderr, "Not enough heap for requested/minimum allocation area\n");
        fprintf(stderr, "heap_space=0x%lx\n", (W_) heap_space);
        fprintf(stderr, "heapSize=%ld\n", RTSflags.GcFlags.heapSize);
        fprintf(stderr, "newmin=%ld\n", appelInfo.newmin);
        return rtsFalse;
    }
    appelInfo.bit_words = (RTSflags.GcFlags.heapSize + BITS_IN(BitWord) - 1) / BITS_IN(BitWord);
    appelInfo.bits      = (BitWord *)(appelInfo.newlim) - appelInfo.bit_words;

    if (appelInfo.bit_words > appelInfo.newmin)
        appelInfo.oldmax = heap_space - 1 + RTSflags.GcFlags.heapSize - appelInfo.bit_words - appelInfo.newmin;
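
    /* The sizing above is a rounded-up division: with 32-bit BitWords and
     * heapSize = 1,000,000 words, bit_words = (1000000 + 31) / 32 = 31250.
     * A sketch of the matching mark-bit operations, where n is a word
     * offset from appelInfo.oldbase (macro names here are illustrative,
     * not the ones the marking code necessarily uses):
     *
     *    #define BIT_IX(n)   ((n) / BITS_IN(BitWord))
     *    #define BIT_MASK(n) ((BitWord)1 << ((n) % BITS_IN(BitWord)))
     *    #define SET_BIT(n)  (appelInfo.bits[BIT_IX(n)] |= BIT_MASK(n))
     *    #define TEST_BIT(n) (appelInfo.bits[BIT_IX(n)] &  BIT_MASK(n))
     */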
    if (RTSflags.GcFlags.specifiedOldGenSize) {
        appelInfo.oldthresh = heap_space - 1 + RTSflags.GcFlags.specifiedOldGenSize;
        if (appelInfo.oldthresh > appelInfo.oldmax) {
            fprintf(stderr, "Not enough heap for requested major resid size\n");
            return rtsFalse;
        }
    } else {
        appelInfo.oldthresh = heap_space + RTSflags.GcFlags.heapSize * 2 / 3; /* initial threshold -- 2/3rds of heap */
        if (appelInfo.oldthresh > appelInfo.oldmax)
            appelInfo.oldthresh = appelInfo.oldmax;
    }
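
    /* Worked example: with heapSize = 900k words the default threshold is
       heap_space + 600k, i.e. a major collection triggers once the old
       generation fills two thirds of the heap -- unless oldmax (which
       reserves room for the bit vector and a minimum allocation area) is
       lower, in which case oldmax wins. */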
    sm->hp = hp_start = appelInfo.newbase - 1;
    sm->hplim = appelInfo.newlim;

    if (RTSflags.GcFlags.forceGC
     && sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
        sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
    }

    sm->OldLim = appelInfo.oldlim;

    appelInfo.OldCAFlist = NULL;
    appelInfo.OldCAFno = 0;

    initExtensions( sm );

    appelInfo.PromMutables = 0;

    if (RTSflags.GcFlags.trace) {
        fprintf(stderr, "APPEL Heap: 0x%lx .. 0x%lx\n",
                (W_) heap_space, (W_) (heap_space - 1 + RTSflags.GcFlags.heapSize));
        fprintf(stderr, "Initial: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx\n",
                (W_) appelInfo.newbase, (W_) appelInfo.newlim,
                (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
                (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
                (W_) sm->hp, (W_) sm->hplim);
    }

    return rtsTrue; /* OK */
}
I_
collect2s(W_ reqsize, smInfo *sm)
{
    I_ free_space,      /* No of words of free space following GC */
       alloc,           /* Number of words allocated since last GC */
       resident,        /* Number of words remaining after GC */
       extra_caf_words, /* Extra words referenced from CAFs */
       caf_roots,       /* Number of CAFs */
       bstk_roots;      /* Number of update frames in B stack */
    SAVE_REGS(&ScavRegDump);    /* Save registers */

#if defined(PROFILING)
    if (interval_expired) { heap_profile_setup(); }
#endif /* PROFILING */

    if (RTSflags.GcFlags.trace)
        fprintf(stderr, "Start: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, req %lu\n",
                appelInfo.semi_space,
                (W_) appelInfo.space[appelInfo.semi_space].base,
                (W_) appelInfo.space[appelInfo.semi_space].lim,
                (W_) sm->hp, (W_) sm->hplim, (W_) (reqsize * sizeof(W_)));

    alloc = sm->hp - hp_start;

    appelInfo.semi_space = NEXT_SEMI_SPACE(appelInfo.semi_space);
    ToHp = appelInfo.space[appelInfo.semi_space].base - 1;
    Scav = appelInfo.space[appelInfo.semi_space].base;
    OldGen = sm->OldLim;    /* always evac ! */
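
    /* The copying phase below follows the usual Cheney two-finger scheme:
     * ToHp is the allocation finger in to-space, Scav the scavenge finger
     * chasing it. A minimal sketch of the driving loop, assuming each
     * closure's scavenge code evacuates the closure's pointer fields
     * (possibly bumping ToHp) and advances Scav past the closure:
     *
     *    while (Scav <= ToHp) {
     *        (SCAV_CODE(INFO_PTR(Scav)))();
     *    }
     *
     * The collection is finished when the scavenge finger catches the
     * allocation finger. */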
    SetCAFInfoTables( sm->CAFlist );

    EvacuateLocalGAs(rtsTrue);

    EvacuateRoots( sm->roots, sm->rootno );

    EvacuateAStack( MAIN_SpA, stackInfo.botA );
    EvacuateBStack( MAIN_SuB, stackInfo.botB, &bstk_roots );

    EvacAndScavengeCAFs( sm->CAFlist, &extra_caf_words, &caf_roots );

    RebuildGAtables(rtsTrue);

    reportDeadMallocPtrs( sm->MallocPtrList, NULL, &(sm->MallocPtrList) );
    /* TIDY UP AND RETURN */

    sm->hp = hp_start = ToHp;   /* Last allocated word */

    resident = sm->hp - (appelInfo.space[appelInfo.semi_space].base - 1);
    DO_MAX_RESIDENCY(resident); /* stats only */

    if (! RTSflags.GcFlags.allocAreaSizeGiven) {
        sm->hplim = appelInfo.space[appelInfo.semi_space].lim;
        free_space = sm->hplim - sm->hp;
    } else {
        sm->hplim = sm->hp + RTSflags.GcFlags.allocAreaSize;
        if (sm->hplim > appelInfo.space[appelInfo.semi_space].lim) {
            free_space = 0;     /* the limit checks below will now fail */
        } else {
            free_space = RTSflags.GcFlags.allocAreaSize;
        }
    }

    if (RTSflags.GcFlags.forceGC
     && sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
        sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
    }
    if (RTSflags.GcFlags.giveStats) {
        char comment_str[BIG_STRING_LEN];

#ifndef PAR
        sprintf(comment_str, "%4lu %4ld %3ld %3ld %6lu %6lu %6lu 2s",
                (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
                bstk_roots, sm->rootno,
                caf_roots, extra_caf_words*sizeof(W_),
                (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1)*sizeof(W_),
                (W_) (SUBTRACT_B_STK(MAIN_SpB, stackInfo.botB) + 1)*sizeof(W_));
#else
        /* ToDo: come up with some interesting statistics for the parallel world */
        sprintf(comment_str, "%4u %4ld %3ld %3ld %6lu %6lu %6lu 2s",
                0, 0L, sm->rootno, caf_roots, extra_caf_words*sizeof(W_), 0L, 0L);
#endif /* PAR */

#if defined(PROFILING)
        if (interval_expired) { strcat(comment_str, " prof"); }
#endif /* PROFILING */

        stat_endGC(alloc, RTSflags.GcFlags.heapSize, resident, comment_str);
    } else {
        stat_endGC(alloc, RTSflags.GcFlags.heapSize, resident, "");
    }
#if defined(PROFILING) || defined(PAR)
    if (interval_expired) {
# if defined(PROFILING)
        report_cc_profiling(0 /*partial*/);
# endif /* PROFILING */
    }
#endif /* PROFILING || PAR */
    if (RTSflags.GcFlags.trace)
        fprintf(stderr, "Done: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
                appelInfo.semi_space,
                (W_) appelInfo.space[appelInfo.semi_space].base,
                (W_) appelInfo.space[appelInfo.semi_space].lim,
                (W_) sm->hp, (W_) sm->hplim, (W_) (free_space * sizeof(W_)));

    /* To help flush out bugs, we trash the part of the heap from
       which we're about to start allocating, and all of the space
       we just came from. */
    {
        I_ old_space = NEXT_SEMI_SPACE(appelInfo.semi_space);

        TrashMem(appelInfo.space[old_space].base, appelInfo.space[old_space].lim);
        TrashMem(sm->hp+1, sm->hplim);
    }
    RESTORE_REGS(&ScavRegDump); /* Restore Registers */

    if (free_space < RTSflags.GcFlags.minAllocAreaSize || free_space < reqsize)
        return( GC_HARD_LIMIT_EXCEEDED );   /* Heap absolutely exhausted */

    if (reqsize + sm->hardHpOverflowSize > free_space) {
        return( GC_SOFT_LIMIT_EXCEEDED );   /* Heap nearly exhausted */
    }
    return( GC_SUCCESS );                   /* Heap OK */
}
I_
collectHeap(reqsize, sm, do_full_collection)
    W_ reqsize;
    smInfo *sm;
    rtsBool do_full_collection; /* do a major collection regardless? */
{
    I_ bstk_roots, caf_roots, mutable, old_words;
    P_ old_start, mutptr, prevmut;
    P_ CAFptr, prevCAF;
    I_ alloc,       /* Number of words allocated since last GC */
       resident;    /* Number of words remaining after GC */
    fflush(stdout);     /* Flush stdout at start of GC */

    if (RTSflags.GcFlags.force2s) {
        return collect2s(reqsize, sm);
    }

    SAVE_REGS(&ScavRegDump);    /* Save registers */

    if (RTSflags.GcFlags.trace)
        fprintf(stderr, "Start: newbase 0x%lx, newlim 0x%lx\n hp 0x%lx, hplim 0x%lx, req %lu\n",
                (W_) appelInfo.newbase, (W_) appelInfo.newlim, (W_) sm->hp, (W_) sm->hplim, reqsize * sizeof(W_));

    alloc = sm->hp - hp_start;
    allocd_since_last_major_GC += sm->hplim - hp_start;
    /* this is indeed supposed to be less precise than alloc above */
    /* COPYING COLLECTION */

    /* Set ToHp to the end of the old generation */
    ToHp = appelInfo.oldlim;

    /* Set the OldGen register so that we only evacuate new-generation closures */
    OldGen = appelInfo.oldlim;
    /* FIRST: Evacuate and scavenge CAFs and roots in the old generation */

    SetCAFInfoTables( sm->CAFlist );

    DEBUG_STRING("Evacuate CAFs:");

    caf_roots = 0;
    CAFptr = sm->CAFlist;
    prevCAF = ((P_)(&sm->CAFlist)) - FIXED_HS; /* see IND_CLOSURE_LINK */
    while (CAFptr) {
        EVACUATE_CLOSURE(CAFptr); /* evac & upd OR return */
        caf_roots++;
        prevCAF = CAFptr;
        CAFptr = (P_) IND_CLOSURE_LINK(CAFptr);
    }
    IND_CLOSURE_LINK(prevCAF) = (W_) appelInfo.OldCAFlist;
    appelInfo.OldCAFlist = sm->CAFlist;
    appelInfo.OldCAFno += caf_roots;
    sm->CAFlist = NULL;
413 DEBUG_STRING("Evacuate Mutable Roots:");
415 mutptr = sm->OldMutables;
416 /* Clever, but completely illegal: */
417 prevmut = ((P_)&sm->OldMutables) - FIXED_HS;
    while (mutptr) {
        /* Scavenge the OldMutable */
        P_ orig_mutptr = mutptr;
        P_ info = (P_) INFO_PTR(mutptr);
        StgScavPtr scav_code = SCAV_CODE(info);
        Scav = mutptr;
        (scav_code)();

        /* Remove from OldMutables if no longer mutable */
        if (!IS_MUTABLE(info)) {
            MUT_LINK(prevmut) = MUT_LINK(mutptr);
            mutptr = (P_) MUT_LINK(mutptr);
            MUT_LINK(orig_mutptr) = MUT_NOT_LINKED;
        } else {
            mutable++;
            prevmut = mutptr;
            mutptr = (P_) MUT_LINK(mutptr);
        }
    }
    EvacuateLocalGAs(rtsFalse);

    DEBUG_STRING("Scavenge evacuated old generation roots:");

    old_start = appelInfo.oldlim + 1;
    Scav = appelInfo.oldlim + 1;    /* Point to (info field of) first closure */

    old_words = ToHp - old_start;

    /* PROMOTE closures rooted in the old generation and reset the list of
       old generation roots */

    appelInfo.oldlim = ToHp;

    /* SECOND: Evacuate and scavenge the remaining roots. These may already
       have been evacuated -- in that case we just pick up the new address. */

    EvacuateRoots( sm->roots, sm->rootno );

    EvacuateAStack( MAIN_SpA, stackInfo.botA );
    EvacuateBStack( MAIN_SuB, stackInfo.botB, &bstk_roots );
    /* ToDo: Optimisation which squeezes out garbage update frames */

    Scav = appelInfo.oldlim + 1;    /* Point to (info field of) first closure */

    appelInfo.oldlim = ToHp;
    /* Record newly promoted mutable closures ("mutuples") */
    MUT_LINK(prevmut) = (W_) appelInfo.PromMutables;
    appelInfo.PromMutables = 0;

    /* Set the new generation base, if not fixed */
    if (! appelInfo.newfixed) {
        appelInfo.newbase = appelInfo.oldlim + 1 + (((appelInfo.newlim - appelInfo.oldlim) + 1) / 2);
    }
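
    /* Worked example of the halving above: if the old generation now ends
       300k words into a 1M-word heap, the remaining ~700k free words are
       split evenly -- newbase lands roughly at oldlim + 350k, leaving half
       of the free space as the allocation area and half as headroom for
       closures promoted by future minor collections. */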
    RebuildGAtables(rtsFalse);

    reportDeadMallocPtrs(sm->MallocPtrList,
                         sm->OldMallocPtrList,
                         &(sm->OldMallocPtrList));
    sm->MallocPtrList = NULL;   /* all (new) MallocPtrs have been promoted */

    resident = appelInfo.oldlim - sm->OldLim;
    /* DONT_DO_MAX_RESIDENCY -- it is just a minor collection */
    if (RTSflags.GcFlags.giveStats) {
        char minor_str[BIG_STRING_LEN];

#ifndef PAR
        sprintf(minor_str, "%4lu %4ld %3ld %3ld %4ld Minor",
                (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
                bstk_roots, sm->rootno, caf_roots, mutable); /* oldnew_roots, old_words */
#else
        /* ToDo: come up with some interesting statistics for the parallel world */
        sprintf(minor_str, "%4u %4ld %3ld %3ld %4ld Minor",
                0, 0L, sm->rootno, caf_roots, mutable);
#endif /* PAR */
        stat_endGC(alloc, alloc, resident, minor_str);
    } else {
        stat_endGC(alloc, alloc, resident, "");
    }
    /* Note: if do_full_collection we want to force a full collection. [ADR] */

    if (RTSflags.GcFlags.forceGC
     && allocd_since_last_major_GC >= RTSflags.GcFlags.forcingInterval) {
        do_full_collection = 1;
    }

    if ((appelInfo.oldlim < appelInfo.oldthresh)
     && (reqsize + sm->hardHpOverflowSize <= appelInfo.newlim - appelInfo.newbase)
     && (! do_full_collection)) {
        sm->hp = hp_start = appelInfo.newbase - 1;
        sm->hplim = appelInfo.newlim;

        if (RTSflags.GcFlags.forceGC
         && (allocd_since_last_major_GC + (sm->hplim - hp_start) > RTSflags.GcFlags.forcingInterval)) {
            sm->hplim = sm->hp + (RTSflags.GcFlags.forcingInterval - allocd_since_last_major_GC);
        }

        sm->OldLim = appelInfo.oldlim;

        if (RTSflags.GcFlags.trace) {
            fprintf(stderr, "Minor: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
                    (W_) appelInfo.newbase, (W_) appelInfo.newlim,
                    (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
                    (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
                    (W_) sm->hp, (W_) sm->hplim, (W_) (sm->hplim - sm->hp) * sizeof(W_));
        }

        /* To help flush out bugs, we trash the part of the heap from
           which we're about to start allocating. */
        TrashMem(sm->hp+1, sm->hplim);

        RESTORE_REGS(&ScavRegDump);     /* Restore Registers */

        return GC_SUCCESS;      /* Heap OK -- Enough space to continue */
    }
558 DEBUG_STRING("Major Collection Required");
560 allocd_since_last_major_GC = 0;
564 alloc = (appelInfo.oldlim - appelInfo.oldbase) + 1;
566 appelInfo.bit_words = (alloc + BITS_IN(BitWord) - 1) / BITS_IN(BitWord);
567 appelInfo.bits = (BitWord *)(appelInfo.newlim) - appelInfo.bit_words;
568 /* For some reason, this doesn't seem to use the last
569 allocatable word at appelInfo.newlim */
    if (appelInfo.bits <= appelInfo.oldlim) {
        fprintf(stderr, "APPEL Major: Not enough space for bit vector\n");
        return GC_HARD_LIMIT_EXCEEDED;
    }

    /* Zero the bit vector for the marking phase of the major collection */
    {
        BitWord *ptr = appelInfo.bits,
                *end = appelInfo.bits + appelInfo.bit_words;
        while (ptr < end) { *(ptr++) = 0; }
    }
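    /* (The loop above is equivalent to
       memset(appelInfo.bits, 0, appelInfo.bit_words * sizeof(BitWord)).) */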
    /* Bracket the use of MARK_REG_MAP with RESTORE/SAVE of SCAV_REG_MAP */
    RESTORE_REGS(&ScavRegDump);

        appelInfo.OldCAFlist,

    SAVE_REGS(&ScavRegDump);

    sweepUpDeadMallocPtrs(sm->OldMallocPtrList,

    /* Reset OldMutables -- this will be reconstructed during the scan */
    LinkCAFs(appelInfo.OldCAFlist);

    LinkRoots( sm->roots, sm->rootno );

    LinkLiveGAs(appelInfo.oldbase, appelInfo.bits);

    DEBUG_STRING("Linking Stable Pointer Table:");
    LINK_LOCATION_TO_CLOSURE(&sm->StablePointerTable);

    LinkAStack( MAIN_SpA, stackInfo.botA );
    LinkBStack( MAIN_SuB, stackInfo.botB );
    /* Do inplace compaction */
    /* Inplace_Compaction returns (start of next closure) - 1,
       i.e. the last allocated word */
    appelInfo.oldlim = Inplace_Compaction(appelInfo.oldbase,
                                          ,&(sm->OldMallocPtrList)
    appelInfo.oldlast = appelInfo.oldlim;
    resident = (appelInfo.oldlim - appelInfo.oldbase) + 1;
    DO_MAX_RESIDENCY(resident); /* stats only */

    /* Set the new generation base, if not fixed */
    if (! appelInfo.newfixed) {
        appelInfo.newbase = appelInfo.oldlim + 1 + (((appelInfo.newlim - appelInfo.oldlim) + 1) / 2);
    }

    /* Set the major threshold, if not fixed */
    /* Next major collection when the old gen occupies 2/3rds of the free
       space or exceeds oldmax */
    if (! RTSflags.GcFlags.specifiedOldGenSize) {
        appelInfo.oldthresh = appelInfo.oldlim + (appelInfo.newlim - appelInfo.oldlim) * 2 / 3;
        if (appelInfo.oldthresh > appelInfo.oldmax)
            appelInfo.oldthresh = appelInfo.oldmax;
    }
    sm->hp = hp_start = appelInfo.newbase - 1;
    sm->hplim = appelInfo.newlim;

    if (RTSflags.GcFlags.forceGC
     && sm->hplim > sm->hp + RTSflags.GcFlags.forcingInterval) {
        sm->hplim = sm->hp + RTSflags.GcFlags.forcingInterval;
    }

    sm->OldLim = appelInfo.oldlim;
    if (RTSflags.GcFlags.giveStats) {
        char major_str[BIG_STRING_LEN];

#ifndef PAR
        sprintf(major_str, "%4lu %4ld %3ld %3ld %4d %4d *Major* %4.1f%%",
                (W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
                bstk_roots, sm->rootno, appelInfo.OldCAFno,
                0, 0, resident / (StgDouble) RTSflags.GcFlags.heapSize * 100);
#else
        /* ToDo: come up with some interesting statistics for the parallel world */
        sprintf(major_str, "%4u %4ld %3ld %3ld %4d %4d *Major* %4.1f%%",
                0, 0L, sm->rootno, appelInfo.OldCAFno, 0, 0,
                resident / (StgDouble) RTSflags.GcFlags.heapSize * 100);
#endif /* PAR */
        stat_endGC(0, alloc, resident, major_str);
    } else {
        stat_endGC(0, alloc, resident, "");
    }
    if (RTSflags.GcFlags.trace) {
        fprintf(stderr, "Major: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
                (W_) appelInfo.newbase, (W_) appelInfo.newlim,
                (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
                (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
                (W_) sm->hp, (W_) sm->hplim, (W_) (sm->hplim - sm->hp) * sizeof(W_));
    }

    /* To help flush out bugs, we trash the part of the heap from
       which we're about to start allocating. */
    TrashMem(sm->hp+1, sm->hplim);

    RESTORE_REGS(&ScavRegDump);     /* Restore Registers */
    if ((appelInfo.oldlim > appelInfo.oldmax)
     || (reqsize > sm->hplim - sm->hp)) {
        return( GC_HARD_LIMIT_EXCEEDED );   /* Heap absolutely exhausted */
    } else if (reqsize + sm->hardHpOverflowSize > sm->hplim - sm->hp) {
        return( GC_SOFT_LIMIT_EXCEEDED );   /* Heap nearly exhausted */
    }
    return( GC_SUCCESS );                   /* Heap OK */
}
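
/* How a caller might drive the return codes above -- a hedged sketch, not
 * the actual mutator entry code (words_needed, start_graceful_shutdown and
 * heap_overflow are hypothetical names):
 *
 *    switch (collectHeap(words_needed, sm, rtsFalse)) {
 *      case GC_SUCCESS:                // enough room: resume the mutator
 *        break;
 *      case GC_SOFT_LIMIT_EXCEEDED:    // only the reserved hardHpOverflowSize
 *        start_graceful_shutdown();    // headroom is left beyond the request
 *        break;
 *      case GC_HARD_LIMIT_EXCEEDED:    // truly out of heap
 *        heap_overflow();
 *    }
 */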