1 ***************************************************************************
3 APPEL'S GARBAGE COLLECTION
5 Global heap requirements as for 1s and 2s collectors.
6 ++ All closures in the old generation that are updated must be
7 updated with indirections and placed on the linked list of
8 updated old generation closures.
10 ***************************************************************************
16 #include "SMinternal.h"
17 #include "SMcopying.h"
18 #include "SMcompacting.h"
/* File-scope state for the Appel generational collector.
   appelInfo holds the layout of both generations (bases, limits,
   thresholds, mark-bit vector); zero-initialised here and filled in
   by the heap-initialisation code below. */
23 appelData appelInfo = {0, 0, 0, 0, 0,
24 0, 0, 0, 0, 0, 0, 0, 0, 0,
28 P_ heap_space = 0; /* Address of first word of slab
29 of memory allocated for heap */
31 P_ hp_start; /* Value of Hp when reduction was resumed */
33 #if defined(PROMOTION_DATA) /* For dead promote & premature promote data */
34 P_ thisbase; /* Start of old gen before this minor collection */
35 P_ prevbase; /* Start of old gen before previous minor collection */
36 I_ prev_prom = 0; /* Promoted previous minor collection */
37 I_ dead_prev_prom = 0; /* Dead words promoted previous minor */
38 #endif /* PROMOTION_DATA */
40 #if defined(_GC_DEBUG)
/* Debug aid: scan the word range [start, stop] and report (to stderr)
   every heap word whose contents equal `villain` -- used to track down
   stray references to a particular closure address. */
42 debug_look_for (start, stop, villain)
43 P_ start, stop, villain;
46 for (i = start; i <= stop; i++) {
47 if ( (P_) *i == villain ) {
/* NOTE(review): %x with pointer arguments is undefined behaviour on
   platforms where pointers are wider than int (e.g. LP64); should be
   %p, or %lx with a (W_) cast as done elsewhere in this file. */
48 fprintf(stderr, "* %x : %x\n", i, villain);
/* One-time storage-manager initialisation: on the first call (heap_space
   == 0) allocate the root table and the heap slab, then lay the heap out
   either as a forced two-space arrangement (SM_force_gc == USE_2s) or as
   the Appel generational arrangement (new gen at the top of the slab,
   old gen growing up from the bottom). */
58 if (heap_space == 0) { /* allocates if it doesn't already exist */
60 /* Allocate the roots space */
61 sm->roots = (P_ *) xmalloc( SM_MAXROOTS * sizeof(W_) );
63 /* Allocate the heap */
64 heap_space = (P_) xmalloc((SM_word_heap_size + EXTRA_HEAP_WORDS) * sizeof(W_));
66 /* ToDo (ADR): trash entire heap contents */
68 if (SM_force_gc == USE_2s) {
69 stat_init("TWOSPACE(APPEL)",
70 " No of Roots Caf Caf Astk Bstk",
71 "Astk Bstk Reg No bytes bytes bytes");
74 " No of Roots Caf Mut- Old Collec Resid",
75 "Astk Bstk Reg No able Gen tion %heap");
78 sm->hardHpOverflowSize = 0;
/* Forced two-space layout: split the slab into two semi-spaces. */
80 if (SM_force_gc == USE_2s) {
81 I_ semi_space_words = SM_word_heap_size / 2;
82 appelInfo.space[0].base = HEAP_FRAME_BASE(heap_space, semi_space_words);
83 appelInfo.space[1].base = HEAP_FRAME_BASE(heap_space + semi_space_words, semi_space_words);
84 appelInfo.space[0].lim = HEAP_FRAME_LIMIT(heap_space, semi_space_words);
85 appelInfo.space[1].lim = HEAP_FRAME_LIMIT(heap_space + semi_space_words, semi_space_words);
86 appelInfo.semi_space = 0;
87 appelInfo.oldlim = heap_space - 1; /* Never in old generation */
/* hp points at the last allocated word, hence base - 1 when empty. */
89 sm->hp = hp_start = appelInfo.space[appelInfo.semi_space].base - 1;
92 sm->hplim = sm->hp + SM_alloc_size;
93 SM_alloc_min = 0; /* No min; alloc size specified */
95 if (sm->hplim > appelInfo.space[appelInfo.semi_space].lim) {
96 fprintf(stderr, "Not enough heap for requested alloc size\n");
100 sm->hplim = appelInfo.space[appelInfo.semi_space].lim;
103 #if defined(FORCE_GC)
/* Cap the allocation area so a GC is forced every GCInterval words. */
105 if (sm->hplim > sm->hp + GCInterval) {
106 sm->hplim = sm->hp + GCInterval;
109 /* no point in forcing GC,
110 as the semi-space is smaller than GCInterval */
114 #endif /* FORCE_GC */
116 #if defined(LIFE_PROFILE)
117 sm->hplim = sm->hp + ((sm->hplim - sm->hp) / 2); /* space for HpLim incr */
119 sm->hplim = sm->hp + LifeInterval;
121 #endif /* LIFE_PROFILE */
123 sm->OldLim = appelInfo.oldlim;
127 initExtensions( sm );
131 fprintf(stderr, "APPEL(2s) Heap: 0x%lx .. 0x%lx\n",
132 (W_) heap_space, (W_) (heap_space - 1 + SM_word_heap_size));
133 fprintf(stderr, "Initial: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
134 appelInfo.semi_space,
135 (W_) appelInfo.space[appelInfo.semi_space].base,
136 (W_) appelInfo.space[appelInfo.semi_space].lim,
137 (W_) sm->hp, (W_) sm->hplim, (W_) (sm->hplim - sm->hp) * sizeof(W_));
143 /* So not forced 2s */
/* Generational layout: new generation sits at the top of the slab. */
145 appelInfo.newlim = heap_space + SM_word_heap_size - 1;
147 appelInfo.newfixed = SM_alloc_size;
148 appelInfo.newmin = SM_alloc_size;
149 appelInfo.newbase = heap_space + SM_word_heap_size - appelInfo.newfixed;
151 appelInfo.newfixed = 0;
152 appelInfo.newmin = SM_alloc_min;
153 appelInfo.newbase = heap_space + (SM_word_heap_size / 2);
/* Old generation starts empty at the bottom of the slab
   (lim/last = base - 1 means "no words allocated yet"). */
156 appelInfo.oldbase = heap_space;
157 appelInfo.oldlim = heap_space - 1;
158 appelInfo.oldlast = heap_space - 1;
159 appelInfo.oldmax = heap_space - 1 + SM_word_heap_size - 2*appelInfo.newmin;
161 if (appelInfo.oldbase > appelInfo.oldmax) {
162 fprintf(stderr, "Not enough heap for requested/minimum allocation area\n");
/* Reserve space at the top of the slab for the major-GC mark bit vector. */
166 appelInfo.bit_words = (SM_word_heap_size + BITS_IN(BitWord) - 1) / BITS_IN(BitWord);
167 appelInfo.bits = (BitWord *)(appelInfo.newlim) - appelInfo.bit_words;
168 if (appelInfo.bit_words > appelInfo.newmin)
169 appelInfo.oldmax = heap_space - 1 + SM_word_heap_size - appelInfo.bit_words - appelInfo.newmin;
171 if (SM_major_gen_size) {
172 appelInfo.oldthresh = heap_space -1 + SM_major_gen_size;
173 if (appelInfo.oldthresh > appelInfo.oldmax) {
174 fprintf(stderr, "Not enough heap for requested major resid size\n");
178 appelInfo.oldthresh = heap_space + SM_word_heap_size * 2 / 3; /* Initial threshold -- 2/3rds */
179 if (appelInfo.oldthresh > appelInfo.oldmax)
180 appelInfo.oldthresh = appelInfo.oldmax;
183 sm->hp = hp_start = appelInfo.newbase - 1;
184 sm->hplim = appelInfo.newlim;
186 #if defined(FORCE_GC)
187 if (force_GC && (sm->hplim > sm->hp + GCInterval)) {
188 sm->hplim = sm->hp + GCInterval;
190 #endif /* FORCE_GC */
192 sm->OldLim = appelInfo.oldlim;
195 appelInfo.OldCAFlist = NULL;
196 appelInfo.OldCAFno = 0;
199 initExtensions( sm );
202 appelInfo.PromMutables = 0;
204 #if defined(PROMOTION_DATA) /* For dead promote & premature promote data */
205 prevbase = appelInfo.oldlim + 1;
206 thisbase = appelInfo.oldlim + 1;
207 #endif /* PROMOTION_DATA */
210 fprintf(stderr, "APPEL Heap: 0x%lx .. 0x%lx\n",
211 (W_) heap_space, (W_) (heap_space - 1 + SM_word_heap_size));
212 fprintf(stderr, "Initial: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx\n",
213 (W_) appelInfo.newbase, (W_) appelInfo.newlim,
214 (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
215 (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
216 (W_) sm->hp, (W_) sm->hplim);
/* Two-space (forced USE_2s) collection: copy all live data into the other
   semi-space, then reset hp/hplim for the next allocation area.
   reqsize = words the mutator needs on return; returns GC_SUCCESS,
   GC_SOFT_LIMIT_EXCEEDED or GC_HARD_LIMIT_EXCEEDED. */
223 collect2s(reqsize, sm)
227 #if defined(LIFE_PROFILE)
228 I_ next_interval; /* if doing profile */
230 I_ free_space, /* No of words of free space following GC */
231 alloc, /* Number of words allocated since last GC */
232 resident, /* Number of words remaining after GC */
233 extra_caf_words,/* Extra words referenced from CAFs */
234 caf_roots, /* Number of CAFs */
235 bstk_roots; /* Number of update frames in B stack */
237 SAVE_REGS(&ScavRegDump); /* Save registers */
239 #if defined(LIFE_PROFILE)
240 if (do_life_prof) { life_profile_setup(); }
241 #endif /* LIFE_PROFILE */
243 #if defined(USE_COST_CENTRES)
244 if (interval_expired) { heap_profile_setup(); }
245 #endif /* USE_COST_CENTRES */
248 fprintf(stderr, "Start: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, req %lu\n",
249 appelInfo.semi_space,
250 (W_) appelInfo.space[appelInfo.semi_space].base,
251 (W_) appelInfo.space[appelInfo.semi_space].lim,
252 (W_) sm->hp, (W_) sm->hplim, (W_) (reqsize * sizeof(W_)));
254 alloc = sm->hp - hp_start;
/* Flip semi-spaces; ToHp/Scav drive the copying evacuate/scavenge loop. */
257 appelInfo.semi_space = NEXT_SEMI_SPACE(appelInfo.semi_space);
258 ToHp = appelInfo.space[appelInfo.semi_space].base - 1;
259 Scav = appelInfo.space[appelInfo.semi_space].base;
260 OldGen = sm->OldLim; /* always evac ! */
262 SetCAFInfoTables( sm->CAFlist );
264 EvacuateLocalGAs(rtsTrue);
268 EvacuateRoots( sm->roots, sm->rootno );
273 EvacuateAStack( MAIN_SpA, stackInfo.botA );
274 EvacuateBStack( MAIN_SuB, stackInfo.botB, &bstk_roots );
279 EvacAndScavengeCAFs( sm->CAFlist, &extra_caf_words, &caf_roots );
282 RebuildGAtables(rtsTrue);
284 reportDeadMallocPtrs( sm->MallocPtrList, NULL, &(sm->MallocPtrList) );
287 /* TIDY UP AND RETURN */
289 sm->hp = hp_start = ToHp; /* Last allocated word */
291 resident = sm->hp - (appelInfo.space[appelInfo.semi_space].base - 1);
292 DO_MAX_RESIDENCY(resident); /* stats only */
295 sm->hplim = sm->hp + SM_alloc_size;
296 if (sm->hplim > appelInfo.space[appelInfo.semi_space].lim) {
299 free_space = SM_alloc_size;
302 sm->hplim = appelInfo.space[appelInfo.semi_space].lim;
303 free_space = sm->hplim - sm->hp;
306 #if defined(FORCE_GC)
307 if (force_GC && (sm->hplim > sm->hp + GCInterval)) {
308 sm->hplim = sm->hp + GCInterval;
310 #endif /* FORCE_GC */
312 if (SM_stats_verbose) {
313 char comment_str[BIG_STRING_LEN];
315 sprintf(comment_str, "%4u %4ld %3ld %3ld %6lu %6lu %6lu 2s",
316 (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
317 bstk_roots, sm->rootno,
318 caf_roots, extra_caf_words*sizeof(W_),
319 (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1)*sizeof(W_),
320 (SUBTRACT_B_STK(MAIN_SpB, stackInfo.botB) + 1)*sizeof(W_));
322 /* ToDo: come up with some interesting statistics for the parallel world */
323 sprintf(comment_str, "%4u %4ld %3ld %3ld %6lu %6lu %6lu 2s",
324 0, 0L, sm->rootno, caf_roots, extra_caf_words*sizeof(W_), 0L, 0L);
328 #if defined(LIFE_PROFILE)
330 strcat(comment_str, " life");
333 #if defined(USE_COST_CENTRES)
334 if (interval_expired) {
335 strcat(comment_str, " prof");
339 stat_endGC(alloc, SM_word_heap_size, resident, comment_str);
341 stat_endGC(alloc, SM_word_heap_size, resident, "");
344 #if defined(LIFE_PROFILE)
345 free_space = free_space / 2; /* space for HpLim incr */
347 next_interval = life_profile_done(alloc, reqsize);
348 free_space -= next_interval; /* ensure interval available */
350 #endif /* LIFE_PROFILE */
352 #if defined(USE_COST_CENTRES) || defined(GUM)
353 if (interval_expired) {
354 # if defined(USE_COST_CENTRES)
357 report_cc_profiling(0 /*partial*/);
359 #endif /* USE_COST_CENTRES */
362 fprintf(stderr, "Done: space %ld, base 0x%lx, lim 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
363 appelInfo.semi_space,
364 (W_) appelInfo.space[appelInfo.semi_space].base,
365 (W_) appelInfo.space[appelInfo.semi_space].lim,
366 (W_) sm->hp, (W_) sm->hplim, (W_) (free_space * sizeof(W_)));
369 /* To help flush out bugs, we trash the part of the heap from
370 which we're about to start allocating, and all of the space
371 we just came from. */
373 I_ old_space = NEXT_SEMI_SPACE(appelInfo.semi_space);
374 TrashMem(appelInfo.space[old_space].base, appelInfo.space[old_space].lim);
375 TrashMem(sm->hp+1, sm->hplim);
379 RESTORE_REGS(&ScavRegDump); /* Restore Registers */
381 if ( (SM_alloc_min > free_space) || (reqsize > free_space) ) {
382 return( GC_HARD_LIMIT_EXCEEDED ); /* Heap absolutely exhausted */
385 #if defined(LIFE_PROFILE)
386 /* ToDo: this may not be right now (WDP 94/11) */
388 /* space for HpLim incr */
389 sm->hplim = sm->hp + ((sm->hplim - sm->hp) / 2);
391 /* set hplim for next life profile */
392 sm->hplim = sm->hp + next_interval;
394 #endif /* LIFE_PROFILE */
396 if (reqsize + sm->hardHpOverflowSize > free_space) {
397 return( GC_SOFT_LIMIT_EXCEEDED ); /* Heap nearly exhausted */
399 return( GC_SUCCESS ); /* Heap OK */
/* Main entry point for garbage collection under the Appel scheme.
   Always performs a minor (copying) collection that promotes the new
   generation into the old generation; if the old generation then exceeds
   its threshold (or a full collection is requested/forced), follows up
   with a major mark/compact collection of the old generation.
   reqsize = words the mutator needs on return. Returns GC_SUCCESS,
   GC_SOFT_LIMIT_EXCEEDED or GC_HARD_LIMIT_EXCEEDED. */
406 collectHeap(reqsize, sm, do_full_collection)
409 rtsBool do_full_collection; /* do a major collection regardless? */
411 I_ bstk_roots, caf_roots, mutable, old_words;
412 P_ oldptr, old_start, mutptr, prevmut;
416 I_ alloc, /* Number of words allocated since last GC */
417 resident; /* Number of words remaining after GC */
419 #if defined(PROMOTION_DATA) /* For dead promote & premature promote data */
420 I_ promote, /* Promoted this minor collection */
421 dead_prom, /* Dead words promoted this minor */
422 dead_prev; /* Promoted words that died since previous minor collection */
425 #endif /* PROMOTION_DATA */
427 fflush(stdout); /* Flush stdout at start of GC */
429 if (SM_force_gc == USE_2s) {
430 return collect2s(reqsize, sm);
433 SAVE_REGS(&ScavRegDump); /* Save registers */
436 fprintf(stderr, "Start: newbase 0x%lx, newlim 0x%lx\n hp 0x%lx, hplim 0x%lx, req %lu\n",
437 (W_) appelInfo.newbase, (W_) appelInfo.newlim, (W_) sm->hp, (W_) sm->hplim, reqsize * sizeof(W_));
439 alloc = sm->hp - hp_start;
443 alloc_since_last_major_GC += sm->hplim - hp_start;
444 /* this is indeed supposed to be less precise than alloc above */
445 #endif /* FORCE_GC */
447 /* COPYING COLLECTION */
449 /* Set ToHp to end of old gen */
450 ToHp = appelInfo.oldlim;
452 /* Set OldGen register so we only evacuate new gen closures */
453 OldGen = appelInfo.oldlim;
455 /* FIRST: Evacuate and Scavenge CAFs and roots in the old generation */
458 SetCAFInfoTables( sm->CAFlist );
460 DEBUG_STRING("Evacuate CAFs:");
462 CAFptr = sm->CAFlist;
463 prevCAF = ((P_)(&sm->CAFlist)) - FIXED_HS; /* see IND_CLOSURE_LINK */
465 EVACUATE_CLOSURE(CAFptr); /* evac & upd OR return */
468 CAFptr = (P_) IND_CLOSURE_LINK(CAFptr);
/* Splice the (now evacuated) new CAFs onto the old-generation CAF list. */
470 IND_CLOSURE_LINK(prevCAF) = (W_) appelInfo.OldCAFlist;
471 appelInfo.OldCAFlist = sm->CAFlist;
472 appelInfo.OldCAFno += caf_roots;
475 DEBUG_STRING("Evacuate Mutable Roots:");
477 mutptr = sm->OldMutables;
478 /* Clever, but completely illegal: */
479 prevmut = ((P_)&sm->OldMutables) - FIXED_HS;
483 /* Scavenge the OldMutable */
484 P_ info = (P_) INFO_PTR(mutptr);
485 StgScavPtr scav_code = SCAV_CODE(info);
489 /* Remove from OldMutables if no longer mutable */
490 if (!IS_MUTABLE(info)) {
492 MUT_LINK(prevmut) = MUT_LINK(mutptr);
493 mutptr = (P_) MUT_LINK(mutptr);
494 MUT_LINK(tmp) = MUT_NOT_LINKED;
497 mutptr = (P_) MUT_LINK(mutptr);
503 EvacuateLocalGAs(rtsFalse);
508 DEBUG_STRING("Scavenge evacuated old generation roots:");
510 Scav = appelInfo.oldlim + 1; /* Point to (info field of) first closure */
514 old_words = ToHp - old_start;
516 /* PROMOTE closures rooted in the old generation and reset list of old gen roots */
518 appelInfo.oldlim = ToHp;
520 /* SECOND: Evacuate and scavenge remaining roots
521 These may already have been evacuated -- just get new address
524 EvacuateRoots( sm->roots, sm->rootno );
530 EvacuateAStack( MAIN_SpA, stackInfo.botA );
531 EvacuateBStack( MAIN_SuB, stackInfo.botB, &bstk_roots );
532 /* ToDo: Optimisation which squeezes out garbage update frames */
535 Scav = appelInfo.oldlim + 1; /* Point to (info field of) first closure */
539 appelInfo.oldlim = ToHp;
541 /* record newly promoted mutuple roots */
542 MUT_LINK(prevmut) = (W_) appelInfo.PromMutables;
543 appelInfo.PromMutables = 0;
545 /* set new generation base, if not fixed */
546 if (! appelInfo.newfixed) {
547 appelInfo.newbase = appelInfo.oldlim + 1 + (((appelInfo.newlim - appelInfo.oldlim) + 1) / 2);
551 RebuildGAtables(rtsFalse);
553 reportDeadMallocPtrs(sm->MallocPtrList,
554 sm->OldMallocPtrList,
555 &(sm->OldMallocPtrList));
556 sm->MallocPtrList = NULL; /* all (new) MallocPtrs have been promoted */
559 resident = appelInfo.oldlim - sm->OldLim;
560 /* DONT_DO_MAX_RESIDENCY -- it is just a minor collection */
562 if (SM_stats_verbose) {
563 char minor_str[BIG_STRING_LEN];
565 sprintf(minor_str, "%4u %4ld %3ld %3ld %4ld Minor",
566 (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
567 bstk_roots, sm->rootno, caf_roots, mutable); /* oldnew_roots, old_words */
569 /* ToDo: come up with some interesting statistics for the parallel world */
570 sprintf(minor_str, "%4u %4ld %3ld %3ld %4ld Minor",
571 0, 0L, sm->rootno, caf_roots, mutable);
573 stat_endGC(alloc, alloc, resident, minor_str);
575 stat_endGC(alloc, alloc, resident, "");
578 /* Note: if do_full_collection we want to force a full collection. [ADR] */
581 if (force_GC && (alloc_since_last_major_GC >= GCInterval)) {
582 do_full_collection = 1;
584 #endif /* FORCE_GC */
/* If the old gen is still below threshold and the new gen can satisfy
   the request, the minor collection suffices -- return without a major. */
586 #if defined(PROMOTION_DATA) /* For dead promote & premature promote data major required */
588 if (! SM_stats_verbose &&
589 (appelInfo.oldlim < appelInfo.oldthresh) &&
590 (reqsize + sm->hardHpOverflowSize <= appelInfo.newlim - appelInfo.newbase) &&
591 (! do_full_collection) ) {
593 #else /* ! PROMOTION_DATA */
595 if ((appelInfo.oldlim < appelInfo.oldthresh) &&
596 (reqsize + sm->hardHpOverflowSize <= appelInfo.newlim - appelInfo.newbase) &&
597 (! do_full_collection) ) {
599 #endif /* ! PROMOTION_DATA */
601 sm->hp = hp_start = appelInfo.newbase - 1;
602 sm->hplim = appelInfo.newlim;
604 #if defined(FORCE_GC)
606 (alloc_since_last_major_GC + (sm->hplim - hp_start) > GCInterval))
608 sm->hplim = sm->hp + (GCInterval - alloc_since_last_major_GC);
610 #endif /* FORCE_GC */
612 sm->OldLim = appelInfo.oldlim;
615 fprintf(stderr, "Minor: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
616 (W_) appelInfo.newbase, (W_) appelInfo.newlim,
617 (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
618 (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
619 (W_) sm->hp, (W_) sm->hplim, (W_) (sm->hplim - sm->hp) * sizeof(W_));
623 /* To help flush out bugs, we trash the part of the heap from
624 which we're about to start allocating. */
625 TrashMem(sm->hp+1, sm->hplim);
628 RESTORE_REGS(&ScavRegDump); /* Restore Registers */
630 return GC_SUCCESS; /* Heap OK -- Enough space to continue */
633 DEBUG_STRING("Major Collection Required");
636 alloc_since_last_major_GC = 0;
637 #endif /* FORCE_GC */
641 alloc = (appelInfo.oldlim - appelInfo.oldbase) + 1;
643 #if defined(PROMOTION_DATA) /* For dead promote & premature promote data */
644 if (SM_stats_verbose) {
645 promote = appelInfo.oldlim - thisbase + 1;
647 #endif /* PROMOTION_DATA */
/* MAJOR COLLECTION: mark bit vector lives just below newlim. */
649 appelInfo.bit_words = (alloc + BITS_IN(BitWord) - 1) / BITS_IN(BitWord);
650 appelInfo.bits = (BitWord *)(appelInfo.newlim) - appelInfo.bit_words;
651 /* For some reason, this doesn't seem to use the last
652 allocatable word at appelInfo.newlim */
654 if (appelInfo.bits <= appelInfo.oldlim) {
655 fprintf(stderr, "APPEL Major: Not enough space for bit vector\n");
656 return GC_HARD_LIMIT_EXCEEDED;
659 /* Zero bit vector for marking phase of major collection */
660 { BitWord *ptr = appelInfo.bits,
661 *end = appelInfo.bits + appelInfo.bit_words;
662 while (ptr < end) { *(ptr++) = 0; };
669 /* bracket use of MARK_REG_MAP with RESTORE/SAVE of SCAV_REG_MAP */
670 RESTORE_REGS(&ScavRegDump);
673 appelInfo.OldCAFlist,
679 SAVE_REGS(&ScavRegDump);
683 sweepUpDeadMallocPtrs(sm->OldMallocPtrList,
689 /* Reset OldMutables -- this will be reconstructed during scan */
692 LinkCAFs(appelInfo.OldCAFlist);
694 #if defined(PROMOTION_DATA) /* For dead promote & premature promote data */
695 /* What does this have to do with CAFs? -- JSM */
696 if (SM_stats_verbose) {
701 fprintf(stderr, "Promote Bases: lim 0x%lx this 0x%lx prev 0x%lx Actual: ",
702 appelInfo.oldlim + 1, thisbase, prevbase);
705 /* search for first live closure for thisbase & prevbase */
706 for (root = 0; root < 2; root++) {
707 P_ baseptr, search, scan_w_start;
708 I_ prev_words, bit_words, bit_rem;
709 BitWord *bit_array_ptr, *bit_array_end;
711 baseptr = base[root];
712 prev_words = (baseptr - appelInfo.oldbase);
713 bit_words = prev_words / BITS_IN(BitWord);
714 bit_rem = prev_words & (BITS_IN(BitWord) - 1);
716 bit_array_ptr = appelInfo.bits + bit_words;
717 bit_array_end = appelInfo.bits + appelInfo.bit_words;
718 scan_w_start = baseptr - bit_rem;
721 while (bit_array_ptr < bit_array_end && !baseptr) {
722 BitWord w = *(bit_array_ptr++);
723 search = scan_w_start;
729 while (w && !baseptr) {
730 if (w & 0x1) { /* bit set -- found first closure */
733 search++; /* look at next bit */
737 scan_w_start += BITS_IN(BitWord);
/* NOTE(review): root only takes the values 0 and 1 in this loop, so
   `root == 2` is never true and the terminating "\n" is never printed;
   this was presumably meant to be `root == 1`. */
740 fprintf(stderr, "0x%lx%s", baseptr, root == 2 ? "\n" : " ");
743 base[root] = baseptr;
745 LINK_LOCATION_TO_CLOSURE(base + root);
749 #endif /* PROMOTION_DATA */
751 LinkRoots( sm->roots, sm->rootno );
756 LinkLiveGAs(appelInfo.oldbase, appelInfo.bits);
758 DEBUG_STRING("Linking Stable Pointer Table:");
759 LINK_LOCATION_TO_CLOSURE(&sm->StablePointerTable);
760 LinkAStack( MAIN_SpA, stackInfo.botA );
761 LinkBStack( MAIN_SuB, stackInfo.botB );
764 /* Do Inplace Compaction */
765 /* Returns start of next closure, -1 gives last allocated word */
767 appelInfo.oldlim = Inplace_Compaction(appelInfo.oldbase,
773 ,&(sm->OldMallocPtrList)
777 appelInfo.oldlast = appelInfo.oldlim;
778 resident = (appelInfo.oldlim - appelInfo.oldbase) + 1;
779 DO_MAX_RESIDENCY(resident); /* stats only */
781 /* set new generation base, if not fixed */
782 if (! appelInfo.newfixed) {
783 appelInfo.newbase = appelInfo.oldlim + 1 + (((appelInfo.newlim - appelInfo.oldlim) + 1) / 2);
786 /* set major threshold, if not fixed */
787 /* next major collection when old gen occupies 2/3rds of the free space or exceeds oldmax */
788 if (! SM_major_gen_size) {
789 appelInfo.oldthresh = appelInfo.oldlim + (appelInfo.newlim - appelInfo.oldlim) * 2 / 3;
790 if (appelInfo.oldthresh > appelInfo.oldmax)
791 appelInfo.oldthresh = appelInfo.oldmax;
794 sm->hp = hp_start = appelInfo.newbase - 1;
795 sm->hplim = appelInfo.newlim;
797 #if defined(FORCE_GC)
798 if (force_GC && (sm->hplim > sm->hp + GCInterval)) {
799 sm->hplim = sm->hp + GCInterval;
801 #endif /* FORCE_GC */
803 sm->OldLim = appelInfo.oldlim;
805 #if defined(PROMOTION_DATA) /* For dead promote & premature promote data */
806 if (SM_stats_verbose) {
807 /* restore moved thisbase & prevbase */
808 thisbase = base[0] ? base[0] : appelInfo.oldlim + 1;
809 prevbase = base[1] ? base[1] : appelInfo.oldlim + 1;
811 /* here are the numbers we want */
812 dead_prom = promote - (appelInfo.oldlim + 1 - thisbase);
813 dead_prev = prev_prom - (thisbase - prevbase) - dead_prev_prom;
816 fprintf(stderr, "Collect Bases: lim 0x%lx this 0x%lx prev 0x%lx\n",
817 appelInfo.oldlim + 1, thisbase, prevbase);
818 fprintf(stderr, "Promoted: %ld Dead: this %ld prev %ld + %ld\n",
819 promote, dead_prom, dead_prev_prom, dead_prev);
822 /* save values for next collection */
824 dead_prev_prom = dead_prom;
826 thisbase = appelInfo.oldlim + 1;
828 #endif /* PROMOTION_DATA */
834 if (SM_stats_verbose) {
835 char major_str[BIG_STRING_LEN];
837 sprintf(major_str, "%4u %4ld %3ld %3ld %4d %4d *Major* %4.1f%%",
838 (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
839 bstk_roots, sm->rootno, appelInfo.OldCAFno,
840 0, 0, resident / (StgFloat) SM_word_heap_size * 100);
842 /* ToDo: come up with some interesting statistics for the parallel world */
843 sprintf(major_str, "%4u %4ld %3ld %3ld %4d %4d *Major* %4.1f%%",
844 0, 0L, sm->rootno, appelInfo.OldCAFno, 0, 0,
845 resident / (StgFloat) SM_word_heap_size * 100);
848 #if defined(PROMOTION_DATA) /* For dead promote & premature promote data */
/* NOTE(review): promote_str is declared as an array of BIG_STRING_LEN
   *pointers* (char *[]), but sprintf/strcat treat it as a character
   buffer. It almost certainly should be `char promote_str[BIG_STRING_LEN];`
   -- as written the sprintf writes through an unrelated object type. */
849 { char *promote_str[BIG_STRING_LEN];
850 sprintf(promote_str, " %6ld %6ld", dead_prom*sizeof(W_), dead_prev*sizeof(W_));
851 strcat(major_str, promote_str);
853 #endif /* PROMOTION_DATA */
855 stat_endGC(0, alloc, resident, major_str);
857 stat_endGC(0, alloc, resident, "");
861 fprintf(stderr, "Major: newbase 0x%lx newlim 0x%lx; base 0x%lx lim 0x%lx thresh 0x%lx max 0x%lx\n hp 0x%lx, hplim 0x%lx, free %lu\n",
862 (W_) appelInfo.newbase, (W_) appelInfo.newlim,
863 (W_) appelInfo.oldbase, (W_) appelInfo.oldlim,
864 (W_) appelInfo.oldthresh, (W_) appelInfo.oldmax,
865 (W_) sm->hp, (W_) sm->hplim, (W_) (sm->hplim - sm->hp) * sizeof(W_));
869 /* To help flush out bugs, we trash the part of the heap from
870 which we're about to start allocating. */
871 TrashMem(sm->hp+1, sm->hplim);
874 RESTORE_REGS(&ScavRegDump); /* Restore Registers */
876 if ((appelInfo.oldlim > appelInfo.oldmax)
877 || (reqsize > sm->hplim - sm->hp) ) {
878 return( GC_HARD_LIMIT_EXCEEDED ); /* Heap absolutely exhausted */
879 } else if (reqsize + sm->hardHpOverflowSize > sm->hplim - sm->hp) {
880 return( GC_SOFT_LIMIT_EXCEEDED ); /* Heap nearly exhausted */
882 return( GC_SUCCESS ); /* Heap OK */