1 /* -----------------------------------------------------------------------------
2 * $Id: Storage.c,v 1.12 1999/02/05 16:03:01 simonm Exp $
4 * (c) The GHC Team, 1998-1999
6 * Storage manager front end
8 * ---------------------------------------------------------------------------*/
15 #include "BlockAlloc.h"
22 #include "StoragePriv.h"
/* File-scope state for the storage manager.  NOTE(review): this listing is
 * fragmentary — the left-margin numbers are original Storage.c line numbers
 * and several intervening lines are missing from view. */

/* Nursery allocation state, advanced by the mutator's allocation fast path. */
24 bdescr *current_nursery; /* next available nursery block, or NULL */
25 nat nursery_blocks; /* number of blocks in the nursery */

/* Chain of CAFs (constant applicative forms) entered since startup; linked
 * through their STATIC_LINK2 fields (see newCAF below). */
27 StgClosure *caf_list = NULL;

/* Bookkeeping for the allocate() interface (out-of-nursery allocation). */
29 bdescr *small_alloc_list; /* allocate()d small objects */
30 bdescr *large_alloc_list; /* allocate()d large objects */
31 nat alloc_blocks; /* number of allocate()d blocks since GC */
32 nat alloc_blocks_lim; /* approximate limit on alloc_blocks */

/* Bump-pointer pair for the small-object path of allocate(). */
34 StgPtr alloc_Hp = NULL; /* next free byte in small_alloc_list */
35 StgPtr alloc_HpLim = NULL; /* end of block at small_alloc_list */

/* Generation/step structures, populated by initStorage(). */
37 generation *generations; /* all the generations */
38 generation *g0; /* generation 0, for convenience */
39 generation *oldest_gen; /* oldest generation, for convenience */
40 step *g0s0; /* generation 0, step 0, for convenience */

/* Custom allocators registered with GMP via mp_set_memory_functions();
 * they allocate on the GHC heap so GMP results are garbage-collectable. */
45 static void *stgAllocForGMP (size_t size_in_bytes);
46 static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
47 static void stgDeallocForGMP (void *ptr, size_t size);
/* Body fragment of initStorage() — the signature and several interior lines
 * are not visible in this listing.  Sets up generations, steps, the nursery
 * (G0S0), the allocate() interface, and the GMP allocation hooks. */

/* Sanity-check the RTS flags: -H must not exceed -M. */
56 if (RtsFlags.GcFlags.heapSizeSuggestion >
57 RtsFlags.GcFlags.maxHeapSize) {
58 barf("Suggested heap size (-H<size>) is larger than max. heap size (-M<size>)\n");

63 /* allocate generation info array */
64 generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
65 * sizeof(struct _generation),

68 /* Initialise all generations */
69 for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
70 gen = &generations[g];
/* END_MUT_LIST terminates the per-generation mutable-closure lists used
 * by the generational write barrier. */
72 gen->mut_list = END_MUT_LIST;
73 gen->mut_once_list = END_MUT_LIST;
75 gen->failed_promotions = 0;

79 /* A couple of convenience pointers */
81 oldest_gen = &generations[RtsFlags.GcFlags.generations-1];

83 /* Allocate step structures in each generation */
84 if (RtsFlags.GcFlags.generations > 1) {
85 /* Only for multiple-generations */

87 /* Oldest generation: one step */
88 oldest_gen->n_steps = 1;
90 stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");

/* NOTE(review): comment says "2 steps" but the code uses the configurable
 * RtsFlags.GcFlags.steps — the flag presumably defaults to 2; confirm. */
92 /* set up all except the oldest generation with 2 steps */
93 for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
94 generations[g].n_steps = RtsFlags.GcFlags.steps;
95 generations[g].steps =
96 stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step),
97 "initStorage: steps");

101 /* single generation, i.e. a two-space collector */
103 g0->steps = stgMallocBytes (sizeof(struct _step), "initStorage: steps");

106 /* Initialise all steps */
107 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
108 for (s = 0; s < generations[g].n_steps; s++) {
109 step = &generations[g].steps[s];
113 step->gen = &generations[g];
117 step->large_objects = NULL;
118 step->new_large_objects = NULL;
119 step->scavenged_large_objects = NULL;

/* Each step evacuates into its .to step: the next step of the same
 * generation, or step 0 of the next generation for the last step. */
123 /* Set up the destination pointers in each younger gen. step */
124 for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
125 for (s = 0; s < generations[g].n_steps-1; s++) {
126 generations[g].steps[s].to = &generations[g].steps[s+1];
128 generations[g].steps[s].to = &generations[g+1].steps[0];

131 /* The oldest generation has one step and its destination is the
/* Self-referential: objects in the oldest step are copied in place. */
133 oldest_gen->steps[0].to = &oldest_gen->steps[0];

135 /* generation 0 is special: that's the nursery */
136 generations[0].max_blocks = 0;

138 /* G0S0: the allocation area. Policy: keep the allocation area
139 * small to begin with, even if we have a large suggested heap
140 * size. Reason: we're going to do a major collection first, and we
141 * don't want it to be a big one. This vague idea is borne out by
142 * rigorous experimental evidence.
144 step = &generations[0].steps[0];

146 nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
147 step->blocks = allocNursery(NULL, nursery_blocks);
148 step->n_blocks = nursery_blocks;
149 current_nursery = step->blocks;
150 /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */

152 weak_ptr_list = NULL;

155 /* initialise the allocate() interface */
156 small_alloc_list = NULL;
157 large_alloc_list = NULL;
159 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

162 /* Tell GNU multi-precision pkg about our custom alloc functions */
163 mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);

166 IF_DEBUG(gc, stat_describe_gens());
/* allocNursery: allocate `blocks` nursery blocks, chained onto `last_bd`.
 * NOTE(review): return type, braces and most of the loop body (block
 * allocation and linking) are missing from this listing; presumably it
 * returns the head of the block chain — confirm against the full source. */
170 allocNursery (bdescr *last_bd, nat blocks)
175 /* Allocate a nursery */
176 for (i=0; i < blocks; i++) {
/* Fresh block: free pointer starts at the beginning of the block. */
182 bd->free = bd->start;
/* resizeNursery: grow or shrink the nursery to exactly `blocks` blocks.
 * NOTE(review): fragmentary — the shrink loop's body (freeing blocks) and
 * the fprintf argument lists are missing from this listing. */
189 resizeNursery ( nat blocks )
/* Already the right size: nothing to do. */
193 if (nursery_blocks == blocks) {
194 ASSERT(g0s0->n_blocks == blocks);
/* Grow: chain additional blocks onto the existing nursery. */
198 else if (nursery_blocks < blocks) {
199 IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
201 g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
/* Shrink: walk the chain releasing blocks until the count matches. */
207 IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
209 for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
/* Keep the cached count and the step's own count in sync. */
217 g0s0->n_blocks = nursery_blocks = blocks;
/* Shutdown accounting fragment (presumably exitStorage() — the enclosing
 * signature is not visible).  Computes total words allocated for the final
 * stats report: whole nursery minus the blocks never reached. */
226 /* Return code ignored for now */
227 /* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */
228 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
/* Blocks after current_nursery were never allocated into; subtract them. */
229 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
230 allocated -= BLOCK_SIZE_W;
232 stat_exit(allocated);
/* newCAF: record a newly-entered CAF so the GC can find it.
 * NOTE(review): return type, braces, and the conditional structure that
 * separates the two recording schemes below are missing from this listing. */
236 newCAF(StgClosure* caf)
238 /* Put this CAF on the mutable list for the old generation.
239 * This is a HACK - the IND_STATIC closure doesn't really have
240 * a mut_link field, but we pretend it has - in fact we re-use
241 * the STATIC_LINK field for the time being, because when we
242 * come to do a major GC we won't need the mut_link field
243 * any more and can use it as a STATIC_LINK.
245 ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
246 oldest_gen->mut_once_list = (StgMutClosure *)caf;

/* Alternative path: thread the CAF onto caf_list via its static link. */
250 const StgInfoTable *info;

252 info = get_itbl(caf);
253 ASSERT(info->type == IND_STATIC);
254 STATIC_LINK2(info,caf) = caf_list;
260 /* -----------------------------------------------------------------------------
261 The allocate() interface

263 allocate(n) always succeeds, and returns a chunk of memory n words
264 long. n can be larger than the size of a block if necessary, in
265 which case a contiguous block group will be allocated.
266 -------------------------------------------------------------------------- */

/* NOTE(review): allocate()'s signature, variable declarations, the return
 * statements, and the small-allocation fall-through path are missing from
 * this listing; only the two size-class branches are visible. */

277 /* big allocation (>LARGE_OBJECT_THRESHOLD) */
278 /* ToDo: allocate directly into generation 1 */
279 if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
/* Round the request up to whole blocks and grab a contiguous group. */
280 nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
281 bd = allocGroup(req_blocks);
282 dbl_link_onto(bd, &g0s0->large_objects);
286 bd->free = bd->start;
287 /* don't add these blocks to alloc_blocks, since we're assuming
288 * that large objects are likely to remain live for quite a while
289 * (eg. running threads), so garbage collecting early won't make

294 /* small allocation (<LARGE_OBJECT_THRESHOLD) */
/* Need a new block when there is none yet, or n won't fit in the
 * current bump-pointer region. */
295 } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
/* Seal the previous block's free pointer before chaining a new one. */
296 if (small_alloc_list) {
297 small_alloc_list->free = alloc_Hp;
300 bd->link = small_alloc_list;
301 small_alloc_list = bd;
/* Reset the bump-pointer pair to span the fresh block. */
305 alloc_Hp = bd->start;
306 alloc_HpLim = bd->start + BLOCK_SIZE_W;
/* allocated_bytes: words allocated via allocate() since the last GC —
 * despite the name, the unit is words (BLOCK_SIZE_W and the Hp pointers
 * are word-based).  Full blocks counted, minus the unused tail of the
 * current block.  (Braces missing from this listing.) */
315 lnat allocated_bytes(void)
317 return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
320 /* -----------------------------------------------------------------------------
321 Allocation functions for GMP.

323 These all use the allocate() interface - we can't have any garbage
324 collection going on during a gmp operation, so we use allocate()
325 which always succeeds. The gmp operations which might need to
326 allocate will ask the storage manager (via doYouWantToGC()) whether
327 a garbage collection is required, in case we get into a loop doing
328 only allocate() style allocation.
329 -------------------------------------------------------------------------- */

/* stgAllocForGMP: GMP allocation hook.  Wraps the request in an
 * StgArrWords heap object and returns a pointer to its payload, so GMP
 * limb data lives on the GHC heap.  NOTE(review): the `static void *`
 * return line, braces and the StgArrWords declaration are missing here. */
332 stgAllocForGMP (size_t size_in_bytes)
335 nat data_size_in_words, total_size_in_words;

337 /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
338 ASSERT(size_in_bytes % sizeof(W_) == 0);

340 data_size_in_words = size_in_bytes / sizeof(W_);
/* Payload plus the StgArrWords header. */
341 total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;

343 /* allocate and fill it in. */
344 arr = (StgArrWords *)allocate(total_size_in_words);
345 SET_ARR_HDR(arr, &ARR_WORDS_info, CCCS, data_size_in_words);

347 /* and return a ptr to the goods inside the array */
348 return(BYTE_ARR_CTS(arr));
/* stgReallocForGMP: GMP realloc hook — allocate a new heap array and copy
 * the old contents byte by byte.  The old block is NOT freed; the GC
 * reclaims it (see stgDeallocForGMP).  NOTE(review): the return-type line,
 * the declaration/initialisation of loop index i, and the copy statement
 * inside the loop are missing from this listing. */
352 stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
354 void *new_stuff_ptr = stgAllocForGMP(new_size);
356 char *p = (char *) ptr;
357 char *q = (char *) new_stuff_ptr;

/* Copy old_size bytes from the old payload to the new one. */
359 for (; i < old_size; i++, p++, q++) {

363 return(new_stuff_ptr);
/* stgDeallocForGMP: GMP free hook — intentionally a no-op, since the
 * memory is a heap object and the garbage collector reclaims it. */
367 stgDeallocForGMP (void *ptr STG_UNUSED,
368 size_t size STG_UNUSED)
370 /* easy for us: the garbage collector does the dealloc'n */
373 /* -----------------------------------------------------------------------------
375 -------------------------------------------------------------------------- */

377 /* Approximate the amount of live data in the heap. To be called just
378 * after garbage collection (see GarbageCollect()).
/* NOTE(review): the function signature, `live` declaration/initialisation
 * and return statement are missing from this listing (presumably calcLive). */

/* Two-space collector: live data is everything in to-space. */
387 if (RtsFlags.GcFlags.generations == 1) {
388 live = g0s0->to_blocks * BLOCK_SIZE_W +
/* Full blocks plus the used fraction of the current allocation block. */
389 ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);

/* Generational: sum every step except G0S0 (the nursery is empty right
 * after a GC). */
392 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
393 for (s = 0; s < generations[g].n_steps; s++) {
394 /* approximate amount of live data (doesn't take into account slop
395 * at end of each block).
397 if (g == 0 && s == 0) {
400 step = &generations[g].steps[s];
401 live += step->n_blocks * BLOCK_SIZE_W +
402 ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
408 /* Approximate the number of blocks that will be needed at the next
409 * garbage collection.

411 * Assume: all data currently live will remain live. Steps that will
412 * be collected next time will therefore need twice as many blocks
413 * since all the data will be copied.
/* NOTE(review): the signature and the `needed` declaration/return are
 * missing from this listing (presumably calcNeeded). */

422 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
423 for (s = 0; s < generations[g].n_steps; s++) {
/* Skip the nursery — it is resized separately. */
424 if (g == 0 && s == 0) { continue; }
425 step = &generations[g].steps[s];
/* If step 0 of this generation exceeds the generation's block budget,
 * the generation will be collected next GC: copying needs 2x blocks. */
426 if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
427 needed += 2 * step->n_blocks;
429 needed += step->n_blocks;
436 /* -----------------------------------------------------------------------------
439 memInventory() checks for memory leaks by counting up all the
440 blocks we know about and comparing that to the number of blocks
441 allegedly floating around in the system.
442 -------------------------------------------------------------------------- */

/* NOTE(review): signature, local declarations and closing braces of
 * memInventory() are missing from this listing. */

452 lnat total_blocks = 0, free_blocks = 0;

454 /* count the blocks we current have */

456 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
457 for (s = 0; s < generations[g].n_steps; s++) {
458 step = &generations[g].steps[s];
459 total_blocks += step->n_blocks;
460 if (RtsFlags.GcFlags.generations == 1) {
461 /* two-space collector has a to-space too :-) */
462 total_blocks += g0s0->to_blocks;
/* Large objects hang off each step as block groups. */
464 for (bd = step->large_objects; bd; bd = bd->link) {
465 total_blocks += bd->blocks;
466 /* hack for megablock groups: they have an extra block or two in
467 the second and subsequent megablocks where the block
468 descriptors would normally go.
/* Subtract the descriptor-area blocks for each extra megablock. */
470 if (bd->blocks > BLOCKS_PER_MBLOCK) {
471 total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
472 * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE);

478 /* any blocks held by allocate() */
479 for (bd = small_alloc_list; bd; bd = bd->link) {
480 total_blocks += bd->blocks;
482 for (bd = large_alloc_list; bd; bd = bd->link) {
483 total_blocks += bd->blocks;

486 /* count the blocks on the free list */
487 free_blocks = countFreeList();

/* Leak check: everything accounted for must equal everything mapped. */
489 ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);

/* Diagnostic report when the inventory doesn't balance (e.g. when the
 * ASSERT above is compiled out). */
492 if (total_blocks + free_blocks != mblocks_allocated *
494 fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n",
495 total_blocks, free_blocks, total_blocks + free_blocks,
496 mblocks_allocated * BLOCKS_PER_MBLOCK);
501 /* Full heap sanity check. */
/* NOTE(review): signature and closing braces are missing from this listing
 * (presumably checkSanity(nat N) — N looks like the count of generations
 * just collected; confirm against the caller in GarbageCollect()). */

/* Two-space collector: check to-space and the large-object chain. */
508 if (RtsFlags.GcFlags.generations == 1) {
509 checkHeap(g0s0->to_space, NULL);
510 checkChain(g0s0->large_objects);

/* Generations up to N were just collected: check from the block start. */
513 for (g = 0; g <= N; g++) {
514 for (s = 0; s < generations[g].n_steps; s++) {
515 if (g == 0 && s == 0) { continue; }
516 checkHeap(generations[g].steps[s].blocks, NULL);

/* Older, uncollected generations: also verify their large objects. */
519 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
520 for (s = 0; s < generations[g].n_steps; s++) {
521 checkHeap(generations[g].steps[s].blocks,
522 generations[g].steps[s].blocks->start);
523 checkChain(generations[g].steps[s].large_objects);

526 checkFreeListSanity();