1 /* -----------------------------------------------------------------------------
2 * $Id: Storage.c,v 1.9 1999/02/02 14:21:33 simonm Exp $
4 * Storage manager front end
6 * ---------------------------------------------------------------------------*/
13 #include "BlockAlloc.h"
20 #include "StoragePriv.h"
/* ---------------------------------------------------------------------------
 * Global state of the storage manager.  These are shared with the garbage
 * collector (GarbageCollect()) and the mutator's allocation entry points.
 * ------------------------------------------------------------------------- */

22 bdescr *current_nursery; /* next available nursery block, or NULL */
23 nat nursery_blocks; /* number of blocks in the nursery */

/* Chain of constant applicative forms registered via newCAF(); used so the
 * GC can find and revert/trace them.  NOTE(review): only maintained in the
 * debug path visible in newCAF() below — confirm against the elided lines. */
25 StgClosure *caf_list = NULL;

27 bdescr *small_alloc_list; /* allocate()d small objects */
28 bdescr *large_alloc_list; /* allocate()d large objects */
29 nat alloc_blocks; /* number of allocate()d blocks since GC */
30 nat alloc_blocks_lim; /* approximate limit on alloc_blocks */

/* Bump-allocation cursor for the small-object allocate() path: alloc_Hp is
 * the next free word, alloc_HpLim the end of the current block. */
32 StgPtr alloc_Hp = NULL; /* next free byte in small_alloc_list */
33 StgPtr alloc_HpLim = NULL; /* end of block at small_alloc_list */

35 generation *generations; /* all the generations */
36 generation *g0; /* generation 0, for convenience */
37 generation *oldest_gen; /* oldest generation, for convenience */
38 step *g0s0; /* generation 0, step 0, for convenience */

/* Custom allocation hooks handed to GMP via mp_set_memory_functions();
 * they allocate GMP limb arrays on the garbage-collected heap. */
43 static void *stgAllocForGMP (size_t size_in_bytes);
44 static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
45 static void stgDeallocForGMP (void *ptr, size_t size);
/* initStorage (body fragment — the function header is elided in this view):
 * one-time set-up of the storage manager.  Builds the generation/step
 * structures according to the GC-related RtsFlags, allocates the nursery
 * (generation 0, step 0), initialises the allocate() interface, and
 * registers our allocation functions with GMP. */

/* Sanity: a suggested heap (-H) larger than the maximum heap (-M) can never
 * be honoured, so fail immediately rather than thrash later. */
54 if (RtsFlags.GcFlags.heapSizeSuggestion >
55 RtsFlags.GcFlags.maxHeapSize) {
56 barf("Suggested heap size (-H<size>) is larger than max. heap size (-M<size>)\n");

61 /* allocate generation info array */
62 generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
63 * sizeof(struct _generation),

66 /* Initialise all generations */
67 for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
68 gen = &generations[g];
/* Mutable-object lists start empty (END_MUT_LIST sentinel, not NULL). */
70 gen->mut_list = END_MUT_LIST;
71 gen->mut_once_list = END_MUT_LIST;
73 gen->failed_promotions = 0;
/* Every generation starts with the configured minimum old-gen size;
 * generation 0 is overridden to 0 below, since the nursery is sized
 * separately. */
74 gen->max_blocks = RtsFlags.GcFlags.minOldGenSize;

77 /* A couple of convenience pointers */
79 oldest_gen = &generations[RtsFlags.GcFlags.generations-1];

81 /* Allocate step structures in each generation */
82 if (RtsFlags.GcFlags.generations > 1) {
83 /* Only for multiple-generations */

85 /* Oldest generation: one step */
86 oldest_gen->n_steps = 1;
88 stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");

/* All younger generations get RtsFlags.GcFlags.steps steps each.
 * NOTE(review): the comment says "2 steps" but the code uses the
 * configurable GcFlags.steps — the comment is the stale one. */
90 /* set up all except the oldest generation with 2 steps */
91 for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
92 generations[g].n_steps = RtsFlags.GcFlags.steps;
93 generations[g].steps =
94 stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step),
95 "initStorage: steps");

99 /* single generation, i.e. a two-space collector */
101 g0->steps = stgMallocBytes (sizeof(struct _step), "initStorage: steps");

104 /* Initialise all steps */
105 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
106 for (s = 0; s < generations[g].n_steps; s++) {
107 step = &generations[g].steps[s];
111 step->gen = &generations[g];
/* No large objects yet; these lists are populated by allocate() and
 * by the GC's evacuation of large objects. */
115 step->large_objects = NULL;
116 step->new_large_objects = NULL;
117 step->scavenged_large_objects = NULL;

/* Promotion ("to") wiring: each step evacuates into the next step of
 * its own generation, and the last step of a generation evacuates into
 * step 0 of the next generation. */
121 /* Set up the destination pointers in each younger gen. step */
122 for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
123 for (s = 0; s < generations[g].n_steps-1; s++) {
124 generations[g].steps[s].to = &generations[g].steps[s+1];
126 generations[g].steps[s].to = &generations[g+1].steps[0];

/* Objects in the oldest generation have nowhere further to go: its single
 * step evacuates into itself. */
129 /* The oldest generation has one step and its destination is the
131 oldest_gen->steps[0].to = &oldest_gen->steps[0];

133 /* generation 0 is special: that's the nursery */
134 generations[0].max_blocks = 0;

136 /* G0S0: the allocation area */
137 step = &generations[0].steps[0];
139 step->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
140 step->n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
141 nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
142 current_nursery = step->blocks;
143 /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */

145 weak_ptr_list = NULL;

148 /* initialise the allocate() interface */
149 small_alloc_list = NULL;
150 large_alloc_list = NULL;
/* First GC is triggered once allocate() has handed out about one
 * allocation area's worth of blocks. */
152 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

155 /* Tell GNU multi-precision pkg about our custom alloc functions */
156 mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);

159 IF_DEBUG(gc, stat_describe_gens());
/* allocNursery: allocate `blocks` fresh nursery blocks and chain them onto
 * `last_bd` (pass NULL to start a new chain, as initStorage does).
 * Each block's free pointer is reset to its start.
 * NOTE(review): the loop body and return statement are elided in this view —
 * presumably it returns the head of the block chain; confirm against the
 * full source. */
163 allocNursery (bdescr *last_bd, nat blocks)
168 /* Allocate a nursery */
169 for (i=0; i < blocks; i++) {
175 bd->free = bd->start;
/* resizeNursery: grow or shrink the nursery (G0S0) to exactly `blocks`
 * blocks.  Called between GCs; keeps nursery_blocks and g0s0->n_blocks in
 * agreement. */
182 resizeNursery ( nat blocks )
/* Already the right size: nothing to do. */
186 if (nursery_blocks == blocks) {
187 ASSERT(g0s0->n_blocks == blocks);
/* Growing: tack the extra blocks onto the existing chain. */
191 else if (nursery_blocks < blocks) {
192 IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
194 g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
/* Shrinking: walk the chain, freeing blocks until only `blocks` remain.
 * NOTE(review): the freeing statements inside this loop are elided. */
200 IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
202 for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
210 g0s0->n_blocks = nursery_blocks = blocks;
/* Shutdown-time statistics (presumably the body of exitStorage — the
 * function header is elided in this view; confirm against the full source).
 * Computes the total words allocated this run: everything handed out in the
 * nursery plus allocate()'d words, minus nursery blocks that were never
 * reached (those after current_nursery). */
219 /* Return code ignored for now */
220 /* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */
221 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
/* Subtract whole blocks of the nursery that were never entered. */
222 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
223 allocated -= BLOCK_SIZE_W;
225 stat_exit(allocated);
/* newCAF: called when a constant applicative form is first entered.
 * Records the CAF on the oldest generation's mut_once_list so the GC
 * treats it as a root into younger generations. */
229 newCAF(StgClosure* caf)
231 /* Put this CAF on the mutable list for the old generation.
232 * This is a HACK - the IND_STATIC closure doesn't really have
233 * a mut_link field, but we pretend it has - in fact we re-use
234 * the STATIC_LINK field for the time being, because when we
235 * come to do a major GC we won't need the mut_link field
236 * any more and can use it as a STATIC_LINK.
238 ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
239 oldest_gen->mut_once_list = (StgMutClosure *)caf;

/* Also thread the CAF onto caf_list via its STATIC_LINK2 field.
 * NOTE(review): the surrounding lines are elided here — this section is
 * probably conditionally compiled (debug/GHCi support); confirm before
 * relying on caf_list being maintained in all builds. */
243 const StgInfoTable *info;
245 info = get_itbl(caf);
246 ASSERT(info->type == IND_STATIC);
247 STATIC_LINK2(info,caf) = caf_list;
253 /* -----------------------------------------------------------------------------
254 The allocate() interface
256 allocate(n) always succeeds, and returns a chunk of memory n words
257 long. n can be larger than the size of a block if necessary, in
258 which case a contiguous block group will be allocated.
259 -------------------------------------------------------------------------- */
/* allocate() body fragment (header elided): always-succeeding allocation of
 * n words.  Two paths: large requests get their own contiguous block group
 * on g0s0's large-object list; small requests are bump-allocated out of the
 * current block on small_alloc_list. */
270 /* big allocation (>LARGE_OBJECT_THRESHOLD) */
271 /* ToDo: allocate directly into generation 1 */
272 if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
/* Round the byte size up to whole blocks and grab a contiguous group. */
273 nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
274 bd = allocGroup(req_blocks);
275 dbl_link_onto(bd, &g0s0->large_objects);
279 bd->free = bd->start;
280 /* don't add these blocks to alloc_blocks, since we're assuming
281 * that large objects are likely to remain live for quite a while
282 * (eg. running threads), so garbage collecting early won't make

287 /* small allocation (<LARGE_OBJECT_THRESHOLD) */
/* Need a new small-object block if there is none yet, or if the request
 * doesn't fit in the space left before alloc_HpLim. */
288 } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
/* Seal the current block: record how far we bumped into it. */
289 if (small_alloc_list) {
290 small_alloc_list->free = alloc_Hp;
/* Push the fresh block on the list and reset the bump cursor.
 * NOTE(review): the allocation of bd itself is on an elided line. */
293 bd->link = small_alloc_list;
294 small_alloc_list = bd;
298 alloc_Hp = bd->start;
299 alloc_HpLim = bd->start + BLOCK_SIZE_W;
/* allocated_bytes: amount allocated through allocate() since the last GC —
 * all blocks handed out minus the unused tail of the current block.
 * NOTE(review): despite the name, the unit is words, not bytes
 * (BLOCK_SIZE_W and the Hp pointers are word-granular); the caller at
 * shutdown also treats it as words. */
308 lnat allocated_bytes(void)
310 return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
313 /* -----------------------------------------------------------------------------
314 Allocation functions for GMP.
316 These all use the allocate() interface - we can't have any garbage
317 collection going on during a gmp operation, so we use allocate()
318 which always succeeds. The gmp operations which might need to
319 allocate will ask the storage manager (via doYouWantToGC()) whether
320 a garbage collection is required, in case we get into a loop doing
321 only allocate() style allocation.
322 -------------------------------------------------------------------------- */
/* stgAllocForGMP: GMP allocation hook.  Wraps the requested limb storage in
 * a heap-allocated StgArrWords and returns a pointer to its payload, so GMP
 * results live on the garbage-collected heap and need no explicit free. */
325 stgAllocForGMP (size_t size_in_bytes)
328 nat data_size_in_words, total_size_in_words;

330 /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
331 ASSERT(size_in_bytes % sizeof(W_) == 0);

333 data_size_in_words = size_in_bytes / sizeof(W_);
/* Payload plus the StgArrWords header. */
334 total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;

336 /* allocate and fill it in. */
337 arr = (StgArrWords *)allocate(total_size_in_words);
338 SET_ARR_HDR(arr, &ARR_WORDS_info, CCCS, data_size_in_words);

340 /* and return a ptr to the goods inside the array */
341 return(BYTE_ARR_CTS(arr));
/* stgReallocForGMP: GMP reallocation hook.  Allocates a fresh heap array of
 * new_size bytes and copies the old contents byte by byte; the old array is
 * simply abandoned for the GC to reclaim. */
345 stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
347 void *new_stuff_ptr = stgAllocForGMP(new_size);
349 char *p = (char *) ptr;
350 char *q = (char *) new_stuff_ptr;
/* NOTE(review): the declaration/initialisation of i is on an elided line. */
352 for (; i < old_size; i++, p++, q++) {
356 return(new_stuff_ptr);
/* stgDeallocForGMP: GMP deallocation hook — deliberately a no-op, since the
 * arrays handed out by stgAllocForGMP live on the GC'd heap. */
360 stgDeallocForGMP (void *ptr STG_UNUSED,
361 size_t size STG_UNUSED)
363 /* easy for us: the garbage collector does the dealloc'n */
366 /* -----------------------------------------------------------------------------
368 -------------------------------------------------------------------------- */
370 /* Approximate the amount of live data in the heap. To be called just
371 * after garbage collection (see GarbageCollect()).
/* (body fragment — function header elided)  Sums, per step, whole blocks
 * plus the partially filled current block; slop at the end of earlier
 * blocks is ignored.  The nursery (g0s0) is skipped in the generational
 * case since it is empty right after a GC. */
380 if (RtsFlags.GcFlags.generations == 1) {
/* Two-space collector: live data is whatever was copied to to-space. */
381 live = g0s0->to_blocks * BLOCK_SIZE_W +
382 ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);

385 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
386 for (s = 0; s < generations[g].n_steps; s++) {
387 /* approximate amount of live data (doesn't take into account slop
388 * at end of each block).
390 if (g == 0 && s == 0) {
393 step = &generations[g].steps[s];
394 live += step->n_blocks * BLOCK_SIZE_W +
395 ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
401 /* Approximate the number of blocks that will be needed at the next
402 * garbage collection.
404 * Assume: all data currently live will remain live. Steps that will
405 * be collected next time will therefore need twice as many blocks
406 * since all the data will be copied.
/* (body fragment — function header elided) */
415 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
416 for (s = 0; s < generations[g].n_steps; s++) {
/* The nursery's requirement is accounted for elsewhere. */
417 if (g == 0 && s == 0) { continue; }
418 step = &generations[g].steps[s];
/* Looking at steps[0] (not `step`) is intentional: whether generation g
 * will be collected is decided by its step-0 occupancy vs. max_blocks;
 * if it will be, every step of it needs copy space (2x).
 * NOTE(review): confirm against GarbageCollect()'s collection policy. */
419 if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
420 needed += 2 * step->n_blocks;
422 needed += step->n_blocks;
429 /* -----------------------------------------------------------------------------
432 memInventory() checks for memory leaks by counting up all the
433 blocks we know about and comparing that to the number of blocks
434 allegedly floating around in the system.
435 -------------------------------------------------------------------------- */
/* memInventory body fragment (header elided): leak check.  Counts every
 * block the storage manager knows about — step blocks, large objects,
 * allocate()'d chains, the two-space to-space — and compares against the
 * block allocator's notion of how many blocks exist. */
445 lnat total_blocks = 0, free_blocks = 0;

447 /* count the blocks we currently have */

449 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
450 for (s = 0; s < generations[g].n_steps; s++) {
451 step = &generations[g].steps[s];
452 total_blocks += step->n_blocks;
453 if (RtsFlags.GcFlags.generations == 1) {
454 /* two-space collector has a to-space too :-) */
455 total_blocks += g0s0->to_blocks;
457 for (bd = step->large_objects; bd; bd = bd->link) {
458 total_blocks += bd->blocks;
459 /* hack for megablock groups: they have an extra block or two in
460 the second and subsequent megablocks where the block
461 descriptors would normally go.
/* Subtract the descriptor-area blocks of each extra megablock so the
 * count matches what the block allocator actually hands out. */
463 if (bd->blocks > BLOCKS_PER_MBLOCK) {
464 total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
465 * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE);

471 /* any blocks held by allocate() */
472 for (bd = small_alloc_list; bd; bd = bd->link) {
473 total_blocks += bd->blocks;
475 for (bd = large_alloc_list; bd; bd = bd->link) {
476 total_blocks += bd->blocks;

479 /* count the blocks on the free list */
480 free_blocks = countFreeList();

/* Invariant: live + free accounts for every block in every megablock. */
482 ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);

/* On mismatch, print the discrepancy before (presumably) the assertion
 * build aborts.  NOTE(review): the surrounding #ifdef, if any, is elided. */
485 if (total_blocks + free_blocks != mblocks_allocated *
487 fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n",
488 total_blocks, free_blocks, total_blocks + free_blocks,
489 mblocks_allocated * BLOCKS_PER_MBLOCK);
494 /* Full heap sanity check. */
/* (body fragment — function header elided)  Walks the heap verifying
 * closure integrity.  NOTE(review): N is not declared in this view —
 * presumably the index of the most recently collected generation, supplied
 * by the GC; confirm against the full source. */
501 if (RtsFlags.GcFlags.generations == 1) {
/* Two-space collector: everything live is in to-space. */
502 checkHeap(g0s0->to_space, NULL);
503 checkChain(g0s0->large_objects);

/* Generations up to and including N were just collected; check each
 * step's blocks from the start (nursery G0S0 excluded). */
506 for (g = 0; g <= N; g++) {
507 for (s = 0; s < generations[g].n_steps; s++) {
508 if (g == 0 && s == 0) { continue; }
509 checkHeap(generations[g].steps[s].blocks, NULL);

/* Older, uncollected generations: check from each chain's start marker. */
512 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
513 for (s = 0; s < generations[g].n_steps; s++) {
514 checkHeap(generations[g].steps[s].blocks,
515 generations[g].steps[s].blocks->start);
516 checkChain(generations[g].steps[s].large_objects);

519 checkFreeListSanity();