1 /* -----------------------------------------------------------------------------
2 * $Id: Storage.c,v 1.8 1999/01/28 15:04:02 simonm Exp $
4 * Storage manager front end
6 * ---------------------------------------------------------------------------*/
13 #include "BlockAlloc.h"
20 #include "StoragePriv.h"
/* File-scope state of the storage manager.  NOTE(review): this listing is
 * elided -- the original line numbers are embedded at the start of each
 * line and some intervening lines are missing. */
22 bdescr *current_nursery; /* next available nursery block, or NULL */
23 nat nursery_blocks; /* number of blocks in the nursery */
/* Chain of CAFs (constant applicative forms) linked via their
 * STATIC_LINK field; see newCAF() below. */
25 StgClosure *caf_list = NULL;
27 bdescr *small_alloc_list; /* allocate()d small objects */
28 bdescr *large_alloc_list; /* allocate()d large objects */
29 nat alloc_blocks; /* number of allocate()d blocks since GC */
30 nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
32 StgPtr alloc_Hp = NULL; /* next free byte in small_alloc_list */
33 StgPtr alloc_HpLim = NULL; /* end of block at small_alloc_list */
35 generation *generations; /* all the generations */
36 generation *g0; /* generation 0, for convenience */
37 generation *oldest_gen; /* oldest generation, for convenience */
38 step *g0s0; /* generation 0, step 0, for convenience */
/* Custom allocation hooks handed to GMP via mp_set_memory_functions()
 * in initStorage(); defined near the end of this file. */
43 static void *stgAllocForGMP (size_t size_in_bytes);
44 static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
45 static void stgDeallocForGMP (void *ptr, size_t size);
/* Body of initStorage(): sets up the generation/step structures, the
 * nursery, the allocate() interface and the GMP hooks.  NOTE(review):
 * the function header and several statements are elided from this
 * listing (line numbers are non-contiguous). */
/* Sanity check on RTS flags: -H must not exceed -M. */
54 if (RtsFlags.GcFlags.heapSizeSuggestion >
55 RtsFlags.GcFlags.maxHeapSize) {
56 barf("Suggested heap size (-H<size>) is larger than max. heap size (-M<size>)\n");
61 /* allocate generation info array */
62 generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
63 * sizeof(struct _generation),
66 /* Initialise all generations */
67 for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
68 gen = &generations[g];
70 gen->mut_list = END_MUT_LIST;
72 gen->failed_promotions = 0;
73 gen->max_blocks = RtsFlags.GcFlags.minOldGenSize;
76 /* A couple of convenience pointers */
78 oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
80 /* Allocate step structures in each generation */
81 if (RtsFlags.GcFlags.generations > 1) {
82 /* Only for multiple-generations */
84 /* Oldest generation: one step */
85 oldest_gen->n_steps = 1;
87 stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");
89 /* set up all except the oldest generation with 2 steps */
90 for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
91 generations[g].n_steps = RtsFlags.GcFlags.steps;
92 generations[g].steps =
93 stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step),
94 "initStorage: steps");
98 /* single generation, i.e. a two-space collector */
100 g0->steps = stgMallocBytes (sizeof(struct _step), "initStorage: steps");
103 /* Initialise all steps */
104 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
105 for (s = 0; s < generations[g].n_steps; s++) {
106 step = &generations[g].steps[s];
110 step->gen = &generations[g];
114 step->large_objects = NULL;
115 step->new_large_objects = NULL;
116 step->scavenged_large_objects = NULL;
120 /* Set up the destination pointers in each younger gen. step */
121 for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
122 for (s = 0; s < generations[g].n_steps-1; s++) {
/* Objects surviving a GC in step s are promoted to step s+1 of the
 * same generation; the last step promotes into the next generation
 * (line 125 below -- the intervening brace/line is elided). */
123 generations[g].steps[s].to = &generations[g].steps[s+1];
125 generations[g].steps[s].to = &generations[g+1].steps[0];
128 /* The oldest generation has one step and its destination is the
/* ... same step (it collects in place), hence to == itself. */
130 oldest_gen->steps[0].to = &oldest_gen->steps[0];
132 /* generation 0 is special: that's the nursery */
133 generations[0].max_blocks = 0;
135 /* G0S0: the allocation area */
136 step = &generations[0].steps[0];
138 step->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
139 step->n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
140 nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
141 current_nursery = step->blocks;
142 /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
144 weak_ptr_list = NULL;
147 /* initialise the allocate() interface */
148 small_alloc_list = NULL;
149 large_alloc_list = NULL;
151 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
154 /* Tell GNU multi-precision pkg about our custom alloc functions */
155 mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
158 IF_DEBUG(gc, stat_describe_gens());
/* allocNursery: allocate `blocks` fresh nursery blocks and chain them
 * onto `last_bd`.  NOTE(review): return type, loop body and return
 * statement are elided from this listing. */
162 allocNursery (bdescr *last_bd, nat blocks)
167 /* Allocate a nursery */
168 for (i=0; i < blocks; i++) {
/* Each new block starts empty: free pointer reset to the block start. */
174 bd->free = bd->start;
/* resizeNursery: grow or shrink the nursery to exactly `blocks` blocks.
 * NOTE(review): return type, braces and the block-freeing statements in
 * the shrink path are elided from this listing. */
181 resizeNursery ( nat blocks )
/* Already the right size: nothing to do. */
185 if (nursery_blocks == blocks) {
186 ASSERT(g0s0->n_blocks == blocks);
/* Growing: chain the extra blocks onto the existing nursery. */
190 else if (nursery_blocks < blocks) {
191 IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
193 g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
/* Shrinking: walk the chain, releasing blocks until we reach the target. */
199 IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
201 for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
/* Record the new size in both the step and the global counter. */
209 g0s0->n_blocks = nursery_blocks = blocks;
/* Storage-manager shutdown fragment (presumably exitStorage -- the
 * function header is elided from this listing): computes the total
 * words allocated this run and reports it via stat_exit(). */
218 /* Return code ignored for now */
219 /* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */
220 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
/* Subtract the nursery blocks that were never reached, i.e. everything
 * after current_nursery was counted above but not actually used. */
221 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
222 allocated -= BLOCK_SIZE_W;
224 stat_exit(allocated);
/* recordMutable: put mutable closure `p` on its generation's mutable
 * list so the generational GC can find old-to-young pointers.
 * NOTE(review): return type, braces and the lookup of `bd` (the block
 * descriptor for p) are elided from this listing. */
228 recordMutable(StgMutClosure *p)
232 ASSERT(closure_MUTABLE(p));
236 /* no need to bother in generation 0 */
/* mut_link == NULL means p is not yet on a mutable list; link it in
 * at the head of its generation's list. */
241 if (p->mut_link == NULL) {
242 p->mut_link = bd->gen->mut_list;
243 bd->gen->mut_list = p;
/* newCAF: register a newly-entered CAF (constant applicative form)
 * with the storage manager.  NOTE(review): return type, braces and
 * some statements are elided from this listing. */
248 newCAF(StgClosure* caf)
250 /* Put this CAF on the mutable list for the old generation.
251 * This is a HACK - the IND_STATIC closure doesn't really have
252 * a mut_link field, but we pretend it has - in fact we re-use
253 * the STATIC_LINK field for the time being, because when we
254 * come to do a major GC we won't need the mut_link field
255 * any more and can use it as a STATIC_LINK.
257 ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_list;
258 oldest_gen->mut_list = (StgMutClosure *)caf;
/* Alternative path (presumably the single-generation case -- the
 * enclosing conditional is elided): thread the CAF onto caf_list
 * via its STATIC_LINK field instead. */
262 const StgInfoTable *info;
264 info = get_itbl(caf);
265 ASSERT(info->type == IND_STATIC);
266 STATIC_LINK2(info,caf) = caf_list;
272 /* -----------------------------------------------------------------------------
273 The allocate() interface
275 allocate(n) always succeeds, and returns a chunk of memory n words
276 long. n can be larger than the size of a block if necessary, in
277 which case a contiguous block group will be allocated.
278 -------------------------------------------------------------------------- */
/* Body of allocate() -- the function header, return statements and
 * closing braces are elided from this listing. */
289 /* big allocation (>LARGE_OBJECT_THRESHOLD) */
290 /* ToDo: allocate directly into generation 1 */
291 if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
/* Round the request up to whole blocks and grab a contiguous group. */
292 nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
293 bd = allocGroup(req_blocks);
294 dbl_link_onto(bd, &g0s0->large_objects);
298 bd->free = bd->start;
299 /* don't add these blocks to alloc_blocks, since we're assuming
300 * that large objects are likely to remain live for quite a while
301 * (eg. running threads), so garbage collecting early won't make
306 /* small allocation (<LARGE_OBJECT_THRESHOLD) */
/* Small object doesn't fit in the current allocation block (or there
 * is no block yet): retire the current block and start a new one. */
307 } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
308 if (small_alloc_list) {
/* Record how far we filled the block we are retiring. */
309 small_alloc_list->free = alloc_Hp;
312 bd->link = small_alloc_list;
313 small_alloc_list = bd;
/* Reset the bump-allocation pointers into the fresh block. */
317 alloc_Hp = bd->start;
318 alloc_HpLim = bd->start + BLOCK_SIZE_W;
/* allocated_bytes: words handed out via allocate() since the last GC --
 * whole blocks minus the unused tail of the current block.  (Despite
 * the name, the unit is words, matching BLOCK_SIZE_W.  Braces elided
 * from this listing.) */
327 lnat allocated_bytes(void)
329 return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
332 /* -----------------------------------------------------------------------------
333 Allocation functions for GMP.
335 These all use the allocate() interface - we can't have any garbage
336 collection going on during a gmp operation, so we use allocate()
337 which always succeeds. The gmp operations which might need to
338 allocate will ask the storage manager (via doYouWantToGC()) whether
339 a garbage collection is required, in case we get into a loop doing
340 only allocate() style allocation.
341 -------------------------------------------------------------------------- */
/* stgAllocForGMP: GMP allocation hook.  Wraps the requested limb
 * storage in an StgArrWords heap object and returns a pointer to its
 * payload.  (Return type and braces elided from this listing.) */
344 stgAllocForGMP (size_t size_in_bytes)
347 nat data_size_in_words, total_size_in_words;
349 /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
350 ASSERT(size_in_bytes % sizeof(W_) == 0);
352 data_size_in_words = size_in_bytes / sizeof(W_);
/* Total includes the StgArrWords header in front of the payload. */
353 total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
355 /* allocate and fill it in. */
356 arr = (StgArrWords *)allocate(total_size_in_words);
357 SET_ARR_HDR(arr, &ARR_WORDS_info, CCCS, data_size_in_words);
359 /* and return a ptr to the goods inside the array */
360 return(BYTE_ARR_CTS(arr));
/* stgReallocForGMP: GMP reallocation hook.  Allocates a new array and
 * copies old_size bytes across; the old array is left for the GC to
 * reclaim.  (Return type, loop body and braces elided from this
 * listing.) */
364 stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
366 void *new_stuff_ptr = stgAllocForGMP(new_size);
368 char *p = (char *) ptr;
369 char *q = (char *) new_stuff_ptr;
/* Byte-wise copy of the old contents into the new array. */
371 for (; i < old_size; i++, p++, q++) {
375 return(new_stuff_ptr);
/* stgDeallocForGMP: GMP deallocation hook -- intentionally a no-op. */
379 stgDeallocForGMP (void *ptr STG_UNUSED,
380 size_t size STG_UNUSED)
382 /* easy for us: the garbage collector does the dealloc'n */
385 /* -----------------------------------------------------------------------------
387 -------------------------------------------------------------------------- */
389 /* Approximate the amount of live data in the heap. To be called just
390 * after garbage collection (see GarbageCollect()).
/* Live-data estimator (presumably calcLive -- header elided from this
 * listing).  Sums block counts plus the partially-filled last block of
 * each step. */
399 if (RtsFlags.GcFlags.generations == 1) {
/* Two-space collector: live data is whatever was copied to to-space. */
400 live = g0s0->to_blocks * BLOCK_SIZE_W +
401 ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
404 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
405 for (s = 0; s < generations[g].n_steps; s++) {
406 /* approximate amount of live data (doesn't take into account slop
407 * at end of each block).
/* G0S0 is the nursery: skipped (body of this branch is elided). */
409 if (g == 0 && s == 0) {
412 step = &generations[g].steps[s];
413 live += step->n_blocks * BLOCK_SIZE_W +
414 ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
420 /* Approximate the number of blocks that will be needed at the next
421 * garbage collection.
423 * Assume: all data currently live will remain live. Steps that will
424 * be collected next time will therefore need twice as many blocks
425 * since all the data will be copied.
/* Needed-blocks estimator (presumably calcNeeded -- header and return
 * elided from this listing). */
434 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
435 for (s = 0; s < generations[g].n_steps; s++) {
436 if (g == 0 && s == 0) { continue; }
437 step = &generations[g].steps[s];
/* If step 0 of this generation is over its quota the generation will
 * be collected, so budget double (copying collection needs to-space). */
438 if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
439 needed += 2 * step->n_blocks;
441 needed += step->n_blocks;
448 /* -----------------------------------------------------------------------------
451 memInventory() checks for memory leaks by counting up all the
452 blocks we know about and comparing that to the number of blocks
453 allegedly floating around in the system.
454 -------------------------------------------------------------------------- */
/* Body of memInventory() -- header and braces elided from this
 * listing. */
464 lnat total_blocks = 0, free_blocks = 0;
466 /* count the blocks we current have */
468 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
469 for (s = 0; s < generations[g].n_steps; s++) {
470 step = &generations[g].steps[s];
471 total_blocks += step->n_blocks;
472 if (RtsFlags.GcFlags.generations == 1) {
473 /* two-space collector has a to-space too :-) */
474 total_blocks += g0s0->to_blocks;
476 for (bd = step->large_objects; bd; bd = bd->link) {
477 total_blocks += bd->blocks;
478 /* hack for megablock groups: they have an extra block or two in
479 the second and subsequent megablocks where the block
480 descriptors would normally go.
482 if (bd->blocks > BLOCKS_PER_MBLOCK) {
483 total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
484 * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE);
490 /* any blocks held by allocate() */
491 for (bd = small_alloc_list; bd; bd = bd->link) {
492 total_blocks += bd->blocks;
494 for (bd = large_alloc_list; bd; bd = bd->link) {
495 total_blocks += bd->blocks;
498 /* count the blocks on the free list */
499 free_blocks = countFreeList();
/* The invariant: everything we can account for must equal everything
 * obtained from the OS. */
501 ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
/* Diagnostic print of the same check (presumably compiled out or
 * debug-only -- the surrounding preprocessor lines are elided). */
504 if (total_blocks + free_blocks != mblocks_allocated *
506 fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n",
507 total_blocks, free_blocks, total_blocks + free_blocks,
508 mblocks_allocated * BLOCKS_PER_MBLOCK);
513 /* Full heap sanity check. */
/* Heap sanity-check body (presumably checkSanity -- header elided from
 * this listing).  N appears to be the highest generation collected in
 * the last GC -- TODO confirm against caller. */
520 if (RtsFlags.GcFlags.generations == 1) {
/* Two-space collector: check to-space and the large-object chain. */
521 checkHeap(g0s0->to_space, NULL);
522 checkChain(g0s0->large_objects);
/* Generations up to N were just collected: their steps hold freshly
 * copied data (G0S0 is the nursery and is skipped). */
525 for (g = 0; g <= N; g++) {
526 for (s = 0; s < generations[g].n_steps; s++) {
527 if (g == 0 && s == 0) { continue; }
528 checkHeap(generations[g].steps[s].blocks, NULL);
/* Older, uncollected generations: check heap and large objects too. */
531 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
532 for (s = 0; s < generations[g].n_steps; s++) {
533 checkHeap(generations[g].steps[s].blocks,
534 generations[g].steps[s].blocks->start);
535 checkChain(generations[g].steps[s].large_objects);
538 checkFreeListSanity();