1 /* -----------------------------------------------------------------------------
2 * $Id: Storage.c,v 1.11 1999/02/05 14:48:01 simonm Exp $
4 * Storage manager front end
6 * ---------------------------------------------------------------------------*/
13 #include "BlockAlloc.h"
20 #include "StoragePriv.h"
/* --- Nursery state: where compiled code allocates new objects. --- */
bdescr *current_nursery;  /* next available nursery block, or NULL */
nat nursery_blocks;       /* number of blocks in the nursery */

/* List of constant applicative forms (CAFs); threaded through their
 * STATIC_LINK fields via STATIC_LINK2 in newCAF() below.
 */
StgClosure *caf_list = NULL;

/* --- State for the out-of-line allocate() interface. --- */
bdescr *small_alloc_list; /* allocate()d small objects */
bdescr *large_alloc_list; /* allocate()d large objects */
nat alloc_blocks;         /* number of allocate()d blocks since GC */
nat alloc_blocks_lim;     /* approximate limit on alloc_blocks */

/* Bump-allocation pointers into the block at the head of
 * small_alloc_list (see allocate()).
 */
StgPtr alloc_Hp = NULL;    /* next free byte in small_alloc_list */
StgPtr alloc_HpLim = NULL; /* end of block at small_alloc_list */

/* --- The generation structure, set up by initStorage(). --- */
generations *generations; /* all the generations */
generation *g0;           /* generation 0, for convenience */
generation *oldest_gen;   /* oldest generation, for convenience */
step *g0s0;               /* generation 0, step 0, for convenience */
/* Custom allocation hooks registered with the GNU multi-precision
 * library via mp_set_memory_functions() in initStorage().  They
 * allocate GMP limb storage from the GC'd heap using allocate().
 */
static void *stgAllocForGMP (size_t size_in_bytes);
static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
static void stgDeallocForGMP (void *ptr, size_t size);
/* NOTE(review): fragment of initStorage() (name grounded by the
 * "initStorage: ..." error strings below).  The function header and a
 * number of interior lines are missing from this excerpt, so braces do
 * not all balance here.
 */

/* Refuse to start if the suggested heap (-H) exceeds the hard maximum (-M). */
if (RtsFlags.GcFlags.heapSizeSuggestion >
    RtsFlags.GcFlags.maxHeapSize) {
    barf("Suggested heap size (-H<size>) is larger than max. heap size (-M<size>)\n");

/* allocate generation info array */
generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
                                           * sizeof(struct _generation),

/* Initialise all generations */
for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
    gen = &generations[g];
    /* empty remembered sets for the write barrier */
    gen->mut_list = END_MUT_LIST;
    gen->mut_once_list = END_MUT_LIST;
    gen->failed_promotions = 0;

/* A couple of convenience pointers */
oldest_gen = &generations[RtsFlags.GcFlags.generations-1];

/* Allocate step structures in each generation */
if (RtsFlags.GcFlags.generations > 1) {
    /* Only for multiple-generations */

    /* Oldest generation: one step */
    oldest_gen->n_steps = 1;
    /* NOTE(review): the assignment target of this call is on an elided
     * line (presumably oldest_gen->steps) -- confirm against the full
     * source.
     */
    stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");

    /* set up all except the oldest generation with 2 steps */
    for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
        generations[g].n_steps = RtsFlags.GcFlags.steps;
        generations[g].steps =
            stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step),
                            "initStorage: steps");

    /* single generation, i.e. a two-space collector */
    g0->steps = stgMallocBytes (sizeof(struct _step), "initStorage: steps");

/* Initialise all steps */
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
        step = &generations[g].steps[s];
        step->gen = &generations[g];
        step->large_objects = NULL;
        step->new_large_objects = NULL;
        step->scavenged_large_objects = NULL;

/* Set up the destination pointers in each younger gen. step */
for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
    for (s = 0; s < generations[g].n_steps-1; s++) {
        /* objects surviving step s are copied to step s+1... */
        generations[g].steps[s].to = &generations[g].steps[s+1];
    /* ...and the last step promotes into the next generation */
    generations[g].steps[s].to = &generations[g+1].steps[0];

/* The oldest generation has one step, and its destination is itself. */
oldest_gen->steps[0].to = &oldest_gen->steps[0];

/* generation 0 is special: that's the nursery */
generations[0].max_blocks = 0;

/* G0S0: the allocation area.  Policy: keep the allocation area
 * small to begin with, even if we have a large suggested heap
 * size.  Reason: we're going to do a major collection first, and we
 * don't want it to be a big one.  This vague idea is borne out by
 * rigorous experimental evidence.
 */
step = &generations[0].steps[0];
nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
step->blocks = allocNursery(NULL, nursery_blocks);
step->n_blocks = nursery_blocks;
current_nursery = step->blocks;
/* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */

weak_ptr_list = NULL;

/* initialise the allocate() interface */
small_alloc_list = NULL;
large_alloc_list = NULL;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

/* Tell GNU multi-precision pkg about our custom alloc functions */
mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);

IF_DEBUG(gc, stat_describe_gens());
/* allocNursery: allocate `blocks` fresh nursery blocks and chain them
 * onto `last_bd` (which may be NULL -- see the callers in initStorage()
 * and resizeNursery()), returning the head of the resulting chain.
 * NOTE(review): fragment -- the return type and most of the loop body
 * are missing from this excerpt.
 */
allocNursery (bdescr *last_bd, nat blocks)
    /* Allocate a nursery */
    for (i=0; i < blocks; i++) {
        /* fresh block: nothing allocated in it yet */
        bd->free = bd->start;
/* resizeNursery: grow or shrink the nursery (g0s0) to exactly `blocks`
 * blocks, keeping nursery_blocks and g0s0->n_blocks in sync.
 * NOTE(review): fragment -- return type, several braces and the
 * shrink-loop body are missing from this excerpt.
 */
resizeNursery ( nat blocks )
    /* Case 1: already the right size -- nothing to do. */
    if (nursery_blocks == blocks) {
        ASSERT(g0s0->n_blocks == blocks);
    /* Case 2: grow by chaining new blocks onto the existing list. */
    else if (nursery_blocks < blocks) {
        IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
        g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
    /* Case 3: shrink by releasing blocks from the front of the list. */
        IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
        for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
    /* record the new size */
    g0s0->n_blocks = nursery_blocks = blocks;
/* NOTE(review): fragment of what appears to be the storage-manager
 * shutdown path (it reports final allocation to stat_exit()); the
 * function header is missing from this excerpt.  It computes the words
 * allocated in the nursery by subtracting the blocks that were never
 * reached (those after current_nursery).
 */
/* Return code ignored for now */
/* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */
allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
    /* unused nursery block: doesn't count as allocation */
    allocated -= BLOCK_SIZE_W;
stat_exit(allocated);
/* newCAF: register a newly-entered CAF (constant applicative form) with
 * the storage manager so the GC can find it.
 * NOTE(review): fragment -- the return type, braces, and what is
 * presumably the conditional separating the two registration schemes
 * below (mut_once_list vs. caf_list) are missing from this excerpt.
 */
newCAF(StgClosure* caf)
    /* Put this CAF on the mutable list for the old generation.
     * This is a HACK - the IND_STATIC closure doesn't really have
     * a mut_link field, but we pretend it has - in fact we re-use
     * the STATIC_LINK field for the time being, because when we
     * come to do a major GC we won't need the mut_link field
     * any more and can use it as a STATIC_LINK.
     */
    ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
    oldest_gen->mut_once_list = (StgMutClosure *)caf;

    const StgInfoTable *info;

    info = get_itbl(caf);
    ASSERT(info->type == IND_STATIC);
    /* thread the CAF onto caf_list through its STATIC_LINK field */
    STATIC_LINK2(info,caf) = caf_list;
258 /* -----------------------------------------------------------------------------
259 The allocate() interface
261 allocate(n) always succeeds, and returns a chunk of memory n words
262 long. n can be larger than the size of a block if necessary, in
263 which case a contiguous block group will be allocated.
264 -------------------------------------------------------------------------- */
/* NOTE(review): fragment of allocate() (documented in the comment block
 * above) -- the function header, the small-allocation bump pointer
 * update, and the return statements are missing from this excerpt.
 */
/* big allocation (>LARGE_OBJECT_THRESHOLD) */
/* ToDo: allocate directly into generation 1 */
if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
    /* round the request up to a whole number of blocks */
    nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
    bd = allocGroup(req_blocks);
    dbl_link_onto(bd, &g0s0->large_objects);
    bd->free = bd->start;
    /* don't add these blocks to alloc_blocks, since we're assuming
     * that large objects are likely to remain live for quite a while
     * (eg. running threads), so garbage collecting early won't make
     * much difference.  (NOTE(review): comment completed; original
     * tail was elided.)
     */

/* small allocation (<LARGE_OBJECT_THRESHOLD) */
} else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
    /* current block exhausted: retire it and start a fresh one */
    if (small_alloc_list) {
        small_alloc_list->free = alloc_Hp;
    bd->link = small_alloc_list;
    small_alloc_list = bd;
    alloc_Hp = bd->start;
    alloc_HpLim = bd->start + BLOCK_SIZE_W;
/* allocated_bytes: words (despite the name) handed out via allocate()
 * since the last GC: all alloc_blocks, minus the unused slack between
 * alloc_Hp and alloc_HpLim in the current small-allocation block.
 * NOTE(review): the function's braces fall on lines elided from this
 * excerpt.
 */
lnat allocated_bytes(void)
    return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
318 /* -----------------------------------------------------------------------------
319 Allocation functions for GMP.
321 These all use the allocate() interface - we can't have any garbage
322 collection going on during a gmp operation, so we use allocate()
323 which always succeeds. The gmp operations which might need to
324 allocate will ask the storage manager (via doYouWantToGC()) whether
325 a garbage collection is required, in case we get into a loop doing
326 only allocate() style allocation.
327 -------------------------------------------------------------------------- */
/* stgAllocForGMP: GMP allocation hook (see comment block above).
 * Wraps the requested limb storage in an StgArrWords heap object and
 * returns a pointer to its payload.
 * NOTE(review): fragment -- the `static void *` return type, braces,
 * and the declaration of `arr` are on lines elided from this excerpt.
 */
stgAllocForGMP (size_t size_in_bytes)
    nat data_size_in_words, total_size_in_words;

    /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
    ASSERT(size_in_bytes % sizeof(W_) == 0);

    data_size_in_words = size_in_bytes / sizeof(W_);
    total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;

    /* allocate and fill it in. */
    arr = (StgArrWords *)allocate(total_size_in_words);
    SET_ARR_HDR(arr, &ARR_WORDS_info, CCCS, data_size_in_words);

    /* and return a ptr to the goods inside the array */
    return(BYTE_ARR_CTS(arr));
/* stgReallocForGMP: GMP reallocation hook.  Allocates a fresh array via
 * stgAllocForGMP and copies `old_size` bytes across; the old storage is
 * simply abandoned to the GC (see stgDeallocForGMP).
 * NOTE(review): fragment -- the return type, braces, the declaration of
 * `i`, and the byte-copy statement in the loop body are on lines elided
 * from this excerpt.
 */
stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
    void *new_stuff_ptr = stgAllocForGMP(new_size);

    char *p = (char *) ptr;
    char *q = (char *) new_stuff_ptr;

    /* copy the old contents byte by byte */
    for (; i < old_size; i++, p++, q++) {

    return(new_stuff_ptr);
/* stgDeallocForGMP: GMP deallocation hook -- intentionally a no-op,
 * since GMP storage lives in the GC'd heap.
 * NOTE(review): fragment -- the `static void` return type and braces
 * are on lines elided from this excerpt.
 */
stgDeallocForGMP (void *ptr STG_UNUSED,
                  size_t size STG_UNUSED)
    /* easy for us: the garbage collector does the dealloc'n */
371 /* -----------------------------------------------------------------------------
373 -------------------------------------------------------------------------- */
/* Approximate the amount of live data in the heap.  To be called just
 * after garbage collection (see GarbageCollect()).
 * NOTE(review): fragment -- the function header, braces, and the
 * statement skipping g0s0 are on lines elided from this excerpt.
 */
if (RtsFlags.GcFlags.generations == 1) {
    /* two-space collector: live data is everything in to-space */
    live = g0s0->to_blocks * BLOCK_SIZE_W +
        ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);

for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
        /* approximate amount of live data (doesn't take into account slop
         * at end of each block).
         */
        if (g == 0 && s == 0) {
        step = &generations[g].steps[s];
        live += step->n_blocks * BLOCK_SIZE_W +
            ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
/* Approximate the number of blocks that will be needed at the next
 * garbage collection.
 *
 * Assume: all data currently live will remain live.  Steps that will
 * be collected next time will therefore need twice as many blocks
 * since all the data will be copied.
 * NOTE(review): fragment -- the function header, `needed` declaration
 * and the return are on lines elided from this excerpt.
 */
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
        if (g == 0 && s == 0) { continue; }
        step = &generations[g].steps[s];
        /* NOTE(review): the condition deliberately tests step 0 of the
         * generation, not the current step -- presumably "will this
         * generation be collected next GC?"  Confirm against
         * GarbageCollect()'s collection trigger.
         */
        if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
            /* step will be copied: budget double its blocks */
            needed += 2 * step->n_blocks;
            /* step not collected: its blocks stay put */
            needed += step->n_blocks;
434 /* -----------------------------------------------------------------------------
437 memInventory() checks for memory leaks by counting up all the
438 blocks we know about and comparing that to the number of blocks
439 allegedly floating around in the system.
440 -------------------------------------------------------------------------- */
/* NOTE(review): fragment of memInventory() (documented in the comment
 * block above) -- the function header and several braces are on lines
 * elided from this excerpt.
 */
lnat total_blocks = 0, free_blocks = 0;

/* count the blocks we currently have */

for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
        step = &generations[g].steps[s];
        total_blocks += step->n_blocks;
        if (RtsFlags.GcFlags.generations == 1) {
            /* two-space collector has a to-space too :-) */
            total_blocks += g0s0->to_blocks;
        for (bd = step->large_objects; bd; bd = bd->link) {
            total_blocks += bd->blocks;
            /* hack for megablock groups: they have an extra block or two in
             * the second and subsequent megablocks where the block
             * descriptors would normally go.
             */
            if (bd->blocks > BLOCKS_PER_MBLOCK) {
                total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
                    * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE);

/* any blocks held by allocate() */
for (bd = small_alloc_list; bd; bd = bd->link) {
    total_blocks += bd->blocks;
for (bd = large_alloc_list; bd; bd = bd->link) {
    total_blocks += bd->blocks;

/* count the blocks on the free list */
free_blocks = countFreeList();

/* everything must be accounted for, block by block */
ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);

/* report the discrepancy (debug path; condition's RHS continues on an
 * elided line) */
if (total_blocks + free_blocks != mblocks_allocated *
    fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n",
            total_blocks, free_blocks, total_blocks + free_blocks,
            mblocks_allocated * BLOCKS_PER_MBLOCK);
/* Full heap sanity check. */
/* NOTE(review): fragment -- the function header is on a line elided
 * from this excerpt.  `N` is presumably the index of the youngest
 * generation collected in the last GC -- confirm against
 * GarbageCollect():  generations 0..N were just collected (fresh
 * blocks), N+1.. were not, so their heaps are checked from their
 * recorded scan start.
 */
if (RtsFlags.GcFlags.generations == 1) {
    /* two-space collector: check to-space and the large-object chain */
    checkHeap(g0s0->to_space, NULL);
    checkChain(g0s0->large_objects);
    for (g = 0; g <= N; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            if (g == 0 && s == 0) { continue; }
            checkHeap(generations[g].steps[s].blocks, NULL);
    for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            checkHeap(generations[g].steps[s].blocks,
                      generations[g].steps[s].blocks->start);
            checkChain(generations[g].steps[s].large_objects);
checkFreeListSanity();