1 /* -----------------------------------------------------------------------------
2 * $Id: Storage.c,v 1.16 1999/03/02 19:50:12 sof Exp $
4 * (c) The GHC Team, 1998-1999
6 * Storage manager front end
8 * ---------------------------------------------------------------------------*/
15 #include "BlockAlloc.h"
22 #include "StoragePriv.h"
/* Global storage-manager state.  All of these are shared with the
 * garbage collector and the mutator's allocation fast path.
 */
24 bdescr *current_nursery; /* next available nursery block, or NULL */
25 nat nursery_blocks; /* number of blocks in the nursery */
/* List of CAFs (constant applicative forms) registered since the last
 * major GC; linked through the closures' static-link field (see
 * newCAF / STATIC_LINK2 below). */
27 StgClosure *caf_list = NULL;
29 bdescr *small_alloc_list; /* allocate()d small objects */
30 bdescr *large_alloc_list; /* allocate()d large objects */
31 nat alloc_blocks; /* number of allocate()d blocks since GC */
32 nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
/* Bump-allocation pointers for the small_alloc_list block currently
 * being filled by allocate(). */
34 StgPtr alloc_Hp = NULL; /* next free byte in small_alloc_list */
35 StgPtr alloc_HpLim = NULL; /* end of block at small_alloc_list */
37 generation *generations; /* all the generations */
38 generation *g0; /* generation 0, for convenience */
39 generation *oldest_gen; /* oldest generation, for convenience */
40 step *g0s0; /* generation 0, step 0, for convenience */
/* Custom allocation hooks handed to GMP via mp_set_memory_functions()
 * in initStorage(); they allocate inside the Haskell heap so that
 * Integer results are garbage-collected like any other closure. */
45 static void *stgAllocForGMP (size_t size_in_bytes);
46 static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
47 static void stgDeallocForGMP (void *ptr, size_t size);
/* Body of the storage-manager initialisation routine (the enclosing
 * function header is elided from this view — presumably
 * void initStorage(void); confirm against the full file).
 *
 * Sets up the generation/step structures from RtsFlags, allocates the
 * nursery, initialises the allocate() interface, and registers the GMP
 * allocation hooks.
 */
/* If the user asked for a heap bigger than the configured maximum,
 * raise the maximum to match the suggestion rather than silently
 * clipping it. */
56 if (RtsFlags.GcFlags.heapSizeSuggestion >
57 RtsFlags.GcFlags.maxHeapSize) {
58 RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
63 /* allocate generation info array */
64 generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
65 * sizeof(struct _generation),
68 /* Initialise all generations */
69 for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
70 gen = &generations[g];
/* Mutable-object lists start out empty (END_MUT_LIST sentinel). */
72 gen->mut_list = END_MUT_LIST;
73 gen->mut_once_list = END_MUT_LIST;
75 gen->failed_promotions = 0;
79 /* A couple of convenience pointers */
81 oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
83 /* Allocate step structures in each generation */
84 if (RtsFlags.GcFlags.generations > 1) {
85 /* Only for multiple-generations */
87 /* Oldest generation: one step */
88 oldest_gen->n_steps = 1;
/* NOTE(review): the assignment target for this allocation
 * (presumably oldest_gen->steps =) is on an elided line. */
90 stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");
92 /* set up all except the oldest generation with 2 steps */
93 for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
94 generations[g].n_steps = RtsFlags.GcFlags.steps;
95 generations[g].steps =
96 stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step),
97 "initStorage: steps");
101 /* single generation, i.e. a two-space collector */
103 g0->steps = stgMallocBytes (sizeof(struct _step), "initStorage: steps");
106 /* Initialise all steps */
107 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
108 for (s = 0; s < generations[g].n_steps; s++) {
109 step = &generations[g].steps[s];
/* Back-pointer from step to its owning generation. */
113 step->gen = &generations[g];
118 step->scan_bd = NULL;
119 step->large_objects = NULL;
120 step->new_large_objects = NULL;
121 step->scavenged_large_objects = NULL;
125 /* Set up the destination pointers in each younger gen. step */
126 for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
127 for (s = 0; s < generations[g].n_steps-1; s++) {
/* Objects surviving step s are copied to step s+1 of the same
 * generation... */
128 generations[g].steps[s].to = &generations[g].steps[s+1];
/* ...and the last step of a generation promotes into the first
 * step of the next generation.  (The assignment below appears
 * outside the inner loop on an elided line boundary — confirm
 * placement against the full file.) */
130 generations[g].steps[s].to = &generations[g+1].steps[0];
133 /* The oldest generation has one step and its destination is the
135 oldest_gen->steps[0].to = &oldest_gen->steps[0];
137 /* generation 0 is special: that's the nursery */
138 generations[0].max_blocks = 0;
140 /* G0S0: the allocation area. Policy: keep the allocation area
141 * small to begin with, even if we have a large suggested heap
142 * size. Reason: we're going to do a major collection first, and we
143 * don't want it to be a big one. This vague idea is borne out by
144 * rigorous experimental evidence.
146 step = &generations[0].steps[0];
148 nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
149 step->blocks = allocNursery(NULL, nursery_blocks);
150 step->n_blocks = nursery_blocks;
151 current_nursery = step->blocks;
/* NOTE(review): g0s0 is used here but its assignment (presumably
 * g0s0 = step;) is on an elided line. */
152 g0s0->to_space = NULL;
153 /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
155 weak_ptr_list = NULL;
158 /* initialise the allocate() interface */
159 small_alloc_list = NULL;
160 large_alloc_list = NULL;
/* allocate() triggers a GC request once alloc_blocks exceeds this. */
162 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
165 /* Tell GNU multi-precision pkg about our custom alloc functions */
166 mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
169 IF_DEBUG(gc, stat_describe_gens());
/* Allocate `blocks` fresh nursery blocks and chain them onto the list
 * ending at `last_bd`; returns the head of the (extended) chain.
 * (Return type and most of the loop body are elided from this view —
 * confirm against the full file.)
 */
173 allocNursery (bdescr *last_bd, nat blocks)
178 /* Allocate a nursery */
179 for (i=0; i < blocks; i++) {
/* Each block starts out empty: free pointer at the start. */
185 bd->free = bd->start;
/* Grow or shrink the nursery to exactly `blocks` blocks.
 * No-op when already the right size; otherwise allocates extra blocks
 * or frees surplus ones, keeping nursery_blocks/g0s0->n_blocks in sync.
 * (Return type and parts of the shrink loop are elided from this view.)
 */
192 resizeNursery ( nat blocks )
/* Already the requested size: nothing to do. */
196 if (nursery_blocks == blocks) {
197 ASSERT(g0s0->n_blocks == blocks);
/* Growing: append the missing number of blocks to the chain. */
201 else if (nursery_blocks < blocks) {
202 IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
204 g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
/* Shrinking: walk the chain releasing blocks until the count matches.
 * (The body of this loop — presumably freeing bd — is elided.) */
210 IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
212 for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
/* Record the new size in both the step and the global counter. */
220 g0s0->n_blocks = nursery_blocks = blocks;
/* Shutdown-time statistics fragment (the enclosing function header is
 * elided — presumably exitStorage; confirm against the full file).
 * Computes the total words allocated and reports it via stat_exit().
 */
229 /* Return code ignored for now */
230 /* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */
/* Start from the whole nursery plus allocate()d bytes, then subtract
 * one block's worth for every nursery block never reached. */
231 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
232 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
233 allocated -= BLOCK_SIZE_W;
235 stat_exit(allocated);
/* Register a newly-entered CAF (constant applicative form) with the
 * storage manager so the GC can find it.  (Return type elided from
 * this view.)
 */
239 newCAF(StgClosure* caf)
241 /* Put this CAF on the mutable list for the old generation.
242 * This is a HACK - the IND_STATIC closure doesn't really have
243 * a mut_link field, but we pretend it has - in fact we re-use
244 * the STATIC_LINK field for the time being, because when we
245 * come to do a major GC we won't need the mut_link field
246 * any more and can use it as a STATIC_LINK.
248 ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
249 oldest_gen->mut_once_list = (StgMutClosure *)caf;
253 const StgInfoTable *info;
255 info = get_itbl(caf);
/* Only IND_STATIC closures may be registered as CAFs. */
256 ASSERT(info->type == IND_STATIC);
/* Thread the CAF onto caf_list via its static-link field. */
257 STATIC_LINK2(info,caf) = caf_list;
263 /* -----------------------------------------------------------------------------
264 The allocate() interface
266 allocate(n) always succeeds, and returns a chunk of memory n words
267 long. n can be larger than the size of a block if necessary, in
268 which case a contiguous block group will be allocated.
269 -------------------------------------------------------------------------- */
/* Body of allocate(n) (function header elided from this view).
 * Returns n words of memory: large requests get their own contiguous
 * block group on g0s0's large-object list; small requests are
 * bump-allocated out of the current small_alloc_list block.
 */
280 /* big allocation (>LARGE_OBJECT_THRESHOLD) */
281 /* ToDo: allocate directly into generation 1 */
282 if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
/* Round the byte size up to whole blocks. */
283 nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
284 bd = allocGroup(req_blocks);
285 dbl_link_onto(bd, &g0s0->large_objects);
289 bd->free = bd->start;
290 /* don't add these blocks to alloc_blocks, since we're assuming
291 * that large objects are likely to remain live for quite a while
292 * (eg. running threads), so garbage collecting early won't make
297 /* small allocation (<LARGE_OBJECT_THRESHOLD) */
/* Need a new block either on first use or when the current block
 * cannot hold n more words. */
298 } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
/* Seal the current block before starting a new one. */
299 if (small_alloc_list) {
300 small_alloc_list->free = alloc_Hp;
/* Push the fresh block (allocated on an elided line, presumably
 * bd = allocBlock()) onto the small-allocation list. */
303 bd->link = small_alloc_list;
304 small_alloc_list = bd;
/* Reset the bump pointers into the new block. */
308 alloc_Hp = bd->start;
309 alloc_HpLim = bd->start + BLOCK_SIZE_W;
/* Number of words allocated via allocate() since the last GC:
 * all whole blocks handed out, minus the unused tail of the block
 * currently being filled.  (Despite the name, the unit is words —
 * it is combined with BLOCK_SIZE_W word counts by callers.)
 */
318 lnat allocated_bytes(void)
320 return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
323 /* -----------------------------------------------------------------------------
324 Allocation functions for GMP.
326 These all use the allocate() interface - we can't have any garbage
327 collection going on during a gmp operation, so we use allocate()
328 which always succeeds. The gmp operations which might need to
329 allocate will ask the storage manager (via doYouWantToGC()) whether
330 a garbage collection is required, in case we get into a loop doing
331 only allocate() style allocation.
332 -------------------------------------------------------------------------- */
/* GMP allocation hook: carve the requested bytes out of the Haskell
 * heap as the payload of an StgArrWords closure, so GMP limb data is
 * managed by the garbage collector.  (Return type and the declaration
 * of `arr` are elided from this view.)
 */
335 stgAllocForGMP (size_t size_in_bytes)
338 nat data_size_in_words, total_size_in_words;
340 /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
341 ASSERT(size_in_bytes % sizeof(W_) == 0);
343 data_size_in_words = size_in_bytes / sizeof(W_);
/* Payload plus the StgArrWords header. */
344 total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
346 /* allocate and fill it in. */
347 arr = (StgArrWords *)allocate(total_size_in_words);
348 SET_ARR_HDR(arr, &ARR_WORDS_info, CCCS, data_size_in_words);
350 /* and return a ptr to the goods inside the array */
351 return(BYTE_ARR_CTS(arr));
/* GMP reallocation hook: allocate a fresh heap array and copy the old
 * contents byte by byte; the old array is left for the GC to reclaim.
 * (Return type and the declaration/initialisation of the loop index
 * `i` — presumably size_t i = 0 — are elided from this view.)
 */
355 stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
357 void *new_stuff_ptr = stgAllocForGMP(new_size);
359 char *p = (char *) ptr;
360 char *q = (char *) new_stuff_ptr;
/* Copy the old payload into the new array (the *q = *p assignment
 * is on an elided line). */
362 for (; i < old_size; i++, p++, q++) {
366 return(new_stuff_ptr);
/* GMP deallocation hook: intentionally a no-op — the data lives in the
 * Haskell heap, so the garbage collector reclaims it.
 */
370 stgDeallocForGMP (void *ptr STG_UNUSED,
371 size_t size STG_UNUSED)
373 /* easy for us: the garbage collector does the dealloc'n */
376 /* -----------------------------------------------------------------------------
378 -------------------------------------------------------------------------- */
/* Live-data estimation fragment (the enclosing function header is
 * elided — presumably calcLive; confirm against the full file).
 * Sums, per step, full blocks plus the used portion of the block the
 * allocation pointer is currently in.
 */
380 /* Approximate the amount of live data in the heap. To be called just
381 * after garbage collection (see GarbageCollect()).
/* Two-space collector: everything live is in g0s0's to-space. */
390 if (RtsFlags.GcFlags.generations == 1) {
391 live = (g0s0->to_blocks - 1) * BLOCK_SIZE_W +
392 ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
/* Generational case: accumulate over every step except the nursery. */
396 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
397 for (s = 0; s < generations[g].n_steps; s++) {
398 /* approximate amount of live data (doesn't take into account slop
399 * at end of each block).
/* Skip G0S0 — the nursery is all dead after a GC. */
401 if (g == 0 && s == 0) {
404 step = &generations[g].steps[s];
405 live += (step->n_blocks - 1) * BLOCK_SIZE_W +
406 ((lnat)step->hp_bd->free - (lnat)step->hp_bd->start) / sizeof(W_);
/* Block-requirement estimation fragment (enclosing function header
 * elided — presumably calcNeeded; confirm against the full file).
 */
412 /* Approximate the number of blocks that will be needed at the next
413 * garbage collection.
415 * Assume: all data currently live will remain live. Steps that will
416 * be collected next time will therefore need twice as many blocks
417 * since all the data will be copied.
426 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
427 for (s = 0; s < generations[g].n_steps; s++) {
/* The nursery (G0S0) is not counted here. */
428 if (g == 0 && s == 0) { continue; }
429 step = &generations[g].steps[s];
/* A generation whose first step has outgrown max_blocks will be
 * collected next time, so budget double (copying collection);
 * otherwise the data just stays put. */
430 if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
431 needed += 2 * step->n_blocks;
433 needed += step->n_blocks;
440 /* -----------------------------------------------------------------------------
443 memInventory() checks for memory leaks by counting up all the
444 blocks we know about and comparing that to the number of blocks
445 allegedly floating around in the system.
446 -------------------------------------------------------------------------- */
/* Body of memInventory() (function header elided from this view):
 * leak check that counts every block the storage manager knows about
 * and compares against the block allocator's global totals.
 */
456 lnat total_blocks = 0, free_blocks = 0;
458 /* count the blocks we current have */
460 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
461 for (s = 0; s < generations[g].n_steps; s++) {
462 step = &generations[g].steps[s];
463 total_blocks += step->n_blocks;
464 if (RtsFlags.GcFlags.generations == 1) {
465 /* two-space collector has a to-space too :-) */
466 total_blocks += g0s0->to_blocks;
468 for (bd = step->large_objects; bd; bd = bd->link) {
469 total_blocks += bd->blocks;
470 /* hack for megablock groups: they have an extra block or two in
471 the second and subsequent megablocks where the block
472 descriptors would normally go.
/* Subtract the descriptor-area blocks of each trailing megablock. */
474 if (bd->blocks > BLOCKS_PER_MBLOCK) {
475 total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
476 * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE);
482 /* any blocks held by allocate() */
483 for (bd = small_alloc_list; bd; bd = bd->link) {
484 total_blocks += bd->blocks;
486 for (bd = large_alloc_list; bd; bd = bd->link) {
487 total_blocks += bd->blocks;
490 /* count the blocks on the free list */
491 free_blocks = countFreeList();
/* Everything we counted plus the free list must equal what the block
 * allocator has carved out of its megablocks. */
493 ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
/* Diagnostic report when the inventory doesn't balance (the condition's
 * right-hand side continues on an elided line). */
496 if (total_blocks + free_blocks != mblocks_allocated *
498 fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n",
499 total_blocks, free_blocks, total_blocks + free_blocks,
500 mblocks_allocated * BLOCKS_PER_MBLOCK);
/* Full heap sanity check fragment (enclosing function header elided —
 * presumably checkSanity(nat N), where N is the number of generations
 * just collected; confirm against the full file).
 */
505 /* Full heap sanity check. */
/* Two-space collector: check to-space and the large-object chain. */
512 if (RtsFlags.GcFlags.generations == 1) {
513 checkHeap(g0s0->to_space, NULL);
514 checkChain(g0s0->large_objects);
/* Generations up to and including N were just collected; check their
 * blocks from the start (skipping the nursery, G0S0). */
517 for (g = 0; g <= N; g++) {
518 for (s = 0; s < generations[g].n_steps; s++) {
519 if (g == 0 && s == 0) { continue; }
520 checkHeap(generations[g].steps[s].blocks, NULL);
/* Uncollected older generations: check from each step's first block's
 * start address, plus their large-object chains. */
523 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
524 for (s = 0; s < generations[g].n_steps; s++) {
525 checkHeap(generations[g].steps[s].blocks,
526 generations[g].steps[s].blocks->start);
527 checkChain(generations[g].steps[s].large_objects);
530 checkFreeListSanity();