1 /* -----------------------------------------------------------------------------
2 * $Id: Storage.c,v 1.15 1999/02/26 16:44:14 simonm Exp $
4 * (c) The GHC Team, 1998-1999
6 * Storage manager front end
8 * ---------------------------------------------------------------------------*/
15 #include "BlockAlloc.h"
22 #include "StoragePriv.h"
/* Storage-manager globals, shared with the garbage collector.
 * NOTE(review): this chunk is elided -- neighbouring declarations may
 * be missing from view. */
24 bdescr *current_nursery; /* next available nursery block, or NULL */
25 nat nursery_blocks; /* number of blocks in the nursery */
/* List of IND_STATIC closures (CAFs) seen so far; chained through
 * their STATIC_LINK2 field (see newCAF below). */
27 StgClosure *caf_list = NULL;
/* State for the allocate() interface: block lists plus bump-pointer
 * Hp/HpLim into the head block of small_alloc_list. */
29 bdescr *small_alloc_list; /* allocate()d small objects */
30 bdescr *large_alloc_list; /* allocate()d large objects */
31 nat alloc_blocks; /* number of allocate()d blocks since GC */
32 nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
34 StgPtr alloc_Hp = NULL; /* next free byte in small_alloc_list */
35 StgPtr alloc_HpLim = NULL; /* end of block at small_alloc_list */
/* Convenience handles into the generation/step structure. */
37 generation *generations; /* all the generations */
38 generation *g0; /* generation 0, for convenience */
39 generation *oldest_gen; /* oldest generation, for convenience */
40 step *g0s0; /* generation 0, step 0, for convenience */
/* Custom allocation hooks registered with GMP via
 * mp_set_memory_functions() in initStorage; they allocate from the
 * GC'd heap through allocate(), so GMP memory is never free()d. */
45 static void *stgAllocForGMP (size_t size_in_bytes);
46 static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
47 static void stgDeallocForGMP (void *ptr, size_t size);
/* NOTE(review): fragment of initStorage() -- the function header,
 * local declarations and several interior statements are missing from
 * this view (original line numbers jump), so only comments are added
 * here; no code is altered. */
/* If the suggested heap size exceeds the configured maximum, raise
 * the maximum to match. */
56 if (RtsFlags.GcFlags.heapSizeSuggestion >
57 RtsFlags.GcFlags.maxHeapSize) {
58 RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
63 /* allocate generation info array */
64 generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
65 * sizeof(struct _generation),
68 /* Initialise all generations */
69 for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
70 gen = &generations[g];
/* Mutable lists start out empty (END_MUT_LIST sentinel). */
72 gen->mut_list = END_MUT_LIST;
73 gen->mut_once_list = END_MUT_LIST;
75 gen->failed_promotions = 0;
79 /* A couple of convenience pointers */
81 oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
83 /* Allocate step structures in each generation */
84 if (RtsFlags.GcFlags.generations > 1) {
85 /* Only for multiple-generations */
87 /* Oldest generation: one step */
88 oldest_gen->n_steps = 1;
90 stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");
/* Younger generations get RtsFlags.GcFlags.steps steps each. */
92 /* set up all except the oldest generation with 2 steps */
93 for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
94 generations[g].n_steps = RtsFlags.GcFlags.steps;
95 generations[g].steps =
96 stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step),
97 "initStorage: steps");
101 /* single generation, i.e. a two-space collector */
103 g0->steps = stgMallocBytes (sizeof(struct _step), "initStorage: steps");
106 /* Initialise all steps */
107 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
108 for (s = 0; s < generations[g].n_steps; s++) {
109 step = &generations[g].steps[s];
113 step->gen = &generations[g];
118 step->scan_bd = NULL;
119 step->large_objects = NULL;
120 step->new_large_objects = NULL;
121 step->scavenged_large_objects = NULL;
/* Promotion targets: each step promotes into the next step of its
 * generation; the last step of a generation promotes into step 0 of
 * the next generation. */
125 /* Set up the destination pointers in each younger gen. step */
126 for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
127 for (s = 0; s < generations[g].n_steps-1; s++) {
128 generations[g].steps[s].to = &generations[g].steps[s+1];
130 generations[g].steps[s].to = &generations[g+1].steps[0];
133 /* The oldest generation has one step and its destination is the
/* ... itself: objects in the oldest generation stay put. */
135 oldest_gen->steps[0].to = &oldest_gen->steps[0];
137 /* generation 0 is special: that's the nursery */
138 generations[0].max_blocks = 0;
140 /* G0S0: the allocation area. Policy: keep the allocation area
141 * small to begin with, even if we have a large suggested heap
142 * size. Reason: we're going to do a major collection first, and we
143 * don't want it to be a big one. This vague idea is borne out by
144 * rigorous experimental evidence.
146 step = &generations[0].steps[0];
148 nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
149 step->blocks = allocNursery(NULL, nursery_blocks);
150 step->n_blocks = nursery_blocks;
151 current_nursery = step->blocks;
152 /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
154 weak_ptr_list = NULL;
157 /* initialise the allocate() interface */
158 small_alloc_list = NULL;
159 large_alloc_list = NULL;
/* GC is requested once allocate() has consumed this many blocks. */
161 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
164 /* Tell GNU multi-precision pkg about our custom alloc functions */
165 mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
168 IF_DEBUG(gc, stat_describe_gens());
/* NOTE(review): fragment of allocNursery() -- return type, local
 * declarations, the block-allocation call and the list-linking code
 * are not visible in this chunk.  Presumably allocates `blocks` fresh
 * nursery blocks and chains them onto last_bd -- confirm against the
 * full source. */
172 allocNursery (bdescr *last_bd, nat blocks)
177 /* Allocate a nursery */
178 for (i=0; i < blocks; i++) {
/* A fresh block is entirely empty: free pointer starts at the base. */
184 bd->free = bd->start;
/* NOTE(review): fragment of resizeNursery() -- return type, locals
 * and the block-freeing loop body are missing from this view. */
191 resizeNursery ( nat blocks )
/* Case 1: nursery already the requested size -- nothing to do. */
195 if (nursery_blocks == blocks) {
196 ASSERT(g0s0->n_blocks == blocks);
/* Case 2: grow -- append freshly allocated blocks to the chain. */
200 else if (nursery_blocks < blocks) {
201 IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
203 g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
/* Case 3: shrink -- walk the chain releasing excess blocks until the
 * count drops to the requested size. */
209 IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
211 for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
219 g0s0->n_blocks = nursery_blocks = blocks;
/* NOTE(review): fragment of the storage-manager shutdown code -- the
 * enclosing function header and locals are not visible here. */
228 /* Return code ignored for now */
229 /* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */
/* Count the whole nursery plus allocate()d bytes, then subtract the
 * blocks beyond current_nursery, which were never handed out. */
230 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
231 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
232 allocated -= BLOCK_SIZE_W;
/* Report the final allocation total to the stats module. */
234 stat_exit(allocated);
/* NOTE(review): fragment of newCAF() -- the surrounding braces and
 * any conditional compilation separating the two code paths below are
 * not visible in this chunk. */
238 newCAF(StgClosure* caf)
240 /* Put this CAF on the mutable list for the old generation.
241 * This is a HACK - the IND_STATIC closure doesn't really have
242 * a mut_link field, but we pretend it has - in fact we re-use
243 * the STATIC_LINK field for the time being, because when we
244 * come to do a major GC we won't need the mut_link field
245 * any more and can use it as a STATIC_LINK.
247 ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
248 oldest_gen->mut_once_list = (StgMutClosure *)caf;
252 const StgInfoTable *info;
254 info = get_itbl(caf);
/* Only IND_STATIC closures (entered CAFs) may be registered here. */
255 ASSERT(info->type == IND_STATIC);
/* Chain the CAF onto the global caf_list via its static link field. */
256 STATIC_LINK2(info,caf) = caf_list;
262 /* -----------------------------------------------------------------------------
263 The allocate() interface
265 allocate(n) always succeeds, and returns a chunk of memory n words
266 long. n can be larger than the size of a block if necessary, in
267 which case a contiguous block group will be allocated.
268 -------------------------------------------------------------------------- */
/* NOTE(review): fragment of allocate() -- the signature, the
 * assignment of `bd` on the small path (presumably from the block
 * allocator) and the return statements are not visible here. */
279 /* big allocation (>LARGE_OBJECT_THRESHOLD) */
280 /* ToDo: allocate directly into generation 1 */
281 if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
/* Round the request up to a whole number of blocks and take a
 * contiguous block group. */
282 nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
283 bd = allocGroup(req_blocks);
284 dbl_link_onto(bd, &g0s0->large_objects);
288 bd->free = bd->start;
289 /* don't add these blocks to alloc_blocks, since we're assuming
290 * that large objects are likely to remain live for quite a while
291 * (eg. running threads), so garbage collecting early won't make
296 /* small allocation (<LARGE_OBJECT_THRESHOLD) */
/* Small path: start a new block when there is none yet or the
 * request doesn't fit between Hp and HpLim. */
297 } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
298 if (small_alloc_list) {
/* Retire the current block: record how far it was filled. */
299 small_alloc_list->free = alloc_Hp;
302 bd->link = small_alloc_list;
303 small_alloc_list = bd;
/* Reset the bump pointer into the new block. */
307 alloc_Hp = bd->start;
308 alloc_HpLim = bd->start + BLOCK_SIZE_W;
/* Words handed out by allocate() since the last GC: whole blocks
 * consumed minus the unused tail of the current block.
 * NOTE(review): the enclosing braces are not visible in this chunk. */
317 lnat allocated_bytes(void)
319 return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
322 /* -----------------------------------------------------------------------------
323 Allocation functions for GMP.
325 These all use the allocate() interface - we can't have any garbage
326 collection going on during a gmp operation, so we use allocate()
327 which always succeeds. The gmp operations which might need to
328 allocate will ask the storage manager (via doYouWantToGC()) whether
329 a garbage collection is required, in case we get into a loop doing
330 only allocate() style allocation.
331 -------------------------------------------------------------------------- */
/* GMP alloc hook: wrap the requested bytes in a fresh StgArrWords
 * heap object and hand GMP a pointer to its payload.
 * NOTE(review): fragment -- the return type and the declaration of
 * `arr` are not visible in this chunk. */
334 stgAllocForGMP (size_t size_in_bytes)
337 nat data_size_in_words, total_size_in_words;
339 /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
340 ASSERT(size_in_bytes % sizeof(W_) == 0);
/* Payload size plus the StgArrWords header. */
342 data_size_in_words = size_in_bytes / sizeof(W_);
343 total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
345 /* allocate and fill it in. */
346 arr = (StgArrWords *)allocate(total_size_in_words);
347 SET_ARR_HDR(arr, &ARR_WORDS_info, CCCS, data_size_in_words);
349 /* and return a ptr to the goods inside the array */
350 return(BYTE_ARR_CTS(arr));
/* GMP realloc hook: allocate a new chunk and copy old_size bytes
 * across; the old chunk is simply abandoned for the GC to reclaim.
 * NOTE(review): fragment -- the return type and the declaration of
 * the loop counter `i` are not visible in this chunk. */
354 stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
356 void *new_stuff_ptr = stgAllocForGMP(new_size);
358 char *p = (char *) ptr;
359 char *q = (char *) new_stuff_ptr;
/* byte-wise copy of the old contents */
361 for (; i < old_size; i++, p++, q++) {
365 return(new_stuff_ptr);
/* GMP free hook: intentionally a no-op -- GMP data lives in the
 * garbage-collected heap, so nothing needs explicit freeing. */
369 stgDeallocForGMP (void *ptr STG_UNUSED,
370 size_t size STG_UNUSED)
372 /* easy for us: the garbage collector does the dealloc'n */
375 /* -----------------------------------------------------------------------------
377 -------------------------------------------------------------------------- */
/* NOTE(review): fragment of calcLive() -- the signature, local
 * declarations and the return statement are not visible here. */
379 /* Approximate the amount of live data in the heap. To be called just
380 * after garbage collection (see GarbageCollect()).
/* Two-space collector: live data is whatever was copied to to-space
 * (full blocks plus the filled part of the current block). */
389 if (RtsFlags.GcFlags.generations == 1) {
390 live = (g0s0->to_blocks - 1) * BLOCK_SIZE_W +
391 ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
/* Generational case: sum over every step except the nursery (g0,s0). */
395 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
396 for (s = 0; s < generations[g].n_steps; s++) {
397 /* approximate amount of live data (doesn't take into account slop
398 * at end of each block).
400 if (g == 0 && s == 0) {
403 step = &generations[g].steps[s];
404 live += (step->n_blocks - 1) * BLOCK_SIZE_W +
405 ((lnat)step->hp_bd->free - (lnat)step->hp_bd->start) / sizeof(W_);
411 /* Approximate the number of blocks that will be needed at the next
412 * garbage collection.
414 * Assume: all data currently live will remain live. Steps that will
415 * be collected next time will therefore need twice as many blocks
416 * since all the data will be copied.
/* NOTE(review): fragment of calcNeeded() -- the signature, the
 * initialisation of `needed` and the return statement are not
 * visible in this chunk. */
425 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
426 for (s = 0; s < generations[g].n_steps; s++) {
/* Skip the nursery (g0,s0); it is accounted for elsewhere. */
427 if (g == 0 && s == 0) { continue; }
428 step = &generations[g].steps[s];
/* If this generation looks due for collection (its first step has
 * outgrown max_blocks), budget double for the copy; otherwise just
 * the current block count. */
429 if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
430 needed += 2 * step->n_blocks;
432 needed += step->n_blocks;
439 /* -----------------------------------------------------------------------------
442 memInventory() checks for memory leaks by counting up all the
443 blocks we know about and comparing that to the number of blocks
444 allegedly floating around in the system.
445 -------------------------------------------------------------------------- */
/* NOTE(review): fragment of memInventory() -- the signature, locals
 * and some closing braces are not visible in this chunk. */
455 lnat total_blocks = 0, free_blocks = 0;
457 /* count the blocks we currently have */
/* Walk every step of every generation, counting ordinary blocks and
 * large-object block groups. */
459 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
460 for (s = 0; s < generations[g].n_steps; s++) {
461 step = &generations[g].steps[s];
462 total_blocks += step->n_blocks;
463 if (RtsFlags.GcFlags.generations == 1) {
464 /* two-space collector has a to-space too :-) */
465 total_blocks += g0s0->to_blocks;
467 for (bd = step->large_objects; bd; bd = bd->link) {
468 total_blocks += bd->blocks;
469 /* hack for megablock groups: they have an extra block or two in
470 the second and subsequent megablocks where the block
471 descriptors would normally go.
473 if (bd->blocks > BLOCKS_PER_MBLOCK) {
474 total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
475 * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE);
481 /* any blocks held by allocate() */
482 for (bd = small_alloc_list; bd; bd = bd->link) {
483 total_blocks += bd->blocks;
485 for (bd = large_alloc_list; bd; bd = bd->link) {
486 total_blocks += bd->blocks;
489 /* count the blocks on the free list */
490 free_blocks = countFreeList();
/* Everything we know about must add up to the megablocks we took
 * from the OS; a mismatch indicates a leak. */
492 ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
495 if (total_blocks + free_blocks != mblocks_allocated *
497 fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n",
498 total_blocks, free_blocks, total_blocks + free_blocks,
499 mblocks_allocated * BLOCKS_PER_BLOCKS);
504 /* Full heap sanity check. */
/* NOTE(review): fragment of the heap sanity-check routine -- the
 * signature and locals are missing.  `N` is presumably the highest
 * generation collected in the last GC -- confirm against the full
 * source. */
511 if (RtsFlags.GcFlags.generations == 1) {
/* Two-space collector: check to-space and its large objects. */
512 checkHeap(g0s0->to_space, NULL);
513 checkChain(g0s0->large_objects);
/* Generations 0..N (just collected): check each step's blocks,
 * skipping the nursery. */
516 for (g = 0; g <= N; g++) {
517 for (s = 0; s < generations[g].n_steps; s++) {
518 if (g == 0 && s == 0) { continue; }
519 checkHeap(generations[g].steps[s].blocks, NULL);
/* Older generations: check from the start of the first block, plus
 * their large-object chains. */
522 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
523 for (s = 0; s < generations[g].n_steps; s++) {
524 checkHeap(generations[g].steps[s].blocks,
525 generations[g].steps[s].blocks->start);
526 checkChain(generations[g].steps[s].large_objects);
529 checkFreeListSanity();