1 /* -----------------------------------------------------------------------------
2 * $Id: Storage.c,v 1.6 1999/01/21 10:31:51 simonm Exp $
4 * Storage manager front end
6 * ---------------------------------------------------------------------------*/
13 #include "BlockAlloc.h"
19 #include "StoragePriv.h"
/* Storage-manager global state, shared with the garbage collector
 * and the statistics code.
 */

bdescr *current_nursery;	/* next available nursery block, or NULL */
nat nursery_blocks;		/* number of blocks in the nursery */

StgClosure *caf_list = NULL;	/* CAFs registered by newCAF(), chained via
				 * their STATIC_LINK field (see newCAF below) */

bdescr *small_alloc_list;	/* allocate()d small objects */
bdescr *large_alloc_list;	/* allocate()d large objects */
nat alloc_blocks;		/* number of allocate()d blocks since GC */
nat alloc_blocks_lim;		/* approximate limit on alloc_blocks */

StgPtr alloc_Hp = NULL;		/* next free byte in small_alloc_list */
StgPtr alloc_HpLim = NULL;	/* end of block at small_alloc_list */

generation *generations;	/* all the generations */
generation *g0;			/* generation 0, for convenience */
generation *oldest_gen;		/* oldest generation, for convenience */
step *g0s0;			/* generation 0, step 0, for convenience */

/* Custom allocation hooks handed to GMP via mp_set_memory_functions()
 * in initStorage(); they place GMP limb data in the GC'd heap.
 */
static void *stgAllocForGMP (size_t size_in_bytes);
static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
static void stgDeallocForGMP (void *ptr, size_t size);
  /* ----- body of initStorage() -------------------------------------
   * Builds the generation/step tables, the nursery (G0S0), the
   * allocate() free lists, and registers the GMP allocation hooks.
   * NOTE(review): this listing is an excerpt -- the function header
   * and a number of interior lines (closing braces, else-branches,
   * some field initialisations) are elided, so several statements
   * below appear without their full enclosing context.
   */

  /* allocate generation info array */
  generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
					     * sizeof(struct _generation),

  /* Initialise all generations */
  for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
    gen = &generations[g];
    gen->mut_list = END_MUT_LIST;	/* no recorded mutable objects yet */
    gen->failed_promotions = 0;
    gen->max_blocks = RtsFlags.GcFlags.minOldGenSize;

  /* A couple of convenience pointers */
  oldest_gen = &generations[RtsFlags.GcFlags.generations-1];

  /* Allocate step structures in each generation */
  if (RtsFlags.GcFlags.generations > 1) {
    /* Only for multiple-generations */

    /* Oldest generation: one step */
    oldest_gen->n_steps = 1;
      stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");

    /* set up all except the oldest generation with 2 steps */
    for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
      generations[g].n_steps = 2;
      generations[g].steps = stgMallocBytes (2 * sizeof(struct _step),
					     "initStorage: steps");

    /* single generation, i.e. a two-space collector */
    g0->steps = stgMallocBytes (sizeof(struct _step), "initStorage: steps");

  /* Initialise all steps */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      step->gen = &generations[g];	/* back-pointer to owning generation */
      step->large_objects = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;

  /* Set up the destination pointers in each younger gen. step.
   * NOTE(review): the conditional guarding the two assignments to
   * step->to (promote to next generation vs. age into next step) is
   * elided in this excerpt -- confirm against the full source.
   */
  for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      step->to = &generations[g+1].steps[0];
      step->to = &generations[g].steps[s+1];

  /* The oldest generation has one step and its destination is the
   * oldest generation itself: survivors stay put. */
  oldest_gen->steps[0].to = &oldest_gen->steps[0];

  /* generation 0 is special: that's the nursery */
  generations[0].max_blocks = 0;

  /* G0S0: the allocation area */
  step = &generations[0].steps[0];
  step->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
  step->n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
  nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
  current_nursery = step->blocks;
  /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */

  weak_ptr_list = NULL;

  /* initialise the allocate() interface */
  small_alloc_list = NULL;
  large_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Tell GNU multi-precision pkg about our custom alloc functions */
  mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);

  IF_DEBUG(gc, stat_describe_gens());
/* allocNursery: build a chain of `blocks` fresh nursery blocks,
 * linked onto `last_bd`, and return the head of the list.
 * NOTE(review): the return-type line and most of the loop body
 * (including the allocation that sets `bd`) are elided in this
 * excerpt -- confirm behaviour against the full source.
 */
allocNursery (bdescr *last_bd, nat blocks)
  /* Allocate a nursery */
  for (i=0; i < blocks; i++) {
    bd->free = bd->start;	/* fresh block starts out empty */
  /* ----- shutdown statistics (presumably the body of exitStorage())
   * Totals up the words allocated over the program's run and hands
   * the figure to stat_exit() for reporting.
   */
  /* Return code ignored for now */
  /* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */
  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
  /* subtract a block's worth of words for each nursery block after
   * current_nursery -- presumably those were never filled; verify
   * against the full source */
  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
    allocated -= BLOCK_SIZE_W;

  stat_exit(allocated);
/* recordMutable: push a mutable closure onto its generation's
 * mut_list so the collector can revisit it later.  A closure whose
 * mut_link is non-NULL is already on a list, so it is skipped --
 * this avoids recording duplicates.
 * NOTE(review): the return-type line and the lookup that sets `bd`
 * (the block descriptor for p) are elided in this excerpt.
 */
recordMutable(StgMutClosure *p)
  ASSERT(closure_MUTABLE(p));	/* only mutable closures belong here */

  /* no need to bother in generation 0 */

  if (p->mut_link == NULL) {	/* not yet on any mutable list */
    p->mut_link = bd->gen->mut_list;
    bd->gen->mut_list = p;
/* newCAF: register a newly-entered CAF (constant applicative form)
 * with the storage manager.
 * NOTE(review): the return-type line and some interior lines
 * (including the update of caf_list after the STATIC_LINK2
 * assignment) are elided in this excerpt.
 */
newCAF(StgClosure* caf)
  /* Put this CAF on the mutable list for the old generation.
   * This is a HACK - the IND_STATIC closure doesn't really have
   * a mut_link field, but we pretend it has - in fact we re-use
   * the STATIC_LINK field for the time being, because when we
   * come to do a major GC we won't need the mut_link field
   * any more and can use it as a STATIC_LINK.
   */
  ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_list;
  oldest_gen->mut_list = (StgMutClosure *)caf;

  const StgInfoTable *info;

  info = get_itbl(caf);
  ASSERT(info->type == IND_STATIC);	/* only static indirections qualify */
  STATIC_LINK2(info,caf) = caf_list;	/* chain onto the global CAF list */
/* -----------------------------------------------------------------------------
   The allocate() interface

   allocate(n) always succeeds, and returns a chunk of memory n words
   long.  n can be larger than the size of a block if necessary, in
   which case a contiguous block group will be allocated.
   -------------------------------------------------------------------------- */

  /* ----- body of allocate() ----------------------------------------
   * NOTE(review): the function header, the small-allocation bump
   * path, and several closing braces are elided in this excerpt.
   */
  /* big allocation (>LARGE_OBJECT_THRESHOLD) */
  /* ToDo: allocate directly into generation 1 */
  if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
    /* round the byte size up to a whole number of blocks */
    nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
    bd = allocGroup(req_blocks);
    dbl_link_onto(bd, &g0s0->large_objects);	/* track as a G0S0 large object */
    bd->free = bd->start;
    /* don't add these blocks to alloc_blocks, since we're assuming
     * that large objects are likely to remain live for quite a while
     * (eg. running threads), so garbage collecting early won't make
     * [NOTE(review): remainder of this comment elided in excerpt] */

  /* small allocation (<LARGE_OBJECT_THRESHOLD) */
  } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
    /* no current block, or it can't fit n words: start a fresh one */
    if (small_alloc_list) {
      small_alloc_list->free = alloc_Hp;	/* remember fill level of old block */
    bd->link = small_alloc_list;	/* push the new block onto the list */
    small_alloc_list = bd;
    alloc_Hp = bd->start;		/* reset bump pointer into new block */
    alloc_HpLim = bd->start + BLOCK_SIZE_W;
/* allocated_bytes: amount allocated through the allocate() interface
 * since the last GC: every block handed out, minus the unused tail of
 * the current bump-allocation block.  Despite the name, the result is
 * in words (BLOCK_SIZE_W and StgPtr arithmetic are word-based).
 * NOTE(review): enclosing braces elided in this excerpt.
 */
lnat allocated_bytes(void)
  return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
/* -----------------------------------------------------------------------------
   Allocation functions for GMP.

   These all use the allocate() interface - we can't have any garbage
   collection going on during a gmp operation, so we use allocate()
   which always succeeds.  The gmp operations which might need to
   allocate will ask the storage manager (via doYouWantToGC()) whether
   a garbage collection is required, in case we get into a loop doing
   only allocate() style allocation.
   -------------------------------------------------------------------------- */

/* stgAllocForGMP: GMP allocation hook.  Wraps the requested bytes in
 * an StgArrWords heap object (via allocate()) and returns a pointer
 * to its payload, so GMP limb data lives in the GC'd heap.
 * NOTE(review): the return-type line and the declaration of `arr`
 * are elided in this excerpt.
 */
stgAllocForGMP (size_t size_in_bytes)
  nat data_size_in_words, total_size_in_words;

  /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
  ASSERT(size_in_bytes % sizeof(W_) == 0);

  data_size_in_words = size_in_bytes / sizeof(W_);
  total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;

  /* allocate and fill it in. */
  arr = (StgArrWords *)allocate(total_size_in_words);
  SET_ARR_HDR(arr, &ARR_WORDS_info, CCCS, data_size_in_words);

  /* and return a ptr to the goods inside the array */
  return(BYTE_ARR_CTS(arr));
/* stgReallocForGMP: GMP reallocation hook.  Allocates a fresh heap
 * array and copies the old contents across byte by byte; the old
 * object is simply abandoned for the garbage collector to reclaim.
 * NOTE(review): the return-type line, the declaration/initialisation
 * of `i`, and the copy-loop body are elided in this excerpt.
 */
stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
  void *new_stuff_ptr = stgAllocForGMP(new_size);
  char *p = (char *) ptr;		/* copy source: old payload */
  char *q = (char *) new_stuff_ptr;	/* copy destination: new payload */

  for (; i < old_size; i++, p++, q++) {

  return(new_stuff_ptr);
/* stgDeallocForGMP: GMP deallocation hook -- intentionally a no-op,
 * since GMP objects live on the GC'd heap and are reclaimed by the
 * collector, not freed explicitly.
 */
stgDeallocForGMP (void *ptr STG_UNUSED,
		  size_t size STG_UNUSED)
  /* easy for us: the garbage collector does the dealloc'n */
/* -----------------------------------------------------------------------------
   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

  /* NOTE(review): the function header, local declarations and several
   * closing braces of memInventory() are elided in this excerpt.
   */
  lnat total_blocks = 0, free_blocks = 0;

  /* count the blocks we currently have */

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      total_blocks += step->n_blocks;
      if (RtsFlags.GcFlags.generations == 1) {
	/* two-space collector has a to-space too :-) */
	total_blocks += g0s0->to_blocks;
      for (bd = step->large_objects; bd; bd = bd->link) {
	total_blocks += bd->blocks;
	/* hack for megablock groups: they have an extra block or two in
	   the second and subsequent megablocks where the block
	   descriptors would normally go. */
	if (bd->blocks > BLOCKS_PER_MBLOCK) {
	  total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
			  * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE);

  /* any blocks held by allocate() */
  for (bd = small_alloc_list; bd; bd = bd->link) {
    total_blocks += bd->blocks;

  for (bd = large_alloc_list; bd; bd = bd->link) {
    total_blocks += bd->blocks;

  /* count the blocks on the free list */
  free_blocks = countFreeList();

  /* every block must be accounted for: live + free == all allocated */
  ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);

  /* NOTE(review): the right-hand side of this condition is truncated
   * in the excerpt; presumably "mblocks_allocated * BLOCKS_PER_MBLOCK"
   * as in the ASSERT above -- confirm against the full source. */
  if (total_blocks + free_blocks != mblocks_allocated *
    fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n",
	    total_blocks, free_blocks, total_blocks + free_blocks,
	    mblocks_allocated * BLOCKS_PER_MBLOCK);