1 /* -----------------------------------------------------------------------------
2 * $Id: Storage.c,v 1.4 1999/01/19 15:07:56 simonm Exp $
4 * Storage manager front end
6 * ---------------------------------------------------------------------------*/
13 #include "BlockAlloc.h"
19 #include "StoragePriv.h"
/* File-scope storage-manager state.  NOTE(review): surrounding context is
 * elided in this view; declarations below are kept byte-identical. */
21 bdescr *current_nursery; /* next available nursery block, or NULL */
22 nat nursery_blocks; /* number of blocks in the nursery */
/* List of CAFs entered so far, threaded via their STATIC_LINK2 field
 * (see newCAF() below). */
24 StgClosure *caf_list = NULL;
26 bdescr *small_alloc_list; /* allocate()d small objects */
27 bdescr *large_alloc_list; /* allocate()d large objects */
28 nat alloc_blocks; /* number of allocate()d blocks since GC */
29 nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
/* Bump-allocation pointers into the block at the head of small_alloc_list;
 * used by allocate() for the small-object fast path. */
31 StgPtr alloc_Hp = NULL; /* next free byte in small_alloc_list */
32 StgPtr alloc_HpLim = NULL; /* end of block at small_alloc_list */
/* The generation/step structure of the generational heap, set up by the
 * initStorage code below. */
34 generation *generations; /* all the generations */
35 generation *g0; /* generation 0, for convenience */
36 generation *oldest_gen; /* oldest generation, for convenience */
37 step *g0s0; /* generation 0, step 0, for convenience */
/* Forward declarations for the static helpers defined later in this file. */
42 static bdescr *allocNursery (nat blocks);
/* Custom allocators registered with GMP via mp_set_memory_functions();
 * they allocate on the GC'd heap rather than with malloc(). */
43 static void *stgAllocForGMP (size_t size_in_bytes);
44 static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
45 static void stgDeallocForGMP (void *ptr, size_t size);
/* Body of initStorage(): builds the generation/step structure, allocates
 * the nursery, initialises the allocate() interface and registers the
 * GMP allocation hooks.  NOTE(review): the function header, local
 * declarations (g, s, step) and several statements are elided from this
 * view; all code lines below are kept byte-identical. */
55 /* allocate generation info array */
56 generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
57 * sizeof(struct _generation),
60 /* set up all generations */
61 for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
62 generations[g].no = g;
/* empty mutable list: END_MUT_LIST is the sentinel, not NULL */
63 generations[g].mut_list = END_MUT_LIST;
64 generations[g].collections = 0;
65 generations[g].failed_promotions = 0;
68 /* Oldest generation: one step */
69 g = RtsFlags.GcFlags.generations-1;
70 generations[g].n_steps = 1;
71 generations[g].steps =
72 stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");
73 generations[g].max_blocks = RtsFlags.GcFlags.minOldGenSize;
74 step = &generations[g].steps[0];
76 step->gen = &generations[g];
/* objects in the oldest generation have nowhere further to be promoted */
79 step->to = step; /* destination is this step */
84 /* set up all except the oldest generation with 2 steps */
85 for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
86 generations[g].n_steps = 2;
87 generations[g].steps = stgMallocBytes (2 * sizeof(struct _step),
88 "initStorage: steps");
89 generations[g].max_blocks = RtsFlags.GcFlags.minOldGenSize;
/* wire up the promotion targets for every step of the younger gens */
92 for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
93 for (s = 0; s < generations[g].n_steps; s++) {
94 step = &generations[g].steps[s];
98 step->gen = &generations[g];
/* NOTE(review): the two assignments below must be mutually exclusive —
 * the last step of a generation promotes into the next generation's
 * first step, earlier steps promote into the next step of the same
 * generation.  The guarding conditional is elided from this view. */
100 step->to = &generations[g+1].steps[0];
102 step->to = &generations[g].steps[s+1];
107 step->large_objects = NULL;
108 step->new_large_objects = NULL;
109 step->scavenged_large_objects = NULL;
113 oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
115 /* generation 0 is special: that's the nursery */
116 g0 = &generations[0];
/* max_blocks == 0: the nursery has no fixed block limit of its own */
117 generations[0].max_blocks = 0;
119 /* G0S0: the allocation area */
120 step = &generations[0].steps[0];
122 step->blocks = allocNursery(RtsFlags.GcFlags.minAllocAreaSize);
123 step->n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
124 nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
125 current_nursery = step->blocks;
126 /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
128 weak_ptr_list = NULL;
131 /* initialise the allocate() interface */
132 small_alloc_list = NULL;
133 large_alloc_list = NULL;
135 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
138 /* Tell GNU multi-precision pkg about our custom alloc functions */
139 mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
142 IF_DEBUG(gc, stat_describe_gens());
/* allocNursery: allocate `blocks` fresh blocks and chain them into a
 * list for use as the nursery, returning the head of the list.
 * NOTE(review): the return type, braces, the declaration of `i`, and
 * most of the loop body (allocBlock() call, list linkage, return) are
 * elided from this view. */
146 allocNursery (nat blocks)
148 bdescr *last_bd, *bd;
152 /* Allocate a nursery */
153 for (i=0; i < blocks; i++) {
/* fresh block: nothing allocated in it yet */
159 bd->free = bd->start;
/* Storage-manager shutdown path: compute the total amount allocated over
 * the run and hand it to the stats module via stat_exit().
 * NOTE(review): the function header is elided from this view —
 * presumably this is exitStorage(); confirm against the full file. */
171 /* Return code ignored for now */
172 /* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */
173 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
/* subtract the nursery blocks beyond current_nursery that were never
 * actually filled */
174 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
175 allocated -= BLOCK_SIZE_W;
177 stat_exit(allocated);
/* recordMutable: put a mutable closure on its generation's mutable list
 * so that old-to-new pointers can be found at GC time.
 * NOTE(review): the return type, braces, and the code that obtains the
 * block descriptor `bd` for `p` (and the generation-0 early exit the
 * comment below refers to) are elided from this view. */
181 recordMutable(StgMutClosure *p)
185 ASSERT(closure_MUTABLE(p));
189 /* no need to bother in generation 0 */
/* mut_link == NULL means "not yet on a mutable list" — link it in
 * exactly once */
194 if (p->mut_link == NULL) {
195 p->mut_link = bd->gen->mut_list;
196 bd->gen->mut_list = p;
/* newCAF: called when a CAF (constant applicative form) is first
 * entered.  Links the CAF onto the oldest generation's mutable list and
 * onto caf_list.  NOTE(review): the return type, braces and closing
 * lines are elided from this view. */
201 newCAF(StgClosure* caf)
203 const StgInfoTable *info;
205 /* Put this CAF on the mutable list for the old generation.
206 * This is a HACK - the IND_STATIC closure doesn't really have
207 * a mut_link field, but we pretend it has - in fact we re-use
208 * the STATIC_LINK field for the time being, because when we
209 * come to do a major GC we won't need the mut_link field
210 * any more and can use it as a STATIC_LINK.
212 ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_list;
213 oldest_gen->mut_list = (StgMutClosure *)caf;
/* sanity-check the closure type, then thread it onto caf_list via the
 * STATIC_LINK2 field */
216 info = get_itbl(caf);
217 ASSERT(info->type == IND_STATIC);
218 STATIC_LINK2(info,caf) = caf_list;
223 /* -----------------------------------------------------------------------------
224 The allocate() interface
226 allocate(n) always succeeds, and returns a chunk of memory n words
227 long. n can be larger than the size of a block if necessary, in
228 which case a contiguous block group will be allocated.
229 -------------------------------------------------------------------------- */
/* Body of allocate(n) — see the interface comment above.
 * NOTE(review): the function header, local declarations, the return
 * statements, and some statements (e.g. the allocBlock()/bookkeeping in
 * the small-object refill path) are elided from this view. */
237 TICK_ALLOC_PRIM(n,wibble,wibble,wibble)
240 /* big allocation (>LARGE_OBJECT_THRESHOLD) */
241 /* ToDo: allocate directly into generation 1 */
242 if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
/* round the request up to a whole number of blocks */
243 nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
244 bd = allocGroup(req_blocks);
245 dbl_link_onto(bd, &g0s0->large_objects);
249 bd->free = bd->start;
250 /* don't add these blocks to alloc_blocks, since we're assuming
251 * that large objects are likely to remain live for quite a while
252 * (eg. running threads), so garbage collecting early won't make
257 /* small allocation (<LARGE_OBJECT_THRESHOLD) */
258 } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
/* current block exhausted (or no block yet): record how far the old
 * head block was filled, then push a fresh block onto the front of
 * small_alloc_list and reset the bump pointers */
259 if (small_alloc_list) {
260 small_alloc_list->free = alloc_Hp;
263 bd->link = small_alloc_list;
264 small_alloc_list = bd;
268 alloc_Hp = bd->start;
269 alloc_HpLim = bd->start + BLOCK_SIZE_W;
/* allocated_bytes: approximate amount allocated through allocate() since
 * the last GC — all blocks handed out, minus the unused tail of the
 * current small-allocation block.  NOTE(review): despite the name, the
 * visible arithmetic is in words (BLOCK_SIZE_W, StgPtr difference) —
 * confirm units against callers.  Braces are elided from this view. */
278 lnat allocated_bytes(void)
280 return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
283 /* -----------------------------------------------------------------------------
284 Allocation functions for GMP.
286 These all use the allocate() interface - we can't have any garbage
287 collection going on during a gmp operation, so we use allocate()
288 which always succeeds. The gmp operations which might need to
289 allocate will ask the storage manager (via doYouWantToGC()) whether
290 a garbage collection is required, in case we get into a loop doing
291 only allocate() style allocation.
292 -------------------------------------------------------------------------- */
/* stgAllocForGMP: GMP allocation hook — allocates an StgArrWords on the
 * GC'd heap (via allocate(), which always succeeds) and returns a
 * pointer to its payload.  NOTE(review): the return type, braces and
 * the declaration of `arr` are elided from this view. */
295 stgAllocForGMP (size_t size_in_bytes)
298 nat data_size_in_words, total_size_in_words;
300 /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
301 ASSERT(size_in_bytes % sizeof(W_) == 0);
303 data_size_in_words = size_in_bytes / sizeof(W_);
304 total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
306 /* allocate and fill it in. */
307 arr = (StgArrWords *)allocate(total_size_in_words);
308 SET_ARR_HDR(arr, &ARR_WORDS_info, CCCS, data_size_in_words);
310 /* and return a ptr to the goods inside the array */
311 return(BYTE_ARR_CTS(arr));
/* stgReallocForGMP: GMP realloc hook — allocate a fresh chunk and copy
 * the old contents across byte by byte; the old chunk is simply left for
 * the garbage collector to reclaim.  NOTE(review): the return type,
 * braces, the declaration/initialisation of `i`, and the copy statement
 * inside the loop body are elided from this view. */
315 stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
317 void *new_stuff_ptr = stgAllocForGMP(new_size);
319 char *p = (char *) ptr;
320 char *q = (char *) new_stuff_ptr;
322 for (; i < old_size; i++, p++, q++) {
326 return(new_stuff_ptr);
/* stgDeallocForGMP: GMP free hook — deliberately a no-op, since GMP data
 * lives on the GC'd heap and is reclaimed by the collector.
 * NOTE(review): braces are elided from this view. */
330 stgDeallocForGMP (void *ptr STG_UNUSED,
331 size_t size STG_UNUSED)
333 /* easy for us: the garbage collector does the dealloc'n */
336 /* -----------------------------------------------------------------------------
339 memInventory() checks for memory leaks by counting up all the
340 blocks we know about and comparing that to the number of blocks
341 allegedly floating around in the system.
342 -------------------------------------------------------------------------- */
/* Body of memInventory() — debug-only leak check: every block must be
 * accounted for in a step, a large-object list, an allocate() list, or
 * the block allocator's free list; the totals must add up to everything
 * obtained from the megablock allocator.  NOTE(review): the function
 * header, loop-variable declarations and closing braces are elided from
 * this view. */
352 lnat total_blocks = 0, free_blocks = 0;
354 /* count the blocks we currently have */
355 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
356 for (s = 0; s < generations[g].n_steps; s++) {
357 step = &generations[g].steps[s];
358 total_blocks += step->n_blocks;
359 for (bd = step->large_objects; bd; bd = bd->link) {
360 total_blocks += bd->blocks;
361 /* hack for megablock groups: they have an extra block or two in
362 the second and subsequent megablocks where the block
363 descriptors would normally go.
365 if (bd->blocks > BLOCKS_PER_MBLOCK) {
366 total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
367 * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE);
373 /* any blocks held by allocate() */
374 for (bd = small_alloc_list; bd; bd = bd->link) {
375 total_blocks += bd->blocks;
377 for (bd = large_alloc_list; bd; bd = bd->link) {
378 total_blocks += bd->blocks;
381 /* count the blocks on the free list */
382 free_blocks = countFreeList();
/* the books must balance; report the discrepancy before asserting in
 * debug builds.  NOTE(review): the if-condition below is cut short in
 * this view (the `* BLOCKS_PER_MBLOCK` continuation is elided). */
384 ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
387 if (total_blocks + free_blocks != mblocks_allocated *
389 fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n",
390 total_blocks, free_blocks, total_blocks + free_blocks,
391 mblocks_allocated * BLOCKS_PER_MBLOCK);