1 /* -----------------------------------------------------------------------------
2 (c) The University of Glasgow 2001
4 Arena allocation. Arenas provide fast memory allocation at the
   expense of fine-grained recycling of storage: memory may only
   be returned to the system by freeing the entire arena; it
   isn't possible to return individual objects within an arena.
9 Do not assume that sequentially allocated objects will be adjacent
12 Quirks: this allocator makes use of the RTS block allocator. If
13 the current block doesn't have enough room for the requested
14 object, then a new block is allocated. This means that allocating
15 large objects will tend to result in wasted space at the end of
16 each block. In the worst case, half of the allocated space is
17 wasted. This allocator is therefore best suited to situations in
18 which most allocations are small.
19 -------------------------------------------------------------------------- */
// Each arena struct is allocated using malloc().
// NOTE(review): the two fields below belong to the Arena struct; its opening
// brace and the 'current' block-descriptor pointer are not visible in this
// excerpt — confirm against the full file.
    StgWord *free;   // ptr to next free word in current block (bump pointer)
    StgWord *lim;    // limit (== one past the last usable word of the block)

// We like to keep track of how many blocks we've allocated for
// Storage.c:memInventory().
// Net count of blocks held by all live arenas: incremented when arenaAlloc()
// grabs a fresh group, decremented as arenaFree() walks the chain.
static long arena_blocks = 0;
    // NOTE(review): fragment of newArena() — the function signature and the
    // declaration of 'arena' are outside the visible lines.
    arena = stgMallocBytes(sizeof(Arena), "newArena");
    // Grab an initial block from the RTS block allocator (locked variant,
    // safe to call without holding the storage-manager lock ourselves).
    arena->current = allocBlock_lock();
    arena->current->link = NULL;            // blocks form a singly-linked chain, newest first
    arena->free = arena->current->start;    // bump pointer starts at the block's first word
    arena->lim = arena->current->start + BLOCK_SIZE_W;  // one past the last usable word
// The minimum alignment of an allocated block.
// NOTE(review): the #define of MIN_ALIGN itself falls in a gap of this
// excerpt — confirm its value in the full file.

/* 'n' is assumed to be a power of 2 */
// Round x up to the next multiple of n (classic mask trick; only valid
// when n is a power of 2).
#define ROUNDUP(x,n)  (((x)+((n)-1))&(~((n)-1)))
// Convert a byte count to a word count (truncating division by sizeof(W_)).
#define B_TO_W(x)     ((x) / sizeof(W_))
// Allocate some memory in an arena
// NOTE(review): this excerpt has gaps — the return type, the declarations of
// size_w/req_blocks/bd, both 'return' statements, the '} else {' and the
// initialisation of the fresh block descriptor's fields (free/flags/etc.) are
// not visible here. Do not edit without the full file.
arenaAlloc( Arena *arena, size_t size )
    // round up to nearest alignment chunk.
    size = ROUNDUP(size,MIN_ALIGN);

    // size of allocated block in words.
    size_w = B_TO_W(size);

    // NOTE(review): since lim is one-past-the-end, '<=' would also accept an
    // allocation that exactly fills the block; '<' wastes that last word.
    // Looks like a conservative off-by-one — confirm intent before changing.
    if ( arena->free + size_w < arena->lim ) {
        // enough room in the current block...
        arena->free += size_w;   // bump the free pointer past the new object
        // allocate a fresh block...
        // number of whole blocks needed to hold 'size' bytes (>= 1)
        req_blocks = (lnat)BLOCK_ROUND_UP(size) / BLOCK_SIZE;
        bd = allocGroup_lock(req_blocks);   // contiguous group from the RTS block allocator
        arena_blocks += req_blocks;         // keep Storage.c:memInventory() accounting in step
        bd->link = arena->current;          // push the new group onto the arena's chain
        // the new object sits at the start of the fresh group; carve it out now
        arena->free = bd->free + size_w;
        arena->lim = bd->free + bd->blocks * BLOCK_SIZE_W;  // end of the whole group
// Free an entire arena
// NOTE(review): excerpt is incomplete — the return type, the declarations of
// bd/next, the 'next = bd->link' step, the freeing of each block group and of
// the Arena struct itself run past the visible lines.
arenaFree( Arena *arena )
    // Walk the chain of block groups, newest first, releasing each one.
    for (bd = arena->current; bd != NULL; bd = next) {
        arena_blocks -= bd->blocks;   // undo the count added in arenaAlloc()
        // A negative count would mean a double-free or corrupted accounting.
        ASSERT(arena_blocks >= 0);