+}
+
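+// Insert bd into the free list, keeping the list sorted by ascending
+// group size.  (An illustrative sketch: if groups of 1, 4 and 9 blocks
+// are already free, the list reads
+//
+//     free_list -> [1] -> [4] -> [9]
+//
+// and inserting a 6-block group gives [1] -> [4] -> [6] -> [9].)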
+static void
+free_list_insert (bdescr *bd)
+{
+    bdescr *p, *prev;
+
+    if (!free_list) {
+        dbl_link_onto(bd, &free_list);
+        return;
+    }
+
+    prev = NULL;
+    p = free_list;
+    while (p != NULL && p->blocks < bd->blocks) {
+        prev = p;
+        p = p->link;
+    }
+    if (prev == NULL)
+    {
+        dbl_link_onto(bd, &free_list);
+    }
+    else
+    {
+        dbl_link_insert_after(bd, prev);
+    }
+}
+
+
+STATIC_INLINE bdescr *
+tail_of (bdescr *bd)
+{
+    return bd + bd->blocks - 1;
+}
+
+// After splitting a group, the last block of each resulting group
+// must be set up as a tail that points to the head block, to keep
+// our invariants for coalescing.
+STATIC_INLINE void
+setup_tail (bdescr *bd)
+{
+    bdescr *tail;
+    tail = tail_of(bd);
+    if (tail != bd) {
+        tail->blocks = 0;
+        tail->free = 0;
+        tail->link = bd;
+    }
+}
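+
+// An illustrative sketch of the invariant setup_tail maintains (the
+// backwards-coalescing step in freeGroup, which is not shown in this
+// hunk, relies on it): after a 5-block free group bd is split into a
+// 3-block remainder and a 2-block group,
+//
+//     bd:   blocks == 3
+//     bd+2: blocks == 0, link == bd   <- the tail
+//
+// so when the neighbouring group starting at bd+3 is later freed, the
+// head of the preceding free group is found in O(1) via (bd+2)->link.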
+
+
+// Take a free block group bd, and split off a group of size n from
+// it. Adjust the free list as necessary, and return the new group.
+static bdescr *
+split_free_block (bdescr *bd, nat n)
+{
+    bdescr *fg; // free group
+
+    ASSERT(bd->blocks > n);
+    fg = bd + bd->blocks - n; // take n blocks off the end
+    fg->blocks = n;
+    bd->blocks -= n;
+    setup_tail(bd);
+    // bd has shrunk, so it may now be out of place in the size-sorted
+    // free list: move it towards the front as necessary
+    free_list_push_backwards(bd);
+    return fg;
+}
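+
+// For example (illustrative): split_free_block(bd, 2) on a 5-block
+// free group returns fg = bd + 3 with fg->blocks == 2, and leaves bd
+// on the free list as a 3-block group, pushed backwards in the list
+// if it is now smaller than one of its predecessors.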
+
+static bdescr *
+alloc_mega_group (nat mblocks)
+{
+    bdescr *best, *bd, *prev;
+    nat n;
+
+    n = MBLOCK_GROUP_BLOCKS(mblocks);
+
+    best = NULL;
+    prev = NULL;
+    for (bd = free_mblock_list; bd != NULL; prev = bd, bd = bd->link)
+    {
+        if (bd->blocks == n)
+        {
+            if (prev) {
+                prev->link = bd->link;
+            } else {
+                free_mblock_list = bd->link;
+            }
+            // leave initialisation to the caller: for a multi-mblock
+            // group the block descriptors beyond the first megablock
+            // don't exist, so initGroup() must not walk all n blocks
+            return bd;
+        }
+        else if (bd->blocks > n)
+        {
+            if (!best || bd->blocks < best->blocks)
+            {
+                best = bd;
+            }
+        }
+    }
+
+    if (best)
+    {
+        // we take our chunk off the end here.
+        nat best_mblocks = BLOCKS_TO_MBLOCKS(best->blocks);
+        // MBLOCK_ROUND_DOWN() yields a void *, so cast before doing
+        // byte arithmetic on it
+        bd = FIRST_BDESCR((StgWord8*)MBLOCK_ROUND_DOWN(best) +
+                          (best_mblocks-mblocks)*MBLOCK_SIZE);
+
+        best->blocks = MBLOCK_GROUP_BLOCKS(best_mblocks - mblocks);
+        initMBlock(MBLOCK_ROUND_DOWN(bd));
+    }
+    else
+    {
+        void *mblock = getMBlocks(mblocks);
+        initMBlock(mblock); // only need to init the 1st one
+        bd = FIRST_BDESCR(mblock);
+    }
+    bd->blocks = MBLOCK_GROUP_BLOCKS(mblocks);
+    return bd;
+}
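+
+// An illustrative example: if free_mblock_list holds a 2-mblock group
+// followed by a 5-mblock group, alloc_mega_group(2) unlinks and
+// returns the exact match, while alloc_mega_group(3) carves its three
+// megablocks off the high-address end of the 5-mblock group, leaving
+// a 2-mblock group behind on the list.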
+
+bdescr *
+allocGroup (nat n)
+{
+    bdescr *bd, *rem;
+
+    ASSERT_SM_LOCK();
+
+    if (n == 0) barf("allocGroup: requested zero blocks");
+
+    if (n >= BLOCKS_PER_MBLOCK)
+    {
+        bd = alloc_mega_group(BLOCKS_TO_MBLOCKS(n));
+        // only the bdescrs of the first MB are required to be initialised
+        initGroup(BLOCKS_PER_MBLOCK, bd);
+        IF_DEBUG(sanity, checkFreeListSanity());
+        return bd;
+    }
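+
+    // (An illustrative example, assuming 4k blocks, 1Mb megablocks and
+    // hence BLOCKS_PER_MBLOCK == 252: allocGroup(300) rounds up to
+    // BLOCKS_TO_MBLOCKS(300) == 2 megablocks, and the group returned
+    // by the branch above has MBLOCK_GROUP_BLOCKS(2) == 508 blocks.)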
+
+    // The free list is sorted by size, so the first group we find
+    // that is big enough is also a best fit.
+    for (bd = free_list; bd != NULL; bd = bd->link)
+    {
+        if (bd->blocks == n)            // exactly the right size!
+        {
+            dbl_link_remove(bd, &free_list);
+            initGroup(n, bd);           // initialise it
+            IF_DEBUG(sanity, checkFreeListSanity());
+            return bd;
+        }
+        if (bd->blocks > n)             // group too big: split it
+        {
+            bd = split_free_block(bd, n);
+            initGroup(n, bd);           // initialise the new chunk
+            IF_DEBUG(sanity, checkFreeListSanity());
+            return bd;
+        }
+    }
+
+    // Nothing on the free list was big enough: allocate a fresh
+    // megablock, take n blocks from the front of it, and put the
+    // remainder back on the free list.
+    bd = alloc_mega_group(1);
+    bd->blocks = n;
+    initGroup(n,bd);                     // we know the group will fit
+    rem = bd + n;
+    rem->blocks = BLOCKS_PER_MBLOCK-n;
+    initGroup(BLOCKS_PER_MBLOCK-n, rem); // init the slop
+    freeGroup(rem);                      // add the slop on to the free list
+    IF_DEBUG(sanity, checkFreeListSanity());
+    return bd;