+ if (small_alloc_list != NULL) {
+ ASSERT(alloc_Hp >= small_alloc_list->start &&
+ alloc_Hp <= small_alloc_list->start + BLOCK_SIZE);
+ small_alloc_list->free = alloc_Hp;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ allocateLocal()
+
+ This allocates memory in the current thread - it is intended for
+ use primarily from STG-land where we have a Capability. It is
+ better than allocate() because it doesn't require taking the
+ sm_mutex lock in the common case.
+
+ Memory is allocated directly from the nursery if possible (but not
+ from the current nursery block, so as not to interfere with
+ Hp/HpLim).
+ -------------------------------------------------------------------------- */
+
+StgPtr
+allocateLocal (Capability *cap, nat n)
+{
+ bdescr *bd;
+ StgPtr p;
+
+ // n is a request in words (W_), not bytes — see the /sizeof(W_) below.
+ TICK_ALLOC_HEAP_NOCTR(n);
+ CCS_ALLOC(CCCS,n);
+
+ /* big allocation (>=LARGE_OBJECT_THRESHOLD) */
+ /* ToDo: allocate directly into generation 1 */
+ if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
+ nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
+ // Large objects go straight onto g0s0's large-object list; this
+ // path always takes the SM lock (the fast path below usually doesn't).
+ ACQUIRE_SM_LOCK;
+ bd = allocGroup(req_blocks);
+ dbl_link_onto(bd, &g0s0->large_objects);
+ g0s0->n_large_blocks += req_blocks;
+ bd->gen_no = 0;
+ bd->step = g0s0;
+ bd->flags = BF_LARGE;
+ bd->free = bd->start + n;
+ alloc_blocks += req_blocks;
+ RELEASE_SM_LOCK;
+ return bd->start;
+
+ /* small allocation (<LARGE_OBJECT_THRESHOLD) */
+ } else {
+
+ bd = cap->r.rCurrentAlloc;
+ if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
+
+ // The CurrentAlloc block is full, we need to find another
+ // one. First, we try taking the next block from the
+ // nursery:
+ bd = cap->r.rCurrentNursery->link;
+
+ // The next nursery block may be partially used (bd->free != bd->start),
+ // so we re-check that the request actually fits in it.
+ if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
+ // The nursery is empty, or the next block is already
+ // full: allocate a fresh block (we can't fail here).
+ ACQUIRE_SM_LOCK;
+ bd = allocBlock();
+ cap->r.rNursery->n_blocks++;
+ RELEASE_SM_LOCK;
+ // Safe to initialise bd outside the lock: the block is still
+ // private to this Capability until linked in below.
+ // NOTE(review): alloc_blocks is not bumped on this path, unlike
+ // the large-object path above — confirm that's intentional.
+ bd->gen_no = 0;
+ bd->step = cap->r.rNursery;
+ bd->flags = 0;
+ } else {
+ // we have a block in the nursery: take it and put
+ // it at the *front* of the nursery list, and use it
+ // to allocate() from.
+ // Unlink bd from its current position, patching the back-pointer
+ // of its successor so the doubly-linked list stays consistent.
+ cap->r.rCurrentNursery->link = bd->link;
+ if (bd->link != NULL) {
+ bd->link->u.back = cap->r.rCurrentNursery;
+ }
+ }
+ dbl_link_onto(bd, &cap->r.rNursery->blocks);
+ cap->r.rCurrentAlloc = bd;
+ IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
+ }
+ }
+ // Bump-pointer allocate n words from the chosen block.
+ p = bd->free;
+ bd->free += n;
+ return p;
+}
+
+/* ---------------------------------------------------------------------------
+ Allocate a fixed/pinned object.
+
+ We allocate small pinned objects into a single block, allocating a
+ new block when the current one overflows. The block is chained
+ onto the large_objects list of generation 0 step 0.
+
+ NOTE: The GC can't in general handle pinned objects. This
+ interface is only safe to use for ByteArrays, which have no
+ pointers and don't require scavenging. It works because the
+ block's descriptor has the BF_LARGE flag set, so the block is
+ treated as a large object and chained onto various lists, rather
+ than the individual objects being copied. However, when it comes
+ to scavenge the block, the GC will only scavenge the first object.
+ The reason is that the GC can't linearly scan a block of pinned
+ objects at the moment (doing so would require using the
+ mostly-copying techniques). But since we're restricting ourselves
+ to pinned ByteArrays, not scavenging is ok.
+
+ This function is called by newPinnedByteArray# which immediately
+ fills the allocated memory with a MutableByteArray#.
+ ------------------------------------------------------------------------- */
+
+StgPtr
+allocatePinned( nat n )
+{
+ StgPtr p;
+ bdescr *bd = pinned_object_block;
+
+ // n is a request in words (W_); see the /sizeof(W_) comparison below.
+ // If the request is for a large object, then allocate()
+ // will give us a pinned object anyway.
+ if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
+ // NOTE(review): TICK/CCS accounting is skipped on this path —
+ // presumably allocate() does its own; confirm against allocate().
+ return allocate(n);
+ }
+
+ // pinned_object_block is global state, so the whole small-object
+ // path runs under the storage-manager lock.
+ ACQUIRE_SM_LOCK;
+
+ TICK_ALLOC_HEAP_NOCTR(n);
+ CCS_ALLOC(CCCS,n);
+
+ // we always return 8-byte aligned memory. bd->free must be
+ // 8-byte aligned to begin with, so we just round up n to
+ // the nearest multiple of 8 bytes.
+ // On 32-bit (4-byte words) rounding the word count up to an even
+ // number yields 8-byte multiples; on 64-bit a word is already 8 bytes.
+ if (sizeof(StgWord) == 4) {
+ n = (n+1) & ~1;
+ }
+
+ // If we don't have a block of pinned objects yet, or the current
+ // one isn't large enough to hold the new object, allocate a new one.
+ if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
+ // BF_LARGE makes the GC treat the block as one large object
+ // (moved by relinking, never copied) — see the header comment.
+ pinned_object_block = bd = allocBlock();
+ dbl_link_onto(bd, &g0s0->large_objects);
+ bd->gen_no = 0;
+ bd->step = g0s0;
+ bd->flags = BF_PINNED | BF_LARGE;
+ bd->free = bd->start;
+ alloc_blocks++;
+ }
+
+ // Bump-pointer allocate n words from the pinned block.
+ p = bd->free;
+ bd->free += n;
+ RELEASE_SM_LOCK;
+ return p;
+}
+
+/* -----------------------------------------------------------------------------
+ This is the write barrier for MUT_VARs, a.k.a. IORefs. A
+ MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
+ is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
+ and is put on the mutable list.
+ -------------------------------------------------------------------------- */
+
+void
+dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
+{
+ Capability *cap = regTableToCapability(reg);
+ bdescr *bd;
+ if (p->header.info == &stg_MUT_VAR_CLEAN_info) {
+ p->header.info = &stg_MUT_VAR_DIRTY_info;
+ bd = Bdescr(p);
+ if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
+ }