/* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.41 2001/07/23 17:23:20 simonmar Exp $
+ * $Id: Storage.c,v 1.45 2001/08/08 11:27:17 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
bdescr *small_alloc_list; /* allocate()d small objects */
bdescr *large_alloc_list; /* allocate()d large objects */
+bdescr *pinned_object_block; /* allocate pinned objects into this block */
nat alloc_blocks; /* number of allocate()d blocks since GC */
nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
static void stgDeallocForGMP (void *ptr, size_t size);
void
-initStorage (void)
+initStorage( void )
{
nat g, s;
step *stp;
}
#endif
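+  /* A maxHeapSize of zero means "no maximum", in which case there is
+     nothing to clamp; otherwise raise the maximum so that it covers
+     the heap size suggestion. */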
- if (RtsFlags.GcFlags.heapSizeSuggestion >
+ if (RtsFlags.GcFlags.maxHeapSize != 0 &&
+ RtsFlags.GcFlags.heapSizeSuggestion >
RtsFlags.GcFlags.maxHeapSize) {
RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
}
generations[g].steps[s].to = &generations[g+1].steps[0];
}
- /* The oldest generation has one step and it is compacted. */
- if (RtsFlags.GcFlags.compact) {
- oldest_gen->steps[0].is_compacted = 1;
- }
+ /* The oldest generation has one step. */
oldest_gen->steps[0].to = &oldest_gen->steps[0];
/* generation 0 is special: that's the nursery */
-------------------------------------------------------------------------- */
StgPtr
-allocate(nat n)
+allocate( nat n )
{
bdescr *bd;
StgPtr p;
return p;
}
-lnat allocated_bytes(void)
+lnat
+allocated_bytes( void )
{
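+  // capacity of all blocks handed out by allocate() so far
+  // (alloc_blocks * BLOCK_SIZE_W words), less the unused tail of the
+  // current allocation block (alloc_HpLim - alloc_Hp)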
return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
}
+/* ---------------------------------------------------------------------------
+ Allocate a fixed/pinned object.
+
+ We allocate small pinned objects into a single block, allocating a
+ new block when the current one overflows. The block is chained
+   onto the large_objects list of generation 0, step 0.
+
+ NOTE: The GC can't in general handle pinned objects. This
+ interface is only safe to use for ByteArrays, which have no
+ pointers and don't require scavenging. It works because the
+ block's descriptor has the BF_LARGE flag set, so the block is
+ treated as a large object and chained onto various lists, rather
+   than the individual objects being copied.  However, when it comes
+   to scavenging the block, the GC will only scavenge the first
+   object.  The reason is that the GC can't currently scan a block
+   of pinned objects linearly (doing so would require the
+   mostly-copying techniques).  But since we restrict ourselves to
+   pinned ByteArrays, not scavenging them is OK.
+
+   This function is called by newPinnedByteArray#, which immediately
+   initialises the allocated memory as a MutableByteArray#.
+ ------------------------------------------------------------------------- */
+
+StgPtr
+allocatePinned( nat n )
+{
+ StgPtr p;
+ bdescr *bd = pinned_object_block;
+
+ ACQUIRE_LOCK(&sm_mutex);
+
+ TICK_ALLOC_HEAP_NOCTR(n);
+ CCS_ALLOC(CCCS,n);
+
+ // If the request is for a large object, then allocate()
+ // will give us a pinned object anyway.
+ if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
+ RELEASE_LOCK(&sm_mutex);
+ return allocate(n);
+ }
+
+ // If we don't have a block of pinned objects yet, or the current
+ // one isn't large enough to hold the new object, allocate a new one.
+ if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
+ pinned_object_block = bd = allocBlock();
+ dbl_link_onto(bd, &g0s0->large_objects);
+ bd->gen_no = 0;
+ bd->step = g0s0;
+ bd->flags = BF_LARGE;
+ bd->free = bd->start;
+ alloc_blocks++;
+ }
+
+ p = bd->free;
+ bd->free += n;
+ RELEASE_LOCK(&sm_mutex);
+ return p;
+}
+
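+/* A minimal usage sketch (editorial, not part of this patch): a
+   caller wanting a pinned buffer of `payload_words` words -- a
+   hypothetical size, not a name from the RTS -- would bump-allocate
+   it and then lay down an object header before the GC can run:
+
+       StgPtr p = allocatePinned(payload_words);
+       // ... write a ByteArray-style header and payload at p ...
+
+   The address p stays stable for the object's lifetime, because the
+   containing block carries BF_LARGE and is therefore never moved. */
+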
/* -----------------------------------------------------------------------------
Allocation functions for GMP.
continue;
}
stp = &generations[g].steps[s];
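+      // the step's ordinary blocks and its large-object blocks are
+      // all live; the block currently being filled (hp_bd) is
+      // accounted for separately below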
- live += (stp->n_blocks - 1) * BLOCK_SIZE_W;
+ live += (stp->n_large_blocks + stp->n_blocks - 1) * BLOCK_SIZE_W;
if (stp->hp_bd != NULL) {
live += ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start)
/ sizeof(W_);
extern lnat
calcNeeded(void)
{
- lnat needed = 0;
- nat g, s;
- step *stp;
-
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- if (g == 0 && s == 0) { continue; }
- stp = &generations[g].steps[s];
- if (generations[g].steps[0].n_blocks > generations[g].max_blocks
- && stp->is_compacted == 0) {
- needed += 2 * stp->n_blocks;
- } else {
- needed += stp->n_blocks;
- }
+ lnat needed = 0;
+ nat g, s;
+ step *stp;
+
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ if (g == 0 && s == 0) { continue; }
+ stp = &generations[g].steps[s];
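+	    // a step that will be collected by copying needs room for
+	    // its to-space as well, hence 2 * n_blocks; a compacted
+	    // step is collected in place and needs no extra headroom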
+ if (generations[g].steps[0].n_blocks +
+ generations[g].steps[0].n_large_blocks
+ > generations[g].max_blocks
+ && stp->is_compacted == 0) {
+ needed += 2 * stp->n_blocks;
+ } else {
+ needed += stp->n_blocks;
+ }
+ }
}
- }
- return needed;
+ return needed;
}
/* -----------------------------------------------------------------------------
{
nat n;
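+  // count actual blocks rather than descriptors: a block group (for
+  // example, one holding a large object) has bd->blocks > 1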
for (n=0; bd != NULL; bd=bd->link) {
- n++;
+ n += bd->blocks;
}
return n;
}
for (s = 0; s < generations[g].n_steps; s++) {
if (g == 0 && s == 0) { continue; }
checkHeap(generations[g].steps[s].blocks);
+ checkChain(generations[g].steps[s].large_objects);
ASSERT(countBlocks(generations[g].steps[s].blocks)
== generations[g].steps[s].n_blocks);
- checkChain(generations[g].steps[s].large_objects);
+ ASSERT(countBlocks(generations[g].steps[s].large_objects)
+ == generations[g].steps[s].n_large_blocks);
if (g > 0) {
checkMutableList(generations[g].mut_list, g);
checkMutOnceList(generations[g].mut_once_list, g);