/* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.52 2001/10/18 14:41:01 simonmar Exp $
+ * $Id: Storage.c,v 1.79 2003/03/26 18:59:34 sof Exp $
*
* (c) The GHC Team, 1998-1999
*
#include "Storage.h"
#include "Schedule.h"
+#include "OSThreads.h"
#include "StoragePriv.h"
-#ifndef SMP
-nat nursery_blocks; /* number of blocks in the nursery */
-#endif
+#include "RetainerProfile.h" // for counting memory blocks (memInventory)
+
+#include <stdlib.h>
+#include <string.h>
StgClosure *caf_list = NULL;
bdescr *small_alloc_list; /* allocate()d small objects */
-bdescr *large_alloc_list; /* allocate()d large objects */
bdescr *pinned_object_block; /* allocate pinned objects into this block */
nat alloc_blocks; /* number of allocate()d blocks since GC */
nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
StgPtr alloc_Hp = NULL; /* next free byte in small_alloc_list */
StgPtr alloc_HpLim = NULL; /* end of block at small_alloc_list */
-generation *generations; /* all the generations */
-generation *g0; /* generation 0, for convenience */
-generation *oldest_gen; /* oldest generation, for convenience */
-step *g0s0; /* generation 0, step 0, for convenience */
+generation *generations = NULL; /* all the generations */
+generation *g0 = NULL; /* generation 0, for convenience */
+generation *oldest_gen = NULL; /* oldest generation, for convenience */
+step *g0s0 = NULL; /* generation 0, step 0, for convenience */
lnat total_allocated = 0; /* total memory allocated during run */
* simultaneous access by two STG threads.
*/
#ifdef SMP
-pthread_mutex_t sm_mutex = PTHREAD_MUTEX_INITIALIZER;
+Mutex sm_mutex = INIT_MUTEX_VAR;
#endif
/*
step *stp;
generation *gen;
- /* If we're doing heap profiling, we want a two-space heap with a
- * fixed-size allocation area so that we get roughly even-spaced
- * samples.
- */
-
- /* As an experiment, try a 2 generation collector
- */
-
-#if defined(PROFILING) || defined(DEBUG)
- if (RtsFlags.ProfFlags.doHeapProfile) {
- RtsFlags.GcFlags.generations = 1;
- RtsFlags.GcFlags.steps = 1;
- RtsFlags.GcFlags.oldGenFactor = 0;
- RtsFlags.GcFlags.heapSizeSuggestion = 0;
+ if (generations != NULL) {
+      // protect against multiple initialisation: a second call is a no-op
+ return;
}
-#endif
+ /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
+ * doing something reasonable.
+ */
+ ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info));
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
+ ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
+
if (RtsFlags.GcFlags.maxHeapSize != 0 &&
RtsFlags.GcFlags.heapSizeSuggestion >
RtsFlags.GcFlags.maxHeapSize) {
initBlockAllocator();
+#if defined(SMP)
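+  // initialise the lock that protects the storage manager's shared state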
+ initMutex(&sm_mutex);
+#endif
+
/* allocate generation info array */
generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
* sizeof(struct _generation),
stp = &generations[g].steps[s];
stp->no = s;
stp->blocks = NULL;
+ stp->n_to_blocks = 0;
stp->n_blocks = 0;
stp->gen = &generations[g];
stp->gen_no = g;
/* initialise the allocate() interface */
small_alloc_list = NULL;
- large_alloc_list = NULL;
alloc_blocks = 0;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
/* Tell GNU multi-precision pkg about our custom alloc functions */
mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
-#ifdef SMP
- pthread_mutex_init(&sm_mutex, NULL);
-#endif
-
IF_DEBUG(gc, statDescribeGens());
}
* come to do a major GC we won't need the mut_link field
* any more and can use it as a STATIC_LINK.
*/
- ACQUIRE_LOCK(&sm_mutex);
+ ACQUIRE_SM_LOCK;
- if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) {
- ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
- ((StgIndStatic *)caf)->static_link = caf_list;
- caf_list = caf;
- } else {
- ((StgIndStatic *)caf)->saved_info = NULL;
- ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
- oldest_gen->mut_once_list = (StgMutClosure *)caf;
- }
+ ((StgIndStatic *)caf)->saved_info = NULL;
+ ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
+ oldest_gen->mut_once_list = (StgMutClosure *)caf;
- RELEASE_LOCK(&sm_mutex);
+ RELEASE_SM_LOCK;
#ifdef PAR
/* If we are PAR or DIST then we never forget a CAF */
#endif /* PAR */
}
+// An alternate version of newCaf which is used for dynamically loaded
+// object code in GHCi. In this case we want to retain *all* CAFs in
+// the object code, because they might be demanded at any time from an
+// expression evaluated on the command line.
+//
+// The linker hackily arranges that references to newCaf from dynamic
+// code end up pointing to newDynCAF.
+void
+newDynCAF(StgClosure *caf)
+{
+ ACQUIRE_SM_LOCK;
+
+ ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
+ ((StgIndStatic *)caf)->static_link = caf_list;
+ caf_list = caf;
+
+ RELEASE_SM_LOCK;
+}
+
/* -----------------------------------------------------------------------------
Nursery management.
-------------------------------------------------------------------------- */
allocNurseries( void )
{
#ifdef SMP
- {
- Capability *cap;
- bdescr *bd;
-
- g0s0->blocks = NULL;
- g0s0->n_blocks = 0;
- for (cap = free_capabilities; cap != NULL; cap = cap->link) {
- cap->rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
- cap->rCurrentNursery = cap->rNursery;
- for (bd = cap->rNursery; bd != NULL; bd = bd->link) {
- bd->u.back = (bdescr *)cap;
- }
- }
+ Capability *cap;
+ bdescr *bd;
+
+ g0s0->blocks = NULL;
+ g0s0->n_blocks = 0;
+ for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+ cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+ cap->r.rCurrentNursery = cap->r.rNursery;
/* Set the back links to be equal to the Capability,
* so we can do slightly better informed locking.
*/
+ for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
+ bd->u.back = (bdescr *)cap;
+ }
}
#else /* SMP */
- nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
- g0s0->blocks = allocNursery(NULL, nursery_blocks);
- g0s0->n_blocks = nursery_blocks;
+ g0s0->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+ g0s0->n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
g0s0->to_blocks = NULL;
g0s0->n_to_blocks = 0;
- MainRegTable.rNursery = g0s0->blocks;
- MainRegTable.rCurrentNursery = g0s0->blocks;
+ MainCapability.r.rNursery = g0s0->blocks;
+ MainCapability.r.rCurrentNursery = g0s0->blocks;
/* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
#endif
}
ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes);
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
- for (bd = cap->rNursery; bd; bd = bd->link) {
+ for (bd = cap->r.rNursery; bd; bd = bd->link) {
bd->free = bd->start;
ASSERT(bd->gen_no == 0);
ASSERT(bd->step == g0s0);
IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
}
- cap->rCurrentNursery = cap->rNursery;
+ cap->r.rCurrentNursery = cap->r.rNursery;
}
#else
for (bd = g0s0->blocks; bd; bd = bd->link) {
ASSERT(bd->step == g0s0);
IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
}
- MainRegTable.rNursery = g0s0->blocks;
- MainRegTable.rCurrentNursery = g0s0->blocks;
+ MainCapability.r.rNursery = g0s0->blocks;
+ MainCapability.r.rCurrentNursery = g0s0->blocks;
#endif
}
bdescr *
-allocNursery (bdescr *last_bd, nat blocks)
+allocNursery (bdescr *tail, nat blocks)
{
bdescr *bd;
nat i;
- /* Allocate a nursery */
+  // Allocate a nursery: we allocate fresh blocks one at a time and
+  // cons them onto the front of the list, remembering to update the
+  // back pointer of the old front block so that it points at the new one.
for (i=0; i < blocks; i++) {
+      // @LDV profiling
+      /*
+         processNursery() in LdvProfile.c assumes that every block group
+         in the nursery contains only a single block.  If a block group
+         here is ever given more than one block, processNursery() must be
+         updated to match.
+      */
bd = allocBlock();
- bd->link = last_bd;
+ bd->link = tail;
+ // double-link the nursery: we might need to insert blocks
+ if (tail != NULL) {
+ tail->u.back = bd;
+ }
bd->step = g0s0;
bd->gen_no = 0;
bd->flags = 0;
bd->free = bd->start;
- last_bd = bd;
+ tail = bd;
}
- return last_bd;
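+  // the block now at the front of the list has nothing before it, so
+  // terminate the chain of back pointers here.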
+ tail->u.back = NULL;
+ return tail;
}
void
resizeNursery ( nat blocks )
{
bdescr *bd;
+ nat nursery_blocks;
#ifdef SMP
barf("resizeNursery: can't resize in SMP mode");
#endif
+ nursery_blocks = g0s0->n_blocks;
if (nursery_blocks == blocks) {
- ASSERT(g0s0->n_blocks == blocks);
return;
}
IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
blocks));
- for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
- next_bd = bd->link;
- freeGroup(bd);
- bd = next_bd;
+
+ bd = g0s0->blocks;
+ while (nursery_blocks > blocks) {
+ next_bd = bd->link;
+ next_bd->u.back = NULL;
+ nursery_blocks -= bd->blocks; // might be a large block
+ freeGroup(bd);
+ bd = next_bd;
}
g0s0->blocks = bd;
+  // freeing a large block may have taken us below the target size, so
+  // make up the difference.
+ if (nursery_blocks < blocks) {
+ g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
+ }
}
- g0s0->n_blocks = nursery_blocks = blocks;
+ g0s0->n_blocks = blocks;
+ ASSERT(countBlocks(g0s0->blocks) == g0s0->n_blocks);
}
/* -----------------------------------------------------------------------------
bdescr *bd;
StgPtr p;
- ACQUIRE_LOCK(&sm_mutex);
+ ACQUIRE_SM_LOCK;
TICK_ALLOC_HEAP_NOCTR(n);
CCS_ALLOC(CCCS,n);
bd->gen_no = 0;
bd->step = g0s0;
bd->flags = BF_LARGE;
- bd->free = bd->start;
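+  // record that the first n words of the group are in use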
+ bd->free = bd->start + n;
  /* These blocks are counted in alloc_blocks like everything else, but
   * they go onto g0s0's large_objects list rather than into the nursery,
   * so the garbage collector never copies them.
   */
alloc_blocks += req_blocks;
- RELEASE_LOCK(&sm_mutex);
+ RELEASE_SM_LOCK;
return bd->start;
/* small allocation (<LARGE_OBJECT_THRESHOLD) */
p = alloc_Hp;
alloc_Hp += n;
- RELEASE_LOCK(&sm_mutex);
+ RELEASE_SM_LOCK;
return p;
}
lnat
allocated_bytes( void )
{
- return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
+ lnat allocated;
+
+ allocated = alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp);
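+  // the slop at the end of the current pinned_object_block hasn't
+  // really been handed out yet, so don't count it as allocated.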
+ if (pinned_object_block != NULL) {
+ allocated -= (pinned_object_block->start + BLOCK_SIZE_W) -
+ pinned_object_block->free;
+ }
+
+ return allocated;
+}
+
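+// Flush the current allocation pointer back into the block descriptor,
+// so that small_alloc_list->free is accurate before anyone else walks
+// or counts the list.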
+void
+tidyAllocateLists (void)
+{
+ if (small_alloc_list != NULL) {
+ ASSERT(alloc_Hp >= small_alloc_list->start &&
+ alloc_Hp <= small_alloc_list->start + BLOCK_SIZE);
+ small_alloc_list->free = alloc_Hp;
+ }
}
/* ---------------------------------------------------------------------------
StgPtr p;
bdescr *bd = pinned_object_block;
- ACQUIRE_LOCK(&sm_mutex);
-
- TICK_ALLOC_HEAP_NOCTR(n);
- CCS_ALLOC(CCCS,n);
-
// If the request is for a large object, then allocate()
// will give us a pinned object anyway.
if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- RELEASE_LOCK(&sm_mutex);
return allocate(n);
}
+ ACQUIRE_SM_LOCK;
+
+ TICK_ALLOC_HEAP_NOCTR(n);
+ CCS_ALLOC(CCCS,n);
+
+  // we always return 8-byte aligned memory.  bd->free must be
+  // 8-byte aligned to begin with, so we just round n (a count of
+  // words) up to an even number of words on 32-bit platforms;
+  // on 64-bit platforms a single word is already 8 bytes.
+  if (sizeof(StgWord) == 4) {
+      n = (n+1) & ~1;
+  }
+
// If we don't have a block of pinned objects yet, or the current
// one isn't large enough to hold the new object, allocate a new one.
if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
p = bd->free;
bd->free += n;
- RELEASE_LOCK(&sm_mutex);
+ RELEASE_SM_LOCK;
return p;
}
StgArrWords* arr;
nat data_size_in_words, total_size_in_words;
- /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
- ASSERT(size_in_bytes % sizeof(W_) == 0);
-
- data_size_in_words = size_in_bytes / sizeof(W_);
+  /* round up to a whole number of words */
+  data_size_in_words  = (size_in_bytes + sizeof(W_) - 1) / sizeof(W_);
total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
/* allocate and fill it in. */
+ allocated_bytes();
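+  // subtract the space still unused in each capability's nursery: the
+  // whole blocks beyond rCurrentNursery, plus the free part of the
+  // current block.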
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
- for ( bd = cap->rCurrentNursery->link; bd != NULL; bd = bd->link ) {
+ for ( bd = cap->r.rCurrentNursery->link; bd != NULL; bd = bd->link ) {
allocated -= BLOCK_SIZE_W;
}
- if (cap->rCurrentNursery->free < cap->rCurrentNursery->start
+ if (cap->r.rCurrentNursery->free < cap->r.rCurrentNursery->start
+ BLOCK_SIZE_W) {
- allocated -= (cap->rCurrentNursery->start + BLOCK_SIZE_W)
- - cap->rCurrentNursery->free;
+ allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
+ - cap->r.rCurrentNursery->free;
}
}
#else /* !SMP */
- bdescr *current_nursery = MainRegTable.rCurrentNursery;
+ bdescr *current_nursery = MainCapability.r.rCurrentNursery;
- allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
+ allocated = (g0s0->n_blocks * BLOCK_SIZE_W) + allocated_bytes();
for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
allocated -= BLOCK_SIZE_W;
}
for (bd = small_alloc_list; bd; bd = bd->link) {
total_blocks += bd->blocks;
}
- for (bd = large_alloc_list; bd; bd = bd->link) {
- total_blocks += bd->blocks;
+
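+  // when retainer profiling, the retainer stack is kept in separately
+  // allocated blocks, so include those in the inventory too.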
+#ifdef PROFILING
+ if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
+ total_blocks += retainerStackBlocks();
}
-
+#endif
+
// count the blocks allocated by the arena allocator
total_blocks += arenaBlocks();
ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
}
-static nat
+
+nat
countBlocks(bdescr *bd)
{
nat n;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
- if (g == 0 && s == 0) { continue; }
- checkHeap(generations[g].steps[s].blocks);
- checkChain(generations[g].steps[s].large_objects);
ASSERT(countBlocks(generations[g].steps[s].blocks)
== generations[g].steps[s].n_blocks);
ASSERT(countBlocks(generations[g].steps[s].large_objects)
== generations[g].steps[s].n_large_blocks);
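+      // g0s0 (the nursery) only has its block counts checked above; we
+      // don't walk its heap or large-object chain.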
+ if (g == 0 && s == 0) { continue; }
+ checkHeap(generations[g].steps[s].blocks);
+ checkChain(generations[g].steps[s].large_objects);
if (g > 0) {
checkMutableList(generations[g].mut_list, g);
checkMutOnceList(generations[g].mut_once_list, g);
}
}
+// handy function for use in gdb, because Bdescr() is inlined.
+extern bdescr *_bdescr( StgPtr p );
+
+bdescr *
+_bdescr( StgPtr p )
+{
+ return Bdescr(p);
+}
+
#endif