/* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.59 2002/02/04 20:21:22 sof Exp $
+ * $Id: Storage.c,v 1.79 2003/03/26 18:59:34 sof Exp $
*
* (c) The GHC Team, 1998-1999
*
#include "RetainerProfile.h" // for counting memory blocks (memInventory)
+#include <stdlib.h>
+#include <string.h>
+
StgClosure *caf_list = NULL;
bdescr *small_alloc_list; /* allocate()d small objects */
-bdescr *large_alloc_list; /* allocate()d large objects */
bdescr *pinned_object_block; /* allocate pinned objects into this block */
nat alloc_blocks; /* number of allocate()d blocks since GC */
nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
StgPtr alloc_Hp = NULL; /* next free byte in small_alloc_list */
StgPtr alloc_HpLim = NULL; /* end of block at small_alloc_list */
-generation *generations; /* all the generations */
-generation *g0; /* generation 0, for convenience */
-generation *oldest_gen; /* oldest generation, for convenience */
-step *g0s0; /* generation 0, step 0, for convenience */
+generation *generations = NULL; /* all the generations */
+generation *g0 = NULL; /* generation 0, for convenience */
+generation *oldest_gen = NULL; /* oldest generation, for convenience */
+step *g0s0 = NULL; /* generation 0, step 0, for convenience */
lnat total_allocated = 0; /* total memory allocated during run */
step *stp;
generation *gen;
+ if (generations != NULL) {
+ // multi-init protection
+ return;
+ }
+
+ /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
+ * doing something reasonable.
+ */
+ ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info));
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
+ ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
+
if (RtsFlags.GcFlags.maxHeapSize != 0 &&
RtsFlags.GcFlags.heapSizeSuggestion >
RtsFlags.GcFlags.maxHeapSize) {
initBlockAllocator();
#if defined(SMP)
- initCondition(&sm_mutex);
+ initMutex(&sm_mutex);
#endif
/* allocate generation info array */
stp = &generations[g].steps[s];
stp->no = s;
stp->blocks = NULL;
+ stp->n_to_blocks = 0;
stp->n_blocks = 0;
stp->gen = &generations[g];
stp->gen_no = g;
/* initialise the allocate() interface */
small_alloc_list = NULL;
- large_alloc_list = NULL;
alloc_blocks = 0;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
/* Tell GNU multi-precision pkg about our custom alloc functions */
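  /* (so GMP's limb data for Integers is allocated in the GHC heap, as
   * StgArrWords objects; see stgAllocForGMP below) */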
mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
-#if defined(SMP)
- initMutex(&sm_mutex);
-#endif
-
IF_DEBUG(gc, statDescribeGens());
}
*/
ACQUIRE_SM_LOCK;
- if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) {
- ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
- ((StgIndStatic *)caf)->static_link = caf_list;
- caf_list = caf;
- } else {
- ((StgIndStatic *)caf)->saved_info = NULL;
- ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
- oldest_gen->mut_once_list = (StgMutClosure *)caf;
- }
+ ((StgIndStatic *)caf)->saved_info = NULL;
+ ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
+ oldest_gen->mut_once_list = (StgMutClosure *)caf;
RELEASE_SM_LOCK;
#endif /* PAR */
}
+// An alternative version of newCaf, used for dynamically loaded
+// object code in GHCi. In this case we want to retain *all* CAFs in
+// the object code, because they might be demanded at any time from an
+// expression evaluated on the command line.
+//
+// The linker hackily arranges that references to newCaf from dynamic
+// code end up pointing to newDynCAF.
+void
+newDynCAF(StgClosure *caf)
+{
+ ACQUIRE_SM_LOCK;
+
+ ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
+ ((StgIndStatic *)caf)->static_link = caf_list;
+ caf_list = caf;
+
+ RELEASE_SM_LOCK;
+}
+
/* -----------------------------------------------------------------------------
Nursery management.
-------------------------------------------------------------------------- */
allocNurseries( void )
{
#ifdef SMP
- {
- Capability *cap;
- bdescr *bd;
-
- g0s0->blocks = NULL;
- g0s0->n_blocks = 0;
- for (cap = free_capabilities; cap != NULL; cap = cap->link) {
- cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
- cap->r.rCurrentNursery = cap->r.rNursery;
- for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
- bd->u.back = (bdescr *)cap;
- }
- }
+ Capability *cap;
+ bdescr *bd;
+
+ g0s0->blocks = NULL;
+ g0s0->n_blocks = 0;
+ for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+ cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+ cap->r.rCurrentNursery = cap->r.rNursery;
/* Set the back links to be equal to the Capability,
* so we can do slightly better informed locking.
*/
+ for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
+ bd->u.back = (bdescr *)cap;
+ }
}
#else /* SMP */
g0s0->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
bd->gen_no = 0;
bd->step = g0s0;
bd->flags = BF_LARGE;
- bd->free = bd->start;
+ bd->free = bd->start + n;
/* don't add these blocks to alloc_blocks, since we're assuming
* that large objects are likely to remain live for quite a while
* (eg. running threads), so garbage collecting early won't make
lnat
allocated_bytes( void )
{
- return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
+ lnat allocated;
+
+ allocated = alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp);
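+  // alloc_blocks counts the pinned_object_block as a whole block, but
+  // only the part up to its free pointer has actually been handed out,
+  // so subtract the unused remainder of that block.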
+ if (pinned_object_block != NULL) {
+ allocated -= (pinned_object_block->start + BLOCK_SIZE_W) -
+ pinned_object_block->free;
+ }
+
+ return allocated;
+}
+
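+// Bring the block descriptor for the current small-object allocation
+// block up to date by copying alloc_Hp into its free pointer.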
+void
+tidyAllocateLists (void)
+{
+ if (small_alloc_list != NULL) {
+ ASSERT(alloc_Hp >= small_alloc_list->start &&
+           alloc_Hp <= small_alloc_list->start + BLOCK_SIZE_W);
+ small_alloc_list->free = alloc_Hp;
+ }
}
/* ---------------------------------------------------------------------------
StgPtr p;
bdescr *bd = pinned_object_block;
+ // If the request is for a large object, then allocate()
+ // will give us a pinned object anyway.
+ if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
+ return allocate(n);
+ }
+
ACQUIRE_SM_LOCK;
TICK_ALLOC_HEAP_NOCTR(n);
CCS_ALLOC(CCCS,n);
- // If the request is for a large object, then allocate()
- // will give us a pinned object anyway.
- if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- RELEASE_SM_LOCK;
- return allocate(n);
+ // we always return 8-byte aligned memory. bd->free must be
+ // 8-byte aligned to begin with, so we just round up n to
+ // the nearest multiple of 8 bytes.
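+  // For example, a request for 3 words (12 bytes) on a 32-bit platform
+  // is rounded up to 4 words (16 bytes).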
+ if (sizeof(StgWord) == 4) {
+ n = (n+1) & ~1;
}
// If we don't have a block of pinned objects yet, or the current
StgArrWords* arr;
nat data_size_in_words, total_size_in_words;
- /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
- ASSERT(size_in_bytes % sizeof(W_) == 0);
-
- data_size_in_words = size_in_bytes / sizeof(W_);
+ /* round up to a whole number of words */
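+  /* (e.g. a 10-byte request on a platform with 4-byte words becomes 3 data words) */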
+  data_size_in_words  = (size_in_bytes + sizeof(W_) - 1) / sizeof(W_);
total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
/* allocate and fill it in. */
for (bd = small_alloc_list; bd; bd = bd->link) {
total_blocks += bd->blocks;
}
- for (bd = large_alloc_list; bd; bd = bd->link) {
- total_blocks += bd->blocks;
- }
#ifdef PROFILING
if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
- for (bd = firstStack; bd != NULL; bd = bd->link)
- total_blocks += bd->blocks;
+ total_blocks += retainerStackBlocks();
}
#endif