/* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.53 2001/11/08 12:46:31 simonmar Exp $
+ * $Id: Storage.c,v 1.59 2002/02/04 20:21:22 sof Exp $
*
* (c) The GHC Team, 1998-1999
*
#include "Storage.h"
#include "Schedule.h"
+#include "OSThreads.h"
#include "StoragePriv.h"
+#include "RetainerProfile.h" // for counting memory blocks (memInventory)
+
StgClosure *caf_list = NULL;
bdescr *small_alloc_list; /* allocate()d small objects */
* simultaneous access by two STG threads.
*/
#ifdef SMP
-pthread_mutex_t sm_mutex = PTHREAD_MUTEX_INITIALIZER;
+Mutex sm_mutex = INIT_MUTEX_VAR;
#endif
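/* For illustration: on a pthreads platform, the Mutex abstraction from
 * OSThreads.h plausibly reduces to something like the sketch below.  This
 * is an assumed mapping, not the actual contents of OSThreads.h.
 */
#if defined(HAVE_PTHREAD_H)
typedef pthread_mutex_t Mutex;
#define INIT_MUTEX_VAR   PTHREAD_MUTEX_INITIALIZER
#define initMutex(m)     pthread_mutex_init((m), NULL)
#define ACQUIRE_LOCK(m)  pthread_mutex_lock(m)
#define RELEASE_LOCK(m)  pthread_mutex_unlock(m)
#endif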
/*
step *stp;
generation *gen;
- /* If we're doing heap profiling, we want a two-space heap with a
- * fixed-size allocation area so that we get roughly even-spaced
- * samples.
- */
-
- /* As an experiment, try a 2 generation collector
- */
-
-#if defined(PROFILING) || defined(DEBUG)
- if (RtsFlags.ProfFlags.doHeapProfile) {
- RtsFlags.GcFlags.generations = 1;
- RtsFlags.GcFlags.steps = 1;
- RtsFlags.GcFlags.oldGenFactor = 0;
- RtsFlags.GcFlags.heapSizeSuggestion = 0;
- }
-#endif
-
if (RtsFlags.GcFlags.maxHeapSize != 0 &&
RtsFlags.GcFlags.heapSizeSuggestion >
RtsFlags.GcFlags.maxHeapSize) {
initBlockAllocator();
+#if defined(SMP)
+  initMutex(&sm_mutex);
+#endif
+
/* allocate generation info array */
generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
* sizeof(struct _generation),
/* Tell GNU multi-precision pkg about our custom alloc functions */
mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
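/* Roughly what such an allocation hook has to do (a sketch only; the
 * hypothetical exampleAllocForGMP below assumes, as stgAllocForGMP does,
 * that GMP limbs live in the garbage-collected heap as ARR_WORDS closures):
 */
static void *
exampleAllocForGMP (size_t size_in_bytes)
{
    StgArrWords *arr;
    nat data_size_in_words;

    /* round the request up to a whole number of words */
    data_size_in_words = (size_in_bytes + sizeof(W_) - 1) / sizeof(W_);

    /* allocate room for the ARR_WORDS header plus the data */
    arr = (StgArrWords *)allocate(sizeofW(StgArrWords) + data_size_in_words);
    SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_size_in_words);

    /* GMP sees only the payload */
    return arr->payload;
}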
-#ifdef SMP
-  pthread_mutex_init(&sm_mutex, NULL);
-#endif
IF_DEBUG(gc, statDescribeGens());
* come to do a major GC we won't need the mut_link field
* any more and can use it as a STATIC_LINK.
*/
- ACQUIRE_LOCK(&sm_mutex);
+ ACQUIRE_SM_LOCK;
if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) {
((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
oldest_gen->mut_once_list = (StgMutClosure *)caf;
}
- RELEASE_LOCK(&sm_mutex);
+ RELEASE_SM_LOCK;
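/* The names suggest wrappers along these lines (assumed definitions, for
 * illustration): in non-SMP builds the storage-manager lock collapses to
 * a no-op, so single-threaded code pays nothing for the discipline.
 */
#if defined(SMP)
#define ACQUIRE_SM_LOCK  ACQUIRE_LOCK(&sm_mutex)
#define RELEASE_SM_LOCK  RELEASE_LOCK(&sm_mutex)
#else
#define ACQUIRE_SM_LOCK
#define RELEASE_SM_LOCK
#endif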
#ifdef PAR
/* If we are PAR or DIST then we never forget a CAF */
g0s0->blocks = NULL;
g0s0->n_blocks = 0;
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
- cap->rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
- cap->rCurrentNursery = cap->rNursery;
- for (bd = cap->rNursery; bd != NULL; bd = bd->link) {
+ cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+ cap->r.rCurrentNursery = cap->r.rNursery;
+ for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
bd->u.back = (bdescr *)cap;
}
}
ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes);
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
- for (bd = cap->rNursery; bd; bd = bd->link) {
+ for (bd = cap->r.rNursery; bd; bd = bd->link) {
bd->free = bd->start;
ASSERT(bd->gen_no == 0);
ASSERT(bd->step == g0s0);
IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
}
- cap->rCurrentNursery = cap->rNursery;
+ cap->r.rCurrentNursery = cap->r.rNursery;
}
#else
for (bd = g0s0->blocks; bd; bd = bd->link) {
// cons them on to the front of the list, not forgetting to update
// the back pointer on the tail of the list to point to the new block.
for (i=0; i < blocks; i++) {
+  // @LDV profiling
+  /*
+     processNursery() in LdvProfile.c assumes that every block group in
+     the nursery contains only a single block.  If a block group is ever
+     given more than one block, processNursery() must be updated to match.
+  */
bd = allocBlock();
bd->link = tail;
// double-link the nursery: we might need to insert blocks
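/* An illustrative sanity check (not in the source): the LDV census can
 * only walk the nursery block-by-block if the invariant described above
 * holds, i.e. each nursery bdescr heads a group of exactly one block.
 */
IF_DEBUG(sanity,
    { bdescr *b;
      for (b = g0s0->blocks; b != NULL; b = b->link) {
          ASSERT(b->blocks == 1);
      }
    });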
bdescr *bd;
StgPtr p;
- ACQUIRE_LOCK(&sm_mutex);
+ ACQUIRE_SM_LOCK;
TICK_ALLOC_HEAP_NOCTR(n);
CCS_ALLOC(CCCS,n);
* much difference.
*/
alloc_blocks += req_blocks;
- RELEASE_LOCK(&sm_mutex);
+ RELEASE_SM_LOCK;
return bd->start;
/* small allocation (<LARGE_OBJECT_THRESHOLD) */
p = alloc_Hp;
alloc_Hp += n;
- RELEASE_LOCK(&sm_mutex);
+ RELEASE_SM_LOCK;
return p;
}
StgPtr p;
bdescr *bd = pinned_object_block;
- ACQUIRE_LOCK(&sm_mutex);
+ ACQUIRE_SM_LOCK;
TICK_ALLOC_HEAP_NOCTR(n);
CCS_ALLOC(CCCS,n);
// If the request is for a large object, then allocate()
// will give us a pinned object anyway.
if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- RELEASE_LOCK(&sm_mutex);
+ RELEASE_SM_LOCK;
return allocate(n);
}
p = bd->free;
bd->free += n;
- RELEASE_LOCK(&sm_mutex);
+ RELEASE_SM_LOCK;
return p;
}
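/* Illustrative caller (hypothetical): pinned allocation is the right
 * choice for a buffer whose address escapes to foreign code, since the
 * GC collects around pinned objects but never moves them.
 */
static StgPtr
exampleForeignBuffer( nat n_words )
{
    StgPtr buf = allocatePinned(n_words);
    /* ... hand buf to foreign code; its address stays valid across GCs
     * for the lifetime of the object ... */
    return buf;
}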
+/* Approximate the number of bytes allocated since the last GC; used by
+ * the LDV profiler.
+ */
+lnat
+allocated_bytes( void )
+{
+    return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
+}
+
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
- for ( bd = cap->rCurrentNursery->link; bd != NULL; bd = bd->link ) {
+ for ( bd = cap->r.rCurrentNursery->link; bd != NULL; bd = bd->link ) {
allocated -= BLOCK_SIZE_W;
}
- if (cap->rCurrentNursery->free < cap->rCurrentNursery->start
+ if (cap->r.rCurrentNursery->free < cap->r.rCurrentNursery->start
+ BLOCK_SIZE_W) {
- allocated -= (cap->rCurrentNursery->start + BLOCK_SIZE_W)
- - cap->rCurrentNursery->free;
+ allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
+ - cap->r.rCurrentNursery->free;
}
}
for (bd = large_alloc_list; bd; bd = bd->link) {
total_blocks += bd->blocks;
}
-
+
+#ifdef PROFILING
+ if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
+ for (bd = firstStack; bd != NULL; bd = bd->link)
+ total_blocks += bd->blocks;
+ }
+#endif
+
// count the blocks allocated by the arena allocator
total_blocks += arenaBlocks();
}
}
+// handy function for use in gdb, because Bdescr() is inlined.
+extern bdescr *_bdescr( StgPtr p );
+
+bdescr *
+_bdescr( StgPtr p )
+{
+ return Bdescr(p);
+}
+
#endif
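/* Example from a debugger session (assuming a DEBUG-built RTS):
 *
 *   (gdb) print *_bdescr((StgPtr)p)
 *
 * shows the block descriptor for an arbitrary heap pointer, something the
 * inlined Bdescr() cannot do from inside gdb.
 */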