/* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.22 2000/01/12 15:15:18 simonmar Exp $
+ * $Id: Storage.c,v 1.33 2001/01/24 15:46:19 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
#include "Hooks.h"
#include "BlockAlloc.h"
#include "MBlock.h"
-#include "gmp.h"
#include "Weak.h"
#include "Sanity.h"
generation *oldest_gen; /* oldest generation, for convenience */
step *g0s0; /* generation 0, step 0, for convenience */
+lnat total_allocated = 0; /* total memory allocated during run */
+
/*
* Storage manager mutex: protects all the above state from
* simultaneous access by two STG threads.
initStorage (void)
{
nat g, s;
- step *step;
+ step *stp;
generation *gen;
/* If we're doing heap profiling, we want a two-space heap with a
/* Initialise all steps */
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
- step = &generations[g].steps[s];
- step->no = s;
- step->blocks = NULL;
- step->n_blocks = 0;
- step->gen = &generations[g];
- step->hp = NULL;
- step->hpLim = NULL;
- step->hp_bd = NULL;
- step->scan = NULL;
- step->scan_bd = NULL;
- step->large_objects = NULL;
- step->new_large_objects = NULL;
- step->scavenged_large_objects = NULL;
+ stp = &generations[g].steps[s];
+ stp->no = s;
+ stp->blocks = NULL;
+ stp->n_blocks = 0;
+ stp->gen = &generations[g];
+ stp->hp = NULL;
+ stp->hpLim = NULL;
+ stp->hp_bd = NULL;
+ stp->scan = NULL;
+ stp->scan_bd = NULL;
+ stp->large_objects = NULL;
+ stp->new_large_objects = NULL;
+ stp->scavenged_large_objects = NULL;
}
}
alloc_blocks = 0;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
-#ifdef COMPILER
/* Tell GNU multi-precision pkg about our custom alloc functions */
mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
-#endif
#ifdef SMP
pthread_mutex_init(&sm_mutex, NULL);
void
exitStorage (void)
{
- stat_exit(calcAllocated());
+ stat_exit(calcAllocated());
}
+/* -----------------------------------------------------------------------------
+ CAF management.
+ -------------------------------------------------------------------------- */
+
void
newCAF(StgClosure* caf)
{
* any more and can use it as a STATIC_LINK.
*/
ACQUIRE_LOCK(&sm_mutex);
+
+ ASSERT( ((StgMutClosure*)caf)->mut_link == NULL );
((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
oldest_gen->mut_once_list = (StgMutClosure *)caf;
-#ifdef DEBUG
- {
- const StgInfoTable *info;
-
- info = get_itbl(caf);
- ASSERT(info->type == IND_STATIC);
-#if 0
- STATIC_LINK2(info,caf) = caf_list;
- caf_list = caf;
-#endif
+#ifdef GHCI
+ /* For dynamically-loaded code, we retain all the CAFs. There is no
+ * way of knowing which ones we'll need in the future.
+ */
+ if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) {
+ caf->payload[2] = caf_list; /* IND_STATIC_LINK2() */
+ caf_list = caf;
}
#endif
+
+#ifdef INTERPRETER
+ /* If we're Hugs, we also have to put it in the CAF table, so that
+ the CAF can be reverted. When reverting, CAFs created by compiled
+ code are recorded in the CAF table, which lives outside the
+ heap, in mallocville. CAFs created by interpreted code are
+ chained together via the link fields in StgCAFs, and are not
+ recorded in the CAF table.
+ */
+ ASSERT( get_itbl(caf)->type == THUNK_STATIC );
+ addToECafTable ( caf, get_itbl(caf) );
+#endif
+
+ RELEASE_LOCK(&sm_mutex);
+}
+
#ifdef GHCI
/* Mark every CAF on caf_list as a GC root.  The list is threaded
 * through the closures' STATIC_LINK2 fields (see newCAF, which chains
 * dynamically-loaded CAFs onto caf_list).
 */
void
markCafs( void )
{
  StgClosure *c = caf_list;

  while (c != NULL) {
    MarkRoot(c);
    c = STATIC_LINK2(get_itbl(c),c);
  }
}
#endif /* GHCI */
+
#ifdef INTERPRETER
/* Register a CAF created by interpreted (Hugs) code.  Such CAFs are
 * chained together via their link fields onto ecafList; unlike
 * compiled-code CAFs they are not recorded in the CAF table (see the
 * comment in newCAF).
 */
void
newCAF_made_by_Hugs(StgCAF* caf)
{
  ACQUIRE_LOCK(&sm_mutex);

  ASSERT( get_itbl(caf)->type == CAF_ENTERED );
  /* The CAF lives in the old generation, so record it in the
   * old-to-new pointer set for the generational GC.
   */
  recordOldToNewPtrs((StgMutClosure*)caf);
  /* Prepend caf to ecafList.  BUG FIX: this previously read
   * "ecafList = caf->link;", which reassigned the list head its own
   * old value (caf->link had just been set to ecafList), so the new
   * CAF was never actually added to the list.
   */
  caf->link = ecafList;
  ecafList = caf;

  RELEASE_LOCK(&sm_mutex);
}
#endif
+
#ifdef INTERPRETER
/* These initialisations are critical for correct operation
   on the first call of addToECafTable.
*/
StgCAF* ecafList = END_ECAF_LIST;   /* CAFs made by interpreted code  */
StgCAFTabEntry* ecafTable = NULL;   /* CAFs made by compiled code     */
StgInt usedECafTable = 0;           /* number of entries in use       */
StgInt sizeECafTable = 0;           /* number of entries allocated    */


/* Forget all recorded compiled-code CAFs.  The table's storage is
 * kept for reuse by subsequent addToECafTable calls.
 */
void clearECafTable ( void )
{
   usedECafTable = 0;
}

/* Record a compiled-code CAF, together with its original info table
 * (needed so the CAF can be reverted later), growing the table as
 * required.
 */
void addToECafTable ( StgClosure* closure, StgInfoTable* origItbl )
{
   StgInt i;
   StgCAFTabEntry* et2;
   if (usedECafTable == sizeECafTable) {
      /* Double the table; the initial table size is 8. */
      sizeECafTable *= 2;
      if (sizeECafTable == 0) sizeECafTable = 8;
      et2 = stgMallocBytes (
               sizeECafTable * sizeof(StgCAFTabEntry),
               "addToECafTable" );
      for (i = 0; i < usedECafTable; i++)
         et2[i] = ecafTable[i];
      /* free(NULL) is a no-op, so the first (ecafTable == NULL) call
       * needs no special case.
       */
      free(ecafTable);
      ecafTable = et2;
   }
   ecafTable[usedECafTable].closure  = closure;
   ecafTable[usedECafTable].origItbl = origItbl;
   usedECafTable++;
}
#endif
/* -----------------------------------------------------------------------------
Nursery management.
* (eg. running threads), so garbage collecting early won't make
* much difference.
*/
+ alloc_blocks += req_blocks;
RELEASE_LOCK(&sm_mutex);
return bd->start;
/* allocate and fill it in. */
arr = (StgArrWords *)allocate(total_size_in_words);
- SET_ARR_HDR(arr, &ARR_WORDS_info, CCCS, data_size_in_words);
+ SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_size_in_words);
/* and return a ptr to the goods inside the array */
return(BYTE_ARR_CTS(arr));
}
#endif
+ total_allocated += allocated;
return allocated;
}
{
nat g, s;
lnat live = 0;
- step *step;
+ step *stp;
if (RtsFlags.GcFlags.generations == 1) {
live = (g0s0->to_blocks - 1) * BLOCK_SIZE_W +
if (g == 0 && s == 0) {
continue;
}
- step = &generations[g].steps[s];
- live += (step->n_blocks - 1) * BLOCK_SIZE_W +
- ((lnat)step->hp_bd->free - (lnat)step->hp_bd->start) / sizeof(W_);
+ stp = &generations[g].steps[s];
+ live += (stp->n_blocks - 1) * BLOCK_SIZE_W +
+ ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start) / sizeof(W_);
}
}
return live;
{
lnat needed = 0;
nat g, s;
- step *step;
+ step *stp;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
if (g == 0 && s == 0) { continue; }
- step = &generations[g].steps[s];
+ stp = &generations[g].steps[s];
if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
- needed += 2 * step->n_blocks;
+ needed += 2 * stp->n_blocks;
} else {
- needed += step->n_blocks;
+ needed += stp->n_blocks;
}
}
}
memInventory(void)
{
nat g, s;
- step *step;
+ step *stp;
bdescr *bd;
lnat total_blocks = 0, free_blocks = 0;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
- step = &generations[g].steps[s];
- total_blocks += step->n_blocks;
+ stp = &generations[g].steps[s];
+ total_blocks += stp->n_blocks;
if (RtsFlags.GcFlags.generations == 1) {
/* two-space collector has a to-space too :-) */
total_blocks += g0s0->to_blocks;
}
- for (bd = step->large_objects; bd; bd = bd->link) {
+ for (bd = stp->large_objects; bd; bd = bd->link) {
total_blocks += bd->blocks;
/* hack for megablock groups: they have an extra block or two in
the second and subsequent megablocks where the block