#include <string.h>
/*
- * All these globals require sm_mutex to access in SMP mode.
+ * All these globals require sm_mutex to access in THREADED_RTS mode.
*/
StgClosure *caf_list = NULL;
StgClosure *revertible_caf_list = NULL;
ullong total_allocated = 0; /* total memory allocated during run */
nat n_nurseries = 0; /* == RtsFlags.ParFlags.nNodes, convenience */
-step *nurseries = NULL; /* array of nurseries, >1 only if SMP */
+step *nurseries = NULL; /* array of nurseries, >1 only if THREADED_RTS */
+#ifdef THREADED_RTS
/*
* Storage manager mutex: protects all the above state from
* simultaneous access by two STG threads.
*/
-#ifdef SMP
-Mutex sm_mutex = INIT_MUTEX_VAR;
+Mutex sm_mutex;
+/*
+ * This mutex is used by atomicModifyMutVar# only
+ */
+Mutex atomic_modify_mutvar_mutex;
#endif
+
/*
* Forward references
*/
initBlockAllocator();
-#if defined(SMP)
+#if defined(THREADED_RTS)
initMutex(&sm_mutex);
+ initMutex(&atomic_modify_mutvar_mutex);
#endif
ACQUIRE_SM_LOCK;
g0->steps = stgMallocBytes (sizeof(struct step_), "initStorage: steps");
}
-#ifdef SMP
+#ifdef THREADED_RTS
n_nurseries = n_capabilities;
nurseries = stgMallocBytes (n_nurseries * sizeof(struct step_),
"initStorage: nurseries");
}
}
-#ifdef SMP
+#ifdef THREADED_RTS
for (s = 0; s < n_nurseries; s++) {
initStep(&nurseries[s], 0, s);
}
}
oldest_gen->steps[0].to = &oldest_gen->steps[0];
-#ifdef SMP
+#ifdef THREADED_RTS
for (s = 0; s < n_nurseries; s++) {
nurseries[s].to = generations[0].steps[0].to;
}
}
}
-#ifdef SMP
+#ifdef THREADED_RTS
if (RtsFlags.GcFlags.generations == 1) {
- errorBelch("-G1 is incompatible with SMP");
+ errorBelch("-G1 is incompatible with -threaded");
stg_exit(EXIT_FAILURE);
}
#endif
static void
assignNurseriesToCapabilities (void)
{
-#ifdef SMP
+#ifdef THREADED_RTS
nat i;
for (i = 0; i < n_nurseries; i++) {
capabilities[i].r.rCurrentNursery = nurseries[i].blocks;
capabilities[i].r.rCurrentAlloc = NULL;
}
-#else /* SMP */
+#else /* THREADED_RTS */
MainCapability.r.rNursery = &nurseries[0];
MainCapability.r.rCurrentNursery = nurseries[0].blocks;
MainCapability.r.rCurrentAlloc = NULL;
}
/* -----------------------------------------------------------------------------
+ This is the write barrier for MUT_VARs, a.k.a. IORefs. A
+ MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
+ is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
+ and is put on the mutable list.
+ -------------------------------------------------------------------------- */
+
+/* reg: STG register table of the writing capability (mapped back to its
+   Capability via regTableToCapability); p: the MUT_VAR being written. */
+void
+dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
+{
+ Capability *cap = regTableToCapability(reg);
+ bdescr *bd;
+ if (p->header.info == &stg_MUT_VAR_CLEAN_info) {
+ p->header.info = &stg_MUT_VAR_DIRTY_info;
+ bd = Bdescr((StgPtr)p);
+ /* Only record on the mutable list when the MUT_VAR lives in an old
+ generation (gen_no > 0); gen-0 objects are presumably reached by
+ the GC without a mutable-list entry — NOTE(review): confirm. */
+ if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
+ }
+}
+
+/* -----------------------------------------------------------------------------
Allocation functions for GMP.
These all use the allocate() interface - we can't have any garbage
total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
/* allocate and fill it in. */
-#if defined(SMP)
+#if defined(THREADED_RTS)
arr = (StgArrWords *)allocateLocal(myTask()->cap, total_size_in_words);
#else
arr = (StgArrWords *)allocateLocal(&MainCapability, total_size_in_words);
allocated += countNurseryBlocks() * BLOCK_SIZE_W;
{
-#ifdef SMP
+#ifdef THREADED_RTS
nat i;
for (i = 0; i < n_nurseries; i++) {
Capability *cap;
/* count the blocks we currently have */
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (i = 0; i < n_capabilities; i++) {
+ for (bd = capabilities[i].mut_lists[g]; bd != NULL; bd = bd->link) {
+ total_blocks += bd->blocks;
+ }
+ }
for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
total_blocks += bd->blocks;
}
for (i = 0; i < n_nurseries; i++) {
total_blocks += stepBlocks(&nurseries[i]);
}
-#ifdef SMP
+#ifdef THREADED_RTS
// We put pinned object blocks in g0s0, so better count blocks there too.
total_blocks += stepBlocks(g0s0);
#endif