, ( "ticky" , NoArg (addWay WayTicky) )
, ( "parallel" , NoArg (addWay WayPar) )
, ( "gransim" , NoArg (addWay WayGran) )
- , ( "smp" , NoArg (addWay WaySMP) )
+ , ( "smp" , NoArg (addWay WayThreaded) ) -- backwards compat.
, ( "debug" , NoArg (addWay WayDebug) )
, ( "ndp" , NoArg (addWay WayNDP) )
, ( "threaded" , NoArg (addWay WayThreaded) )
| WayTicky
| WayPar
| WayGran
- | WaySMP
| WayNDP
| WayUser_a
| WayUser_b
, "-optc-DGRAN"
, "-package concurrent" ]),
- (WaySMP, Way "s" True "SMP"
- [
-#if !defined(mingw32_TARGET_OS)
- "-optc-pthread"
-#endif
-#if !defined(mingw32_TARGET_OS) && !defined(freebsd_TARGET_OS)
- , "-optl-pthread"
-#endif
- ]),
-
(WayNDP, Way "ndp" False "Nested data parallelism"
[ "-fparr"
, "-fflatten"]),
StgHeader header;
StgClosure *volatile current_value;
StgTVarWaitQueue *volatile first_wait_queue_entry;
-#if defined(SMP)
+#if defined(THREADED_RTS)
StgInt volatile num_updates;
#endif
} StgTVar;
StgTVar *tvar;
StgClosure *expected_value;
StgClosure *new_value;
-#if defined(SMP)
+#if defined(THREADED_RTS)
StgInt num_updates;
#endif
} TRecEntry;
#include "gmp.h" // Needs MP_INT definition
/*
- * Spark pools: used to store pending sparks (SMP & PARALLEL_HASKELL only)
+ * Spark pools: used to store pending sparks
+ * (THREADED_RTS & PARALLEL_HASKELL only)
* This is a circular buffer. Invariants:
* - base <= hd < lim
* - base <= tl < lim
struct bdescr_ *rCurrentNursery; /* Hp/HpLim point into this block */
struct bdescr_ *rCurrentAlloc; /* for allocation using allocate() */
StgWord rHpAlloc; /* number of *bytes* being allocated in heap */
- // rmp_tmp1..rmp_result2 are only used in SMP builds to avoid per-thread temps
- // in bss, but currently always incldue here so we just run mkDerivedConstants once
+ // rmp_tmp1..rmp_result2 are only used in THREADED_RTS builds to
+ // avoid per-thread temps in bss, but currently always include here
+ // so we just run mkDerivedConstants once
StgInt rmp_tmp_w;
MP_INT rmp_tmp1;
MP_INT rmp_tmp2;
MP_INT rmp_result1;
MP_INT rmp_result2;
StgWord rRet; // holds the return code of the thread
-#if defined(SMP) || defined(PAR)
+#if defined(THREADED_RTS) || defined(PAR)
StgSparkPool rSparks; /* per-task spark pool */
#endif
} StgRegTable;
StgRegTable r;
};
-/* No such thing as a MainCapability under SMP - each thread must have
+/* No such thing as a MainCapability under THREADED_RTS - each thread must have
* its own Capability.
*/
-#if IN_STG_CODE && !defined(SMP)
+#if IN_STG_CODE && !defined(THREADED_RTS)
extern W_ MainCapability[];
#endif
GLOBAL_REG_DECL(StgRegTable *,BaseReg,REG_Base)
#define ASSIGN_BaseReg(e) (BaseReg = (e))
#else
-#ifdef SMP
-#error BaseReg must be in a register for SMP
+#ifdef THREADED_RTS
+#error BaseReg must be in a register for THREADED_RTS
#endif
#define BaseReg (&((struct PartCapability_ *)MainCapability)->r)
#define ASSIGN_BaseReg(e) /*nothing*/
#endif
#ifdef CALLER_SAVES_Base
-#ifdef SMP
-#error "Can't have caller-saved BaseReg with SMP"
+#ifdef THREADED_RTS
+#error "Can't have caller-saved BaseReg with THREADED_RTS"
#endif
#define CALLER_SAVE_Base /* nothing */
#define CALLER_RESTORE_Base BaseReg = &MainRegTable;
#define doNothing() do { } while (0)
-#ifdef SMP
-#define USED_IF_SMP
-#define USED_IF_NOT_SMP STG_UNUSED
-#else
-#define USED_IF_SMP STG_UNUSED
-#define USED_IF_NOT_SMP
-#endif
-
#ifdef DEBUG
#define USED_IF_DEBUG
#define USED_IF_NOT_DEBUG STG_UNUSED
/* TICKY_TICKY needs EAGER_BLACKHOLING to verify no double-entries of
* single-entry thunks.
*/
-/* #if defined(TICKY_TICKY) || defined(SMP) */
+/* #if defined(TICKY_TICKY) || defined(THREADED_RTS) */
#if defined(TICKY_TICKY)
# define EAGER_BLACKHOLING
#else
};
#endif /* PAR */
-#ifdef SMP
+#ifdef THREADED_RTS
struct PAR_FLAGS {
nat nNodes; /* number of threads to run simultaneously */
unsigned int maxLocalSparks;
};
-#endif /* SMP */
+#endif /* THREADED_RTS */
#ifdef GRAN
struct GRAN_STATS_FLAGS {
struct GRAN_COST_FLAGS Costs; /* cost metric for simulation */
struct GRAN_DEBUG_FLAGS Debug; /* debugging options */
- nat maxThreads; /* ToDo: share with SMP and GUM */
+ nat maxThreads; /* ToDo: share with THREADED_RTS and GUM */
/* rtsBool labelling; */
nat packBufferSize;
nat packBufferSize_internal;
struct PROFILING_FLAGS ProfFlags;
struct TICKY_FLAGS TickyFlags;
-#if defined(SMP) || defined(PAR)
+#if defined(THREADED_RTS) || defined(PAR)
struct PAR_FLAGS ParFlags;
#endif
#ifdef GRAN
*
* (c) The GHC Team, 2005
*
- * Macros for SMP support
+ * Macros for THREADED_RTS support
*
* -------------------------------------------------------------------------- */
#ifndef SMP_H
#define SMP_H
-/* SMP is currently not compatible with the following options:
+/* THREADED_RTS is currently not compatible with the following options:
*
- * INTERPRETER
- * PROFILING
+ * PROFILING (but only 1 CPU supported)
* TICKY_TICKY
- * and unregisterised builds.
+ * Unregisterised builds are ok, but only 1 CPU supported.
*/
-#if defined(SMP)
+#if defined(THREADED_RTS)
-#if defined(PROFILING) || defined(TICKY_TICKY)
-#error Build options incompatible with SMP.
+#if defined(TICKY_TICKY)
+#error Build options incompatible with THREADED_RTS.
#endif
/*
#endif
}
-#else /* !SMP */
+#else /* !THREADED_RTS */
#define wb() /* nothing */
return old;
}
-#endif /* !SMP */
+#endif /* !THREADED_RTS */
#endif /* SMP_H */
#ifndef STM_H
#define STM_H
-#ifdef SMP
+#ifdef THREADED_RTS
//#define STM_CG_LOCK
#define STM_FG_LOCKS
#else
via allocate() since the last GC.
Used in the reporting of statistics.
- SMP: allocate and doYouWantToGC can be used from STG code, they are
+ THREADED_RTS: allocate and doYouWantToGC can be used from STG code, they are
surrounded by a mutex.
-------------------------------------------------------------------------- */
/*
* Storage manager mutex
*/
-#if defined(SMP)
+#if defined(THREADED_RTS)
extern Mutex sm_mutex;
#endif
-#if defined(SMP)
+#if defined(THREADED_RTS)
#define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex);
#define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex);
#define ASSERT_SM_LOCK() ASSERT_LOCK_HELD(&sm_mutex);
/* -----------------------------------------------------------------------------
*
- * (c) The GHC Team 1998-2000
+ * (c) The GHC Team 1998-2006
*
* The block allocator and free list manager.
*
static bdescr *allocMegaGroup(nat mblocks);
static void freeMegaGroup(bdescr *bd);
-// In SMP mode, the free list is protected by sm_mutex. In the
-// threaded RTS, it is protected by the Capability.
+// In THREADED_RTS mode, the free list is protected by sm_mutex.
static bdescr *free_list = NULL;
/* -----------------------------------------------------------------------------
* STG execution, a pointer to the capabilitity is kept in a
* register (BaseReg; actually it is a pointer to cap->r).
*
- * Only in an SMP build will there be multiple capabilities, for
- * the threaded RTS and other non-threaded builds, there is only
- * one global capability, namely MainCapability.
+ * Only in a THREADED_RTS build will there be multiple capabilities,
+ * for non-threaded builds there is only one global capability, namely
+ * MainCapability.
*
* --------------------------------------------------------------------------*/
#include "Schedule.h"
#include "Sparks.h"
-#if !defined(SMP)
-Capability MainCapability; // for non-SMP, we have one global capability
-#endif
+// one global capability, this is the Capability for non-threaded
+// builds, and for +RTS -N1
+Capability MainCapability;
nat n_capabilities;
Capability *capabilities = NULL;
/* ---------------------------------------------------------------------------
* Function: initCapabilities()
*
- * Purpose: set up the Capability handling. For the SMP build,
+ * Purpose: set up the Capability handling. For the THREADED_RTS build,
* we keep a table of them, the size of which is
* controlled by the user via the RTS flag -N.
*
void
initCapabilities( void )
{
-#if defined(SMP)
- nat i,n;
+#if defined(THREADED_RTS)
+ nat i;
#ifndef REG_BaseReg
// We can't support multiple CPUs if BaseReg is not a register
}
#endif
- n_capabilities = n = RtsFlags.ParFlags.nNodes;
- capabilities = stgMallocBytes(n * sizeof(Capability), "initCapabilities");
+ n_capabilities = RtsFlags.ParFlags.nNodes;
+
+ if (n_capabilities == 1) {
+ capabilities = &MainCapability;
+ // THREADED_RTS must work on builds that don't have a mutable
+ // BaseReg (eg. unregisterised), so in this case
+ // capabilities[0] must coincide with &MainCapability.
+ } else {
+ capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
+ "initCapabilities");
+ }
- for (i = 0; i < n; i++) {
+ for (i = 0; i < n_capabilities; i++) {
initCapability(&capabilities[i], i);
}
- IF_DEBUG(scheduler, sched_belch("allocated %d capabilities", n));
-#else
+ IF_DEBUG(scheduler, sched_belch("allocated %d capabilities",
+ n_capabilities));
+
+#else /* !THREADED_RTS */
+
n_capabilities = 1;
capabilities = &MainCapability;
initCapability(&MainCapability, 0);
+
#endif
// There are no free capabilities to begin with. We will start
return;
}
- // If we have an unbound thread on the run queue, or if there's
- // anything else to do, give the Capability to a worker thread.
- if (!emptyRunQueue(cap) || !emptySparkPoolCap(cap) || globalWorkToDo()) {
- if (cap->spare_workers) {
- giveCapabilityToTask(cap,cap->spare_workers);
- // The worker Task pops itself from the queue;
- return;
- }
-
+ if (!cap->spare_workers) {
// Create a worker thread if we don't have one. If the system
// is interrupted, we only create a worker task if there
// are threads that need to be completed. If the system is
}
}
+ // If we have an unbound thread on the run queue, or if there's
+ // anything else to do, give the Capability to a worker thread.
+ if (!emptyRunQueue(cap) || !emptySparkPoolCap(cap) || globalWorkToDo()) {
+ if (cap->spare_workers) {
+ giveCapabilityToTask(cap,cap->spare_workers);
+ // The worker Task pops itself from the queue;
+ return;
+ }
+ }
+
last_free_capability = cap;
IF_DEBUG(scheduler, sched_belch("freeing capability %d", cap->no));
}
}
RELEASE_LOCK(&cap->lock);
}
+ return;
}
void
/* ---------------------------------------------------------------------------
*
- * (c) The GHC Team, 2001-2003
+ * (c) The GHC Team, 2001-2006
*
* Capabilities
*
* The notion of a capability is used when operating in multi-threaded
- * environments (which the SMP and Threads builds of the RTS do), to
+ * environments (which the THREADED_RTS build of the RTS does), to
* hold all the state an OS thread/task needs to run Haskell code:
* its STG registers, a pointer to its TSO, a nursery etc. During
* STG execution, a pointer to the capabilitity is kept in a
* register (BaseReg).
*
- * Only in an SMP build will there be multiple capabilities, the threaded
- * RTS and other non-threaded builds, there is one global capability,
- * namely MainRegTable.
+ * Only in a THREADED_RTS build will there be multiple capabilities,
+ * in the non-threaded builds there is one global capability, namely
+ * MainCapability.
*
* This header file contains the functions for working with capabilities.
* (the main, and only, consumer of this interface is the scheduler).
INLINE_HEADER void releaseCapability_ (Capability* cap STG_UNUSED) {};
#endif
-#if !IN_STG_CODE && !defined(SMP)
-// for non-SMP, we have one global capability
+#if !IN_STG_CODE
+// one global capability
extern Capability MainCapability;
#endif
ACQUIRE_SM_LOCK;
// send exceptions to any threads which were about to die
+ RELEASE_SM_LOCK;
resurrectThreads(resurrected_threads);
+ ACQUIRE_SM_LOCK;
// Update the stable pointer hash table.
updateStablePtrTable(major_gc);
RtsFlags.ConcFlags.ctxtSwitchTime = CS_MIN_MILLISECS; /* In milliseconds */
-#ifdef SMP
+#ifdef THREADED_RTS
RtsFlags.ParFlags.nNodes = 1;
#endif
RtsFlags.ParFlags.fishDelay = FISH_DELAY;
#endif
-#if defined(PAR) || defined(SMP)
+#if defined(PAR) || defined(THREADED_RTS)
RtsFlags.ParFlags.maxLocalSparks = 4096;
-#endif /* PAR || SMP */
+#endif /* PAR || THREADED_RTS */
#if defined(GRAN)
/* ToDo: check defaults for GranSim and GUM */
" -Dz DEBUG: stack squezing",
"",
#endif /* DEBUG */
-#if defined(SMP)
+#if defined(THREADED_RTS)
" -N<n> Use <n> OS threads (default: 1)",
#endif
-#if defined(SMP) || defined(PAR)
+#if defined(THREADED_RTS) || defined(PAR)
" -e<size> Size of spark pools (default 100)",
#endif
#if defined(PAR)
" -qd Turn on PVM-ish debugging",
" -qO Disable output for performance measurement",
#endif
-#if defined(SMP) || defined(PAR)
+#if defined(THREADED_RTS) || defined(PAR)
" -e<n> Maximum number of outstanding local sparks (default: 4096)",
#endif
#if defined(PAR)
error = rtsTrue;
#endif
-#ifdef SMP
-# define SMP_BUILD_ONLY(x) x
-#else
-# define SMP_BUILD_ONLY(x) \
-errorBelch("not built for: -smp"); \
-error = rtsTrue;
-#endif
-
#ifdef PAR
# define PAR_BUILD_ONLY(x) x
#else
error = rtsTrue;
#endif
-#if defined(SMP) || defined(PAR)
-# define PAR_OR_SMP_BUILD_ONLY(x) x
+#ifdef THREADED_RTS
+# define THREADED_BUILD_ONLY(x) x
+#else
+# define THREADED_BUILD_ONLY(x) \
+errorBelch("not built for: -smp"); \
+error = rtsTrue;
+#endif
+
+#if defined(THREADED_RTS) || defined(PAR)
+# define PAR_OR_THREADED_BUILD_ONLY(x) x
#else
-# define PAR_OR_SMP_BUILD_ONLY(x) \
+# define PAR_OR_THREADED_BUILD_ONLY(x) \
errorBelch("not built for: -parallel or -smp"); \
error = rtsTrue;
#endif
}
break;
-#ifdef SMP
+#ifdef THREADED_RTS
case 'N':
- SMP_BUILD_ONLY(
+ THREADED_BUILD_ONLY(
if (rts_argv[arg][2] != '\0') {
RtsFlags.ParFlags.nNodes
= strtol(rts_argv[arg]+2, (char **) NULL, 10);
#endif
/* =========== PARALLEL =========================== */
case 'e':
- PAR_OR_SMP_BUILD_ONLY(
+ PAR_OR_THREADED_BUILD_ONLY(
if (rts_argv[arg][2] != '\0') {
RtsFlags.ParFlags.maxLocalSparks
= strtol(rts_argv[arg]+2, (char **) NULL, 10);
#include "Stats.h" /* initStats */
#include "STM.h" /* initSTM */
#include "Signals.h"
+#include "RtsSignals.h"
#include "Timer.h" /* startTimer, stopTimer */
#include "Weak.h"
#include "Ticky.h"
hs_add_root(void (*init_root)(void))
{
bdescr *bd;
-#ifdef SMP
- Capability cap;
-#else
-#define cap MainCapability
-#endif
nat init_sp;
+ Capability *cap = &MainCapability;
if (hs_init_count <= 0) {
barf("hs_add_root() must be called after hs_init()");
init_stack[--init_sp] = (F_)init_root;
}
- cap.r.rSp = (P_)(init_stack + init_sp);
- StgRun((StgFunPtr)stg_init, &cap.r);
+ cap->r.rSp = (P_)(init_stack + init_sp);
+ StgRun((StgFunPtr)stg_init, &cap->r);
freeGroup_lock(bd);
* in STM.h:
*
* STM_UNIPROC assumes that the caller serialises invocations on the STM interface.
- * In the Haskell RTS this means it is suitable only for non-SMP builds.
+ * In the Haskell RTS this means it is suitable only for non-THREADED_RTS builds.
*
* STM_CG_LOCK uses coarse-grained locking -- a single 'stm lock' is acquired during
* an invocation on the STM interface. Note that this does not mean that
#define TRUE 1
#define FALSE 0
-// ACQ_ASSERT is used for assertions which are only required for SMP builds with
-// fine-grained locking.
+// ACQ_ASSERT is used for assertions which are only required for
+// THREADED_RTS builds with fine-grained locking.
#if defined(STM_FG_LOCKS)
#define ACQ_ASSERT(_X) ASSERT(_X)
static volatile StgBool token_locked = FALSE;
-#if defined(SMP)
+#if defined(THREADED_RTS)
static void getTokenBatch(Capability *cap) {
while (cas(&token_locked, FALSE, TRUE) == TRUE) { /* nothing */ }
max_commits += TOKEN_BATCH_SIZE;
SET_HDR (result, &stg_TVAR_info, CCS_SYSTEM);
result -> current_value = new_value;
result -> first_wait_queue_entry = END_STM_WAIT_QUEUE;
-#if defined(SMP)
+#if defined(THREADED_RTS)
result -> num_updates = 0;
#endif
return result;
{
StgPtr p;
-#if defined(SMP)
+#if defined(THREADED_RTS)
// heap sanity checking doesn't work with SMP, because we can't
// zero the slop (see Updates.h).
return;
/*
* This mutex protects most of the global scheduler data in
- * the THREADED_RTS and (inc. SMP) runtime.
+ * the THREADED_RTS runtime.
*/
#if defined(THREADED_RTS)
Mutex sched_mutex;
// scheduler clearer.
//
static void schedulePreLoop (void);
-#if defined(SMP)
+#if defined(THREADED_RTS)
static void schedulePushWork(Capability *cap, Task *task);
#endif
static void scheduleStartSignalHandlers (Capability *cap);
static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
StgTSO *t );
static rtsBool scheduleDoHeapProfile(rtsBool ready_to_gc);
-static void scheduleDoGC(Capability *cap, Task *task, rtsBool force_major);
+static void scheduleDoGC(Capability *cap, Task *task, rtsBool force_major,
+ void (*get_roots)(evac_fn));
static void unblockThread(Capability *cap, StgTSO *tso);
static rtsBool checkBlackHoles(Capability *cap);
}
#endif
-#ifdef SMP
+#if defined(THREADED_RTS)
schedulePushWork(cap,task);
#endif
//
if (interrupted) {
deleteRunQueue(cap);
-#if defined(SMP)
+#if defined(THREADED_RTS)
discardSparksCap(cap);
#endif
if (shutting_down_scheduler) {
}
}
-#if defined(SMP)
+#if defined(THREADED_RTS)
// If the run queue is empty, take a spark and turn it into a thread.
{
if (emptyRunQueue(cap)) {
}
}
}
-#endif // SMP
+#endif // THREADED_RTS
scheduleStartSignalHandlers(cap);
}
if (scheduleDoHeapProfile(ready_to_gc)) { ready_to_gc = rtsFalse; }
- if (ready_to_gc) { scheduleDoGC(cap,task,rtsFalse); }
+ if (ready_to_gc) { scheduleDoGC(cap,task,rtsFalse,GetRoots); }
} /* end of while() */
IF_PAR_DEBUG(verbose,
* Push work to other Capabilities if we have some.
* -------------------------------------------------------------------------- */
-#ifdef SMP
+#if defined(THREADED_RTS)
static void
-schedulePushWork(Capability *cap USED_IF_SMP,
- Task *task USED_IF_SMP)
+schedulePushWork(Capability *cap USED_IF_THREADS,
+ Task *task USED_IF_THREADS)
{
Capability *free_caps[n_capabilities], *cap0;
nat i, n_free_caps;
{
#if defined(PARALLEL_HASKELL)
- // ToDo: add deadlock detection in GUM (similar to SMP) -- HWL
+ // ToDo: add deadlock detection in GUM (similar to THREADED_RTS) -- HWL
return;
#endif
// they are unreachable and will therefore be sent an
// exception. Any threads thus released will be immediately
// runnable.
- scheduleDoGC( cap, task, rtsTrue/*force major GC*/ );
+ scheduleDoGC( cap, task, rtsTrue/*force major GC*/, GetRoots );
recent_activity = ACTIVITY_DONE_GC;
if ( !emptyRunQueue(cap) ) return;
if (cap->r.rCurrentNursery->u.back != NULL) {
cap->r.rCurrentNursery->u.back->link = bd;
} else {
-#if !defined(SMP)
+#if !defined(THREADED_RTS)
ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
g0s0 == cap->r.rNursery);
#endif
// has tidied up its stack and placed itself on whatever queue
// it needs to be on.
-#if !defined(SMP)
+#if !defined(THREADED_RTS)
ASSERT(t->why_blocked != NotBlocked);
- // This might not be true under SMP: we don't have
+ // This might not be true under THREADED_RTS: we don't have
// exclusive access to this TSO, so someone might have
// woken it up by now. This actually happens: try
// conc023 +RTS -N2.
* -------------------------------------------------------------------------- */
static void
-scheduleDoGC( Capability *cap, Task *task USED_IF_SMP, rtsBool force_major )
+scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS,
+ rtsBool force_major, void (*get_roots)(evac_fn))
{
StgTSO *t;
-#ifdef SMP
+#ifdef THREADED_RTS
static volatile StgWord waiting_for_gc;
rtsBool was_waiting;
nat i;
#endif
-#ifdef SMP
+#ifdef THREADED_RTS
// In order to GC, there must be no threads running Haskell code.
// Therefore, the GC thread needs to hold *all* the capabilities,
// and release them after the GC has completed.
if (was_waiting) {
do {
IF_DEBUG(scheduler, sched_belch("someone else is trying to GC..."));
- yieldCapability(&cap,task);
+ if (cap) yieldCapability(&cap,task);
} while (waiting_for_gc);
return;
}
// ATOMICALLY_FRAME, aborting the (nested)
// transaction, and saving the stack of any
// partially-evaluated thunks on the heap.
- raiseAsync_(cap, t, NULL, rtsTrue, NULL);
+ raiseAsync_(&capabilities[0], t, NULL, rtsTrue, NULL);
#ifdef REG_R1
ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
}
// so this happens periodically:
- scheduleCheckBlackHoles(cap);
+ if (cap) scheduleCheckBlackHoles(cap);
IF_DEBUG(scheduler, printAllThreads());
#if defined(THREADED_RTS)
IF_DEBUG(scheduler,sched_belch("doing GC"));
#endif
- GarbageCollect(GetRoots, force_major);
+ GarbageCollect(get_roots, force_major);
-#if defined(SMP)
+#if defined(THREADED_RTS)
// release our stash of capabilities.
for (i = 0; i < n_capabilities; i++) {
if (cap != &capabilities[i]) {
releaseCapability(&capabilities[i]);
}
}
- task->cap = cap;
+ if (cap) {
+ task->cap = cap;
+ } else {
+ task->cap = NULL;
+ }
#endif
#if defined(GRAN)
* Singleton fork(). Do not copy any running threads.
* ------------------------------------------------------------------------- */
-#if !defined(mingw32_HOST_OS) && !defined(SMP)
+#if !defined(mingw32_HOST_OS)
#define FORKPROCESS_PRIMOP_SUPPORTED
#endif
StgTSO* t,*next;
Capability *cap;
+#if defined(THREADED_RTS)
+ if (RtsFlags.ParFlags.nNodes > 1) {
+ errorBelch("forking not supported with +RTS -N<n> greater than 1");
+ stg_exit(EXIT_FAILURE);
+ }
+#endif
+
IF_DEBUG(scheduler,sched_belch("forking!"));
// ToDo: for SMP, we should probably acquire *all* the capabilities
/* A capability holds the state a native thread needs in
* order to execute STG code. At least one capability is
- * floating around (only SMP builds have more than one).
+ * floating around (only THREADED_RTS builds have more than one).
*/
initCapabilities();
initTaskManager();
-#if defined(SMP) || defined(PARALLEL_HASKELL)
+#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
initSparkPools();
#endif
-#if defined(SMP)
+#if defined(THREADED_RTS)
/*
* Eagerly start one worker to run each Capability, except for
* Capability 0. The idea is that we're probably going to start a
}
#if !defined(THREADED_RTS)
- evac((StgClosure **)&blocked_queue_hd);
- evac((StgClosure **)&blocked_queue_tl);
- evac((StgClosure **)&sleeping_queue);
+ evac((StgClosure **)(void *)&blocked_queue_hd);
+ evac((StgClosure **)(void *)&blocked_queue_tl);
+ evac((StgClosure **)(void *)&sleeping_queue);
#endif
#endif
- evac((StgClosure **)&blackhole_queue);
+ // evac((StgClosure **)&blackhole_queue);
-#if defined(SMP) || defined(PARALLEL_HASKELL) || defined(GRAN)
+#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL) || defined(GRAN)
markSparkQueue(evac);
#endif
static void (*extra_roots)(evac_fn);
+static void
+performGC_(rtsBool force_major, void (*get_roots)(evac_fn))
+{
+ Task *task = myTask();
+
+ if (task == NULL) {
+ ACQUIRE_LOCK(&sched_mutex);
+ task = newBoundTask();
+ RELEASE_LOCK(&sched_mutex);
+ scheduleDoGC(NULL,task,force_major, get_roots);
+ boundTaskExiting(task);
+ } else {
+ scheduleDoGC(NULL,task,force_major, get_roots);
+ }
+}
+
void
performGC(void)
{
-#ifdef THREADED_RTS
- // ToDo: we have to grab all the capabilities here.
- errorBelch("performGC not supported in threaded RTS (yet)");
- stg_exit(EXIT_FAILURE);
-#endif
- /* Obligated to hold this lock upon entry */
- GarbageCollect(GetRoots,rtsFalse);
+ performGC_(rtsFalse, GetRoots);
}
void
performMajorGC(void)
{
-#ifdef THREADED_RTS
- errorBelch("performMayjorGC not supported in threaded RTS (yet)");
- stg_exit(EXIT_FAILURE);
-#endif
- GarbageCollect(GetRoots,rtsTrue);
+ performGC_(rtsTrue, GetRoots);
}
static void
void
performGCWithRoots(void (*get_roots)(evac_fn))
{
-#ifdef THREADED_RTS
- errorBelch("performGCWithRoots not supported in threaded RTS (yet)");
- stg_exit(EXIT_FAILURE);
-#endif
extra_roots = get_roots;
- GarbageCollect(AllRoots,rtsFalse);
+ performGC_(rtsFalse, AllRoots);
}
/* -----------------------------------------------------------------------------
* CATCH_FRAME on the stack. In either case, we strip the entire
* stack and replace the thread with a zombie.
*
- * ToDo: in SMP mode, this function is only safe if either (a) we hold
- * all the Capabilities (eg. in GC), or (b) we own the Capability that
- * the TSO is currently blocked on or on the run queue of.
+ * ToDo: in THREADED_RTS mode, this function is only safe if either
+ * (a) we hold all the Capabilities (eg. in GC, or if there is only
+ * one Capability), or (b) we own the Capability that the TSO is
+ * currently blocked on or on the run queue of.
*
* -------------------------------------------------------------------------- */
*/
void workerStart(Task *task);
-// ToDo: check whether all fcts below are used in the SMP version, too
#if defined(GRAN)
void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
void unlink_from_bq(StgTSO* tso, StgClosure* node);
/* ---------------------------------------------------------------------------
*
- * (c) The GHC Team, 2000-2005
+ * (c) The GHC Team, 2000-2006
*
- * Sparking support for PARALLEL_HASKELL and SMP versions of the RTS.
+ * Sparking support for PARALLEL_HASKELL and THREADED_RTS versions of the RTS.
*
* -------------------------------------------------------------------------*/
# endif
#include "Sparks.h"
-#if defined(SMP) || defined(PARALLEL_HASKELL)
+#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
static INLINE_ME void bump_hd (StgSparkPool *p)
{ p->hd++; if (p->hd == p->lim) p->hd = p->base; }
void
initSparkPools( void )
{
-#ifdef SMP
+#ifdef THREADED_RTS
/* walk over the capabilities, allocating a spark pool for each one */
nat i;
for (i = 0; i < n_capabilities; i++) {
return 1;
}
-#endif /* PARALLEL_HASKELL || SMP */
+#endif /* PARALLEL_HASKELL || THREADED_RTS */
/* -----------------------------------------------------------------------------
disposeSpark(spark)
StgClosure *spark;
{
-#if !defined(SMP)
+#if !defined(THREADED_RTS)
Capability *cap;
StgSparkPool *pool;
/* -----------------------------------------------------------------------------
*
- * (c) The GHC Team, 2000
+ * (c) The GHC Team, 2000-2006
*
- * Sparking support for GRAN, PAR and SMP versions of the RTS.
+ * Sparking support for GRAN, PAR and THREADED_RTS versions of the RTS.
*
* ---------------------------------------------------------------------------*/
StgInt newSpark (StgRegTable *reg, StgClosure *p);
#endif
-#if defined(PARALLEL_HASKELL) || defined(SMP)
+#if defined(PARALLEL_HASKELL) || defined(THREADED_RTS)
StgClosure * findSpark (Capability *cap);
void initSparkPools (void);
void markSparkQueue (evac_fn evac);
* PRIVATE below here
* -------------------------------------------------------------------------- */
-#if defined(PARALLEL_HASKELL) || defined(SMP)
+#if defined(PARALLEL_HASKELL) || defined(THREADED_RTS)
INLINE_HEADER rtsBool
emptySparkPool (StgSparkPool *pool)
#include <string.h>
/*
- * All these globals require sm_mutex to access in SMP mode.
+ * All these globals require sm_mutex to access in THREADED_RTS mode.
*/
StgClosure *caf_list = NULL;
StgClosure *revertible_caf_list = NULL;
ullong total_allocated = 0; /* total memory allocated during run */
nat n_nurseries = 0; /* == RtsFlags.ParFlags.nNodes, convenience */
-step *nurseries = NULL; /* array of nurseries, >1 only if SMP */
+step *nurseries = NULL; /* array of nurseries, >1 only if THREADED_RTS */
/*
* Storage manager mutex: protects all the above state from
* simultaneous access by two STG threads.
*/
-#ifdef SMP
+#ifdef THREADED_RTS
Mutex sm_mutex;
#endif
initBlockAllocator();
-#if defined(SMP)
+#if defined(THREADED_RTS)
initMutex(&sm_mutex);
#endif
g0->steps = stgMallocBytes (sizeof(struct step_), "initStorage: steps");
}
-#ifdef SMP
+#ifdef THREADED_RTS
n_nurseries = n_capabilities;
nurseries = stgMallocBytes (n_nurseries * sizeof(struct step_),
"initStorage: nurseries");
}
}
-#ifdef SMP
+#ifdef THREADED_RTS
for (s = 0; s < n_nurseries; s++) {
initStep(&nurseries[s], 0, s);
}
}
oldest_gen->steps[0].to = &oldest_gen->steps[0];
-#ifdef SMP
+#ifdef THREADED_RTS
for (s = 0; s < n_nurseries; s++) {
nurseries[s].to = generations[0].steps[0].to;
}
}
}
-#ifdef SMP
+#ifdef THREADED_RTS
if (RtsFlags.GcFlags.generations == 1) {
- errorBelch("-G1 is incompatible with SMP");
+ errorBelch("-G1 is incompatible with -threaded");
stg_exit(EXIT_FAILURE);
}
#endif
static void
assignNurseriesToCapabilities (void)
{
-#ifdef SMP
+#ifdef THREADED_RTS
nat i;
for (i = 0; i < n_nurseries; i++) {
capabilities[i].r.rCurrentNursery = nurseries[i].blocks;
capabilities[i].r.rCurrentAlloc = NULL;
}
-#else /* SMP */
+#else /* THREADED_RTS */
MainCapability.r.rNursery = &nurseries[0];
MainCapability.r.rCurrentNursery = nurseries[0].blocks;
MainCapability.r.rCurrentAlloc = NULL;
total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
/* allocate and fill it in. */
-#if defined(SMP)
+#if defined(THREADED_RTS)
arr = (StgArrWords *)allocateLocal(myTask()->cap, total_size_in_words);
#else
arr = (StgArrWords *)allocateLocal(&MainCapability, total_size_in_words);
allocated += countNurseryBlocks() * BLOCK_SIZE_W;
{
-#ifdef SMP
+#ifdef THREADED_RTS
nat i;
for (i = 0; i < n_nurseries; i++) {
Capability *cap;
for (i = 0; i < n_nurseries; i++) {
total_blocks += stepBlocks(&nurseries[i]);
}
-#ifdef SMP
+#ifdef THREADED_RTS
// We put pinned object blocks in g0s0, so better count blocks there too.
total_blocks += stepBlocks(g0s0);
#endif
build Tasks Capabilities
---------------------------------
normal 1 1
- -threaded N 1
- -smp N N
+ -threaded N N
The non-threaded build has a single Task and a single global
Capability.
- The 'threaded' build has multiple Tasks, but a single Capability.
- At any one time only one task executing STG code, other tasks are
- either busy executing code outside the RTS (e.g., a C call) or
- waiting for their turn to (again) evaluate some STG code. A task
- relinquishes its RTS token when it is asked to evaluate an external
+ The THREADED_RTS build allows multiple tasks and multiple Capabilities.
+ Multiple Tasks may all be running Haskell code simultaneously. A task
+ relinquishes its Capability when it is asked to evaluate an external
(C) call.
-
- The SMP build allows multiple tasks and mulitple Capabilities.
- Multiple Tasks may all be running Haskell code simultaneously.
In general, there may be multiple Tasks for an OS thread. This
happens if one Task makes a foreign call from Haskell, and
* already have been updated (the mutable list will get messed up
* otherwise).
*
- * NB. We do *not* do this in SMP mode, because when we have the
+ * NB. We do *not* do this in THREADED_RTS mode, because when we have the
* possibility of multiple threads entering the same closure, zeroing
* the slop in one of the threads would have a disastrous effect on
* the other (seen in the wild!).
#endif /* CMINUSMINUS */
-#if !defined(DEBUG) || defined(SMP)
+#if !defined(DEBUG) || defined(THREADED_RTS)
#define DEBUG_FILL_SLOP(p) /* do nothing */
#else
#define DEBUG_FILL_SLOP(p) FILL_SLOP(p)
#
# thr : threaded
# thr_p : threaded profiled
-# s : smp
# debug : debugging (compile with -g for the C compiler, and -DDEBUG)
# debug_p : debugging profiled
-# debug_s : debugging smp
# debug_u : debugging unregisterised
# thr_debug : debugging threaded
# thr_debug_p : debugging threaded profiled
ifeq "$(BootingFromHc)" "YES"
GhcRTSWays=
else
-GhcRTSWays=thr thr_p s debug debug_s thr_debug
+GhcRTSWays=thr thr_p debug thr_debug
endif
# Option flags to pass to GHC when it's compiling modules in
WAY_thr_p_NAME=threaded profiled
WAY_thr_p_HC_OPTS=-optc-DTHREADED_RTS -prof
-# Way `s':
-WAY_s_NAME=threads (for SMP)
-WAY_s_HC_OPTS=-optc-DSMP -optc-DTHREADED_RTS
-
# Way 'debug':
WAY_debug_NAME=debug
WAY_debug_HC_OPTS=-optc-DDEBUG
WAY_debug_u_NAME=debug unregisterised
WAY_debug_u_HC_OPTS=-optc-DDEBUG -unreg
-# Way 'debug_s':
-WAY_debug_s_NAME=debug SMP
-WAY_debug_s_HC_OPTS=-optc-DDEBUG -optc-DTHREADED_RTS -optc-DSMP
-
# Way 'thr_debug':
WAY_thr_debug_NAME=threaded
WAY_thr_debug_HC_OPTS=-optc-DTHREADED_RTS -optc-DDEBUG