# define STATIC_INLINE static
#endif
-#ifdef THREADED_RTS
-#define USED_WHEN_THREADED_RTS
-#define USED_WHEN_NON_THREADED_RTS STG_UNUSED
-#else
-#define USED_WHEN_THREADED_RTS STG_UNUSED
-#define USED_WHEN_NON_THREADED_RTS
-#endif
-
-#ifdef SMP
-#define USED_WHEN_SMP
-#else
-#define USED_WHEN_SMP STG_UNUSED
-#endif
-
/* -----------------------------------------------------------------------------
* Global variables
* -------------------------------------------------------------------------- */
/*
* This mutex protects most of the global scheduler data in
- * the THREADED_RTS and (inc. SMP) runtime.
+ * the THREADED_RTS runtime.
*/
#if defined(THREADED_RTS)
Mutex sched_mutex;
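For orientation, code touching the state this mutex guards follows the usual RTS lock discipline; a minimal sketch using the ACQUIRE_LOCK/RELEASE_LOCK macros (the guarded traversal is illustrative, not part of the patch):

    #if defined(THREADED_RTS)
        ACQUIRE_LOCK(&sched_mutex);
        // ... read or modify global scheduler state, e.g. walk all_threads ...
        RELEASE_LOCK(&sched_mutex);
    #endif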
// scheduler clearer.
//
static void schedulePreLoop (void);
-#if defined(SMP)
+#if defined(THREADED_RTS)
static void schedulePushWork(Capability *cap, Task *task);
#endif
static void scheduleStartSignalHandlers (Capability *cap);
static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
StgTSO *t );
static rtsBool scheduleDoHeapProfile(rtsBool ready_to_gc);
-static void scheduleDoGC(Capability *cap, Task *task, rtsBool force_major);
+static void scheduleDoGC(Capability *cap, Task *task, rtsBool force_major,
+ void (*get_roots)(evac_fn));
static void unblockThread(Capability *cap, StgTSO *tso);
static rtsBool checkBlackHoles(Capability *cap);
static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
static void raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception,
- rtsBool stop_at_atomically);
+ rtsBool stop_at_atomically, StgPtr stop_here);
static void deleteThread (Capability *cap, StgTSO *tso);
static void deleteRunQueue (Capability *cap);
// thread for a bit, even if there are others banging at the
// door.
first = rtsFalse;
- ASSERT_CAPABILITY_INVARIANTS(cap,task);
+ ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
} else {
// Yield the capability to higher-priority tasks if necessary.
yieldCapability(&cap, task);
}
#endif
-#ifdef SMP
+#if defined(THREADED_RTS)
schedulePushWork(cap,task);
#endif
// Check whether we have re-entered the RTS from Haskell without
// going via suspendThread()/resumeThread (i.e. a 'safe' foreign
//
if (interrupted) {
deleteRunQueue(cap);
+#if defined(THREADED_RTS)
+ discardSparksCap(cap);
+#endif
if (shutting_down_scheduler) {
IF_DEBUG(scheduler, sched_belch("shutting down"));
// If we are a worker, just exit. If we're a bound thread
}
}
-#if defined(not_yet) && defined(SMP)
- //
- // Top up the run queue from our spark pool. We try to make the
- // number of threads in the run queue equal to the number of
- // free capabilities.
- //
+#if defined(THREADED_RTS)
+ // If the run queue is empty, take a spark and turn it into a thread.
{
- StgClosure *spark;
- if (emptyRunQueue()) {
- spark = findSpark(rtsFalse);
- if (spark == NULL) {
- break; /* no more sparks in the pool */
- } else {
- createSparkThread(spark);
+ if (emptyRunQueue(cap)) {
+ StgClosure *spark;
+ spark = findSpark(cap);
+ if (spark != NULL) {
IF_DEBUG(scheduler,
- sched_belch("==^^ turning spark of closure %p into a thread",
+ sched_belch("turning spark of closure %p into a thread",
(StgClosure *)spark));
+ createSparkThread(cap,spark);
}
}
}
-#endif // SMP
+#endif // THREADED_RTS
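For review context, createSparkThread amounts to wrapping the spark's closure in a fresh TSO and queueing it; roughly (createGenThread and the stack-size flag here are assumptions, not patch text):

    // Sketch only: assumed shape of createSparkThread(cap, spark).
    StgTSO *tso = createGenThread(cap, RtsFlags.GcFlags.initialStkSize, spark);
    appendToRunQueue(cap, tso);   // the spark now runs as an ordinary thread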
scheduleStartSignalHandlers(cap);
scheduleCheckBlockedThreads(cap);
scheduleDetectDeadlock(cap,task);
+#if defined(THREADED_RTS)
+ cap = task->cap; // reload cap, it might have changed
+#endif
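The cap = task->cap reload above is an idiom this patch introduces after every call that may yield the Capability; the pattern, in sketch form (doSomethingThatMayYield is a placeholder):

    doSomethingThatMayYield(cap, task);  // may call yieldCapability(&cap, task)
    #if defined(THREADED_RTS)
    cap = task->cap;   // a Task always records the Capability it now owns
    #endif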
// Normally, the only way we can get here with no threads to
// run is if a keyboard interrupt received during
// ----------------------------------------------------------------------
// Run the current thread
+ ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
+
prev_what_next = t->what_next;
errno = t->saved_errno;
cap->in_haskell = rtsTrue;
+ dirtyTSO(t);
+
recent_activity = ACTIVITY_YES;
switch (prev_what_next) {
// happened. So find the new location:
t = cap->r.rCurrentTSO;
-#ifdef SMP
+ // We have run some Haskell code: there might be blackhole-blocked
+ // threads to wake up now.
+    // Lock-free test here should be ok; we're just setting a flag.
+ if ( blackhole_queue != END_TSO_QUEUE ) {
+ blackholes_need_checking = rtsTrue;
+ }
+
+ // And save the current errno in this thread.
+ // XXX: possibly bogus for SMP because this thread might already
+ // be running again, see code below.
+ t->saved_errno = errno;
+
+#if defined(THREADED_RTS)
// If ret is ThreadBlocked, and this Task is bound to the TSO that
// blocked, we are in limbo - the TSO is now owned by whatever it
// is blocked on, and may in fact already have been woken up,
// immediately and return to normality.
if (ret == ThreadBlocked) {
IF_DEBUG(scheduler,
- debugBelch("--<< thread %d (%s) stopped: blocked\n",
- t->id, whatNext_strs[t->what_next]));
+			 sched_belch("--<< thread %d (%s) stopped: blocked",
+ t->id, whatNext_strs[t->what_next]));
continue;
}
#endif
- ASSERT_CAPABILITY_INVARIANTS(cap,task);
-
- // And save the current errno in this thread.
- t->saved_errno = errno;
+ ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
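The blackholes_need_checking flag set above is consumed later in the scheduler loop; the consumer side looks roughly like this (a reconstruction under the names used in this file, not patch text):

    // Sketch of the consumer side of blackholes_need_checking:
    if (blackholes_need_checking) {
        blackholes_need_checking = rtsFalse;
        checkBlackHoles(cap);   // wake threads whose blackholes were updated
    }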
// ----------------------------------------------------------------------
CCCS = CCS_SYSTEM;
#endif
- // We have run some Haskell code: there might be blackhole-blocked
- // threads to wake up now.
- // Lock-free test here should be ok, we're just setting a flag.
- if ( blackhole_queue != END_TSO_QUEUE ) {
- blackholes_need_checking = rtsTrue;
- }
-
#if defined(THREADED_RTS)
IF_DEBUG(scheduler,debugBelch("sched (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId()););
#elif !defined(GRAN) && !defined(PARALLEL_HASKELL)
case ThreadFinished:
if (scheduleHandleThreadFinished(cap, task, t)) return cap;
- ASSERT_CAPABILITY_INVARIANTS(cap,task);
+ ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
break;
default:
}
if (scheduleDoHeapProfile(ready_to_gc)) { ready_to_gc = rtsFalse; }
- if (ready_to_gc) { scheduleDoGC(cap,task,rtsFalse); }
+ if (ready_to_gc) {
+ scheduleDoGC(cap,task,rtsFalse,GetRoots);
+#if defined(THREADED_RTS)
+ cap = task->cap; // reload cap, it might have changed
+#endif
+ }
} /* end of while() */
IF_PAR_DEBUG(verbose,
* Push work to other Capabilities if we have some.
* -------------------------------------------------------------------------- */
-#ifdef SMP
+#if defined(THREADED_RTS)
static void
-schedulePushWork(Capability *cap USED_WHEN_SMP,
- Task *task USED_WHEN_SMP)
+schedulePushWork(Capability *cap USED_IF_THREADS,
+ Task *task USED_IF_THREADS)
{
Capability *free_caps[n_capabilities], *cap0;
nat i, n_free_caps;
- // Check whether we have more threads on our run queue that we
- // could hand to another Capability.
- if (emptyRunQueue(cap) || cap->run_queue_hd->link == END_TSO_QUEUE) {
+ // Check whether we have more threads on our run queue, or sparks
+ // in our pool, that we could hand to another Capability.
+ if ((emptyRunQueue(cap) || cap->run_queue_hd->link == END_TSO_QUEUE)
+ && sparkPoolSizeCap(cap) < 2) {
return;
}
if (n_free_caps > 0) {
StgTSO *prev, *t, *next;
+ rtsBool pushed_to_all;
+
IF_DEBUG(scheduler, sched_belch("excess threads on run queue and %d free capabilities, sharing...", n_free_caps));
- prev = cap->run_queue_hd;
- t = prev->link;
- prev->link = END_TSO_QUEUE;
i = 0;
- for (; t != END_TSO_QUEUE; t = next) {
- next = t->link;
- t->link = END_TSO_QUEUE;
- if (t->what_next == ThreadRelocated
- || t->bound == task) { // don't move my bound thread
- prev->link = t;
- prev = t;
- } else if (i == n_free_caps) {
- i = 0;
- // keep one for us
- prev->link = t;
- prev = t;
- } else {
- appendToRunQueue(free_caps[i],t);
- if (t->bound) { t->bound->cap = free_caps[i]; }
- i++;
+ pushed_to_all = rtsFalse;
+
+ if (cap->run_queue_hd != END_TSO_QUEUE) {
+ prev = cap->run_queue_hd;
+ t = prev->link;
+ prev->link = END_TSO_QUEUE;
+ for (; t != END_TSO_QUEUE; t = next) {
+ next = t->link;
+ t->link = END_TSO_QUEUE;
+ if (t->what_next == ThreadRelocated
+ || t->bound == task) { // don't move my bound thread
+ prev->link = t;
+ prev = t;
+ } else if (i == n_free_caps) {
+ pushed_to_all = rtsTrue;
+ i = 0;
+ // keep one for us
+ prev->link = t;
+ prev = t;
+ } else {
+ IF_DEBUG(scheduler, sched_belch("pushing thread %d to capability %d", t->id, free_caps[i]->no));
+ appendToRunQueue(free_caps[i],t);
+ if (t->bound) { t->bound->cap = free_caps[i]; }
+ i++;
+ }
+ }
+ cap->run_queue_tl = prev;
+ }
+
+ // If there are some free capabilities that we didn't push any
+ // threads to, then try to push a spark to each one.
+ if (!pushed_to_all) {
+ StgClosure *spark;
+ // i is the next free capability to push to
+ for (; i < n_free_caps; i++) {
+ if (emptySparkPoolCap(free_caps[i])) {
+ spark = findSpark(cap);
+ if (spark != NULL) {
+ IF_DEBUG(scheduler, sched_belch("pushing spark %p to capability %d", spark, free_caps[i]->no));
+ newSpark(&(free_caps[i]->r), spark);
+ }
+ }
}
}
- cap->run_queue_tl = prev;
// release the capabilities
for (i = 0; i < n_free_caps; i++) {
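The excerpt elides how free_caps is filled near the top of schedulePushWork; a plausible reconstruction, hedged (tryGrabCapability and the exact idleness test are assumptions):

    // Gather Capabilities that currently appear to have nothing to do.
    n_free_caps = 0;
    for (i = 0; i < n_capabilities; i++) {
        cap0 = &capabilities[i];
        if (cap != cap0 && tryGrabCapability(cap0, task)) {
            if (!emptyRunQueue(cap0) || cap0->suspended_ccalling_tasks != NULL) {
                releaseCapability(cap0);   // it already has work; hand it back
            } else {
                free_caps[n_free_caps++] = cap0;
            }
        }
    }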
* Start any pending signal handlers
* ------------------------------------------------------------------------- */
+#if defined(RTS_USER_SIGNALS) && (!defined(THREADED_RTS) || defined(mingw32_HOST_OS))
static void
scheduleStartSignalHandlers(Capability *cap)
{
-#if defined(RTS_USER_SIGNALS) && (!defined(THREADED_RTS) || defined(mingw32_HOST_OS))
if (signals_pending()) { // safe outside the lock
startSignalHandlers(cap);
}
-#endif
}
+#else
+static void
+scheduleStartSignalHandlers(Capability *cap STG_UNUSED)
+{
+}
+#endif
/* ----------------------------------------------------------------------------
* Check for blocked threads that can be woken up.
* ------------------------------------------------------------------------- */
static void
-scheduleCheckBlockedThreads(Capability *cap USED_WHEN_NON_THREADED_RTS)
+scheduleCheckBlockedThreads(Capability *cap USED_IF_NOT_THREADS)
{
#if !defined(THREADED_RTS)
//
{
#if defined(PARALLEL_HASKELL)
- // ToDo: add deadlock detection in GUM (similar to SMP) -- HWL
+ // ToDo: add deadlock detection in GUM (similar to THREADED_RTS) -- HWL
return;
#endif
// they are unreachable and will therefore be sent an
// exception. Any threads thus released will be immediately
// runnable.
- scheduleDoGC( cap, task, rtsTrue/*force major GC*/ );
+ scheduleDoGC( cap, task, rtsTrue/*force major GC*/, GetRoots );
+#if defined(THREADED_RTS)
+ cap = task->cap; // reload cap, it might have changed
+#endif
+
recent_activity = ACTIVITY_DONE_GC;
if ( !emptyRunQueue(cap) ) return;
if (cap->r.rCurrentNursery->u.back != NULL) {
cap->r.rCurrentNursery->u.back->link = bd;
} else {
-#if !defined(SMP)
+#if !defined(THREADED_RTS)
ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
g0s0 == cap->r.rNursery);
#endif
// has tidied up its stack and placed itself on whatever queue
// it needs to be on.
-#if !defined(SMP)
+#if !defined(THREADED_RTS)
ASSERT(t->why_blocked != NotBlocked);
- // This might not be true under SMP: we don't have
+ // This might not be true under THREADED_RTS: we don't have
// exclusive access to this TSO, so someone might have
// woken it up by now. This actually happens: try
// conc023 +RTS -N2.
if (performHeapProfile ||
(RtsFlags.ProfFlags.profileInterval==0 &&
RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
+
+    // Checking black holes is necessary before GC; otherwise
+ // there may be threads that are unreachable except by the
+ // blackhole queue, which the GC will consider to be
+ // deadlocked.
+ scheduleCheckBlackHoles(&MainCapability);
+
+ IF_DEBUG(scheduler, sched_belch("garbage collecting before heap census"));
GarbageCollect(GetRoots, rtsTrue);
+
+ IF_DEBUG(scheduler, sched_belch("performing heap census"));
heapCensus();
+
performHeapProfile = rtsFalse;
return rtsTrue; // true <=> we already GC'd
}
* -------------------------------------------------------------------------- */
static void
-scheduleDoGC( Capability *cap, Task *task USED_WHEN_SMP, rtsBool force_major )
+scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS,
+ rtsBool force_major, void (*get_roots)(evac_fn))
{
StgTSO *t;
-#ifdef SMP
+#ifdef THREADED_RTS
static volatile StgWord waiting_for_gc;
rtsBool was_waiting;
nat i;
#endif
-#ifdef SMP
+#ifdef THREADED_RTS
// In order to GC, there must be no threads running Haskell code.
// Therefore, the GC thread needs to hold *all* the capabilities,
// and release them after the GC has completed.
if (was_waiting) {
do {
IF_DEBUG(scheduler, sched_belch("someone else is trying to GC..."));
- yieldCapability(&cap,task);
+ if (cap) yieldCapability(&cap,task);
} while (waiting_for_gc);
- return;
+ return; // NOTE: task->cap might have changed here
}
for (i=0; i < n_capabilities; i++) {
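The body of this grab-everything loop is elided here; in spirit it acquires each Capability we do not already hold, so that no Haskell code can run during the collection (waitForReturnCapability's role is an assumption about the elided body):

    // Sketch: grab every other Capability before the GC; they are
    // released again after GarbageCollect() below.
    if (cap != &capabilities[i]) {
        Capability *pcap = &capabilities[i];
        task->cap = pcap;
        waitForReturnCapability(&pcap, task);   // assumed helper
        task->cap = cap;
    }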
// ATOMICALLY_FRAME, aborting the (nested)
// transaction, and saving the stack of any
// partially-evaluated thunks on the heap.
- raiseAsync_(cap, t, NULL, rtsTrue);
+ raiseAsync_(&capabilities[0], t, NULL, rtsTrue, NULL);
#ifdef REG_R1
ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
}
// so this happens periodically:
- scheduleCheckBlackHoles(cap);
+ if (cap) scheduleCheckBlackHoles(cap);
IF_DEBUG(scheduler, printAllThreads());
#if defined(THREADED_RTS)
IF_DEBUG(scheduler,sched_belch("doing GC"));
#endif
- GarbageCollect(GetRoots, force_major);
+ GarbageCollect(get_roots, force_major);
-#if defined(SMP)
+#if defined(THREADED_RTS)
// release our stash of capabilities.
for (i = 0; i < n_capabilities; i++) {
if (cap != &capabilities[i]) {
releaseCapability(&capabilities[i]);
}
}
- task->cap = cap;
+ if (cap) {
+ task->cap = cap;
+ } else {
+ task->cap = NULL;
+ }
#endif
#if defined(GRAN)
* ------------------------------------------------------------------------- */
StgBool
-isThreadBound(StgTSO* tso USED_WHEN_THREADED_RTS)
+isThreadBound(StgTSO* tso USED_IF_THREADS)
{
#if defined(THREADED_RTS)
return (tso->bound != NULL);
* Singleton fork(). Do not copy any running threads.
* ------------------------------------------------------------------------- */
-#if !defined(mingw32_HOST_OS) && !defined(SMP)
+#if !defined(mingw32_HOST_OS)
#define FORKPROCESS_PRIMOP_SUPPORTED
#endif
StgTSO* t,*next;
Capability *cap;
+#if defined(THREADED_RTS)
+ if (RtsFlags.ParFlags.nNodes > 1) {
+ errorBelch("forking not supported with +RTS -N<n> greater than 1");
+ stg_exit(EXIT_FAILURE);
+ }
+#endif
+
IF_DEBUG(scheduler,sched_belch("forking!"));
// ToDo: for SMP, we should probably acquire *all* the capabilities
}
RELEASE_LOCK(&sched_mutex);
+ cap->suspended_ccalling_tasks = NULL;
+
+#if defined(THREADED_RTS)
// wipe our spare workers list.
cap->spare_workers = NULL;
+ cap->returning_tasks_hd = NULL;
+ cap->returning_tasks_tl = NULL;
+#endif
cap = rts_evalStableIO(cap, entry, NULL); // run the action
rts_checkSchedStatus("forkProcess",cap);
// XXX this might not be necessary --SDM
tso->what_next = ThreadRunGHC;
- threadPaused(tso);
+ threadPaused(cap,tso);
if(tso->blocked_exceptions == NULL) {
tso->why_blocked = BlockedOnCCall;
cap->in_haskell = rtsTrue;
errno = saved_errno;
+    /* We might have GC'd; mark the TSO dirty again. */
+ dirtyTSO(tso);
+
+ IF_DEBUG(sanity, checkTSO(tso));
+
return &cap->r;
}
tso->why_blocked = NotBlocked;
tso->blocked_exceptions = NULL;
+ tso->flags = TSO_DIRTY;
tso->saved_errno = 0;
tso->bound = NULL;
cap = schedule(cap,task);
ASSERT(task->stat != NoStatus);
- ASSERT_CAPABILITY_INVARIANTS(cap,task);
+ ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
IF_DEBUG(scheduler, sched_belch("bound thread (%d) finished", task->tso->id));
return cap;
/* A capability holds the state a native thread needs in
* order to execute STG code. At least one capability is
- * floating around (only SMP builds have more than one).
+ * floating around (only THREADED_RTS builds have more than one).
*/
initCapabilities();
initTaskManager();
-#if defined(SMP)
+#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
+ initSparkPools();
+#endif
+
+#if defined(THREADED_RTS)
/*
* Eagerly start one worker to run each Capability, except for
* Capability 0. The idea is that we're probably going to start a
}
#endif
-#if /* defined(SMP) ||*/ defined(PARALLEL_HASKELL)
- initSparkPools();
-#endif
-
RELEASE_LOCK(&sched_mutex);
}
}
#if !defined(THREADED_RTS)
- evac((StgClosure **)&blocked_queue_hd);
- evac((StgClosure **)&blocked_queue_tl);
- evac((StgClosure **)&sleeping_queue);
+ evac((StgClosure **)(void *)&blocked_queue_hd);
+ evac((StgClosure **)(void *)&blocked_queue_tl);
+ evac((StgClosure **)(void *)&sleeping_queue);
#endif
#endif
- evac((StgClosure **)&blackhole_queue);
+ // evac((StgClosure **)&blackhole_queue);
-#if defined(PARALLEL_HASKELL) || defined(GRAN)
+#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL) || defined(GRAN)
markSparkQueue(evac);
#endif
static void (*extra_roots)(evac_fn);
+static void
+performGC_(rtsBool force_major, void (*get_roots)(evac_fn))
+{
+ Task *task = myTask();
+
+ if (task == NULL) {
+ ACQUIRE_LOCK(&sched_mutex);
+ task = newBoundTask();
+ RELEASE_LOCK(&sched_mutex);
+ scheduleDoGC(NULL,task,force_major, get_roots);
+ boundTaskExiting(task);
+ } else {
+ scheduleDoGC(NULL,task,force_major, get_roots);
+ }
+}
+
void
performGC(void)
{
-#ifdef THREADED_RTS
- // ToDo: we have to grab all the capabilities here.
- errorBelch("performGC not supported in threaded RTS (yet)");
- stg_exit(EXIT_FAILURE);
-#endif
- /* Obligated to hold this lock upon entry */
- GarbageCollect(GetRoots,rtsFalse);
+ performGC_(rtsFalse, GetRoots);
}
void
performMajorGC(void)
{
-#ifdef THREADED_RTS
- errorBelch("performMayjorGC not supported in threaded RTS (yet)");
- stg_exit(EXIT_FAILURE);
-#endif
- GarbageCollect(GetRoots,rtsTrue);
+ performGC_(rtsTrue, GetRoots);
}
static void
void
performGCWithRoots(void (*get_roots)(evac_fn))
{
-#ifdef THREADED_RTS
- errorBelch("performGCWithRoots not supported in threaded RTS (yet)");
- stg_exit(EXIT_FAILURE);
-#endif
extra_roots = get_roots;
- GarbageCollect(AllRoots,rtsFalse);
+ performGC_(rtsFalse, AllRoots);
}
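A hedged usage example of the extra-roots path, now that it works in the threaded RTS (my_root and markMyRoots are hypothetical):

    static StgClosure *my_root;                  /* hypothetical extra root */

    static void
    markMyRoots(evac_fn evac)
    {
        evac((StgClosure **)(void *)&my_root);   /* keep it alive across GC */
    }

    /* ... later, from any OS thread: */
    performGCWithRoots(markMyRoots);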
/* -----------------------------------------------------------------------------
* CATCH_FRAME on the stack. In either case, we strip the entire
* stack and replace the thread with a zombie.
*
- * ToDo: in SMP mode, this function is only safe if either (a) we hold
- * all the Capabilities (eg. in GC), or (b) we own the Capability that
- * the TSO is currently blocked on or on the run queue of.
+ * ToDo: in THREADED_RTS mode, this function is only safe if either
+ * (a) we hold all the Capabilities (eg. in GC, or if there is only
+ * one Capability), or (b) we own the Capability that the TSO is
+ * currently blocked on or on the run queue of.
*
* -------------------------------------------------------------------------- */
void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception)
{
- raiseAsync_(cap, tso, exception, rtsFalse);
+ raiseAsync_(cap, tso, exception, rtsFalse, NULL);
+}
+
+void
+suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here)
+{
+ raiseAsync_(cap, tso, NULL, rtsFalse, stop_here);
}
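So the two public entry points differ only in what they pass to raiseAsync_; in summary (an informative restatement of the definitions above):

    // raiseAsync:         strip frames until a handler is found, then raise
    //                     `exception` there:
    //                     raiseAsync_(cap, tso, exception, rtsFalse, NULL);
    // suspendComputation: strip frames strictly below `stop_here`, raising
    //                     nothing:
    //                     raiseAsync_(cap, tso, NULL, rtsFalse, stop_here);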
static void
raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception,
- rtsBool stop_at_atomically)
+ rtsBool stop_at_atomically, StgPtr stop_here)
{
StgRetInfoTable *info;
- StgPtr sp;
+ StgPtr sp, frame;
+ nat i;
// Thread already dead?
if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
// Remove it from any blocking queues
unblockThread(cap,tso);
+ // mark it dirty; we're about to change its stack.
+ dirtyTSO(tso);
+
sp = tso->sp;
// The stack freezing code assumes there's a closure pointer on
sp[0] = (W_)&stg_dummy_ret_closure;
}
- while (1) {
- nat i;
+ frame = sp + 1;
+ while (stop_here == NULL || frame < stop_here) {
// 1. Let the top of the stack be the "current closure"
//
// NB: if we pass an ATOMICALLY_FRAME then abort the associated
// transaction
-
- StgPtr frame;
-
- frame = sp + 1;
info = get_ret_itbl((StgClosure *)frame);
-
- while (info->i.type != UPDATE_FRAME
- && (info->i.type != CATCH_FRAME || exception == NULL)
- && info->i.type != STOP_FRAME
- && (info->i.type != ATOMICALLY_FRAME || stop_at_atomically == rtsFalse))
- {
- if (info->i.type == CATCH_RETRY_FRAME || info->i.type == ATOMICALLY_FRAME) {
- // IF we find an ATOMICALLY_FRAME then we abort the
- // current transaction and propagate the exception. In
- // this case (unlike ordinary exceptions) we do not care
- // whether the transaction is valid or not because its
- // possible validity cannot have caused the exception
- // and will not be visible after the abort.
- IF_DEBUG(stm,
- debugBelch("Found atomically block delivering async exception\n"));
- stmAbortTransaction(tso -> trec);
- tso -> trec = stmGetEnclosingTRec(tso -> trec);
- }
- frame += stack_frame_sizeW((StgClosure *)frame);
- info = get_ret_itbl((StgClosure *)frame);
- }
-
+
switch (info->i.type) {
-
- case ATOMICALLY_FRAME:
- ASSERT(stop_at_atomically);
- ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
- stmCondemnTransaction(tso -> trec);
-#ifdef REG_R1
- tso->sp = frame;
-#else
- // R1 is not a register: the return convention for IO in
- // this case puts the return value on the stack, so we
- // need to set up the stack to return to the atomically
- // frame properly...
- tso->sp = frame - 2;
- tso->sp[1] = (StgWord) &stg_NO_FINALIZER_closure; // why not?
- tso->sp[0] = (StgWord) &stg_ut_1_0_unreg_info;
-#endif
- tso->what_next = ThreadRunGHC;
- return;
- case CATCH_FRAME:
- // If we find a CATCH_FRAME, and we've got an exception to raise,
- // then build the THUNK raise(exception), and leave it on
- // top of the CATCH_FRAME ready to enter.
- //
- {
-#ifdef PROFILING
- StgCatchFrame *cf = (StgCatchFrame *)frame;
-#endif
- StgThunk *raise;
-
- // we've got an exception to raise, so let's pass it to the
- // handler in this frame.
- //
- raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+MIN_UPD_SIZE);
- TICK_ALLOC_SE_THK(1,0);
- SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
- raise->payload[0] = exception;
-
- // throw away the stack from Sp up to the CATCH_FRAME.
- //
- sp = frame - 1;
-
- /* Ensure that async excpetions are blocked now, so we don't get
- * a surprise exception before we get around to executing the
- * handler.
- */
- if (tso->blocked_exceptions == NULL) {
- tso->blocked_exceptions = END_TSO_QUEUE;
- }
-
- /* Put the newly-built THUNK on top of the stack, ready to execute
- * when the thread restarts.
- */
- sp[0] = (W_)raise;
- sp[-1] = (W_)&stg_enter_info;
- tso->sp = sp-1;
- tso->what_next = ThreadRunGHC;
- IF_DEBUG(sanity, checkTSO(tso));
- return;
- }
-
case UPDATE_FRAME:
{
StgAP_STACK * ap;
printObj((StgClosure *)ap);
);
- // Replace the updatee with an indirection - happily
- // this will also wake up any threads currently
- // waiting on the result.
+ // Replace the updatee with an indirection
//
// Warning: if we're in a loop, more than one update frame on
// the stack may point to the same object. Be careful not to
}
sp += sizeofW(StgUpdateFrame) - 1;
sp[0] = (W_)ap; // push onto stack
- break;
+ frame = sp + 1;
+	    continue; // no need to bump frame
}
-
+
case STOP_FRAME:
// We've stripped the entire stack, the thread is now dead.
- sp += sizeofW(StgStopFrame);
tso->what_next = ThreadKilled;
- tso->sp = sp;
+ tso->sp = frame + sizeofW(StgStopFrame);
return;
+
+ case CATCH_FRAME:
+ // If we find a CATCH_FRAME, and we've got an exception to raise,
+ // then build the THUNK raise(exception), and leave it on
+ // top of the CATCH_FRAME ready to enter.
+ //
+ {
+#ifdef PROFILING
+ StgCatchFrame *cf = (StgCatchFrame *)frame;
+#endif
+ StgThunk *raise;
+
+ if (exception == NULL) break;
+
+ // we've got an exception to raise, so let's pass it to the
+ // handler in this frame.
+ //
+ raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
+ TICK_ALLOC_SE_THK(1,0);
+ SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
+ raise->payload[0] = exception;
+
+ // throw away the stack from Sp up to the CATCH_FRAME.
+ //
+ sp = frame - 1;
+
+	    /* Ensure that async exceptions are blocked now, so we don't get
+ * a surprise exception before we get around to executing the
+ * handler.
+ */
+ if (tso->blocked_exceptions == NULL) {
+ tso->blocked_exceptions = END_TSO_QUEUE;
+ }
+
+ /* Put the newly-built THUNK on top of the stack, ready to execute
+ * when the thread restarts.
+ */
+ sp[0] = (W_)raise;
+ sp[-1] = (W_)&stg_enter_info;
+ tso->sp = sp-1;
+ tso->what_next = ThreadRunGHC;
+ IF_DEBUG(sanity, checkTSO(tso));
+ return;
+ }
+
+ case ATOMICALLY_FRAME:
+ if (stop_at_atomically) {
+ ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
+ stmCondemnTransaction(cap, tso -> trec);
+#ifdef REG_R1
+ tso->sp = frame;
+#else
+ // R1 is not a register: the return convention for IO in
+ // this case puts the return value on the stack, so we
+ // need to set up the stack to return to the atomically
+ // frame properly...
+ tso->sp = frame - 2;
+ tso->sp[1] = (StgWord) &stg_NO_FINALIZER_closure; // why not?
+ tso->sp[0] = (StgWord) &stg_ut_1_0_unreg_info;
+#endif
+ tso->what_next = ThreadRunGHC;
+ return;
+ }
+ // Not stop_at_atomically... fall through and abort the
+ // transaction.
+
+ case CATCH_RETRY_FRAME:
+	    // If we get here, either we hit a CATCH_RETRY_FRAME directly or
+	    // we fell through from an ATOMICALLY_FRAME above: in both cases
+	    // we abort the current transaction and propagate the exception.
+	    // In this case (unlike ordinary exceptions) we do not care
+	    // whether the transaction is valid or not because its possible
+	    // validity cannot have caused the exception and will not be
+	    // visible after the abort.
+	    IF_DEBUG(stm,
+		     debugBelch("Found atomically block delivering async exception\n"));
+	    {
+		// block scope keeps the declarations legal for C89 compilers
+		StgTRecHeader *trec = tso -> trec;
+		StgTRecHeader *outer = stmGetEnclosingTRec(trec);
+		stmAbortTransaction(cap, trec);
+		tso -> trec = outer;
+	    }
+	    break;
default:
- barf("raiseAsync");
+ break;
}
+
+ // move on to the next stack frame
+ frame += stack_frame_sizeW((StgClosure *)frame);
}
- barf("raiseAsync");
+
+ // if we got here, then we stopped at stop_here
+ ASSERT(stop_here != NULL);
}
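For review, the restructured loop dispatches on each frame in a single pass; a condensed view of the new control flow (informative only):

    // while (stop_here == NULL || frame < stop_here):
    //   UPDATE_FRAME      -> freeze the chunk below into an AP_STACK,
    //                        continue at the new sp
    //   STOP_FRAME        -> whole stack stripped; mark thread killed; return
    //   CATCH_FRAME       -> if raising, leave raise(exception) on top; return
    //   ATOMICALLY_FRAME  -> condemn trec and return (stop_at_atomically),
    //                        else fall through to the abort below
    //   CATCH_RETRY_FRAME -> abort the transaction, keep walking
    //   default           -> skip the frame
    //   frame += stack_frame_sizeW(frame)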
/* -----------------------------------------------------------------------------
// thunks which are currently under evaluation.
//
- //
+ // OLD COMMENT (we don't have MIN_UPD_SIZE now):
// LDV profiling: stg_raise_info has THUNK as its closure
// type. Since a THUNK takes at least MIN_UPD_SIZE words in its
// payload, MIN_UPD_SIZE is more appropriate than 1. It seems that
// Only create raise_closure if we need to.
if (raise_closure == NULL) {
raise_closure =
- (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+MIN_UPD_SIZE);
+ (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
SET_HDR(raise_closure, &stg_raise_info, CCCS);
raise_closure->payload[0] = exception;
}
}
}
+ debugBelch("other threads:\n");
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->why_blocked != NotBlocked) {
printThreadStatus(t);