#include "PosixSource.h"
#include "Rts.h"
+
+#include "sm/Storage.h"
#include "Threads.h"
#include "Trace.h"
#include "RaiseAsync.h"
-#include "SMP.h"
#include "Schedule.h"
-#include "LdvProfile.h"
#include "Updates.h"
#include "STM.h"
-#include "Sanity.h"
+#include "sm/Sanity.h"
#include "Profiling.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
StgTSO *tso,
StgClosure *exception,
rtsBool stop_at_atomically,
- StgPtr stop_here);
+ StgUpdateFrame *stop_here);
static void removeFromQueues(Capability *cap, StgTSO *tso);
void
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
{
    // Deliver an asynchronous exception to 'tso' in the non-THREADED RTS.
    // Thin wrapper: rtsFalse means "do not stop at an ATOMICALLY_FRAME";
    // the underlying throwToSingleThreaded_ supplies a NULL stop frame
    // itself now that the stop_here parameter has been dropped from its
    // signature.
    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
}
void
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
- rtsBool stop_at_atomically, StgPtr stop_here)
+ rtsBool stop_at_atomically)
{
// Thread already dead?
if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
// Remove it from any blocking queues
removeFromQueues(cap,tso);
- raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
+ raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
}
void
-suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here)
+suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
{
// Thread already dead?
if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
nat
throwTo (Capability *cap, // the Capability we hold
- StgTSO *source, // the TSO sending the exception
+ StgTSO *source, // the TSO sending the exception (or NULL)
StgTSO *target, // the TSO receiving the exception
StgClosure *exception, // the exception closure
/*[out]*/ void **out USED_IF_THREADS)
{
StgWord status;
+ ASSERT(target != END_TSO_QUEUE);
+
// follow ThreadRelocated links in the target first
while (target->what_next == ThreadRelocated) {
target = target->_link;
// ASSERT(get_itbl(target)->type == TSO);
}
- debugTrace(DEBUG_sched, "throwTo: from thread %lu to thread %lu",
- (unsigned long)source->id, (unsigned long)target->id);
+ if (source != NULL) {
+ debugTrace(DEBUG_sched, "throwTo: from thread %lu to thread %lu",
+ (unsigned long)source->id, (unsigned long)target->id);
+ } else {
+ debugTrace(DEBUG_sched, "throwTo: from RTS to thread %lu",
+ (unsigned long)target->id);
+ }
#ifdef DEBUG
- if (traceClass(DEBUG_sched)) {
- debugTraceBegin("throwTo: target");
- printThreadStatus(target);
- debugTraceEnd();
- }
+ traceThreadStatus(DEBUG_sched, target);
#endif
goto check_target;
debugTrace(DEBUG_sched, "throwTo: retrying...");
check_target:
+ ASSERT(target != END_TSO_QUEUE);
+
// Thread already dead?
if (target->what_next == ThreadComplete
|| target->what_next == ThreadKilled) {
target = target->_link;
goto retry;
}
+ // check again for ThreadComplete and ThreadKilled. This
+ // cooperates with scheduleHandleThreadFinished to ensure
+ // that we never miss any threads that are throwing an
+ // exception to a thread in the process of terminating.
+ if (target->what_next == ThreadComplete
+ || target->what_next == ThreadKilled) {
+ unlockTSO(target);
+ return THROWTO_SUCCESS;
+ }
blockedThrowTo(cap,source,target);
*out = target;
return THROWTO_BLOCKED;
// Unblocking BlockedOnSTM threads requires the TSO to be
// locked; see STM.c:unpark_tso().
if (target->why_blocked != BlockedOnSTM) {
+ unlockTSO(target);
goto retry;
}
if ((target->flags & TSO_BLOCKEX) &&
// thread is blocking exceptions, and block on its
// blocked_exception queue.
lockTSO(target);
+ if (target->why_blocked != BlockedOnCCall &&
+ target->why_blocked != BlockedOnCCall_NoUnblockExc) {
+ unlockTSO(target);
+ goto retry;
+ }
blockedThrowTo(cap,source,target);
*out = target;
return THROWTO_BLOCKED;
static void
blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target)
{
    // Block 'source' on 'target''s blocked_exceptions queue until the
    // target becomes able to receive the exception.
    //
    // 'source' may be NULL: the exception was raised by the RTS itself
    // rather than by a Haskell thread, in which case there is no TSO to
    // enqueue or block, and this is a no-op.
    if (source != NULL) {
        debugTrace(DEBUG_sched, "throwTo: blocking on thread %lu", (unsigned long)target->id);
        setTSOLink(cap, source, target->blocked_exceptions);
        target->blocked_exceptions = source;
        dirty_TSO(cap,target); // we modified the blocked_exceptions queue

        source->block_info.tso = target;
        write_barrier(); // throwTo_exception *must* be visible if BlockedOnException is.
        source->why_blocked = BlockedOnException;
    }
}
{
StgTSO *source;
+ if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
+ if (tso->blocked_exceptions != END_TSO_QUEUE) {
+ awakenBlockedExceptionQueue(cap,tso);
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
if (tso->blocked_exceptions != END_TSO_QUEUE &&
(tso->flags & TSO_BLOCKEX) != 0) {
debugTrace(DEBUG_sched, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
return 0;
}
+// awakenBlockedExceptionQueue(): Just wake up the whole queue of
+// blocked exceptions and let them try again.
+
void
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
{
- if (tso->blocked_exceptions != END_TSO_QUEUE) {
- lockTSO(tso);
- awakenBlockedQueue(cap, tso->blocked_exceptions);
- tso->blocked_exceptions = END_TSO_QUEUE;
- unlockTSO(tso);
- }
+ lockTSO(tso);
+ awakenBlockedQueue(cap, tso->blocked_exceptions);
+ tso->blocked_exceptions = END_TSO_QUEUE;
+ unlockTSO(tso);
}
static void
}
done:
- tso->_link = END_TSO_QUEUE; // no write barrier reqd
- tso->why_blocked = NotBlocked;
- tso->block_info.closure = NULL;
- appendToRunQueue(cap,tso);
-
- // We might have just migrated this TSO to our Capability:
- if (tso->bound) {
- tso->bound->cap = cap;
- }
- tso->cap = cap;
+ unblockOne(cap, tso);
}
/* -----------------------------------------------------------------------------
static void
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
- rtsBool stop_at_atomically, StgPtr stop_here)
+ rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
{
StgRetInfoTable *info;
StgPtr sp, frame;
+ StgClosure *updatee;
nat i;
debugTrace(DEBUG_sched,
#if defined(PROFILING)
/*
* Debugging tool: on raising an exception, show where we are.
- * See also Exception.cmm:raisezh_fast.
+ * See also Exception.cmm:stg_raisezh.
* This wasn't done for asynchronous exceptions originally; see #1450
*/
if (RtsFlags.ProfFlags.showCCSOnException)
// layers should deal with that.
ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);
+ if (stop_here != NULL) {
+ updatee = stop_here->updatee;
+ } else {
+ updatee = NULL;
+ }
+
// The stack freezing code assumes there's a closure pointer on
// the top of the stack, so we have to arrange that this is the case...
//
}
frame = sp + 1;
- while (stop_here == NULL || frame < stop_here) {
+ while (stop_here == NULL || frame < (StgPtr)stop_here) {
// 1. Let the top of the stack be the "current closure"
//
// fun field.
//
words = frame - sp - 1;
- ap = (StgAP_STACK *)allocateLocal(cap,AP_STACK_sizeW(words));
+ ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
ap->size = words;
ap->fun = (StgClosure *)sp[0];
// printObj((StgClosure *)ap);
// );
- // Perform the update
- // TODO: this may waste some work, if the thunk has
- // already been updated by another thread.
- UPD_IND_NOLOCK(((StgUpdateFrame *)frame)->updatee,
- (StgClosure *)ap);
+ if (((StgUpdateFrame *)frame)->updatee == updatee) {
+ // If this update frame points to the same closure as
+ // the update frame further down the stack
+ // (stop_here), then don't perform the update. We
+ // want to keep the blackhole in this case, so we can
+ // detect and report the loop (#2783).
+ ap = (StgAP_STACK*)updatee;
+ } else {
+ // Perform the update
+ // TODO: this may waste some work, if the thunk has
+ // already been updated by another thread.
+ UPD_IND(cap, ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
+ }
sp += sizeofW(StgUpdateFrame) - 1;
sp[0] = (W_)ap; // push onto stack
// we've got an exception to raise, so let's pass it to the
// handler in this frame.
//
- raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
+ raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
TICK_ALLOC_SE_THK(1,0);
SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
raise->payload[0] = exception;
case ATOMICALLY_FRAME:
if (stop_at_atomically) {
- ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
+ ASSERT(tso->trec->enclosing_trec == NO_TREC);
stmCondemnTransaction(cap, tso -> trec);
- tso->sp = frame;
+ tso->sp = frame - 2;
+ // The ATOMICALLY_FRAME expects to be returned a
+ // result from the transaction, which it stores in the
+ // stack frame. Hence we arrange to return a dummy
+ // result, so that the GC doesn't get upset (#3578).
+ // Perhaps a better way would be to have a different
+ // ATOMICALLY_FRAME instance for condemned
+ // transactions, but I don't fully understand the
+ // interaction with STM invariants.
+ tso->sp[1] = (W_)&stg_NO_TREC_closure;
+ tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
tso->what_next = ThreadRunGHC;
return;
}
{
StgTRecHeader *trec = tso -> trec;
- StgTRecHeader *outer = stmGetEnclosingTRec(trec);
+ StgTRecHeader *outer = trec -> enclosing_trec;
debugTrace(DEBUG_stm,
"found atomically block delivering async exception");
stmAbortTransaction(cap, trec);