X-Git-Url: http://git.megacz.com/?p=ghc-hetmet.git;a=blobdiff_plain;f=rts%2FRaiseAsync.c;h=a0f78eeff728d517c7c3f41ea844e8ece11d7d47;hp=74e9dba635147e47ed8445c72c229583538cf6b4;hb=0856ac59cfb455d32a3042317fdba0f5e85cab9c;hpb=ce76b92f22b677fd6d978751be811d34fa141c84

diff --git a/rts/RaiseAsync.c b/rts/RaiseAsync.c
index 74e9dba..a0f78ee 100644
--- a/rts/RaiseAsync.c
+++ b/rts/RaiseAsync.c
@@ -8,12 +8,12 @@
 
 #include "PosixSource.h"
 #include "Rts.h"
+
+#include "sm/Storage.h"
 #include "Threads.h"
 #include "Trace.h"
 #include "RaiseAsync.h"
-#include "SMP.h"
 #include "Schedule.h"
-#include "LdvProfile.h"
 #include "Updates.h"
 #include "STM.h"
 #include "Sanity.h"
@@ -26,7 +26,7 @@ static void raiseAsync (Capability *cap,
                         StgTSO *tso,
                         StgClosure *exception,
                         rtsBool stop_at_atomically,
-                        StgPtr stop_here);
+                        StgUpdateFrame *stop_here);
 
 static void removeFromQueues(Capability *cap, StgTSO *tso);
 
@@ -55,12 +55,12 @@ static void performBlockedException (Capability *cap,
 void
 throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
 {
-    throwToSingleThreaded_(cap, tso, exception, rtsFalse, NULL);
+    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
 }
 
 void
 throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
-                       rtsBool stop_at_atomically, StgPtr stop_here)
+                       rtsBool stop_at_atomically)
 {
     // Thread already dead?
     if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
@@ -70,11 +70,11 @@ throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
     // Remove it from any blocking queues
     removeFromQueues(cap,tso);
 
-    raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
+    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
 }
 
 void
-suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here)
+suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
 {
     // Thread already dead?
     if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
@@ -161,11 +161,7 @@ throwTo (Capability *cap,	// the Capability we hold
                (unsigned long)source->id, (unsigned long)target->id);
 
 #ifdef DEBUG
-    if (traceClass(DEBUG_sched)) {
-        debugTraceBegin("throwTo: target");
-        printThreadStatus(target);
-        debugTraceEnd();
-    }
+    traceThreadStatus(DEBUG_sched, target);
 #endif
 
     goto check_target;
@@ -264,6 +260,15 @@ check_target:
             target = target->_link;
             goto retry;
         }
+        // check again for ThreadComplete and ThreadKilled.  This
+        // cooperates with scheduleHandleThreadFinished to ensure
+        // that we never miss any threads that are throwing an
+        // exception to a thread in the process of terminating.
+        if (target->what_next == ThreadComplete
+            || target->what_next == ThreadKilled) {
+            unlockTSO(target);
+            return THROWTO_SUCCESS;
+        }
         blockedThrowTo(cap,source,target);
         *out = target;
         return THROWTO_BLOCKED;
@@ -415,6 +420,7 @@ check_target:
         // Unblocking BlockedOnSTM threads requires the TSO to be
         // locked; see STM.c:unpark_tso().
         if (target->why_blocked != BlockedOnSTM) {
+            unlockTSO(target);
            goto retry;
         }
         if ((target->flags & TSO_BLOCKEX) &&
@@ -436,6 +442,11 @@ check_target:
         // thread is blocking exceptions, and block on its
         // blocked_exception queue.
         lockTSO(target);
+        if (target->why_blocked != BlockedOnCCall &&
+            target->why_blocked != BlockedOnCCall_NoUnblockExc) {
+            unlockTSO(target);
+            goto retry;
+        }
         blockedThrowTo(cap,source,target);
         *out = target;
         return THROWTO_BLOCKED;
@@ -512,6 +523,15 @@ maybePerformBlockedException (Capability *cap, StgTSO *tso)
 {
     StgTSO *source;
 
+    if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
+        if (tso->blocked_exceptions != END_TSO_QUEUE) {
+            awakenBlockedExceptionQueue(cap,tso);
+            return 1;
+        } else {
+            return 0;
+        }
+    }
+
     if (tso->blocked_exceptions != END_TSO_QUEUE &&
         (tso->flags & TSO_BLOCKEX) != 0) {
         debugTrace(DEBUG_sched, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
@@ -543,15 +563,16 @@ maybePerformBlockedException (Capability *cap, StgTSO *tso)
     return 0;
 }
 
+// awakenBlockedExceptionQueue(): Just wake up the whole queue of
+// blocked exceptions and let them try again.
+
 void
 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
 {
-    if (tso->blocked_exceptions != END_TSO_QUEUE) {
-        lockTSO(tso);
-        awakenBlockedQueue(cap, tso->blocked_exceptions);
-        tso->blocked_exceptions = END_TSO_QUEUE;
-        unlockTSO(tso);
-    }
+    lockTSO(tso);
+    awakenBlockedQueue(cap, tso->blocked_exceptions);
+    tso->blocked_exceptions = END_TSO_QUEUE;
+    unlockTSO(tso);
 }
 
 static void
@@ -576,12 +597,8 @@ performBlockedException (Capability *cap, StgTSO *source, StgTSO *target)
    This is for use when we raise an exception in another thread, which
    may be blocked.
 
-   Precondition: we have exclusive access to the TSO, which entails
-   holding a lock on the object that owns the queue, if the TSO is
-   blocked.  e.g. if the thread is blocked on an MVar, we must hold a
-   lock on the MVar before calling removeFromQueues().
-
-   This has nothing to do with the UnblockThread event in GranSim. -- HWL
+   Precondition: we have exclusive access to the TSO, via the same set
+   of conditions as throwToSingleThreaded() (c.f.).
    -------------------------------------------------------------------------- */
 
 static void
@@ -606,9 +623,6 @@ removeFromQueues(Capability *cap, StgTSO *tso)
       goto done;
 
   case BlockedOnBlackHole:
-      // we have exclusive access to this TSO, which implies that we
-      // must hold sched_mutex:
-      ASSERT_LOCK_HELD(&sched_mutex);
       removeThreadFromQueue(cap, &blackhole_queue, tso);
       goto done;
 
@@ -654,16 +668,7 @@ removeFromQueues(Capability *cap, StgTSO *tso)
   }
 
  done:
-  tso->_link = END_TSO_QUEUE; // no write barrier reqd
-  tso->why_blocked = NotBlocked;
-  tso->block_info.closure = NULL;
-  appendToRunQueue(cap,tso);
-
-  // We might have just migrated this TSO to our Capability:
-  if (tso->bound) {
-      tso->bound->cap = cap;
-  }
-  tso->cap = cap;
+  unblockOne(cap, tso);
 }
 
 /* -----------------------------------------------------------------------------
@@ -705,10 +710,11 @@ removeFromQueues(Capability *cap, StgTSO *tso)
 
 static void
 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
-           rtsBool stop_at_atomically, StgPtr stop_here)
+           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
 {
     StgRetInfoTable *info;
     StgPtr sp, frame;
+    StgClosure *updatee;
     nat i;
 
     debugTrace(DEBUG_sched,
@@ -717,7 +723,7 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
 
 #if defined(PROFILING)
     /*
     * Debugging tool: on raising an exception, show where we are.
-     * See also Exception.cmm:raisezh_fast.
+     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
      */
     if (RtsFlags.ProfFlags.showCCSOnException)
@@ -735,6 +741,12 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
     // layers should deal with that.
     ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);
 
+    if (stop_here != NULL) {
+        updatee = stop_here->updatee;
+    } else {
+        updatee = NULL;
+    }
+
     // The stack freezing code assumes there's a closure pointer on
     // the top of the stack, so we have to arrange that this is the case...
     //
@@ -746,7 +758,7 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
     }
 
     frame = sp + 1;
-    while (stop_here == NULL || frame < stop_here) {
+    while (stop_here == NULL || frame < (StgPtr)stop_here) {
 
         // 1. Let the top of the stack be the "current closure"
         //
@@ -800,11 +812,19 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
            //           printObj((StgClosure *)ap);
            //   );
 
-           // Perform the update
-           // TODO: this may waste some work, if the thunk has
-           // already been updated by another thread.
-           UPD_IND_NOLOCK(((StgUpdateFrame *)frame)->updatee,
-                          (StgClosure *)ap);
+           if (((StgUpdateFrame *)frame)->updatee == updatee) {
+               // If this update frame points to the same closure as
+               // the update frame further down the stack
+               // (stop_here), then don't perform the update.  We
+               // want to keep the blackhole in this case, so we can
+               // detect and report the loop (#2783).
+               ap = (StgAP_STACK*)updatee;
+           } else {
+               // Perform the update
+               // TODO: this may waste some work, if the thunk has
+               // already been updated by another thread.
+               UPD_IND(((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
+           }
 
            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
@@ -864,9 +884,19 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
 
        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
-               ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
+               ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
-               tso->sp = frame;
+               tso->sp = frame - 2;
+               // The ATOMICALLY_FRAME expects to be returned a
+               // result from the transaction, which it stores in the
+               // stack frame.  Hence we arrange to return a dummy
+               // result, so that the GC doesn't get upset (#3578).
+               // Perhaps a better way would be to have a different
+               // ATOMICALLY_FRAME instance for condemned
+               // transactions, but I don't fully understand the
+               // interaction with STM invariants.
+               tso->sp[1] = (W_)&stg_NO_TREC_closure;
+               tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
                tso->what_next = ThreadRunGHC;
                return;
            }
@@ -884,7 +914,7 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
        {
            StgTRecHeader *trec = tso -> trec;
-           StgTRecHeader *outer = stmGetEnclosingTRec(trec);
+           StgTRecHeader *outer = trec -> enclosing_trec;
            debugTrace(DEBUG_stm,
                       "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
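
Background on the #2783 hunk above: the update is skipped when the frame's
updatee matches stop_here->updatee so that the blackhole survives the
suspension, and re-entry of a self-referential thunk can still be detected and
reported.  As an illustration only (not part of this patch), here is a minimal
Haskell program of the kind whose non-termination GHC's blackholing machinery
typically reports as <<loop>>; the example is an assumption for illustration,
not code from the repository:

    -- Illustrative example (not from the patch): a thunk defined in terms
    -- of itself.  When the evaluating thread re-enters the blackhole it
    -- created for this thunk, the RTS raises NonTermination, which is
    -- printed as <<loop>>.
    main :: IO ()
    main = print (let x = x in x :: Int)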