1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Asynchronous exceptions
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
12 #include "sm/Storage.h"
15 #include "RaiseAsync.h"
19 #include "sm/Sanity.h"
20 #include "Profiling.h"
22 #if defined(mingw32_HOST_OS)
23 #include "win32/IOManager.h"
/* Forward declarations for the static helpers defined later in this
 * file.
 * NOTE(review): this listing is an excerpt with lines missing; the
 * raiseAsync prototype here lacks the StgTSO* parameter that its
 * definition below has -- the line appears to be lost from the
 * listing, not from the original source.
 */
static void raiseAsync (Capability *cap,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void blockedThrowTo (Capability *cap,
                            StgTSO *target, MessageThrowTo *msg);

static void throwToSendMsg (Capability *cap USED_IF_THREADS,
                            Capability *target_cap USED_IF_THREADS,
                            MessageThrowTo *msg USED_IF_THREADS);
41 /* -----------------------------------------------------------------------------
44 This version of throwTo is safe to use if and only if one of the
49 - all the other threads in the system are stopped (eg. during GC).
51 - we surely own the target TSO (eg. we just took it from the
52 run queue of the current capability, or we are running it).
54 It doesn't cater for blocking the source thread until the exception
56 -------------------------------------------------------------------------- */
/* Raise `exception` in `tso` when the caller already has exclusive
 * ownership of the target (see the preconditions in the comment
 * above).  Delegates to throwToSingleThreaded_ with
 * stop_at_atomically = rtsFalse.
 * NOTE(review): the return-type line and braces of this definition are
 * not visible in this excerpt of the listing.
 */
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
    throwToSingleThreaded_(cap, tso, exception, rtsFalse);
/* Worker for throwToSingleThreaded()/suspendComputation(): remove the
 * target from any blocking queue it sits on, then strip its stack via
 * raiseAsync().  stop_at_atomically limits stack stripping to the
 * innermost ATOMICALLY_FRAME (see raiseAsync below).
 * NOTE(review): excerpt -- braces and the early-return body of the
 * dead-thread check are not visible here.
 */
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically)
    // Thread already dead?  Nothing to raise into.
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
/* Strip tso's stack down to stop_here WITHOUT raising an exception:
 * raiseAsync is called with a NULL exception, which (per its comment
 * below) freezes the computation into AP_STACKs rather than delivering
 * anything to a handler.  Same ownership preconditions as
 * throwToSingleThreaded().
 * NOTE(review): excerpt -- braces and the dead-thread early return are
 * not visible here.
 */
suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
    // Thread already dead?  Nothing to suspend.
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
97 /* -----------------------------------------------------------------------------
100 This function may be used to throw an exception from one thread to
101 another, during the course of normal execution. This is a tricky
102 task: the target thread might be running on another CPU, or it
103 may be blocked and could be woken up at any point by another CPU.
104 We have some delicate synchronisation to do.
106 The underlying scheme when multiple Capabilities are in use is
107 message passing: when the target of a throwTo is on another
108 Capability, we send a message (a MessageThrowTo closure) to that
111 If the throwTo needs to block because the target TSO is masking
112 exceptions (the TSO_BLOCKEX flag), then the message is placed on
113 the blocked_exceptions queue attached to the target TSO. When the
114 target TSO enters the unmasked state again, it must check the
115 queue. The blocked_exceptions queue is not locked; only the
116 Capability owning the TSO may modify it.
118 To make things simpler for throwTo, we always create the message
119 first before deciding what to do. The message may get sent, or it
120 may get attached to a TSO's blocked_exceptions queue, or the
121 exception may get thrown immediately and the message dropped,
122 depending on the current state of the target.
124 Currently we send a message if the target belongs to another
125 Capability, and it is
127 - NotBlocked, BlockedOnMsgWakeup, BlockedOnMsgThrowTo,
130 - or it is masking exceptions (TSO_BLOCKEX)
132 Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
133 BlockedOnBlackHole then we acquire ownership of the TSO by locking
134 its parent container (e.g. the MVar) and then raise the exception.
135 We might change these cases to be more message-passing-like in the
140 NULL exception was raised, ok to continue
142 MessageThrowTo * exception was not raised; the source TSO
143 should now put itself in the state
144 BlockedOnMsgThrowTo, and when it is ready
                                it should unlock the message using
146 unlockClosure(msg, &stg_MSG_THROWTO_info);
147 If it decides not to raise the exception after
148 all, it can revoke it safely with
149 unlockClosure(msg, &stg_MSG_NULL_info);
151 -------------------------------------------------------------------------- */
/* throwTo: raise `exception` asynchronously in `target` during normal
 * execution.  Allocates a MessageThrowTo, initially locked (its header
 * is stg_WHITEHOLE_info), and hands it to throwToMsg().  Per the
 * contract documented above: returns NULL if the exception was raised,
 * or the still-locked message if the source must block.
 * NOTE(review): excerpt -- the return statements of the switch arms are
 * not visible here.
 */
throwTo (Capability *cap, // the Capability we hold
         StgTSO *source, // the TSO sending the exception (or NULL)
         StgTSO *target, // the TSO receiving the exception
         StgClosure *exception) // the exception closure
    msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
    // message starts locked; the caller has to unlock it when it is
    SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
    msg->source = source;
    msg->target = target;
    msg->exception = exception;

    switch (throwToMsg(cap, msg))
    case THROWTO_SUCCESS:
    case THROWTO_BLOCKED:
181 throwToMsg (Capability *cap, MessageThrowTo *msg)
184 StgTSO *target = msg->target;
185 Capability *target_cap;
191 debugTrace(DEBUG_sched, "throwTo: retrying...");
194 ASSERT(target != END_TSO_QUEUE);
196 // follow ThreadRelocated links in the target first
197 target = deRefTSO(target);
199 // Thread already dead?
200 if (target->what_next == ThreadComplete
201 || target->what_next == ThreadKilled) {
202 return THROWTO_SUCCESS;
205 debugTraceCap(DEBUG_sched, cap,
206 "throwTo: from thread %lu to thread %lu",
207 (unsigned long)msg->source->id,
208 (unsigned long)msg->target->id);
211 traceThreadStatus(DEBUG_sched, target);
214 target_cap = target->cap;
215 if (target->cap != cap) {
216 throwToSendMsg(cap, target_cap, msg);
217 return THROWTO_BLOCKED;
220 status = target->why_blocked;
224 case BlockedOnMsgWakeup:
225 /* if status==NotBlocked, and target->cap == cap, then
226 we own this TSO and can raise the exception.
228 How do we establish this condition? Very carefully.
231 P = (status == NotBlocked)
232 Q = (tso->cap == cap)
234 Now, if P & Q are true, then the TSO is locked and owned by
235 this capability. No other OS thread can steal it.
237 If P==0 and Q==1: the TSO is blocked, but attached to this
238 capabilty, and it can be stolen by another capability.
240 If P==1 and Q==0: the TSO is runnable on another
241 capability. At any time, the TSO may change from runnable
242 to blocked and vice versa, while it remains owned by
245 Suppose we test like this:
251 this is defeated by another capability stealing a blocked
252 TSO from us to wake it up (Schedule.c:unblockOne()). The
253 other thread is doing
258 assuming arbitrary reordering, we could see this
268 so we need a memory barrier:
275 this avoids the problematic case. There are other cases
276 to consider, but this is the tricky one.
278 Note that we must be sure that unblockOne() does the
279 writes in the correct order: Q before P. The memory
280 barrier ensures that if we have seen the write to P, we
281 have also seen the write to Q.
285 if ((target->flags & TSO_BLOCKEX) == 0) {
286 // It's on our run queue and not blocking exceptions
287 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
288 return THROWTO_SUCCESS;
290 blockedThrowTo(cap,target,msg);
291 return THROWTO_BLOCKED;
295 case BlockedOnMsgThrowTo:
297 const StgInfoTable *i;
300 m = target->block_info.throwto;
302 // target is local to this cap, but has sent a throwto
303 // message to another cap.
305 // The source message is locked. We need to revoke the
306 // target's message so that we can raise the exception, so
307 // we attempt to lock it.
309 // There's a possibility of a deadlock if two threads are both
310 // trying to throwTo each other (or more generally, a cycle of
311 // threads). To break the symmetry we compare the addresses
312 // of the MessageThrowTo objects, and the one for which m <
313 // msg gets to spin, while the other can only try to lock
314 // once, but must then back off and unlock both before trying
317 i = lockClosure((StgClosure *)m);
319 i = tryLockClosure((StgClosure *)m);
321 // debugBelch("collision\n");
322 throwToSendMsg(cap, target->cap, msg);
323 return THROWTO_BLOCKED;
327 if (i == &stg_MSG_NULL_info) {
328 // we know there's a MSG_TRY_WAKEUP on the way, so we
329 // might as well just do it now. The message will
330 // be a no-op when it arrives.
331 unlockClosure((StgClosure*)m, i);
332 tryWakeupThread(cap, target);
336 if (i != &stg_MSG_THROWTO_info) {
337 // if it's a MSG_NULL, this TSO has been woken up by another Cap
338 unlockClosure((StgClosure*)m, i);
342 if ((target->flags & TSO_BLOCKEX) &&
343 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
344 unlockClosure((StgClosure*)m, i);
345 blockedThrowTo(cap,target,msg);
346 return THROWTO_BLOCKED;
349 // nobody else can wake up this TSO after we claim the message
350 unlockClosure((StgClosure*)m, &stg_MSG_NULL_info);
352 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
353 return THROWTO_SUCCESS;
359 To establish ownership of this TSO, we need to acquire a
360 lock on the MVar that it is blocked on.
363 StgInfoTable *info USED_IF_THREADS;
365 mvar = (StgMVar *)target->block_info.closure;
367 // ASSUMPTION: tso->block_info must always point to a
368 // closure. In the threaded RTS it does.
369 switch (get_itbl(mvar)->type) {
377 info = lockClosure((StgClosure *)mvar);
379 if (target->what_next == ThreadRelocated) {
380 target = target->_link;
381 unlockClosure((StgClosure *)mvar,info);
384 // we have the MVar, let's check whether the thread
385 // is still blocked on the same MVar.
386 if (target->why_blocked != BlockedOnMVar
387 || (StgMVar *)target->block_info.closure != mvar) {
388 unlockClosure((StgClosure *)mvar, info);
392 if ((target->flags & TSO_BLOCKEX) &&
393 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
394 blockedThrowTo(cap,target,msg);
395 unlockClosure((StgClosure *)mvar, info);
396 return THROWTO_BLOCKED;
398 removeThreadFromMVarQueue(cap, mvar, target);
399 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
400 if (info == &stg_MVAR_CLEAN_info) {
401 dirty_MVAR(&cap->r,(StgClosure*)mvar);
403 unlockClosure((StgClosure *)mvar, &stg_MVAR_DIRTY_info);
404 return THROWTO_SUCCESS;
408 case BlockedOnBlackHole:
410 // Revoke the message by replacing it with IND. We're not
411 // locking anything here, so we might still get a TRY_WAKEUP
412 // message from the owner of the blackhole some time in the
413 // future, but that doesn't matter.
414 ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
415 OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
416 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
417 return THROWTO_SUCCESS;
422 // Unblocking BlockedOnSTM threads requires the TSO to be
423 // locked; see STM.c:unpark_tso().
424 if (target->why_blocked != BlockedOnSTM) {
428 if ((target->flags & TSO_BLOCKEX) &&
429 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
430 blockedThrowTo(cap,target,msg);
432 return THROWTO_BLOCKED;
434 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
436 return THROWTO_SUCCESS;
440 case BlockedOnCCall_NoUnblockExc:
441 blockedThrowTo(cap,target,msg);
442 return THROWTO_BLOCKED;
444 #ifndef THREADEDED_RTS
448 #if defined(mingw32_HOST_OS)
449 case BlockedOnDoProc:
451 if ((target->flags & TSO_BLOCKEX) &&
452 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
453 blockedThrowTo(cap,target,msg);
454 return THROWTO_BLOCKED;
456 removeFromQueues(cap,target);
457 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
458 return THROWTO_SUCCESS;
463 barf("throwTo: unrecognised why_blocked value");
/* Hand `msg` to another Capability via the message-passing mechanism
 * (sendMessage); the exception will be processed when target_cap next
 * services its inbox.  No-op parameters are marked USED_IF_THREADS for
 * the non-threaded build.
 */
throwToSendMsg (Capability *cap STG_UNUSED,
                Capability *target_cap USED_IF_THREADS,
                MessageThrowTo *msg USED_IF_THREADS)

    debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);

    sendMessage(cap, target_cap, (Message*)msg);
// Block a throwTo message on the target TSO's blocked_exceptions
// queue. The current Capability must own the target TSO in order to
// modify the blocked_exceptions queue.
blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
    debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
                  (unsigned long)target->id);

    ASSERT(target->cap == cap);

    // Push onto the front of the queue (LIFO).
    msg->link = target->blocked_exceptions;
    target->blocked_exceptions = msg;
    dirty_TSO(cap,target); // we modified the blocked_exceptions queue
497 /* -----------------------------------------------------------------------------
498 Waking up threads blocked in throwTo
500 There are two ways to do this: maybePerformBlockedException() will
501 perform the throwTo() for the thread at the head of the queue
502 immediately, and leave the other threads on the queue.
503 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
504 before raising an exception.
506 awakenBlockedExceptionQueue() will wake up all the threads in the
507 queue, but not perform any throwTo() immediately. This might be
508 more appropriate when the target thread is the one actually running
511 Returns: non-zero if an exception was raised, zero otherwise.
512 -------------------------------------------------------------------------- */
/* Perform the throwTo() for the first message on tso's
 * blocked_exceptions queue, if the TSO is currently accepting
 * asynchronous exceptions (per the comment above: returns non-zero if
 * an exception was raised, zero otherwise).
 * NOTE(review): excerpt -- braces and several return statements are not
 * visible here.
 */
maybePerformBlockedException (Capability *cap, StgTSO *tso)
    const StgInfoTable *i;

    // Finished thread: nothing to raise into; just wake everyone
    // blocked in throwTo on this TSO.
    // NOTE(review): `ThreadFinished` here -- other checks in this file
    // test what_next against ThreadComplete/ThreadKilled; confirm this
    // is the intended constant.
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
        if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);

    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);

    // Deliverable if the queue is non-empty and the TSO is either not
    // masking, or masking interruptibly while in an interruptible state.
    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
        msg = tso->blocked_exceptions;
        if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
        i = lockClosure((StgClosure*)msg);
        tso->blocked_exceptions = (MessageThrowTo*)msg->link;
        if (i == &stg_MSG_NULL_info) {
            // Message was already revoked; skip it.
            unlockClosure((StgClosure*)msg,i);

        throwToSingleThreaded(cap, msg->target, msg->exception);
        unlockClosure((StgClosure*)msg,&stg_MSG_NULL_info);
        tryWakeupThread(cap, msg->source);
// awakenBlockedExceptionQueue(): Just wake up the whole queue of
// blocked exceptions.  Each still-pending message is revoked (its
// header replaced with stg_MSG_NULL_info) and its sender woken; no
// exception is raised here.
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
    const StgInfoTable *i;

    for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
         msg = (MessageThrowTo*)msg->link) {
        i = lockClosure((StgClosure *)msg);
        if (i != &stg_MSG_NULL_info) {
            // Revoke the message and wake its sender.
            unlockClosure((StgClosure *)msg,&stg_MSG_NULL_info);
            tryWakeupThread(cap, msg->source);
            // Already revoked: just restore the original header.
            unlockClosure((StgClosure *)msg,i);
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
580 /* -----------------------------------------------------------------------------
581 Remove a thread from blocking queues.
583 This is for use when we raise an exception in another thread, which
586 Precondition: we have exclusive access to the TSO, via the same set
587 of conditions as throwToSingleThreaded() (c.f.).
588 -------------------------------------------------------------------------- */
/* Remove tso from whatever queue it is blocked on, then make it
 * runnable (unblockOne at the end).  Precondition (see comment above):
 * exclusive access to the TSO.
 * NOTE(review): excerpt -- several case labels and braces of the
 * original switch are not visible here.
 */
removeFromQueues(Capability *cap, StgTSO *tso)
    switch (tso->why_blocked) {

    // Be careful: nothing to do here! We tell the scheduler that the
    // thread is runnable and we leave it to the stack-walking code to
    // abort the transaction while unwinding the stack. We should
    // perhaps have a debugging test to make sure that this really
    // happens and that the 'zombie' transaction does not get

    removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
    // we aren't doing a write barrier here: the MVar is supposed to
    // be already locked, so replacing the info pointer would unlock it.

    case BlockedOnBlackHole:

    case BlockedOnMsgWakeup:
        // kill the message, atomically:
        OVERWRITE_INFO(tso->block_info.wakeup, &stg_IND_info);

    case BlockedOnMsgThrowTo:
        MessageThrowTo *m = tso->block_info.throwto;
        // The message is locked by us, unless we got here via
        // deleteAllThreads(), in which case we own all the
        // ASSERT(m->header.info == &stg_WHITEHOLE_info);

        // unlock and revoke it at the same time
        unlockClosure((StgClosure*)m,&stg_MSG_NULL_info);

#if !defined(THREADED_RTS)
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
        abandonWorkRequest(tso->block_info.async_result->reqID);

        removeThreadFromQueue(cap, &sleeping_queue, tso);

        barf("removeFromQueues: %d", tso->why_blocked);

    unblockOne(cap, tso);
665 /* -----------------------------------------------------------------------------
668 * The following function implements the magic for raising an
669 * asynchronous exception in an existing thread.
671 * We first remove the thread from any queue on which it might be
672 * blocked. The possible blockages are MVARs, BLOCKING_QUEUESs, and
673 * TSO blocked_exception queues.
675 * We strip the stack down to the innermost CATCH_FRAME, building
676 * thunks in the heap for all the active computations, so they can
677 * be restarted if necessary. When we reach a CATCH_FRAME, we build
678 * an application of the handler to the exception, and push it on
679 * the top of the stack.
681 * How exactly do we save all the active computations? We create an
682 * AP_STACK for every UpdateFrame on the stack. Entering one of these
683 * AP_STACKs pushes everything from the corresponding update frame
684 * upwards onto the stack. (Actually, it pushes everything up to the
685 * next update frame plus a pointer to the next AP_STACK object.
686 * Entering the next AP_STACK object pushes more onto the stack until we
687 * reach the last AP_STACK object - at which point the stack should look
688 * exactly as it did when we killed the TSO and we can continue
689 * execution by entering the closure on top of the stack.
691 * We can also kill a thread entirely - this happens if either (a) the
692 * exception passed to raiseAsync is NULL, or (b) there's no
693 * CATCH_FRAME on the stack. In either case, we strip the entire
694 * stack and replace the thread with a zombie.
696 * ToDo: in THREADED_RTS mode, this function is only safe if either
697 * (a) we hold all the Capabilities (eg. in GC, or if there is only
698 * one Capability), or (b) we own the Capability that the TSO is
699 * currently blocked on or on the run queue of.
701 * -------------------------------------------------------------------------- */
/* Strip tso's stack, freezing active computations into AP_STACK
 * closures, until a CATCH_FRAME is found (then deliver `exception` to
 * its handler), the stack is exhausted (then kill the thread), or
 * stop_here is reached.  exception == NULL means no raise is performed
 * (the suspendComputation() path).  See the long comment above for the
 * full algorithm and the THREADED_RTS safety conditions.
 * NOTE(review): excerpt -- braces, case labels (UPDATE_FRAME,
 * STOP_FRAME, CATCH_FRAME) and some local declarations are not visible
 * here.
 */
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
    StgRetInfoTable *info;

    debugTraceCap(DEBUG_sched, cap,
                  "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
        fprintCCS_stderr(tso->prof.CCCS);

    // ASSUMES: the thread is not already complete or dead, or
    // ThreadRelocated. Upper layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete &&
           tso->what_next != ThreadKilled &&
           tso->what_next != ThreadRelocated);

    // only if we own this TSO (except that deleteThread() calls this
    ASSERT(tso->cap == cap);

    // Make the thread runnable again if it was blocked.
    if (tso->why_blocked != NotBlocked && tso->why_blocked != BlockedOnMsgWakeup) {
        tso->why_blocked = NotBlocked;
        appendToRunQueue(cap,tso);

    // mark it dirty; we're about to change its stack.

    if (stop_here != NULL) {
        updatee = stop_here->updatee;

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    if (sp[0] == (W_)&stg_enter_info) {
        sp[0] = (W_)&stg_dummy_ret_closure;

    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame. This closure becomes the "current
        //    closure". Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the

            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->fun = (StgClosure *)sp[0];
            // Copy the stack chunk into the AP_STACK payload.
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update. We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                updateThunk(cap, tso,
                            ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            continue; //no need to bump frame

            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);

            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.

            StgCatchFrame *cf = (StgCatchFrame *)frame;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.

            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[-1] = (W_)&stg_enter_info;
            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);
                // The ATOMICALLY_FRAME expects to be returned a
                // result from the transaction, which it stores in the
                // stack frame. Hence we arrange to return a dummy
                // result, so that the GC doesn't get upset (#3578).
                // Perhaps a better way would be to have a different
                // ATOMICALLY_FRAME instance for condemned
                // transactions, but I don't fully understand the
                // interaction with STM invariants.
                tso->sp[1] = (W_)&stg_NO_TREC_closure;
                tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
                tso->what_next = ThreadRunGHC;
            // Not stop_at_atomically... fall through and abort the

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // IF we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception. In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.

            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = trec -> enclosing_trec;
            debugTraceCap(DEBUG_stm, cap,
                          "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);