1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Asynchronous exceptions
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
12 #include "sm/Storage.h"
15 #include "RaiseAsync.h"
19 #include "sm/Sanity.h"
20 #include "Profiling.h"
22 #if defined(mingw32_HOST_OS)
23 #include "win32/IOManager.h"
26 static void raiseAsync (Capability *cap,
28 StgClosure *exception,
29 rtsBool stop_at_atomically,
30 StgUpdateFrame *stop_here);
32 static void removeFromQueues(Capability *cap, StgTSO *tso);
34 static void removeFromMVarBlockedQueue (StgTSO *tso);
36 static void blockedThrowTo (Capability *cap,
37 StgTSO *target, MessageThrowTo *msg);
39 static void throwToSendMsg (Capability *cap USED_IF_THREADS,
40 Capability *target_cap USED_IF_THREADS,
41 MessageThrowTo *msg USED_IF_THREADS);
43 /* -----------------------------------------------------------------------------
46 This version of throwTo is safe to use if and only if one of the
51 - all the other threads in the system are stopped (eg. during GC).
53 - we surely own the target TSO (eg. we just took it from the
54 run queue of the current capability, or we are running it).
56 It doesn't cater for blocking the source thread until the exception
58 -------------------------------------------------------------------------- */
// Raise 'exception' asynchronously in 'tso', never stopping at an
// atomically frame (delegates with stop_at_atomically = rtsFalse).
// Only safe under the preconditions in the comment above: all other
// threads stopped, or we already own the target TSO.
61 throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
63 throwToSingleThreaded_(cap, tso, exception, rtsFalse);
// Worker for throwToSingleThreaded: raise 'exception' in 'tso'.
// stop_at_atomically: if true, stack-stripping stops at an
// ATOMICALLY_FRAME (passed through to raiseAsync).
// Dead threads are left alone; otherwise the TSO is first removed
// from any blocking queue it sits on, then the exception is raised.
67 throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
68 rtsBool stop_at_atomically)
72 // Thread already dead?
73 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
77 // Remove it from any blocking queues
78 removeFromQueues(cap,tso);
80 raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
// Suspend a computation: strip the stack down to 'stop_here' without
// delivering any exception (raiseAsync is called with a NULL exception
// closure, which freezes the stack into AP_STACKs up to stop_here).
// Same ownership preconditions as throwToSingleThreaded().
84 suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
88 // Thread already dead?
89 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
93 // Remove it from any blocking queues
94 removeFromQueues(cap,tso);
96 raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
99 /* -----------------------------------------------------------------------------
102 This function may be used to throw an exception from one thread to
103 another, during the course of normal execution. This is a tricky
104 task: the target thread might be running on another CPU, or it
105 may be blocked and could be woken up at any point by another CPU.
106 We have some delicate synchronisation to do.
108 The underlying scheme when multiple Capabilities are in use is
109 message passing: when the target of a throwTo is on another
110 Capability, we send a message (a MessageThrowTo closure) to that
113 If the throwTo needs to block because the target TSO is masking
114 exceptions (the TSO_BLOCKEX flag), then the message is placed on
115 the blocked_exceptions queue attached to the target TSO. When the
116 target TSO enters the unmasked state again, it must check the
117 queue. The blocked_exceptions queue is not locked; only the
118 Capability owning the TSO may modify it.
120 To make things simpler for throwTo, we always create the message
121 first before deciding what to do. The message may get sent, or it
122 may get attached to a TSO's blocked_exceptions queue, or the
123 exception may get thrown immediately and the message dropped,
124 depending on the current state of the target.
126 Currently we send a message if the target belongs to another
127 Capability, and it is
129 - NotBlocked, BlockedOnMsgThrowTo,
132 - or it is masking exceptions (TSO_BLOCKEX)
134 Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
135 BlockedOnBlackHole then we acquire ownership of the TSO by locking
136 its parent container (e.g. the MVar) and then raise the exception.
137 We might change these cases to be more message-passing-like in the
142 NULL exception was raised, ok to continue
144 MessageThrowTo * exception was not raised; the source TSO
145 should now put itself in the state
146 BlockedOnMsgThrowTo, and when it is ready
147 it should unlock the message using
148 unlockClosure(msg, &stg_MSG_THROWTO_info);
149 If it decides not to raise the exception after
150 all, it can revoke it safely with
151 unlockClosure(msg, &stg_MSG_NULL_info);
153 -------------------------------------------------------------------------- */
// Public entry point for inter-thread exceptions: allocates a
// MessageThrowTo, fills it in, and hands it to throwToMsg().
// The message header is set to WHITEHOLE, i.e. it starts locked;
// per the contract documented above, the caller must unlock it
// (stg_MSG_THROWTO_info to commit, stg_MSG_NULL_info to revoke)
// when throwToMsg reports THROWTO_BLOCKED.
156 throwTo (Capability *cap, // the Capability we hold
157 StgTSO *source, // the TSO sending the exception (or NULL)
158 StgTSO *target, // the TSO receiving the exception
159 StgClosure *exception) // the exception closure
163 msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
164 // message starts locked; the caller has to unlock it when it is
166 SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
167 msg->source = source;
168 msg->target = target;
169 msg->exception = exception;
171 switch (throwToMsg(cap, msg))
173 case THROWTO_SUCCESS:
175 case THROWTO_BLOCKED:
// Core of throwTo: deliver the exception carried by 'msg' to
// msg->target, or arrange for it to be delivered later.
// Returns THROWTO_SUCCESS if the exception was raised (or the target
// was already dead), THROWTO_BLOCKED if the message was queued/sent
// and the caller should block (see the contract comment above).
// Dispatches on target->why_blocked; each case must either raise the
// exception immediately or park the message via blockedThrowTo()/
// throwToSendMsg().
183 throwToMsg (Capability *cap, MessageThrowTo *msg)
186 StgTSO *target = msg->target;
187 Capability *target_cap;
193 debugTrace(DEBUG_sched, "throwTo: retrying...");
196 ASSERT(target != END_TSO_QUEUE);
198 // follow ThreadRelocated links in the target first
199 target = deRefTSO(target);
201 // Thread already dead?
202 if (target->what_next == ThreadComplete
203 || target->what_next == ThreadKilled) {
204 return THROWTO_SUCCESS;
207 debugTraceCap(DEBUG_sched, cap,
208 "throwTo: from thread %lu to thread %lu",
209 (unsigned long)msg->source->id,
210 (unsigned long)msg->target->id);
213 traceThreadStatus(DEBUG_sched, target);
// Target owned by another Capability: hand the message over and block.
216 target_cap = target->cap;
217 if (target->cap != cap) {
218 throwToSendMsg(cap, target_cap, msg);
219 return THROWTO_BLOCKED;
222 status = target->why_blocked;
227 if ((target->flags & TSO_BLOCKEX) == 0) {
228 // It's on our run queue and not blocking exceptions
229 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
230 return THROWTO_SUCCESS;
232 blockedThrowTo(cap,target,msg);
233 return THROWTO_BLOCKED;
237 case BlockedOnMsgThrowTo:
239 const StgInfoTable *i;
242 m = target->block_info.throwto;
244 // target is local to this cap, but has sent a throwto
245 // message to another cap.
247 // The source message is locked. We need to revoke the
248 // target's message so that we can raise the exception, so
249 // we attempt to lock it.
251 // There's a possibility of a deadlock if two threads are both
252 // trying to throwTo each other (or more generally, a cycle of
253 // threads). To break the symmetry we compare the addresses
254 // of the MessageThrowTo objects, and the one for which m <
255 // msg gets to spin, while the other can only try to lock
256 // once, but must then back off and unlock both before trying
259 i = lockClosure((StgClosure *)m);
261 i = tryLockClosure((StgClosure *)m);
263 // debugBelch("collision\n");
264 throwToSendMsg(cap, target->cap, msg);
265 return THROWTO_BLOCKED;
269 if (i == &stg_MSG_NULL_info) {
270 // we know there's a MSG_TRY_WAKEUP on the way, so we
271 // might as well just do it now. The message will
272 // be a no-op when it arrives.
273 unlockClosure((StgClosure*)m, i);
274 tryWakeupThread_(cap, target);
278 if (i != &stg_MSG_THROWTO_info) {
279 // if it's a MSG_NULL, this TSO has been woken up by another Cap
280 unlockClosure((StgClosure*)m, i);
284 if ((target->flags & TSO_BLOCKEX) &&
285 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
286 unlockClosure((StgClosure*)m, i);
287 blockedThrowTo(cap,target,msg);
288 return THROWTO_BLOCKED;
291 // nobody else can wake up this TSO after we claim the message
292 unlockClosure((StgClosure*)m, &stg_MSG_NULL_info);
294 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
295 return THROWTO_SUCCESS;
301 To establish ownership of this TSO, we need to acquire a
302 lock on the MVar that it is blocked on.
305 StgInfoTable *info USED_IF_THREADS;
307 mvar = (StgMVar *)target->block_info.closure;
309 // ASSUMPTION: tso->block_info must always point to a
310 // closure. In the threaded RTS it does.
311 switch (get_itbl(mvar)->type) {
319 info = lockClosure((StgClosure *)mvar);
321 if (target->what_next == ThreadRelocated) {
322 target = target->_link;
323 unlockClosure((StgClosure *)mvar,info);
326 // we have the MVar, let's check whether the thread
327 // is still blocked on the same MVar.
328 if (target->why_blocked != BlockedOnMVar
329 || (StgMVar *)target->block_info.closure != mvar) {
330 unlockClosure((StgClosure *)mvar, info);
334 if (target->_link == END_TSO_QUEUE) {
335 // the MVar operation has already completed. There is a
336 // MSG_TRY_WAKEUP on the way, but we can just wake up the
337 // thread now anyway and ignore the message when it
339 unlockClosure((StgClosure *)mvar, info);
340 tryWakeupThread_(cap, target);
344 if ((target->flags & TSO_BLOCKEX) &&
345 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
346 blockedThrowTo(cap,target,msg);
347 unlockClosure((StgClosure *)mvar, info);
348 return THROWTO_BLOCKED;
350 // revoke the MVar operation
351 removeFromMVarBlockedQueue(target);
352 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
353 unlockClosure((StgClosure *)mvar, info);
354 return THROWTO_SUCCESS;
358 case BlockedOnBlackHole:
360 if (target->flags & TSO_BLOCKEX) {
361 // BlockedOnBlackHole is not interruptible.
362 blockedThrowTo(cap,target,msg);
363 return THROWTO_BLOCKED;
365 // Revoke the message by replacing it with IND. We're not
366 // locking anything here, so we might still get a TRY_WAKEUP
367 // message from the owner of the blackhole some time in the
368 // future, but that doesn't matter.
369 ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
370 OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
371 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
372 return THROWTO_SUCCESS;
378 // Unblocking BlockedOnSTM threads requires the TSO to be
379 // locked; see STM.c:unpark_tso().
380 if (target->why_blocked != BlockedOnSTM) {
384 if ((target->flags & TSO_BLOCKEX) &&
385 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
386 blockedThrowTo(cap,target,msg);
388 return THROWTO_BLOCKED;
390 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
392 return THROWTO_SUCCESS;
396 case BlockedOnCCall_NoUnblockExc:
397 blockedThrowTo(cap,target,msg);
398 return THROWTO_BLOCKED;
// FIX: was "#ifndef THREADEDED_RTS" — a misspelling of THREADED_RTS
// (spelled correctly in removeFromQueues below), so the guard was
// unconditionally true and the non-threaded-only cases were compiled
// into the threaded RTS as well.
400 #ifndef THREADED_RTS
404 #if defined(mingw32_HOST_OS)
405 case BlockedOnDoProc:
407 if ((target->flags & TSO_BLOCKEX) &&
408 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
409 blockedThrowTo(cap,target,msg);
410 return THROWTO_BLOCKED;
412 removeFromQueues(cap,target);
413 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
414 return THROWTO_SUCCESS;
419 barf("throwTo: unrecognised why_blocked value");
// Send a MessageThrowTo to the Capability that owns the target TSO.
// NOTE(review): the forward declaration above marks 'cap' as
// USED_IF_THREADS while this definition uses STG_UNUSED — confirm the
// intended annotation; both expand away in the relevant builds.
425 throwToSendMsg (Capability *cap STG_UNUSED,
426 Capability *target_cap USED_IF_THREADS,
427 MessageThrowTo *msg USED_IF_THREADS)
431 debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);
433 sendMessage(cap, target_cap, (Message*)msg);
437 // Block a throwTo message on the target TSO's blocked_exceptions
438 // queue. The current Capability must own the target TSO in order to
439 // modify the blocked_exceptions queue.
//
// The message is pushed on the front of the (unlocked, cap-private)
// queue, and the TSO is marked dirty because its mutable state changed.
441 blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
443 debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
444 (unsigned long)target->id);
// Precondition checked here: we own the target, so the unlocked
// blocked_exceptions queue is safe to modify.
446 ASSERT(target->cap == cap);
448 msg->link = target->blocked_exceptions;
449 target->blocked_exceptions = msg;
450 dirty_TSO(cap,target); // we modified the blocked_exceptions queue
453 /* -----------------------------------------------------------------------------
454 Waking up threads blocked in throwTo
456 There are two ways to do this: maybePerformBlockedException() will
457 perform the throwTo() for the thread at the head of the queue
458 immediately, and leave the other threads on the queue.
459 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
460 before raising an exception.
462 awakenBlockedExceptionQueue() will wake up all the threads in the
463 queue, but not perform any throwTo() immediately. This might be
464 more appropriate when the target thread is the one actually running
467 Returns: non-zero if an exception was raised, zero otherwise.
468 -------------------------------------------------------------------------- */
// If 'tso' has pending blocked exceptions and is currently willing to
// receive them (TSO_BLOCKEX clear, or interruptible and actually in an
// interruptible state), dequeue the FIRST message and perform its
// throwTo immediately. Returns non-zero if an exception was raised,
// zero otherwise (see the block comment above).
471 maybePerformBlockedException (Capability *cap, StgTSO *tso)
474 const StgInfoTable *i;
// A finished thread cannot receive exceptions any more: just wake up
// everyone still blocked in throwTo on it.
476 if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
477 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
478 awakenBlockedExceptionQueue(cap,tso);
485 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
486 (tso->flags & TSO_BLOCKEX) != 0) {
487 debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
490 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
491 && ((tso->flags & TSO_BLOCKEX) == 0
492 || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {
494 // We unblock just the first thread on the queue, and perform
495 // its throw immediately.
497 msg = tso->blocked_exceptions;
498 if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
499 i = lockClosure((StgClosure*)msg);
500 tso->blocked_exceptions = (MessageThrowTo*)msg->link;
// MSG_NULL means the sender already revoked this message; skip it.
501 if (i == &stg_MSG_NULL_info) {
502 unlockClosure((StgClosure*)msg,i);
// Raise the exception, mark the message done (MSG_NULL), and wake
// the sender that has been blocked in throwTo.
506 throwToSingleThreaded(cap, msg->target, msg->exception);
507 unlockClosure((StgClosure*)msg,&stg_MSG_NULL_info);
508 tryWakeupThread(cap, msg->source);
514 // awakenBlockedExceptionQueue(): Just wake up the whole queue of
515 // blocked exceptions.
//
// Each message is locked, revoked by overwriting with MSG_NULL, and
// its sender woken; already-revoked (MSG_NULL) messages are unlocked
// unchanged. Finally the queue is emptied.
518 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
521 const StgInfoTable *i;
523 for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
524 msg = (MessageThrowTo*)msg->link) {
525 i = lockClosure((StgClosure *)msg);
526 if (i != &stg_MSG_NULL_info) {
527 unlockClosure((StgClosure *)msg,&stg_MSG_NULL_info);
528 tryWakeupThread(cap, msg->source);
530 unlockClosure((StgClosure *)msg,i);
533 tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
536 /* -----------------------------------------------------------------------------
537 Remove a thread from blocking queues.
539 This is for use when we raise an exception in another thread, which
542 Precondition: we have exclusive access to the TSO, via the same set
543 of conditions as throwToSingleThreaded() (c.f.).
544 -------------------------------------------------------------------------- */
// Remove 'tso' from the wait queue of the MVar it is blocked on,
// preserving the MVar's head/tail (deque) invariants. The queue node
// cannot be physically unlinked (it is singly-linked), so it is
// overwritten with an IND — or MSG_NULL when it is the tail — and the
// GC/takeMVar machinery skips it later.
547 removeFromMVarBlockedQueue (StgTSO *tso)
549 StgMVar *mvar = (StgMVar*)tso->block_info.closure;
550 StgMVarTSOQueue *q = (StgMVarTSOQueue*)tso->_link;
552 if (q == (StgMVarTSOQueue*)END_TSO_QUEUE) {
553 // already removed from this MVar
557 // Assume the MVar is locked. (not assertable; sometimes it isn't
558 // actually WHITEHOLE'd).
560 // We want to remove the MVAR_TSO_QUEUE object from the queue. It
561 // isn't doubly-linked so we can't actually remove it; instead we
562 // just overwrite it with an IND if possible and let the GC short
563 // it out. However, we have to be careful to maintain the deque
566 if (mvar->head == q) {
567 mvar->head = q->link;
568 q->header.info = &stg_IND_info;
569 if (mvar->tail == q) {
570 mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
573 else if (mvar->tail == q) {
574 // we can't replace it with an IND in this case, because then
575 // we lose the tail pointer when the GC shorts out the IND.
576 // So we use MSG_NULL as a kind of non-dupable indirection;
577 // these are ignored by takeMVar/putMVar.
578 q->header.info = &stg_MSG_NULL_info;
581 q->header.info = &stg_IND_info;
584 // revoke the MVar operation
585 tso->_link = END_TSO_QUEUE;
// Detach 'tso' from whatever blocking queue its why_blocked says it is
// on (MVar queue, throwto message, non-threaded I/O queues, ...), then
// mark it NotBlocked and put it back on the run queue.
// Precondition (see block comment above): exclusive access to the TSO.
589 removeFromQueues(Capability *cap, StgTSO *tso)
591 switch (tso->why_blocked) {
594 case ThreadMigrating:
598 // Be careful: nothing to do here! We tell the scheduler that the
599 // thread is runnable and we leave it to the stack-walking code to
600 // abort the transaction while unwinding the stack. We should
601 // perhaps have a debugging test to make sure that this really
602 // happens and that the 'zombie' transaction does not get
607 removeFromMVarBlockedQueue(tso);
610 case BlockedOnBlackHole:
614 case BlockedOnMsgThrowTo:
616 MessageThrowTo *m = tso->block_info.throwto;
617 // The message is locked by us, unless we got here via
618 // deleteAllThreads(), in which case we own all the
620 // ASSERT(m->header.info == &stg_WHITEHOLE_info);
622 // unlock and revoke it at the same time
623 unlockClosure((StgClosure*)m,&stg_MSG_NULL_info);
627 #if !defined(THREADED_RTS)
630 #if defined(mingw32_HOST_OS)
631 case BlockedOnDoProc:
633 removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
634 #if defined(mingw32_HOST_OS)
635 /* (Cooperatively) signal that the worker thread should abort
638 abandonWorkRequest(tso->block_info.async_result->reqID);
643 removeThreadFromQueue(cap, &sleeping_queue, tso);
648 barf("removeFromQueues: %d", tso->why_blocked);
652 tso->why_blocked = NotBlocked;
653 appendToRunQueue(cap, tso);
656 /* -----------------------------------------------------------------------------
659 * The following function implements the magic for raising an
660 * asynchronous exception in an existing thread.
662 * We first remove the thread from any queue on which it might be
663 * blocked. The possible blockages are MVARs, BLOCKING_QUEUESs, and
664 * TSO blocked_exception queues.
666 * We strip the stack down to the innermost CATCH_FRAME, building
667 * thunks in the heap for all the active computations, so they can
668 * be restarted if necessary. When we reach a CATCH_FRAME, we build
669 * an application of the handler to the exception, and push it on
670 * the top of the stack.
672 * How exactly do we save all the active computations? We create an
673 * AP_STACK for every UpdateFrame on the stack. Entering one of these
674 * AP_STACKs pushes everything from the corresponding update frame
675 * upwards onto the stack. (Actually, it pushes everything up to the
676 * next update frame plus a pointer to the next AP_STACK object.
677 * Entering the next AP_STACK object pushes more onto the stack until we
678 * reach the last AP_STACK object - at which point the stack should look
679 * exactly as it did when we killed the TSO and we can continue
680 * execution by entering the closure on top of the stack.
682 * We can also kill a thread entirely - this happens if either (a) the
683 * exception passed to raiseAsync is NULL, or (b) there's no
684 * CATCH_FRAME on the stack. In either case, we strip the entire
685 * stack and replace the thread with a zombie.
687 * ToDo: in THREADED_RTS mode, this function is only safe if either
688 * (a) we hold all the Capabilities (eg. in GC, or if there is only
689 * one Capability), or (b) we own the Capability that the TSO is
690 * currently blocked on or on the run queue of.
692 * -------------------------------------------------------------------------- */
695 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
696 rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
698 StgRetInfoTable *info;
703 debugTraceCap(DEBUG_sched, cap,
704 "raising exception in thread %ld.", (long)tso->id);
706 #if defined(PROFILING)
708 * Debugging tool: on raising an exception, show where we are.
709 * See also Exception.cmm:stg_raisezh.
710 * This wasn't done for asynchronous exceptions originally; see #1450
712 if (RtsFlags.ProfFlags.showCCSOnException)
714 fprintCCS_stderr(tso->prof.CCCS);
717 // ASSUMES: the thread is not already complete or dead, or
718 // ThreadRelocated. Upper layers should deal with that.
719 ASSERT(tso->what_next != ThreadComplete &&
720 tso->what_next != ThreadKilled &&
721 tso->what_next != ThreadRelocated);
723 // only if we own this TSO (except that deleteThread() calls this
724 ASSERT(tso->cap == cap);
727 if (tso->why_blocked != NotBlocked) {
728 tso->why_blocked = NotBlocked;
729 appendToRunQueue(cap,tso);
732 // mark it dirty; we're about to change its stack.
737 if (stop_here != NULL) {
738 updatee = stop_here->updatee;
743 // The stack freezing code assumes there's a closure pointer on
744 // the top of the stack, so we have to arrange that this is the case...
746 if (sp[0] == (W_)&stg_enter_info) {
750 sp[0] = (W_)&stg_dummy_ret_closure;
754 while (stop_here == NULL || frame < (StgPtr)stop_here) {
756 // 1. Let the top of the stack be the "current closure"
758 // 2. Walk up the stack until we find either an UPDATE_FRAME or a
761 // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
762 // current closure applied to the chunk of stack up to (but not
763 // including) the update frame. This closure becomes the "current
764 // closure". Go back to step 2.
766 // 4. If it's a CATCH_FRAME, then leave the exception handler on
767 // top of the stack applied to the exception.
769 // 5. If it's a STOP_FRAME, then kill the thread.
771 // NB: if we pass an ATOMICALLY_FRAME then abort the associated
774 info = get_ret_itbl((StgClosure *)frame);
776 switch (info->i.type) {
783 // First build an AP_STACK consisting of the stack chunk above the
784 // current update frame, with the top word on the stack as the
787 words = frame - sp - 1;
788 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
791 ap->fun = (StgClosure *)sp[0];
793 for(i=0; i < (nat)words; ++i) {
794 ap->payload[i] = (StgClosure *)*sp++;
797 SET_HDR(ap,&stg_AP_STACK_info,
798 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
799 TICK_ALLOC_UP_THK(words+1,0);
801 //IF_DEBUG(scheduler,
802 // debugBelch("sched: Updating ");
803 // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
804 // debugBelch(" with ");
805 // printObj((StgClosure *)ap);
808 if (((StgUpdateFrame *)frame)->updatee == updatee) {
809 // If this update frame points to the same closure as
810 // the update frame further down the stack
811 // (stop_here), then don't perform the update. We
812 // want to keep the blackhole in this case, so we can
813 // detect and report the loop (#2783).
814 ap = (StgAP_STACK*)updatee;
816 // Perform the update
817 // TODO: this may waste some work, if the thunk has
818 // already been updated by another thread.
819 updateThunk(cap, tso,
820 ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
823 sp += sizeofW(StgUpdateFrame) - 1;
824 sp[0] = (W_)ap; // push onto stack
826 continue; //no need to bump frame
831 // We've stripped the entire stack, the thread is now dead.
832 tso->what_next = ThreadKilled;
833 tso->sp = frame + sizeofW(StgStopFrame);
838 // If we find a CATCH_FRAME, and we've got an exception to raise,
839 // then build the THUNK raise(exception), and leave it on
840 // top of the CATCH_FRAME ready to enter.
843 StgCatchFrame *cf = (StgCatchFrame *)frame;
846 if (exception == NULL) break;
848 // we've got an exception to raise, so let's pass it to the
849 // handler in this frame.
851 raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
852 TICK_ALLOC_SE_THK(1,0);
853 SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
854 raise->payload[0] = exception;
856 // throw away the stack from Sp up to the CATCH_FRAME.
860 /* Ensure that async exceptions are blocked now, so we don't get
861 * a surprise exception before we get around to executing the
864 tso->flags |= TSO_BLOCKEX;
865 if ((cf->exceptions_blocked & TSO_INTERRUPTIBLE) == 0) {
866 tso->flags &= ~TSO_INTERRUPTIBLE;
868 tso->flags |= TSO_INTERRUPTIBLE;
871 /* Put the newly-built THUNK on top of the stack, ready to execute
872 * when the thread restarts.
875 sp[-1] = (W_)&stg_enter_info;
877 tso->what_next = ThreadRunGHC;
878 IF_DEBUG(sanity, checkTSO(tso));
882 case ATOMICALLY_FRAME:
883 if (stop_at_atomically) {
884 ASSERT(tso->trec->enclosing_trec == NO_TREC);
885 stmCondemnTransaction(cap, tso -> trec);
887 // The ATOMICALLY_FRAME expects to be returned a
888 // result from the transaction, which it stores in the
889 // stack frame. Hence we arrange to return a dummy
890 // result, so that the GC doesn't get upset (#3578).
891 // Perhaps a better way would be to have a different
892 // ATOMICALLY_FRAME instance for condemned
893 // transactions, but I don't fully understand the
894 // interaction with STM invariants.
895 tso->sp[1] = (W_)&stg_NO_TREC_closure;
896 tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
897 tso->what_next = ThreadRunGHC;
900 // Not stop_at_atomically... fall through and abort the
903 case CATCH_STM_FRAME:
904 case CATCH_RETRY_FRAME:
905 // IF we find an ATOMICALLY_FRAME then we abort the
906 // current transaction and propagate the exception. In
907 // this case (unlike ordinary exceptions) we do not care
908 // whether the transaction is valid or not because its
909 // possible validity cannot have caused the exception
910 // and will not be visible after the abort.
913 StgTRecHeader *trec = tso -> trec;
914 StgTRecHeader *outer = trec -> enclosing_trec;
915 debugTraceCap(DEBUG_stm, cap,
916 "found atomically block delivering async exception");
917 stmAbortTransaction(cap, trec);
918 stmFreeAbortedTRec(cap, trec);
927 // move on to the next stack frame
928 frame += stack_frame_sizeW((StgClosure *)frame);
931 // if we got here, then we stopped at stop_here
932 ASSERT(stop_here != NULL);