1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Asynchronous exceptions
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
12 #include "sm/Storage.h"
15 #include "RaiseAsync.h"
19 #include "sm/Sanity.h"
20 #include "Profiling.h"
21 #if defined(mingw32_HOST_OS)
22 #include "win32/IOManager.h"
25 static void raiseAsync (Capability *cap,
27 StgClosure *exception,
28 rtsBool stop_at_atomically,
29 StgUpdateFrame *stop_here);
31 static void removeFromQueues(Capability *cap, StgTSO *tso);
33 static void blockedThrowTo (Capability *cap,
34 StgTSO *target, MessageThrowTo *msg);
36 static void throwToSendMsg (Capability *cap USED_IF_THREADS,
37 Capability *target_cap USED_IF_THREADS,
38 MessageThrowTo *msg USED_IF_THREADS);
40 static void performBlockedException (Capability *cap, MessageThrowTo *msg);
42 /* -----------------------------------------------------------------------------
45 This version of throwTo is safe to use if and only if one of the following holds:
50 - all the other threads in the system are stopped (eg. during GC).
52 - we surely own the target TSO (eg. we just took it from the
53 run queue of the current capability, or we are running it).
55 It doesn't cater for blocking the source thread until the exception has been raised.
57 -------------------------------------------------------------------------- */
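/* (For illustration: deleteThread() in Schedule.c is one caller that relies
 * on these conditions, since it only calls throwToSingleThreaded() on a TSO
 * to which it has exclusive access; exact call sites vary between RTS
 * versions.)
 */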
60 throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
62 throwToSingleThreaded_(cap, tso, exception, rtsFalse);
66 throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
67 rtsBool stop_at_atomically)
69 // Thread already dead?
70 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
74 // Remove it from any blocking queues
75 removeFromQueues(cap,tso);
77 raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
81 suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
83 // Thread already dead?
84 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
88 // Remove it from any blocking queues
89 removeFromQueues(cap,tso);
91 raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
94 /* -----------------------------------------------------------------------------
97 This function may be used to throw an exception from one thread to
98 another, during the course of normal execution. This is a tricky
99 task: the target thread might be running on another CPU, or it
100 may be blocked and could be woken up at any point by another CPU.
101 We have some delicate synchronisation to do.
103 The underlying scheme when multiple Capabilities are in use is
104 message passing: when the target of a throwTo is on another
105 Capability, we send a message (a MessageThrowTo closure) to that Capability.
108 If the throwTo needs to block because the target TSO is masking
109 exceptions (the TSO_BLOCKEX flag), then the message is placed on
110 the blocked_exceptions queue attached to the target TSO. When the
111 target TSO enters the unmasked state again, it must check the
112 queue. The blocked_exceptions queue is not locked; only the
113 Capability owning the TSO may modify it.
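   (Concretely, blocked_exceptions is a singly-linked list of MessageThrowTo
   closures chained through their link fields and terminated by
   END_BLOCKED_EXCEPTIONS_QUEUE; see blockedThrowTo() below.)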
115 To make things simpler for throwTo, we always create the message
116 first before deciding what to do. The message may get sent, or it
117 may get attached to a TSO's blocked_exceptions queue, or the
118 exception may get thrown immediately and the message dropped,
119 depending on the current state of the target.
121 Currently we send a message if the target belongs to another
122 Capability, and it is
124 - NotBlocked, BlockedOnMsgWakeup, BlockedOnMsgThrowTo, BlockedOnCCall
127 - or it is masking exceptions (TSO_BLOCKEX)
129 Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
130 BlockedOnBlackHole then we acquire ownership of the TSO by locking
131 its parent container (e.g. the MVar) and then raise the exception.
132 We might change these cases to be more message-passing-like in the future.
137 Returns: NULL if the exception was raised (ok to continue), or
139 a MessageThrowTo* if the exception was not raised; the source TSO
140 should now put itself in the state
141 BlockedOnMsgThrowTo, and when it is ready
142 it should unlock the message using
143 unlockClosure(msg, &stg_MSG_THROWTO_info);
144 If it decides not to raise the exception after
145 all, it can revoke it safely with
146 unlockClosure(msg, &stg_IND_info);
148 -------------------------------------------------------------------------- */
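/* A C-like sketch of how a caller is expected to use throwTo(); the real
 * callers are Cmm code (e.g. stg_killThreadzh in PrimOps.cmm), so the names
 * below are illustrative only:
 *
 *     MessageThrowTo *m = throwTo(cap, source, target, exception);
 *     if (m != NULL) {
 *         // exception not raised yet: block the source thread on the message
 *         source->why_blocked = BlockedOnMsgThrowTo;
 *         source->block_info.closure = (StgClosure *)m;
 *         // ...yield the Capability; when woken, either let the throw be
 *         // retried by unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info),
 *         // or revoke it with unlockClosure((StgClosure*)m, &stg_IND_info).
 *     }
 */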
151 throwTo (Capability *cap, // the Capability we hold
152 StgTSO *source, // the TSO sending the exception (or NULL)
153 StgTSO *target, // the TSO receiving the exception
154 StgClosure *exception) // the exception closure
158 msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
159 // message starts locked; the caller has to unlock it when it is done with it.
161 msg->header.info = &stg_WHITEHOLE_info;
162 msg->source = source;
163 msg->target = target;
164 msg->exception = exception;
166 switch (throwToMsg(cap, msg))
168 case THROWTO_SUCCESS:
170 case THROWTO_BLOCKED:
178 throwToMsg (Capability *cap, MessageThrowTo *msg)
181 StgTSO *target = msg->target;
183 ASSERT(target != END_TSO_QUEUE);
185 // follow ThreadRelocated links in the target first
186 while (target->what_next == ThreadRelocated) {
187 target = target->_link;
188 // No, it might be a WHITEHOLE:
189 // ASSERT(get_itbl(target)->type == TSO);
192 debugTraceCap(DEBUG_sched, cap,
193 "throwTo: from thread %lu to thread %lu",
194 (unsigned long)msg->source->id,
195 (unsigned long)msg->target->id);
198 traceThreadStatus(DEBUG_sched, target);
204 debugTrace(DEBUG_sched, "throwTo: retrying...");
207 ASSERT(target != END_TSO_QUEUE);
209 // Thread already dead?
210 if (target->what_next == ThreadComplete
211 || target->what_next == ThreadKilled) {
212 return THROWTO_SUCCESS;
215 status = target->why_blocked;
219 case BlockedOnMsgWakeup:
220 /* if status==NotBlocked, and target->cap == cap, then
221 we own this TSO and can raise the exception.
223 How do we establish this condition? Very carefully.
226 P = (status == NotBlocked)
227 Q = (tso->cap == cap)
229 Now, if P & Q are true, then the TSO is locked and owned by
230 this capability. No other OS thread can steal it.
232 If P==0 and Q==1: the TSO is blocked, but attached to this
233 capability, and it can be stolen by another capability.
235 If P==1 and Q==0: the TSO is runnable on another
236 capability. At any time, the TSO may change from runnable
237 to blocked and vice versa, while it remains owned by another capability.
240 Suppose we test like this:
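            (roughly)
            status = target->why_blocked;                   // read P
            target_cap = target->cap;                       // read Q
            if (status == NotBlocked && target_cap == cap) {
                // we own the TSO: raise the exception directly
            }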
246 this is defeated by another capability stealing a blocked
247 TSO from us to wake it up (Schedule.c:unblockOne()). The
248 other thread is doing
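            (roughly; the real code is in Schedule.c:unblockOne())
            tso->cap         = its_own_cap;   // Q becomes false for us
            tso->why_blocked = NotBlocked;    // P becomes true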
253 assuming arbitrary reordering, we could see this
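            our read of why_blocked  -> NotBlocked   (P appears true)
            our read of tso->cap     -> cap          (stale; Q appears true)
         so we would wrongly conclude that we own the TSO, even though it
         is really on the other capability's run queue.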
263 so we need a memory barrier:
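            (roughly: unblockOne() orders its writes with a barrier)
            tso->cap         = its_own_cap;
            write_barrier();
            tso->why_blocked = NotBlocked;
         while here in throwTo() we read why_blocked (P) before we read
         target->cap (Q);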
270 this avoids the problematic case. There are other cases
271 to consider, but this is the tricky one.
273 Note that we must be sure that unblockOne() does the
274 writes in the correct order: Q before P. The memory
275 barrier ensures that if we have seen the write to P, we
276 have also seen the write to Q.
279 Capability *target_cap;
282 target_cap = target->cap;
283 if (target_cap != cap) {
284 throwToSendMsg(cap, target_cap, msg);
285 return THROWTO_BLOCKED;
287 if ((target->flags & TSO_BLOCKEX) == 0) {
288 // It's on our run queue and not blocking exceptions
289 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
290 return THROWTO_SUCCESS;
292 blockedThrowTo(cap,target,msg);
293 return THROWTO_BLOCKED;
298 case BlockedOnMsgThrowTo:
300 Capability *target_cap;
301 const StgInfoTable *i;
304 m = target->block_info.throwto;
306 // target is local to this cap, but has sent a throwto
307 // message to another cap.
309 // The source message is locked. We need to revoke the
310 // target's message so that we can raise the exception, so
311 // we attempt to lock it.
313 // There's a possibility of a deadlock if two threads are both
314 // trying to throwTo each other (or more generally, a cycle of
315 // threads). To break the symmetry we compare the addresses
316 // of the MessageThrowTo objects, and the one for which m <
317 // msg gets to spin, while the other can only try to lock
318 // once, but must then back off and unlock both before trying again.
321 i = lockClosure((StgClosure *)m);
323 i = tryLockClosure((StgClosure *)m);
325 // debugBelch("collision\n");
326 throwToSendMsg(cap, target->cap, msg);
327 return THROWTO_BLOCKED;
331 if (i != &stg_MSG_THROWTO_info) {
332 // if it's an IND, this TSO has been woken up by another Cap
333 unlockClosure((StgClosure*)m, i);
337 target_cap = target->cap;
338 if (target_cap != cap) {
339 unlockClosure((StgClosure*)m, i);
340 throwToSendMsg(cap, target_cap, msg);
341 return THROWTO_BLOCKED;
344 if ((target->flags & TSO_BLOCKEX) &&
345 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
346 unlockClosure((StgClosure*)m, i);
347 blockedThrowTo(cap,target,msg);
348 return THROWTO_BLOCKED;
351 // nobody else can wake up this TSO after we claim the message
352 unlockClosure((StgClosure*)m, &stg_IND_info);
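        // The target was blocked doing its own throwTo; its message has now
        // been revoked (turned into an IND), so we can raise our exception
        // in it and put it back on the run queue.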
354 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
355 unblockOne(cap, target);
356 return THROWTO_SUCCESS;
362 To establish ownership of this TSO, we need to acquire a
363 lock on the MVar that it is blocked on.
366 StgInfoTable *info USED_IF_THREADS;
368 mvar = (StgMVar *)target->block_info.closure;
370 // ASSUMPTION: tso->block_info must always point to a
371 // closure. In the threaded RTS it does.
372 switch (get_itbl(mvar)->type) {
380 info = lockClosure((StgClosure *)mvar);
382 if (target->what_next == ThreadRelocated) {
383 target = target->_link;
384 unlockClosure((StgClosure *)mvar,info);
387 // we have the MVar, let's check whether the thread
388 // is still blocked on the same MVar.
389 if (target->why_blocked != BlockedOnMVar
390 || (StgMVar *)target->block_info.closure != mvar) {
391 unlockClosure((StgClosure *)mvar, info);
395 if ((target->flags & TSO_BLOCKEX) &&
396 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
397 Capability *target_cap = target->cap;
398 if (target->cap != cap) {
399 throwToSendMsg(cap,target_cap,msg);
401 blockedThrowTo(cap,target,msg);
403 unlockClosure((StgClosure *)mvar, info);
404 return THROWTO_BLOCKED;
406 removeThreadFromMVarQueue(cap, mvar, target);
407 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
408 unblockOne(cap, target);
409 unlockClosure((StgClosure *)mvar, info);
410 return THROWTO_SUCCESS;
414 case BlockedOnBlackHole:
416 ACQUIRE_LOCK(&sched_mutex);
417 // double checking the status after the memory barrier:
418 if (target->why_blocked != BlockedOnBlackHole) {
419 RELEASE_LOCK(&sched_mutex);
423 if (target->flags & TSO_BLOCKEX) {
424 Capability *target_cap = target->cap;
425 if (target->cap != cap) {
426 throwToSendMsg(cap,target_cap,msg);
428 blockedThrowTo(cap,target,msg);
430 RELEASE_LOCK(&sched_mutex);
431 return THROWTO_BLOCKED; // caller releases lock
433 removeThreadFromQueue(cap, &blackhole_queue, target);
434 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
435 unblockOne(cap, target);
436 RELEASE_LOCK(&sched_mutex);
437 return THROWTO_SUCCESS;
443 // Unblocking BlockedOnSTM threads requires the TSO to be
444 // locked; see STM.c:unpark_tso().
445 if (target->why_blocked != BlockedOnSTM) {
449 if ((target->flags & TSO_BLOCKEX) &&
450 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
451 Capability *target_cap = target->cap;
452 if (target->cap != cap) {
453 throwToSendMsg(cap,target_cap,msg);
455 blockedThrowTo(cap,target,msg);
458 return THROWTO_BLOCKED;
460 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
461 unblockOne(cap, target);
463 return THROWTO_SUCCESS;
467 case BlockedOnCCall_NoUnblockExc:
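        // The target is in a foreign call, so we cannot interrupt it here;
        // we queue the message (or forward it to the owning Capability) and
        // it will be noticed later, e.g. by maybePerformBlockedException().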
469 Capability *target_cap;
471 target_cap = target->cap;
472 if (target_cap != cap) {
473 throwToSendMsg(cap, target_cap, msg);
474 return THROWTO_BLOCKED;
477 blockedThrowTo(cap,target,msg);
478 return THROWTO_BLOCKED;
481 #if !defined(THREADED_RTS)
485 #if defined(mingw32_HOST_OS)
486 case BlockedOnDoProc:
488 if ((target->flags & TSO_BLOCKEX) &&
489 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
490 blockedThrowTo(cap,target,msg);
491 return THROWTO_BLOCKED;
493 removeFromQueues(cap,target);
494 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
495 return THROWTO_SUCCESS;
500 barf("throwTo: unrecognised why_blocked value");
506 throwToSendMsg (Capability *cap STG_UNUSED,
507 Capability *target_cap USED_IF_THREADS,
508 MessageThrowTo *msg USED_IF_THREADS)
512 debugTrace(DEBUG_sched, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);
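    // sendMessage() hands the message over to the target Capability, which
    // will examine it the next time it runs its scheduler loop.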
514 sendMessage(target_cap, (Message*)msg);
518 // Block a throwTo message on the target TSO's blocked_exceptions
519 // queue. The current Capability must own the target TSO in order to
520 // modify the blocked_exceptions queue.
522 blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
524 debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
525 (unsigned long)target->id);
527 ASSERT(target->cap == cap);
529 msg->link = (Message*)target->blocked_exceptions;
530 target->blocked_exceptions = msg;
531 dirty_TSO(cap,target); // we modified the blocked_exceptions queue
534 /* -----------------------------------------------------------------------------
535 Waking up threads blocked in throwTo
537 There are two ways to do this: maybePerformBlockedException() will
538 perform the throwTo() for the thread at the head of the queue
539 immediately, and leave the other threads on the queue.
540 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
541 before raising an exception.
543 awakenBlockedExceptionQueue() will wake up all the threads in the
544 queue, but not perform any throwTo() immediately. This might be
545 more appropriate when the target thread is the one actually running.
548 Returns: non-zero if an exception was raised, zero otherwise.
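   (maybePerformBlockedException() is typically called from the scheduler and
   from the unmask primops when a thread leaves a masked region;
   awakenBlockedExceptionQueue() is used, for example, when the target thread
   is terminating.)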
549 -------------------------------------------------------------------------- */
552 maybePerformBlockedException (Capability *cap, StgTSO *tso)
555 const StgInfoTable *i;
557 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
558 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
559 awakenBlockedExceptionQueue(cap,tso);
566 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
567 (tso->flags & TSO_BLOCKEX) != 0) {
568 debugTrace(DEBUG_sched, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
571 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
572 && ((tso->flags & TSO_BLOCKEX) == 0
573 || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {
575 // We unblock just the first thread on the queue, and perform
576 // its throw immediately.
578 msg = tso->blocked_exceptions;
579 if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
580 i = lockClosure((StgClosure*)msg);
581 tso->blocked_exceptions = (MessageThrowTo*)msg->link;
582 if (i == &stg_IND_info) {
583 unlockClosure((StgClosure*)msg,i);
587 performBlockedException(cap, msg);
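        // Wake up the source thread (it has been blocked in throwTo()
        // waiting on this message) and mark the message as spent by turning
        // it into an IND, so nobody else acts on it.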
588 unblockOne_(cap, msg->source, rtsFalse/*no migrate*/);
589 unlockClosure((StgClosure*)msg,&stg_IND_info);
595 // awakenBlockedExceptionQueue(): Just wake up the whole queue of
596 // blocked exceptions and let them try again.
599 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
602 const StgInfoTable *i;
604 for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
605 msg = (MessageThrowTo*)msg->link) {
606 i = lockClosure((StgClosure *)msg);
607 if (i != &stg_IND_info) {
608 unblockOne_(cap, msg->source, rtsFalse/*no migrate*/);
610 unlockClosure((StgClosure *)msg,i);
612 tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
616 performBlockedException (Capability *cap, MessageThrowTo *msg)
620 source = msg->source;
622 ASSERT(source->why_blocked == BlockedOnMsgThrowTo);
623 ASSERT(source->block_info.closure == (StgClosure *)msg);
624 ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
625 ASSERT(((StgTSO *)source->sp[1])->id == msg->target->id);
626 // check ids not pointers, because the thread might be relocated
628 throwToSingleThreaded(cap, msg->target, msg->exception);
632 /* -----------------------------------------------------------------------------
633 Remove a thread from blocking queues.
635 This is for use when we raise an exception in another thread, which may be blocked.
638 Precondition: we have exclusive access to the TSO, via the same set
639 of conditions as throwToSingleThreaded() (c.f.).
640 -------------------------------------------------------------------------- */
643 removeFromQueues(Capability *cap, StgTSO *tso)
645 switch (tso->why_blocked) {
651 // Be careful: nothing to do here! We tell the scheduler that the
652 // thread is runnable and we leave it to the stack-walking code to
653 // abort the transaction while unwinding the stack. We should
654 // perhaps have a debugging test to make sure that this really
655 // happens and that the 'zombie' transaction does not get committed.
660 removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
663 case BlockedOnBlackHole:
664 removeThreadFromQueue(cap, &blackhole_queue, tso);
667 case BlockedOnMsgWakeup:
669 // kill the message, atomically:
670 tso->block_info.wakeup->header.info = &stg_IND_info;
674 case BlockedOnMsgThrowTo:
676 MessageThrowTo *m = tso->block_info.throwto;
677 // The message is locked by us, unless we got here via
678 // deleteAllThreads(), in which case we own all the capabilities.
680 // ASSERT(m->header.info == &stg_WHITEHOLE_info);
682 // unlock and revoke it at the same time
683 unlockClosure((StgClosure*)m,&stg_IND_info);
687 #if !defined(THREADED_RTS)
690 #if defined(mingw32_HOST_OS)
691 case BlockedOnDoProc:
693 removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
694 #if defined(mingw32_HOST_OS)
695 /* (Cooperatively) signal that the worker thread should abort the request. */
698 abandonWorkRequest(tso->block_info.async_result->reqID);
703 removeThreadFromQueue(cap, &sleeping_queue, tso);
708 barf("removeFromQueues: %d", tso->why_blocked);
712 unblockOne(cap, tso);
715 /* -----------------------------------------------------------------------------
718 * The following function implements the magic for raising an
719 * asynchronous exception in an existing thread.
721 * We first remove the thread from any queue on which it might be
722 * blocked (see removeFromQueues() for the full list of possible blockages).
724 * We strip the stack down to the innermost CATCH_FRAME, building
725 * thunks in the heap for all the active computations, so they can
726 * be restarted if necessary. When we reach a CATCH_FRAME, we build
727 * an application of the handler to the exception, and push it on
728 * the top of the stack.
730 * How exactly do we save all the active computations? We create an
731 * AP_STACK for every UpdateFrame on the stack. Entering one of these
732 * AP_STACKs pushes everything from the corresponding update frame
733 * upwards onto the stack. (Actually, it pushes everything up to the
734 * next update frame plus a pointer to the next AP_STACK object.
735 * Entering the next AP_STACK object pushes more onto the stack until we
736 * reach the last AP_STACK object - at which point the stack should look
737 * exactly as it did when we killed the TSO and we can continue
738 * execution by entering the closure on top of the stack.)
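 *
 * A rough picture of the transformation (illustrative only; UPD(t) denotes
 * an update frame whose updatee is the thunk t, and the top of the stack is
 * on the left):
 *
 *    before:  | chunk1 | UPD(t1) | chunk2 | UPD(t2) | ... | CATCH_FRAME | ...
 *
 *    after:   t1 := AP_STACK(chunk1)
 *             t2 := AP_STACK(t1's AP_STACK, chunk2)
 *             and the thread's stack is cut back to
 *                 | raise exception | CATCH_FRAME | ...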
740 * We can also kill a thread entirely - this happens if either (a) the
741 * exception passed to raiseAsync is NULL, or (b) there's no
742 * CATCH_FRAME on the stack. In either case, we strip the entire
743 * stack and replace the thread with a zombie.
745 * ToDo: in THREADED_RTS mode, this function is only safe if either
746 * (a) we hold all the Capabilities (eg. in GC, or if there is only
747 * one Capability), or (b) we own the Capability that the TSO is
748 * currently blocked on or on the run queue of.
750 * -------------------------------------------------------------------------- */
753 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
754 rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
756 StgRetInfoTable *info;
761 debugTrace(DEBUG_sched,
762 "raising exception in thread %ld.", (long)tso->id);
764 #if defined(PROFILING)
766 * Debugging tool: on raising an exception, show where we are.
767 * See also Exception.cmm:stg_raisezh.
768 * This wasn't done for asynchronous exceptions originally; see #1450
770 if (RtsFlags.ProfFlags.showCCSOnException)
772 fprintCCS_stderr(tso->prof.CCCS);
776 while (tso->what_next == ThreadRelocated) {
780 // mark it dirty; we're about to change its stack.
785 // ASSUMES: the thread is not already complete or dead. Upper
786 // layers should deal with that.
787 ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);
789 if (stop_here != NULL) {
790 updatee = stop_here->updatee;
795 // The stack freezing code assumes there's a closure pointer on
796 // the top of the stack, so we have to arrange that this is the case...
798 if (sp[0] == (W_)&stg_enter_info) {
802 sp[0] = (W_)&stg_dummy_ret_closure;
806 while (stop_here == NULL || frame < (StgPtr)stop_here) {
808 // 1. Let the top of the stack be the "current closure"
810 // 2. Walk up the stack until we find either an UPDATE_FRAME or a
813 // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
814 // current closure applied to the chunk of stack up to (but not
815 // including) the update frame. This closure becomes the "current
816 // closure". Go back to step 2.
818 // 4. If it's a CATCH_FRAME, then leave the exception handler on
819 // top of the stack applied to the exception.
821 // 5. If it's a STOP_FRAME, then kill the thread.
823 // NB: if we pass an ATOMICALLY_FRAME then abort the associated transaction.
826 info = get_ret_itbl((StgClosure *)frame);
828 switch (info->i.type) {
835 // First build an AP_STACK consisting of the stack chunk above the
836 // current update frame, with the top word on the stack as the fun field.
839 words = frame - sp - 1;
840 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
843 ap->fun = (StgClosure *)sp[0];
845 for(i=0; i < (nat)words; ++i) {
846 ap->payload[i] = (StgClosure *)*sp++;
849 SET_HDR(ap,&stg_AP_STACK_info,
850 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
851 TICK_ALLOC_UP_THK(words+1,0);
853 //IF_DEBUG(scheduler,
854 // debugBelch("sched: Updating ");
855 // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
856 // debugBelch(" with ");
857 // printObj((StgClosure *)ap);
860 if (((StgUpdateFrame *)frame)->updatee == updatee) {
861 // If this update frame points to the same closure as
862 // the update frame further down the stack
863 // (stop_here), then don't perform the update. We
864 // want to keep the blackhole in this case, so we can
865 // detect and report the loop (#2783).
866 ap = (StgAP_STACK*)updatee;
868 // Perform the update
869 // TODO: this may waste some work, if the thunk has
870 // already been updated by another thread.
871 UPD_IND(cap, ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
874 sp += sizeofW(StgUpdateFrame) - 1;
875 sp[0] = (W_)ap; // push onto stack
877 continue; //no need to bump frame
882 // We've stripped the entire stack; the thread is now dead.
883 tso->what_next = ThreadKilled;
884 tso->sp = frame + sizeofW(StgStopFrame);
889 // If we find a CATCH_FRAME, and we've got an exception to raise,
890 // then build the THUNK raise(exception), and leave it on
891 // top of the CATCH_FRAME ready to enter.
895 StgCatchFrame *cf = (StgCatchFrame *)frame;
899 if (exception == NULL) break;
901 // we've got an exception to raise, so let's pass it to the
902 // handler in this frame.
904 raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
905 TICK_ALLOC_SE_THK(1,0);
906 SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
907 raise->payload[0] = exception;
909 // throw away the stack from Sp up to the CATCH_FRAME.
913 /* Ensure that async exceptions are blocked now, so we don't get
914 * a surprise exception before we get around to executing the handler. */
917 tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
919 /* Put the newly-built THUNK on top of the stack, ready to execute
920 * when the thread restarts. */
923 sp[-1] = (W_)&stg_enter_info;
925 tso->what_next = ThreadRunGHC;
926 IF_DEBUG(sanity, checkTSO(tso));
930 case ATOMICALLY_FRAME:
931 if (stop_at_atomically) {
932 ASSERT(tso->trec->enclosing_trec == NO_TREC);
933 stmCondemnTransaction(cap, tso -> trec);
935 // The ATOMICALLY_FRAME expects to be returned a
936 // result from the transaction, which it stores in the
937 // stack frame. Hence we arrange to return a dummy
938 // result, so that the GC doesn't get upset (#3578).
939 // Perhaps a better way would be to have a different
940 // ATOMICALLY_FRAME instance for condemned
941 // transactions, but I don't fully understand the
942 // interaction with STM invariants.
943 tso->sp[1] = (W_)&stg_NO_TREC_closure;
944 tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
945 tso->what_next = ThreadRunGHC;
948 // Not stop_at_atomically... fall through and abort the transaction.
951 case CATCH_STM_FRAME:
952 case CATCH_RETRY_FRAME:
953 // IF we find an ATOMICALLY_FRAME then we abort the
954 // current transaction and propagate the exception. In
955 // this case (unlike ordinary exceptions) we do not care
956 // whether the transaction is valid or not because its
957 // possible validity cannot have caused the exception
958 // and will not be visible after the abort.
961 StgTRecHeader *trec = tso -> trec;
962 StgTRecHeader *outer = trec -> enclosing_trec;
963 debugTrace(DEBUG_stm,
964 "found atomically block delivering async exception");
965 stmAbortTransaction(cap, trec);
966 stmFreeAbortedTRec(cap, trec);
975 // move on to the next stack frame
976 frame += stack_frame_sizeW((StgClosure *)frame);
979 // if we got here, then we stopped at stop_here
980 ASSERT(stop_here != NULL);