1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Asynchronous exceptions
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
12 #include "sm/Storage.h"
15 #include "RaiseAsync.h"
19 #include "sm/Sanity.h"
20 #include "Profiling.h"
21 #if defined(mingw32_HOST_OS)
22 #include "win32/IOManager.h"
25 static void raiseAsync (Capability *cap,
27 StgClosure *exception,
28 rtsBool stop_at_atomically,
29 StgUpdateFrame *stop_here);
31 static void removeFromQueues(Capability *cap, StgTSO *tso);
33 static void blockedThrowTo (Capability *cap,
34 StgTSO *target, MessageThrowTo *msg);
36 static void throwToSendMsg (Capability *cap USED_IF_THREADS,
37 Capability *target_cap USED_IF_THREADS,
38 MessageThrowTo *msg USED_IF_THREADS);
40 static void performBlockedException (Capability *cap, MessageThrowTo *msg);
42 /* -----------------------------------------------------------------------------
45 This version of throwTo is safe to use if and only if one of the
50 - all the other threads in the system are stopped (eg. during GC).
52 - we surely own the target TSO (eg. we just took it from the
53 run queue of the current capability, or we are running it).
55 It doesn't cater for blocking the source thread until the exception
57 -------------------------------------------------------------------------- */
// Raise 'exception' asynchronously in 'tso'. Only safe under the
// preconditions listed in the comment block above (all threads stopped,
// or we surely own the target TSO). Thin wrapper: delegates with
// stop_at_atomically = rtsFalse.
60 throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
62     throwToSingleThreaded_(cap, tso, exception, rtsFalse);
// Worker for throwToSingleThreaded/suspendComputation: strip the target
// out of any blocking queue, then hand off to raiseAsync.
// stop_at_atomically: if true, raiseAsync stops at an ATOMICALLY_FRAME
// instead of unwinding through it.
66 throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
67 rtsBool stop_at_atomically)
69 // Thread already dead?
70 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
// Chase indirections left by thread relocation so we operate on the
// live TSO object.
73 while (tso->what_next == ThreadRelocated) {
77 // Remove it from any blocking queues
78 removeFromQueues(cap,tso);
80 raiseAsync(cap, tso, exception, stop_at_atomically, NULL);
// Suspend the computation in 'tso' by unwinding its stack down to
// 'stop_here' (an update frame), without raising any exception
// (note the NULL exception argument to raiseAsync below).
84 suspendComputation(Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
86 // Thread already dead?
87 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
// Follow relocation indirections to the live TSO first.
90 while (tso->what_next == ThreadRelocated) {
94 // Remove it from any blocking queues
95 removeFromQueues(cap,tso);
97 raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
100 /* -----------------------------------------------------------------------------
103 This function may be used to throw an exception from one thread to
104 another, during the course of normal execution. This is a tricky
105 task: the target thread might be running on another CPU, or it
106 may be blocked and could be woken up at any point by another CPU.
107 We have some delicate synchronisation to do.
109 The underlying scheme when multiple Capabilities are in use is
110 message passing: when the target of a throwTo is on another
111 Capability, we send a message (a MessageThrowTo closure) to that
114 If the throwTo needs to block because the target TSO is masking
115 exceptions (the TSO_BLOCKEX flag), then the message is placed on
116 the blocked_exceptions queue attached to the target TSO. When the
117 target TSO enters the unmasked state again, it must check the
118 queue. The blocked_exceptions queue is not locked; only the
119 Capability owning the TSO may modify it.
121 To make things simpler for throwTo, we always create the message
122 first before deciding what to do. The message may get sent, or it
123 may get attached to a TSO's blocked_exceptions queue, or the
124 exception may get thrown immediately and the message dropped,
125 depending on the current state of the target.
127 Currently we send a message if the target belongs to another
128 Capability, and it is
130 - NotBlocked, BlockedOnMsgWakeup, BlockedOnMsgThrowTo,
133 - or it is masking exceptions (TSO_BLOCKEX)
135 Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
136 BlockedOnBlackHole then we acquire ownership of the TSO by locking
137 its parent container (e.g. the MVar) and then raise the exception.
138 We might change these cases to be more message-passing-like in the
143 NULL exception was raised, ok to continue
145 MessageThrowTo * exception was not raised; the source TSO
146 should now put itself in the state
147 BlockedOnMsgThrowTo, and when it is ready
148 it should unlock the message using
149 unlockClosure(msg, &stg_MSG_THROWTO_info);
150 If it decides not to raise the exception after
151 all, it can revoke it safely with
152 unlockClosure(msg, &stg_IND_info);
154 -------------------------------------------------------------------------- */
// General throwTo entry point: allocate a MessageThrowTo describing the
// (source, target, exception) triple and dispatch it via throwToMsg.
// The message is created in the locked (WHITEHOLE) state; per the
// comment block above, on THROWTO_BLOCKED the caller must later unlock
// or revoke it.
157 throwTo (Capability *cap, // the Capability we hold
158 StgTSO *source, // the TSO sending the exception (or NULL)
159 StgTSO *target, // the TSO receiving the exception
160 StgClosure *exception) // the exception closure
164 msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
165 // message starts locked; the caller has to unlock it when it is
167 msg->header.info = &stg_WHITEHOLE_info;
168 msg->source = source;
169 msg->target = target;
170 msg->exception = exception;
172 switch (throwToMsg(cap, msg))
174 case THROWTO_SUCCESS:
176 case THROWTO_BLOCKED:
// Deliver the throwto message 'msg' to its target TSO. Returns
// THROWTO_SUCCESS if the exception was raised (or the target was
// already dead), THROWTO_BLOCKED if the message was queued/sent and
// the source should block. Dispatches on target->why_blocked; each
// case must establish ownership of the target before raising.
184 throwToMsg (Capability *cap, MessageThrowTo *msg)
187 StgTSO *target = msg->target;
189 ASSERT(target != END_TSO_QUEUE);
191 // follow ThreadRelocated links in the target first
192 while (target->what_next == ThreadRelocated) {
193 target = target->_link;
194 // No, it might be a WHITEHOLE:
195 // ASSERT(get_itbl(target)->type == TSO);
198 debugTraceCap(DEBUG_sched, cap,
199 "throwTo: from thread %lu to thread %lu",
200 (unsigned long)msg->source->id,
201 (unsigned long)msg->target->id);
204 traceThreadStatus(DEBUG_sched, target);
210 debugTrace(DEBUG_sched, "throwTo: retrying...");
213 ASSERT(target != END_TSO_QUEUE);
215 // Thread already dead?
216 if (target->what_next == ThreadComplete
217 || target->what_next == ThreadKilled) {
218 return THROWTO_SUCCESS;
221 status = target->why_blocked;
225 case BlockedOnMsgWakeup:
226 /* if status==NotBlocked, and target->cap == cap, then
227 we own this TSO and can raise the exception.
229 How do we establish this condition? Very carefully.
232 P = (status == NotBlocked)
233 Q = (tso->cap == cap)
235 Now, if P & Q are true, then the TSO is locked and owned by
236 this capability. No other OS thread can steal it.
238 If P==0 and Q==1: the TSO is blocked, but attached to this
239 capability, and it can be stolen by another capability.
241 If P==1 and Q==0: the TSO is runnable on another
242 capability. At any time, the TSO may change from runnable
243 to blocked and vice versa, while it remains owned by
246 Suppose we test like this:
252 this is defeated by another capability stealing a blocked
253 TSO from us to wake it up (Schedule.c:unblockOne()). The
254 other thread is doing
259 assuming arbitrary reordering, we could see this
269 so we need a memory barrier:
276 this avoids the problematic case. There are other cases
277 to consider, but this is the tricky one.
279 Note that we must be sure that unblockOne() does the
280 writes in the correct order: Q before P. The memory
281 barrier ensures that if we have seen the write to P, we
282 have also seen the write to Q.
285 Capability *target_cap;
288 target_cap = target->cap;
289 if (target_cap != cap) {
290 throwToSendMsg(cap, target_cap, msg);
291 return THROWTO_BLOCKED;
293 if ((target->flags & TSO_BLOCKEX) == 0) {
294 // It's on our run queue and not blocking exceptions
295 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
296 return THROWTO_SUCCESS;
298 blockedThrowTo(cap,target,msg);
299 return THROWTO_BLOCKED;
304 case BlockedOnMsgThrowTo:
306 Capability *target_cap;
307 const StgInfoTable *i;
310 m = target->block_info.throwto;
312 // target is local to this cap, but has sent a throwto
313 // message to another cap.
315 // The source message is locked. We need to revoke the
316 // target's message so that we can raise the exception, so
317 // we attempt to lock it.
319 // There's a possibility of a deadlock if two threads are both
320 // trying to throwTo each other (or more generally, a cycle of
321 // threads). To break the symmetry we compare the addresses
322 // of the MessageThrowTo objects, and the one for which m <
323 // msg gets to spin, while the other can only try to lock
324 // once, but must then back off and unlock both before trying
327 i = lockClosure((StgClosure *)m);
329 i = tryLockClosure((StgClosure *)m);
331 // debugBelch("collision\n");
332 throwToSendMsg(cap, target->cap, msg);
333 return THROWTO_BLOCKED;
337 if (i != &stg_MSG_THROWTO_info) {
338 // if it's an IND, this TSO has been woken up by another Cap
339 unlockClosure((StgClosure*)m, i);
343 target_cap = target->cap;
344 if (target_cap != cap) {
345 unlockClosure((StgClosure*)m, i);
346 throwToSendMsg(cap, target_cap, msg);
347 return THROWTO_BLOCKED;
350 if ((target->flags & TSO_BLOCKEX) &&
351 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
352 unlockClosure((StgClosure*)m, i);
353 blockedThrowTo(cap,target,msg);
354 return THROWTO_BLOCKED;
357 // nobody else can wake up this TSO after we claim the message
358 unlockClosure((StgClosure*)m, &stg_IND_info);
360 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
361 unblockOne(cap, target);
362 return THROWTO_SUCCESS;
368 To establish ownership of this TSO, we need to acquire a
369 lock on the MVar that it is blocked on.
372 StgInfoTable *info USED_IF_THREADS;
374 mvar = (StgMVar *)target->block_info.closure;
376 // ASSUMPTION: tso->block_info must always point to a
377 // closure. In the threaded RTS it does.
378 switch (get_itbl(mvar)->type) {
386 info = lockClosure((StgClosure *)mvar);
388 if (target->what_next == ThreadRelocated) {
389 target = target->_link;
390 unlockClosure((StgClosure *)mvar,info);
393 // we have the MVar, let's check whether the thread
394 // is still blocked on the same MVar.
395 if (target->why_blocked != BlockedOnMVar
396 || (StgMVar *)target->block_info.closure != mvar) {
397 unlockClosure((StgClosure *)mvar, info);
401 if ((target->flags & TSO_BLOCKEX) &&
402 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
403 Capability *target_cap = target->cap;
404 if (target->cap != cap) {
405 throwToSendMsg(cap,target_cap,msg);
407 blockedThrowTo(cap,target,msg);
409 unlockClosure((StgClosure *)mvar, info);
410 return THROWTO_BLOCKED;
412 removeThreadFromMVarQueue(cap, mvar, target);
413 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
414 unblockOne(cap, target);
415 unlockClosure((StgClosure *)mvar, info);
416 return THROWTO_SUCCESS;
420 case BlockedOnBlackHole:
422 ACQUIRE_LOCK(&sched_mutex);
423 // double checking the status after the memory barrier:
424 if (target->why_blocked != BlockedOnBlackHole) {
425 RELEASE_LOCK(&sched_mutex);
429 if (target->flags & TSO_BLOCKEX) {
430 Capability *target_cap = target->cap;
431 if (target->cap != cap) {
432 throwToSendMsg(cap,target_cap,msg);
434 blockedThrowTo(cap,target,msg);
436 RELEASE_LOCK(&sched_mutex);
437 return THROWTO_BLOCKED; // caller releases lock
439 removeThreadFromQueue(cap, &blackhole_queue, target);
440 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
441 unblockOne(cap, target);
442 RELEASE_LOCK(&sched_mutex);
443 return THROWTO_SUCCESS;
449 // Unblocking BlockedOnSTM threads requires the TSO to be
450 // locked; see STM.c:unpark_tso().
451 if (target->why_blocked != BlockedOnSTM) {
455 if ((target->flags & TSO_BLOCKEX) &&
456 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
457 Capability *target_cap = target->cap;
458 if (target->cap != cap) {
459 throwToSendMsg(cap,target_cap,msg);
461 blockedThrowTo(cap,target,msg);
464 return THROWTO_BLOCKED;
466 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
467 unblockOne(cap, target);
469 return THROWTO_SUCCESS;
473 case BlockedOnCCall_NoUnblockExc:
475 Capability *target_cap;
477 target_cap = target->cap;
478 if (target_cap != cap) {
479 throwToSendMsg(cap, target_cap, msg);
480 return THROWTO_BLOCKED;
483 blockedThrowTo(cap,target,msg);
484 return THROWTO_BLOCKED;
// FIX: was "#ifndef THREADEDED_RTS" — a misspelling of THREADED_RTS.
// The misspelled macro is never defined anywhere, so the guard was
// unconditionally true and the non-threaded-only cases were compiled
// into the threaded RTS as well.
487 #ifndef THREADED_RTS
491 #if defined(mingw32_HOST_OS)
492 case BlockedOnDoProc:
494 if ((target->flags & TSO_BLOCKEX) &&
495 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
496 blockedThrowTo(cap,target,msg);
497 return THROWTO_BLOCKED;
499 removeFromQueues(cap,target);
500 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
501 return THROWTO_SUCCESS;
506 barf("throwTo: unrecognised why_blocked value");
// Send 'msg' to the Capability that currently owns the target TSO.
// Only meaningful in the threaded RTS (note USED_IF_THREADS markers);
// delivery is via the Capability message queue (sendMessage).
512 throwToSendMsg (Capability *cap STG_UNUSED,
513 Capability *target_cap USED_IF_THREADS,
514 MessageThrowTo *msg USED_IF_THREADS)
518 debugTrace(DEBUG_sched, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);
520 sendMessage(target_cap, (Message*)msg);
524 // Block a throwTo message on the target TSO's blocked_exceptions
525 // queue. The current Capability must own the target TSO in order to
526 // modify the blocked_exceptions queue.
// Pushes 'msg' onto the front of the queue (LIFO) and marks the TSO
// dirty so the GC scans the mutated TSO.
528 blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
530 debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
531 (unsigned long)target->id);
533 ASSERT(target->cap == cap);
535 msg->link = (Message*)target->blocked_exceptions;
536 target->blocked_exceptions = msg;
537 dirty_TSO(cap,target); // we modified the blocked_exceptions queue
540 /* -----------------------------------------------------------------------------
541 Waking up threads blocked in throwTo
543 There are two ways to do this: maybePerformBlockedException() will
544 perform the throwTo() for the thread at the head of the queue
545 immediately, and leave the other threads on the queue.
546 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
547 before raising an exception.
549 awakenBlockedExceptionQueue() will wake up all the threads in the
550 queue, but not perform any throwTo() immediately. This might be
551 more appropriate when the target thread is the one actually running
554 Returns: non-zero if an exception was raised, zero otherwise.
555 -------------------------------------------------------------------------- */
// If 'tso' has pending blocked exceptions and is not masking them,
// raise the first one immediately and wake its sender. Returns
// non-zero if an exception was raised, zero otherwise (see comment
// block above).
558 maybePerformBlockedException (Capability *cap, StgTSO *tso)
561 const StgInfoTable *i;
// NOTE(review): ThreadFinished is a scheduler return code, while
// what_next values are ThreadComplete/ThreadKilled/etc. — confirm this
// comparison is intended (ThreadKilled may have been meant).
563 if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
564 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
565 awakenBlockedExceptionQueue(cap,tso);
572 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
573 (tso->flags & TSO_BLOCKEX) != 0) {
574 debugTrace(DEBUG_sched, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
577 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
578 && ((tso->flags & TSO_BLOCKEX) == 0
579 || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {
581 // We unblock just the first thread on the queue, and perform
582 // its throw immediately.
584 msg = tso->blocked_exceptions;
585 if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
586 i = lockClosure((StgClosure*)msg);
587 tso->blocked_exceptions = (MessageThrowTo*)msg->link;
// An IND info pointer means the message was already revoked by the
// sender; just drop it.
588 if (i == &stg_IND_info) {
589 unlockClosure((StgClosure*)msg,i);
593 performBlockedException(cap, msg);
594 unblockOne_(cap, msg->source, rtsFalse/*no migrate*/);
595 unlockClosure((StgClosure*)msg,&stg_IND_info);
601 // awakenBlockedExceptionQueue(): Just wake up the whole queue of
602 // blocked exceptions and let them try again.
// Unlike maybePerformBlockedException, no exception is raised here;
// each sender is simply made runnable again. The queue is emptied.
605 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
608 const StgInfoTable *i;
610 for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
611 msg = (MessageThrowTo*)msg->link) {
612 i = lockClosure((StgClosure *)msg);
// Skip messages already revoked (IND); only wake live senders.
613 if (i != &stg_IND_info) {
614 unblockOne_(cap, msg->source, rtsFalse/*no migrate*/);
616 unlockClosure((StgClosure *)msg,i);
618 tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
// Actually deliver a previously-blocked throwto message: sanity-check
// that the source TSO is genuinely parked on this message, then raise
// the exception in the target via throwToSingleThreaded.
622 performBlockedException (Capability *cap, MessageThrowTo *msg)
626 source = msg->source;
628 ASSERT(source->why_blocked == BlockedOnMsgThrowTo);
629 ASSERT(source->block_info.closure == (StgClosure *)msg);
630 ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
631 ASSERT(((StgTSO *)source->sp[1])->id == msg->target->id);
632 // check ids not pointers, because the thread might be relocated
634 throwToSingleThreaded(cap, msg->target, msg->exception);
638 /* -----------------------------------------------------------------------------
639 Remove a thread from blocking queues.
641 This is for use when we raise an exception in another thread, which
644 Precondition: we have exclusive access to the TSO, via the same set
645 of conditions as throwToSingleThreaded() (c.f.).
646 -------------------------------------------------------------------------- */
// Detach 'tso' from whatever blocking queue it is on, dispatching on
// why_blocked, and finally make it runnable (unblockOne at the end).
// Precondition: exclusive access to the TSO (see comment block above).
649 removeFromQueues(Capability *cap, StgTSO *tso)
651 switch (tso->why_blocked) {
657 // Be careful: nothing to do here! We tell the scheduler that the
658 // thread is runnable and we leave it to the stack-walking code to
659 // abort the transaction while unwinding the stack. We should
660 // perhaps have a debugging test to make sure that this really
661 // happens and that the 'zombie' transaction does not get
666 removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
669 case BlockedOnBlackHole:
670 removeThreadFromQueue(cap, &blackhole_queue, tso);
673 case BlockedOnMsgWakeup:
675 // kill the message, atomically:
676 tso->block_info.wakeup->header.info = &stg_IND_info;
680 case BlockedOnMsgThrowTo:
682 MessageThrowTo *m = tso->block_info.throwto;
683 // The message is locked by us, unless we got here via
684 // deleteAllThreads(), in which case we own all the
686 // ASSERT(m->header.info == &stg_WHITEHOLE_info);
688 // unlock and revoke it at the same time
689 unlockClosure((StgClosure*)m,&stg_IND_info);
693 #if !defined(THREADED_RTS)
696 #if defined(mingw32_HOST_OS)
697 case BlockedOnDoProc:
699 removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
700 #if defined(mingw32_HOST_OS)
701 /* (Cooperatively) signal that the worker thread should abort
704 abandonWorkRequest(tso->block_info.async_result->reqID);
709 removeThreadFromQueue(cap, &sleeping_queue, tso);
714 barf("removeFromQueues: %d", tso->why_blocked);
718 unblockOne(cap, tso);
721 /* -----------------------------------------------------------------------------
724 * The following function implements the magic for raising an
725 * asynchronous exception in an existing thread.
727 * We first remove the thread from any queue on which it might be
728 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
730 * We strip the stack down to the innermost CATCH_FRAME, building
731 * thunks in the heap for all the active computations, so they can
732 * be restarted if necessary. When we reach a CATCH_FRAME, we build
733 * an application of the handler to the exception, and push it on
734 * the top of the stack.
736 * How exactly do we save all the active computations? We create an
737 * AP_STACK for every UpdateFrame on the stack. Entering one of these
738 * AP_STACKs pushes everything from the corresponding update frame
739 * upwards onto the stack. (Actually, it pushes everything up to the
740 * next update frame plus a pointer to the next AP_STACK object.
741 * Entering the next AP_STACK object pushes more onto the stack until we
742 * reach the last AP_STACK object - at which point the stack should look
743 * exactly as it did when we killed the TSO and we can continue
744 * execution by entering the closure on top of the stack.
746 * We can also kill a thread entirely - this happens if either (a) the
747 * exception passed to raiseAsync is NULL, or (b) there's no
748 * CATCH_FRAME on the stack. In either case, we strip the entire
749 * stack and replace the thread with a zombie.
751 * ToDo: in THREADED_RTS mode, this function is only safe if either
752 * (a) we hold all the Capabilities (eg. in GC, or if there is only
753 * one Capability), or (b) we own the Capability that the TSO is
754 * currently blocked on or on the run queue of.
756 * -------------------------------------------------------------------------- */
// Core stack-unwinding machinery for asynchronous exceptions (see the
// comment block above): walk the target's stack frame by frame,
// freezing each chunk under an UPDATE_FRAME into an AP_STACK thunk, and
// stop at a CATCH_FRAME (push raise#(exception)), a STOP_FRAME (kill
// the thread), 'stop_here', or an ATOMICALLY_FRAME when
// stop_at_atomically is set. exception == NULL means kill/suspend only.
759 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
760 rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
762 StgRetInfoTable *info;
767 debugTrace(DEBUG_sched,
768 "raising exception in thread %ld.", (long)tso->id);
770 #if defined(PROFILING)
772 * Debugging tool: on raising an exception, show where we are.
773 * See also Exception.cmm:stg_raisezh.
774 * This wasn't done for asynchronous exceptions originally; see #1450
776 if (RtsFlags.ProfFlags.showCCSOnException)
778 fprintCCS_stderr(tso->prof.CCCS);
781 // ASSUMES: the thread is not already complete or dead, or
782 // ThreadRelocated. Upper layers should deal with that.
783 ASSERT(tso->what_next != ThreadComplete &&
784 tso->what_next != ThreadKilled &&
785 tso->what_next != ThreadRelocated);
787 // mark it dirty; we're about to change its stack.
792 if (stop_here != NULL) {
793 updatee = stop_here->updatee;
798 // The stack freezing code assumes there's a closure pointer on
799 // the top of the stack, so we have to arrange that this is the case...
801 if (sp[0] == (W_)&stg_enter_info) {
805 sp[0] = (W_)&stg_dummy_ret_closure;
809 while (stop_here == NULL || frame < (StgPtr)stop_here) {
811 // 1. Let the top of the stack be the "current closure"
813 // 2. Walk up the stack until we find either an UPDATE_FRAME or a
816 // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
817 // current closure applied to the chunk of stack up to (but not
818 // including) the update frame. This closure becomes the "current
819 // closure". Go back to step 2.
821 // 4. If it's a CATCH_FRAME, then leave the exception handler on
822 // top of the stack applied to the exception.
824 // 5. If it's a STOP_FRAME, then kill the thread.
826 // NB: if we pass an ATOMICALLY_FRAME then abort the associated
829 info = get_ret_itbl((StgClosure *)frame);
831 switch (info->i.type) {
838 // First build an AP_STACK consisting of the stack chunk above the
839 // current update frame, with the top word on the stack as the
842 words = frame - sp - 1;
843 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
846 ap->fun = (StgClosure *)sp[0];
848 for(i=0; i < (nat)words; ++i) {
849 ap->payload[i] = (StgClosure *)*sp++;
852 SET_HDR(ap,&stg_AP_STACK_info,
853 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
854 TICK_ALLOC_UP_THK(words+1,0);
856 //IF_DEBUG(scheduler,
857 // debugBelch("sched: Updating ");
858 // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
859 // debugBelch(" with ");
860 // printObj((StgClosure *)ap);
863 if (((StgUpdateFrame *)frame)->updatee == updatee) {
864 // If this update frame points to the same closure as
865 // the update frame further down the stack
866 // (stop_here), then don't perform the update. We
867 // want to keep the blackhole in this case, so we can
868 // detect and report the loop (#2783).
869 ap = (StgAP_STACK*)updatee;
871 // Perform the update
872 // TODO: this may waste some work, if the thunk has
873 // already been updated by another thread.
874 UPD_IND(cap, ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
877 sp += sizeofW(StgUpdateFrame) - 1;
878 sp[0] = (W_)ap; // push onto stack
880 continue; //no need to bump frame
885 // We've stripped the entire stack, the thread is now dead.
886 tso->what_next = ThreadKilled;
887 tso->sp = frame + sizeofW(StgStopFrame);
892 // If we find a CATCH_FRAME, and we've got an exception to raise,
893 // then build the THUNK raise(exception), and leave it on
894 // top of the CATCH_FRAME ready to enter.
898 StgCatchFrame *cf = (StgCatchFrame *)frame;
902 if (exception == NULL) break;
904 // we've got an exception to raise, so let's pass it to the
905 // handler in this frame.
907 raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
908 TICK_ALLOC_SE_THK(1,0);
909 SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
910 raise->payload[0] = exception;
912 // throw away the stack from Sp up to the CATCH_FRAME.
916 /* Ensure that async exceptions are blocked now, so we don't get
917 * a surprise exception before we get around to executing the
920 tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
922 /* Put the newly-built THUNK on top of the stack, ready to execute
923 * when the thread restarts.
926 sp[-1] = (W_)&stg_enter_info;
928 tso->what_next = ThreadRunGHC;
929 IF_DEBUG(sanity, checkTSO(tso));
933 case ATOMICALLY_FRAME:
934 if (stop_at_atomically) {
935 ASSERT(tso->trec->enclosing_trec == NO_TREC);
936 stmCondemnTransaction(cap, tso -> trec);
938 // The ATOMICALLY_FRAME expects to be returned a
939 // result from the transaction, which it stores in the
940 // stack frame. Hence we arrange to return a dummy
941 // result, so that the GC doesn't get upset (#3578).
942 // Perhaps a better way would be to have a different
943 // ATOMICALLY_FRAME instance for condemned
944 // transactions, but I don't fully understand the
945 // interaction with STM invariants.
946 tso->sp[1] = (W_)&stg_NO_TREC_closure;
947 tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
948 tso->what_next = ThreadRunGHC;
951 // Not stop_at_atomically... fall through and abort the
954 case CATCH_STM_FRAME:
955 case CATCH_RETRY_FRAME:
956 // IF we find an ATOMICALLY_FRAME then we abort the
957 // current transaction and propagate the exception. In
958 // this case (unlike ordinary exceptions) we do not care
959 // whether the transaction is valid or not because its
960 // possible validity cannot have caused the exception
961 // and will not be visible after the abort.
964 StgTRecHeader *trec = tso -> trec;
965 StgTRecHeader *outer = trec -> enclosing_trec;
966 debugTrace(DEBUG_stm,
967 "found atomically block delivering async exception");
968 stmAbortTransaction(cap, trec);
969 stmFreeAbortedTRec(cap, trec);
978 // move on to the next stack frame
979 frame += stack_frame_sizeW((StgClosure *)frame);
982 // if we got here, then we stopped at stop_here
983 ASSERT(stop_here != NULL);