1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Asynchronous exceptions
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
12 #include "sm/Storage.h"
15 #include "RaiseAsync.h"
19 #include "sm/Sanity.h"
20 #include "Profiling.h"
22 #if defined(mingw32_HOST_OS)
23 #include "win32/IOManager.h"
/* Internal helpers -- forward declarations. */

// Strip the target TSO's stack and deliver an exception to it (or kill the
// thread when exception == NULL); see the definition below.
// NOTE(review): the "StgTSO *tso" parameter line appears to have been lost
// from this declaration in this extract (the definition below takes it) --
// confirm against the full source.
static void raiseAsync (Capability *cap,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,
                        StgUpdateFrame *stop_here);

// Detach 'tso' from whatever queue it is currently blocked on.
static void removeFromQueues(Capability *cap, StgTSO *tso);

// Revoke a pending takeMVar/putMVar by unlinking 'tso' from the MVar's queue.
static void removeFromMVarBlockedQueue (StgTSO *tso);

// Park 'msg' on target's blocked_exceptions queue; the current Capability
// must own 'target'.
static void blockedThrowTo (Capability *cap,
                            StgTSO *target, MessageThrowTo *msg);

// Forward 'msg' to the Capability that owns the target TSO (threaded RTS).
static void throwToSendMsg (Capability *cap USED_IF_THREADS,
                            Capability *target_cap USED_IF_THREADS,
                            MessageThrowTo *msg USED_IF_THREADS);
43 /* -----------------------------------------------------------------------------
46 This version of throwTo is safe to use if and only if one of the
51 - all the other threads in the system are stopped (eg. during GC).
53 - we surely own the target TSO (eg. we just took it from the
54 run queue of the current capability, or we are running it).
56 It doesn't cater for blocking the source thread until the exception
58 -------------------------------------------------------------------------- */
61 throwToSingleThreaded__ (Capability *cap, StgTSO *tso, StgClosure *exception,
62 rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
66 // Thread already dead?
67 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
71 // Remove it from any blocking queues
72 removeFromQueues(cap,tso);
74 raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
78 throwToSingleThreaded (Capability *cap, StgTSO *tso, StgClosure *exception)
80 throwToSingleThreaded__(cap, tso, exception, rtsFalse, NULL);
84 throwToSingleThreaded_ (Capability *cap, StgTSO *tso, StgClosure *exception,
85 rtsBool stop_at_atomically)
87 throwToSingleThreaded__ (cap, tso, exception, stop_at_atomically, NULL);
91 suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
93 throwToSingleThreaded__ (cap, tso, NULL, rtsFalse, stop_here);
96 /* -----------------------------------------------------------------------------
99 This function may be used to throw an exception from one thread to
100 another, during the course of normal execution. This is a tricky
101 task: the target thread might be running on another CPU, or it
102 may be blocked and could be woken up at any point by another CPU.
103 We have some delicate synchronisation to do.
105 The underlying scheme when multiple Capabilities are in use is
106 message passing: when the target of a throwTo is on another
107 Capability, we send a message (a MessageThrowTo closure) to that
110 If the throwTo needs to block because the target TSO is masking
111 exceptions (the TSO_BLOCKEX flag), then the message is placed on
112 the blocked_exceptions queue attached to the target TSO. When the
113 target TSO enters the unmasked state again, it must check the
114 queue. The blocked_exceptions queue is not locked; only the
115 Capability owning the TSO may modify it.
117 To make things simpler for throwTo, we always create the message
118 first before deciding what to do. The message may get sent, or it
119 may get attached to a TSO's blocked_exceptions queue, or the
120 exception may get thrown immediately and the message dropped,
121 depending on the current state of the target.
123 Currently we send a message if the target belongs to another
124 Capability, and it is
126 - NotBlocked, BlockedOnMsgThrowTo,
127 BlockedOnCCall_Interruptible
129 - or it is masking exceptions (TSO_BLOCKEX)
131 Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
132 BlockedOnBlackHole then we acquire ownership of the TSO by locking
133 its parent container (e.g. the MVar) and then raise the exception.
134 We might change these cases to be more message-passing-like in the
139 NULL exception was raised, ok to continue
141 MessageThrowTo * exception was not raised; the source TSO
142 should now put itself in the state
143 BlockedOnMsgThrowTo, and when it is ready
                             it should unlock the message using
145 unlockClosure(msg, &stg_MSG_THROWTO_info);
146 If it decides not to raise the exception after
147 all, it can revoke it safely with
148 unlockClosure(msg, &stg_MSG_NULL_info);
150 -------------------------------------------------------------------------- */
153 throwTo (Capability *cap, // the Capability we hold
154 StgTSO *source, // the TSO sending the exception (or NULL)
155 StgTSO *target, // the TSO receiving the exception
156 StgClosure *exception) // the exception closure
160 msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
161 // message starts locked; the caller has to unlock it when it is
163 SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
164 msg->source = source;
165 msg->target = target;
166 msg->exception = exception;
168 switch (throwToMsg(cap, msg))
170 case THROWTO_SUCCESS:
172 case THROWTO_BLOCKED:
180 throwToMsg (Capability *cap, MessageThrowTo *msg)
183 StgTSO *target = msg->target;
184 Capability *target_cap;
190 debugTrace(DEBUG_sched, "throwTo: retrying...");
193 ASSERT(target != END_TSO_QUEUE);
195 // follow ThreadRelocated links in the target first
196 target = deRefTSO(target);
198 // Thread already dead?
199 if (target->what_next == ThreadComplete
200 || target->what_next == ThreadKilled) {
201 return THROWTO_SUCCESS;
204 debugTraceCap(DEBUG_sched, cap,
205 "throwTo: from thread %lu to thread %lu",
206 (unsigned long)msg->source->id,
207 (unsigned long)msg->target->id);
210 traceThreadStatus(DEBUG_sched, target);
213 target_cap = target->cap;
214 if (target->cap != cap) {
215 throwToSendMsg(cap, target_cap, msg);
216 return THROWTO_BLOCKED;
219 status = target->why_blocked;
224 if ((target->flags & TSO_BLOCKEX) == 0) {
225 // It's on our run queue and not blocking exceptions
226 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
227 return THROWTO_SUCCESS;
229 blockedThrowTo(cap,target,msg);
230 return THROWTO_BLOCKED;
234 case BlockedOnMsgThrowTo:
236 const StgInfoTable *i;
239 m = target->block_info.throwto;
241 // target is local to this cap, but has sent a throwto
242 // message to another cap.
244 // The source message is locked. We need to revoke the
245 // target's message so that we can raise the exception, so
246 // we attempt to lock it.
248 // There's a possibility of a deadlock if two threads are both
249 // trying to throwTo each other (or more generally, a cycle of
250 // threads). To break the symmetry we compare the addresses
251 // of the MessageThrowTo objects, and the one for which m <
252 // msg gets to spin, while the other can only try to lock
253 // once, but must then back off and unlock both before trying
256 i = lockClosure((StgClosure *)m);
258 i = tryLockClosure((StgClosure *)m);
260 // debugBelch("collision\n");
261 throwToSendMsg(cap, target->cap, msg);
262 return THROWTO_BLOCKED;
266 if (i == &stg_MSG_NULL_info) {
267 // we know there's a MSG_TRY_WAKEUP on the way, so we
268 // might as well just do it now. The message will
269 // be a no-op when it arrives.
270 unlockClosure((StgClosure*)m, i);
271 tryWakeupThread_(cap, target);
275 if (i != &stg_MSG_THROWTO_info) {
276 // if it's a MSG_NULL, this TSO has been woken up by another Cap
277 unlockClosure((StgClosure*)m, i);
281 if ((target->flags & TSO_BLOCKEX) &&
282 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
283 unlockClosure((StgClosure*)m, i);
284 blockedThrowTo(cap,target,msg);
285 return THROWTO_BLOCKED;
288 // nobody else can wake up this TSO after we claim the message
289 unlockClosure((StgClosure*)m, &stg_MSG_NULL_info);
291 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
292 return THROWTO_SUCCESS;
298 To establish ownership of this TSO, we need to acquire a
299 lock on the MVar that it is blocked on.
302 StgInfoTable *info USED_IF_THREADS;
304 mvar = (StgMVar *)target->block_info.closure;
306 // ASSUMPTION: tso->block_info must always point to a
307 // closure. In the threaded RTS it does.
308 switch (get_itbl(mvar)->type) {
316 info = lockClosure((StgClosure *)mvar);
318 if (target->what_next == ThreadRelocated) {
319 target = target->_link;
320 unlockClosure((StgClosure *)mvar,info);
323 // we have the MVar, let's check whether the thread
324 // is still blocked on the same MVar.
325 if (target->why_blocked != BlockedOnMVar
326 || (StgMVar *)target->block_info.closure != mvar) {
327 unlockClosure((StgClosure *)mvar, info);
331 if (target->_link == END_TSO_QUEUE) {
332 // the MVar operation has already completed. There is a
333 // MSG_TRY_WAKEUP on the way, but we can just wake up the
334 // thread now anyway and ignore the message when it
336 unlockClosure((StgClosure *)mvar, info);
337 tryWakeupThread_(cap, target);
341 if ((target->flags & TSO_BLOCKEX) &&
342 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
343 blockedThrowTo(cap,target,msg);
344 unlockClosure((StgClosure *)mvar, info);
345 return THROWTO_BLOCKED;
347 // revoke the MVar operation
348 removeFromMVarBlockedQueue(target);
349 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
350 unlockClosure((StgClosure *)mvar, info);
351 return THROWTO_SUCCESS;
355 case BlockedOnBlackHole:
357 if (target->flags & TSO_BLOCKEX) {
358 // BlockedOnBlackHole is not interruptible.
359 blockedThrowTo(cap,target,msg);
360 return THROWTO_BLOCKED;
362 // Revoke the message by replacing it with IND. We're not
363 // locking anything here, so we might still get a TRY_WAKEUP
364 // message from the owner of the blackhole some time in the
365 // future, but that doesn't matter.
366 ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
367 OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
368 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
369 return THROWTO_SUCCESS;
375 // Unblocking BlockedOnSTM threads requires the TSO to be
376 // locked; see STM.c:unpark_tso().
377 if (target->why_blocked != BlockedOnSTM) {
381 if ((target->flags & TSO_BLOCKEX) &&
382 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
383 blockedThrowTo(cap,target,msg);
385 return THROWTO_BLOCKED;
387 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
389 return THROWTO_SUCCESS;
392 case BlockedOnCCall_Interruptible:
396 // walk suspended_ccalls to find the correct worker thread
398 for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
399 if (incall->suspended_tso == target) {
405 blockedThrowTo(cap, target, msg);
406 if (!((target->flags & TSO_BLOCKEX) &&
407 ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
408 interruptWorkerTask(task);
410 return THROWTO_BLOCKED;
412 debugTraceCap(DEBUG_sched, cap, "throwTo: could not find worker thread to kill");
418 blockedThrowTo(cap,target,msg);
419 return THROWTO_BLOCKED;
421 #ifndef THREADEDED_RTS
425 #if defined(mingw32_HOST_OS)
426 case BlockedOnDoProc:
428 if ((target->flags & TSO_BLOCKEX) &&
429 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
430 blockedThrowTo(cap,target,msg);
431 return THROWTO_BLOCKED;
433 removeFromQueues(cap,target);
434 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
435 return THROWTO_SUCCESS;
440 barf("throwTo: unrecognised why_blocked value");
446 throwToSendMsg (Capability *cap STG_UNUSED,
447 Capability *target_cap USED_IF_THREADS,
448 MessageThrowTo *msg USED_IF_THREADS)
452 debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);
454 sendMessage(cap, target_cap, (Message*)msg);
458 // Block a throwTo message on the target TSO's blocked_exceptions
459 // queue. The current Capability must own the target TSO in order to
460 // modify the blocked_exceptions queue.
462 blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
464 debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
465 (unsigned long)target->id);
467 ASSERT(target->cap == cap);
469 msg->link = target->blocked_exceptions;
470 target->blocked_exceptions = msg;
471 dirty_TSO(cap,target); // we modified the blocked_exceptions queue
474 /* -----------------------------------------------------------------------------
475 Waking up threads blocked in throwTo
477 There are two ways to do this: maybePerformBlockedException() will
478 perform the throwTo() for the thread at the head of the queue
479 immediately, and leave the other threads on the queue.
480 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
481 before raising an exception.
483 awakenBlockedExceptionQueue() will wake up all the threads in the
484 queue, but not perform any throwTo() immediately. This might be
485 more appropriate when the target thread is the one actually running
488 Returns: non-zero if an exception was raised, zero otherwise.
489 -------------------------------------------------------------------------- */
// maybePerformBlockedException(): if 'tso' has a pending blocked
// exception and is not masking (or is interruptibly masked while in an
// interruptible blocking state), take the first message off the queue
// and raise it immediately.  Per the comment above: returns non-zero if
// an exception was raised, zero otherwise.
// NOTE(review): the return-type line and the declaration of 'msg'
// (presumably MessageThrowTo *) are not visible in this extract, nor are
// several closing braces / return statements -- confirm against the full
// source.
maybePerformBlockedException (Capability *cap, StgTSO *tso)

    const StgInfoTable *i;

    // A finished thread can never run a handler itself: just wake all
    // blocked senders so they can observe that the thread is dead.
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
        if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
            awakenBlockedExceptionQueue(cap,tso);

    // Debug aid: note when exceptions are pending but currently masked.
    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
        (tso->flags & TSO_BLOCKEX) != 0) {
        debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);

    // Deliverable iff: queue non-empty AND (not masking, or masked
    // interruptibly while interruptible(tso) holds).
    if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
        msg = tso->blocked_exceptions;
        if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
        i = lockClosure((StgClosure*)msg);
        tso->blocked_exceptions = (MessageThrowTo*)msg->link;
        if (i == &stg_MSG_NULL_info) {
            // message already revoked by its sender: skip it
            unlockClosure((StgClosure*)msg,i);

        // raise the exception, then revoke the message (MSG_NULL) and
        // wake the sender so it can continue
        throwToSingleThreaded(cap, msg->target, msg->exception);
        unlockClosure((StgClosure*)msg,&stg_MSG_NULL_info);
        tryWakeupThread(cap, msg->source);
535 // awakenBlockedExceptionQueue(): Just wake up the whole queue of
536 // blocked exceptions.
539 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
542 const StgInfoTable *i;
544 for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
545 msg = (MessageThrowTo*)msg->link) {
546 i = lockClosure((StgClosure *)msg);
547 if (i != &stg_MSG_NULL_info) {
548 unlockClosure((StgClosure *)msg,&stg_MSG_NULL_info);
549 tryWakeupThread(cap, msg->source);
551 unlockClosure((StgClosure *)msg,i);
554 tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
557 /* -----------------------------------------------------------------------------
558 Remove a thread from blocking queues.
560 This is for use when we raise an exception in another thread, which
563 Precondition: we have exclusive access to the TSO, via the same set
564 of conditions as throwToSingleThreaded() (c.f.).
565 -------------------------------------------------------------------------- */
568 removeFromMVarBlockedQueue (StgTSO *tso)
570 StgMVar *mvar = (StgMVar*)tso->block_info.closure;
571 StgMVarTSOQueue *q = (StgMVarTSOQueue*)tso->_link;
573 if (q == (StgMVarTSOQueue*)END_TSO_QUEUE) {
574 // already removed from this MVar
578 // Assume the MVar is locked. (not assertable; sometimes it isn't
579 // actually WHITEHOLE'd).
581 // We want to remove the MVAR_TSO_QUEUE object from the queue. It
582 // isn't doubly-linked so we can't actually remove it; instead we
583 // just overwrite it with an IND if possible and let the GC short
584 // it out. However, we have to be careful to maintain the deque
587 if (mvar->head == q) {
588 mvar->head = q->link;
589 q->header.info = &stg_IND_info;
590 if (mvar->tail == q) {
591 mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
594 else if (mvar->tail == q) {
595 // we can't replace it with an IND in this case, because then
596 // we lose the tail pointer when the GC shorts out the IND.
597 // So we use MSG_NULL as a kind of non-dupable indirection;
598 // these are ignored by takeMVar/putMVar.
599 q->header.info = &stg_MSG_NULL_info;
602 q->header.info = &stg_IND_info;
605 // revoke the MVar operation
606 tso->_link = END_TSO_QUEUE;
// removeFromQueues(): detach 'tso' from whatever it is blocked on, then
// mark it runnable and append it to the run queue.  Precondition (per the
// comment above): exclusive access to the TSO.
// NOTE(review): the return type, several case labels, braces and
// break/goto statements are not visible in this extract; the code lines
// are reproduced unchanged, with elided labels noted in comments.
removeFromQueues(Capability *cap, StgTSO *tso)

    switch (tso->why_blocked) {

    case ThreadMigrating:

    // (case BlockedOnSTM:)
    // Be careful: nothing to do here!  We tell the scheduler that the
    // thread is runnable and we leave it to the stack-walking code to
    // abort the transaction while unwinding the stack.  We should
    // perhaps have a debugging test to make sure that this really
    // happens and that the 'zombie' transaction does not get
    // condemned again (NOTE(review): tail of this sentence is elided).

    // (case BlockedOnMVar:)
        removeFromMVarBlockedQueue(tso);

    case BlockedOnBlackHole:
        // NOTE(review): body elided in this extract.

    case BlockedOnMsgThrowTo:

        MessageThrowTo *m = tso->block_info.throwto;
        // The message is locked by us, unless we got here via
        // deleteAllThreads(), in which case we own all the
        // capabilities.
        // ASSERT(m->header.info == &stg_WHITEHOLE_info);

        // unlock and revoke it at the same time
        unlockClosure((StgClosure*)m,&stg_MSG_NULL_info);

#if !defined(THREADED_RTS)
    // (case BlockedOnRead: / case BlockedOnWrite:)
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         * the request.  (NOTE(review): comment was truncated in this
         * extract; closed here so the following code is not swallowed.) */
        abandonWorkRequest(tso->block_info.async_result->reqID);

    // (case BlockedOnDelay:)
        removeThreadFromQueue(cap, &sleeping_queue, tso);

    // (default:)
        barf("removeFromQueues: %d", tso->why_blocked);

    // unblocked: make the thread runnable again
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap, tso);
677 /* -----------------------------------------------------------------------------
680 * The following function implements the magic for raising an
681 * asynchronous exception in an existing thread.
683 * We first remove the thread from any queue on which it might be
684 * blocked. The possible blockages are MVARs, BLOCKING_QUEUESs, and
685 * TSO blocked_exception queues.
687 * We strip the stack down to the innermost CATCH_FRAME, building
688 * thunks in the heap for all the active computations, so they can
689 * be restarted if necessary. When we reach a CATCH_FRAME, we build
690 * an application of the handler to the exception, and push it on
691 * the top of the stack.
693 * How exactly do we save all the active computations? We create an
694 * AP_STACK for every UpdateFrame on the stack. Entering one of these
695 * AP_STACKs pushes everything from the corresponding update frame
696 * upwards onto the stack. (Actually, it pushes everything up to the
697 * next update frame plus a pointer to the next AP_STACK object.
698 * Entering the next AP_STACK object pushes more onto the stack until we
699 * reach the last AP_STACK object - at which point the stack should look
700 * exactly as it did when we killed the TSO and we can continue
701 * execution by entering the closure on top of the stack.
703 * We can also kill a thread entirely - this happens if either (a) the
704 * exception passed to raiseAsync is NULL, or (b) there's no
705 * CATCH_FRAME on the stack. In either case, we strip the entire
706 * stack and replace the thread with a zombie.
708 * ToDo: in THREADED_RTS mode, this function is only safe if either
709 * (a) we hold all the Capabilities (eg. in GC, or if there is only
710 * one Capability), or (b) we own the Capability that the TSO is
711 * currently blocked on or on the run queue of.
713 * -------------------------------------------------------------------------- */
/* raiseAsync(): the machinery for raising an asynchronous exception in an
 * existing thread -- the algorithm is described in the long comment above
 * (strip the stack to the innermost CATCH_FRAME, building AP_STACKs for
 * each update frame so the computation can be resumed).
 * NOTE(review): this extract elides many structural lines (braces, case
 * labels, local declarations such as 'sp', 'frame', 'updatee', 'ap',
 * 'words', 'raise'); the code lines are reproduced unchanged, with elided
 * case labels noted in comments.
 */
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgUpdateFrame *stop_here)

    StgRetInfoTable *info;

    debugTraceCap(DEBUG_sched, cap,
                  "raising exception in thread %ld.", (long)tso->id);

#if defined(PROFILING)
    /*
     * Debugging tool: on raising an exception, show where we are.
     * See also Exception.cmm:stg_raisezh.
     * This wasn't done for asynchronous exceptions originally; see #1450
     */
    if (RtsFlags.ProfFlags.showCCSOnException)
        fprintCCS_stderr(tso->prof.CCCS);

    // ASSUMES: the thread is not already complete or dead, or
    // ThreadRelocated.  Upper layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete &&
           tso->what_next != ThreadKilled &&
           tso->what_next != ThreadRelocated);

    // only if we own this TSO (except that deleteThread() calls this
    ASSERT(tso->cap == cap);

    // make the target runnable again if it was blocked
    if (tso->why_blocked != NotBlocked) {
        tso->why_blocked = NotBlocked;
        appendToRunQueue(cap,tso);

    // mark it dirty; we're about to change its stack.

    if (stop_here != NULL) {
        updatee = stop_here->updatee;

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...

    if (sp[0] == (W_)&stg_enter_info) {
        // (NOTE(review): the branch that unwraps stg_enter_info is elided)
        sp[0] = (W_)&stg_dummy_ret_closure;

    while (stop_here == NULL || frame < (StgPtr)stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //    CATCH_FRAME (NOTE(review): tail of this item is elided).
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame.  This closure becomes the "current
        //    closure".  Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated
        // transaction.

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        // (case UPDATE_FRAME:)
            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the
            // fun field.
            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));

            ap->fun = (StgClosure *)sp[0];
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);

            if (((StgUpdateFrame *)frame)->updatee == updatee) {
                // If this update frame points to the same closure as
                // the update frame further down the stack
                // (stop_here), then don't perform the update.  We
                // want to keep the blackhole in this case, so we can
                // detect and report the loop (#2783).
                ap = (StgAP_STACK*)updatee;
            // (else:)
                // Perform the update
                // TODO: this may waste some work, if the thunk has
                // already been updated by another thread.
                updateThunk(cap, tso,
                            ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            continue; //no need to bump frame

        // (case STOP_FRAME:)
            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);

        // (case CATCH_FRAME:)
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.

            StgCatchFrame *cf = (StgCatchFrame *)frame;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.

            raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler. */
            tso->flags |= TSO_BLOCKEX;
            if ((cf->exceptions_blocked & TSO_INTERRUPTIBLE) == 0) {
                tso->flags &= ~TSO_INTERRUPTIBLE;
            // (else:)
                tso->flags |= TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[-1] = (W_)&stg_enter_info;

            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(tso->trec->enclosing_trec == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);

                // The ATOMICALLY_FRAME expects to be returned a
                // result from the transaction, which it stores in the
                // stack frame.  Hence we arrange to return a dummy
                // result, so that the GC doesn't get upset (#3578).
                // Perhaps a better way would be to have a different
                // ATOMICALLY_FRAME instance for condemned
                // transactions, but I don't fully understand the
                // interaction with STM invariants.
                tso->sp[1] = (W_)&stg_NO_TREC_closure;
                tso->sp[0] = (W_)&stg_gc_unpt_r1_info;
                tso->what_next = ThreadRunGHC;

            // Not stop_at_atomically... fall through and abort the
            // transaction.

        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
            // IF we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception.  In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.

            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = trec -> enclosing_trec;
            debugTraceCap(DEBUG_stm, cap,
                          "found atomically block delivering async exception");
            stmAbortTransaction(cap, trec);
            stmFreeAbortedTRec(cap, trec);

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);