1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Asynchronous exceptions
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
12 #include "sm/Storage.h"
15 #include "RaiseAsync.h"
19 #include "sm/Sanity.h"
20 #include "Profiling.h"
22 #if defined(mingw32_HOST_OS)
23 #include "win32/IOManager.h"
// Forward declarations for the static helpers defined later in this
// file.  raiseAsync() is the core stack-stripping routine; the others
// manage the various blocking queues a TSO can sit on.
26 static StgTSO* raiseAsync (Capability *cap,
28 StgClosure *exception,
29 rtsBool stop_at_atomically,
30 StgUpdateFrame *stop_here);
32 static void removeFromQueues(Capability *cap, StgTSO *tso);
34 static void removeFromMVarBlockedQueue (StgTSO *tso);
36 static void blockedThrowTo (Capability *cap,
37 StgTSO *target, MessageThrowTo *msg);
39 static void throwToSendMsg (Capability *cap USED_IF_THREADS,
40 Capability *target_cap USED_IF_THREADS,
41 MessageThrowTo *msg USED_IF_THREADS);
43 /* -----------------------------------------------------------------------------
46 This version of throwTo is safe to use if and only if one of the
51 - all the other threads in the system are stopped (eg. during GC).
53 - we surely own the target TSO (eg. we just took it from the
54 run queue of the current capability, or we are running it).
56 It doesn't cater for blocking the source thread until the exception
58 -------------------------------------------------------------------------- */
// Common worker for the throwToSingleThreaded* family: raise
// `exception` in `tso`, which the caller must own (see the safety
// conditions in the comment block above).  `exception == NULL` means
// strip the stack without raising anything (see suspendComputation).
61 throwToSingleThreaded__ (Capability *cap, StgTSO *tso, StgClosure *exception,
62 rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
64 // Thread already dead?
65 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
// Detach the TSO from whatever it is blocked on before touching its
// stack; raiseAsync assumes the thread is no longer queued anywhere.
69 // Remove it from any blocking queues
70 removeFromQueues(cap,tso);
72 raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
// Simple wrapper: raise `exception` in `tso`, never stopping at an
// ATOMICALLY_FRAME and stripping the whole stack (no stop frame).
76 throwToSingleThreaded (Capability *cap, StgTSO *tso, StgClosure *exception)
78 throwToSingleThreaded__(cap, tso, exception, rtsFalse, NULL);
// As throwToSingleThreaded, but the caller chooses whether stack
// stripping should stop when it reaches an ATOMICALLY_FRAME.
82 throwToSingleThreaded_ (Capability *cap, StgTSO *tso, StgClosure *exception,
83 rtsBool stop_at_atomically)
85 throwToSingleThreaded__ (cap, tso, exception, stop_at_atomically, NULL);
// Suspend the computation in `tso` up to (but not beyond) the update
// frame `stop_here`, without raising any exception (note the NULL
// exception argument).
88 void // cannot return a different TSO
89 suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
91 throwToSingleThreaded__ (cap, tso, NULL, rtsFalse, stop_here);
94 /* -----------------------------------------------------------------------------
97 This function may be used to throw an exception from one thread to
98 another, during the course of normal execution. This is a tricky
99 task: the target thread might be running on another CPU, or it
100 may be blocked and could be woken up at any point by another CPU.
101 We have some delicate synchronisation to do.
103 The underlying scheme when multiple Capabilities are in use is
104 message passing: when the target of a throwTo is on another
105 Capability, we send a message (a MessageThrowTo closure) to that
108 If the throwTo needs to block because the target TSO is masking
109 exceptions (the TSO_BLOCKEX flag), then the message is placed on
110 the blocked_exceptions queue attached to the target TSO. When the
111 target TSO enters the unmasked state again, it must check the
112 queue. The blocked_exceptions queue is not locked; only the
113 Capability owning the TSO may modify it.
115 To make things simpler for throwTo, we always create the message
116 first before deciding what to do. The message may get sent, or it
117 may get attached to a TSO's blocked_exceptions queue, or the
118 exception may get thrown immediately and the message dropped,
119 depending on the current state of the target.
121 Currently we send a message if the target belongs to another
122 Capability, and it is
124 - NotBlocked, BlockedOnMsgThrowTo,
125 BlockedOnCCall_Interruptible
127 - or it is masking exceptions (TSO_BLOCKEX)
129 Currently, if the target is BlockedOnMVar, BlockedOnSTM, or
130 BlockedOnBlackHole then we acquire ownership of the TSO by locking
131 its parent container (e.g. the MVar) and then raise the exception.
132 We might change these cases to be more message-passing-like in the
137 NULL exception was raised, ok to continue
139 MessageThrowTo * exception was not raised; the source TSO
140 should now put itself in the state
141 BlockedOnMsgThrowTo, and when it is ready
142 it should unlock the message using
143 unlockClosure(msg, &stg_MSG_THROWTO_info);
144 If it decides not to raise the exception after
145 all, it can revoke it safely with
146 unlockClosure(msg, &stg_MSG_NULL_info);
148 -------------------------------------------------------------------------- */
// Public entry point: allocate a MessageThrowTo, fill it in, and hand
// it to throwToMsg().  The message's header is set to WHITEHOLE, i.e.
// it starts out locked; the caller must unlock it as described in the
// return-value comment above.
151 throwTo (Capability *cap, // the Capability we hold
152 StgTSO *source, // the TSO sending the exception (or NULL)
153 StgTSO *target, // the TSO receiving the exception
154 StgClosure *exception) // the exception closure
158 msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
159 // message starts locked; the caller has to unlock it when it is
161 SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
162 msg->source = source;
163 msg->target = target;
164 msg->exception = exception;
// Translate throwToMsg's result for the caller.
166 switch (throwToMsg(cap, msg))
168 case THROWTO_SUCCESS:
170 case THROWTO_BLOCKED:
// Deliver the exception carried by `msg` to msg->target.  Returns
// THROWTO_SUCCESS when the exception has been raised (or the target
// was already dead), and THROWTO_BLOCKED when the message was either
// sent to another Capability or queued on the target's
// blocked_exceptions list.  Dispatches on target->why_blocked; see
// the long design comment above throwTo().
178 throwToMsg (Capability *cap, MessageThrowTo *msg)
181 StgTSO *target = msg->target;
182 Capability *target_cap;
188 debugTrace(DEBUG_sched, "throwTo: retrying...");
191 ASSERT(target != END_TSO_QUEUE);
193 // Thread already dead?
194 if (target->what_next == ThreadComplete
195 || target->what_next == ThreadKilled) {
196 return THROWTO_SUCCESS;
199 debugTraceCap(DEBUG_sched, cap,
200 "throwTo: from thread %lu to thread %lu",
201 (unsigned long)msg->source->id,
202 (unsigned long)msg->target->id);
205 traceThreadStatus(DEBUG_sched, target);
// If the target lives on another Capability we cannot touch it
// directly: send the (still locked) message and report BLOCKED.
208 target_cap = target->cap;
209 if (target->cap != cap) {
210 throwToSendMsg(cap, target_cap, msg);
211 return THROWTO_BLOCKED;
214 status = target->why_blocked;
// Target owned by us and not blocked (e.g. on our run queue): raise
// immediately unless it is masking exceptions.
219 if ((target->flags & TSO_BLOCKEX) == 0) {
220 // It's on our run queue and not blocking exceptions
221 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
222 return THROWTO_SUCCESS;
224 blockedThrowTo(cap,target,msg);
225 return THROWTO_BLOCKED;
229 case BlockedOnMsgThrowTo:
231 const StgInfoTable *i;
234 m = target->block_info.throwto;
236 // target is local to this cap, but has sent a throwto
237 // message to another cap.
239 // The source message is locked. We need to revoke the
240 // target's message so that we can raise the exception, so
241 // we attempt to lock it.
243 // There's a possibility of a deadlock if two threads are both
244 // trying to throwTo each other (or more generally, a cycle of
245 // threads). To break the symmetry we compare the addresses
246 // of the MessageThrowTo objects, and the one for which m <
247 // msg gets to spin, while the other can only try to lock
248 // once, but must then back off and unlock both before trying
251 i = lockClosure((StgClosure *)m);
253 i = tryLockClosure((StgClosure *)m);
// tryLockClosure failed: back off by re-sending our message via the
// message queue rather than deadlocking on the spin.
255 // debugBelch("collision\n");
256 throwToSendMsg(cap, target->cap, msg);
257 return THROWTO_BLOCKED;
261 if (i == &stg_MSG_NULL_info) {
262 // we know there's a MSG_TRY_WAKEUP on the way, so we
263 // might as well just do it now. The message will
264 // be a no-op when it arrives.
265 unlockClosure((StgClosure*)m, i);
266 tryWakeupThread(cap, target);
270 if (i != &stg_MSG_THROWTO_info) {
271 // if it's a MSG_NULL, this TSO has been woken up by another Cap
272 unlockClosure((StgClosure*)m, i);
// Masked and not interruptible: release the target's message and
// queue ours on its blocked_exceptions list.
276 if ((target->flags & TSO_BLOCKEX) &&
277 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
278 unlockClosure((StgClosure*)m, i);
279 blockedThrowTo(cap,target,msg);
280 return THROWTO_BLOCKED;
283 // nobody else can wake up this TSO after we claim the message
284 doneWithMsgThrowTo(m);
286 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
287 return THROWTO_SUCCESS;
293 To establish ownership of this TSO, we need to acquire a
294 lock on the MVar that it is blocked on.
297 StgInfoTable *info USED_IF_THREADS;
299 mvar = (StgMVar *)target->block_info.closure;
301 // ASSUMPTION: tso->block_info must always point to a
302 // closure. In the threaded RTS it does.
303 switch (get_itbl(mvar)->type) {
311 info = lockClosure((StgClosure *)mvar);
313 // we have the MVar, let's check whether the thread
314 // is still blocked on the same MVar.
315 if (target->why_blocked != BlockedOnMVar
316 || (StgMVar *)target->block_info.closure != mvar) {
317 unlockClosure((StgClosure *)mvar, info);
321 if (target->_link == END_TSO_QUEUE) {
322 // the MVar operation has already completed. There is a
323 // MSG_TRY_WAKEUP on the way, but we can just wake up the
324 // thread now anyway and ignore the message when it
326 unlockClosure((StgClosure *)mvar, info);
327 tryWakeupThread(cap, target);
331 if ((target->flags & TSO_BLOCKEX) &&
332 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
333 blockedThrowTo(cap,target,msg);
334 unlockClosure((StgClosure *)mvar, info);
335 return THROWTO_BLOCKED;
337 // revoke the MVar operation
338 removeFromMVarBlockedQueue(target);
339 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
340 unlockClosure((StgClosure *)mvar, info);
341 return THROWTO_SUCCESS;
345 case BlockedOnBlackHole:
347 if (target->flags & TSO_BLOCKEX) {
348 // BlockedOnBlackHole is not interruptible.
349 blockedThrowTo(cap,target,msg);
350 return THROWTO_BLOCKED;
352 // Revoke the message by replacing it with IND. We're not
353 // locking anything here, so we might still get a TRY_WAKEUP
354 // message from the owner of the blackhole some time in the
355 // future, but that doesn't matter.
356 ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
357 OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
358 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
359 return THROWTO_SUCCESS;
365 // Unblocking BlockedOnSTM threads requires the TSO to be
366 // locked; see STM.c:unpark_tso().
367 if (target->why_blocked != BlockedOnSTM) {
371 if ((target->flags & TSO_BLOCKEX) &&
372 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
373 blockedThrowTo(cap,target,msg);
375 return THROWTO_BLOCKED;
377 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
379 return THROWTO_SUCCESS;
382 case BlockedOnCCall_Interruptible:
386 // walk suspended_ccalls to find the correct worker thread
388 for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
389 if (incall->suspended_tso == target) {
// Queue the exception first, then interrupt the worker task unless
// the target is masking with non-interruptible state.
395 blockedThrowTo(cap, target, msg);
396 if (!((target->flags & TSO_BLOCKEX) &&
397 ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
398 interruptWorkerTask(task);
400 return THROWTO_BLOCKED;
402 debugTraceCap(DEBUG_sched, cap, "throwTo: could not find worker thread to kill");
408 blockedThrowTo(cap,target,msg);
409 return THROWTO_BLOCKED;
// BUG FIX: this guard read "#ifndef THREADEDED_RTS" -- a typo for
// THREADED_RTS.  Since a macro named THREADEDED_RTS is never defined,
// the guard was always true and the non-threaded-only cases below
// were compiled into every build, including the threaded RTS.
// Rewritten with !defined() to match the style used elsewhere in this
// file (cf. the "#if !defined(THREADED_RTS)" in removeFromQueues).
411 #if !defined(THREADED_RTS)
415 #if defined(mingw32_HOST_OS)
416 case BlockedOnDoProc:
// Masked and non-interruptible: park the exception on the target's
// blocked_exceptions queue instead of delivering it now.
418 if ((target->flags & TSO_BLOCKEX) &&
419 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
420 blockedThrowTo(cap,target,msg);
421 return THROWTO_BLOCKED;
// Otherwise detach the thread from its blocking queue and raise the
// exception in it immediately.
423 removeFromQueues(cap,target);
424 raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
425 return THROWTO_SUCCESS;
429 case ThreadMigrating:
430 // if it is ThreadMigrating and tso->cap is ours, then it
431 // *must* be migrating *to* this capability. If it were
432 // migrating away from the capability, then tso->cap would
433 // point to the destination.
435 // There is a MSG_WAKEUP in the message queue for this thread,
436 // but we can just do it preemptively:
437 tryWakeupThread(cap, target);
438 // and now retry, the thread should be runnable.
// Any other why_blocked value indicates RTS corruption or a missing
// case: abort hard.
442 barf("throwTo: unrecognised why_blocked (%d)", target->why_blocked);
// Send `msg` to another Capability's inbox.  In the non-threaded RTS
// the parameters are unused (hence the STG_UNUSED/USED_IF_THREADS
// annotations) because there is only one Capability.
448 throwToSendMsg (Capability *cap STG_UNUSED,
449 Capability *target_cap USED_IF_THREADS,
450 MessageThrowTo *msg USED_IF_THREADS)
454 debugTraceCap(DEBUG_sched, cap, "throwTo: sending a throwto message to cap %lu", (unsigned long)target_cap->no);
456 sendMessage(cap, target_cap, (Message*)msg);
460 // Block a throwTo message on the target TSO's blocked_exceptions
461 // queue. The current Capability must own the target TSO in order to
462 // modify the blocked_exceptions queue.
464 blockedThrowTo (Capability *cap, StgTSO *target, MessageThrowTo *msg)
466 debugTraceCap(DEBUG_sched, cap, "throwTo: blocking on thread %lu",
467 (unsigned long)target->id);
// Ownership precondition: only the Capability that owns the TSO may
// touch this (unlocked) queue.
469 ASSERT(target->cap == cap);
// Push onto the front of the singly-linked list.
471 msg->link = target->blocked_exceptions;
472 target->blocked_exceptions = msg;
473 dirty_TSO(cap,target); // we modified the blocked_exceptions queue
476 /* -----------------------------------------------------------------------------
477 Waking up threads blocked in throwTo
479 There are two ways to do this: maybePerformBlockedException() will
480 perform the throwTo() for the thread at the head of the queue
481 immediately, and leave the other threads on the queue.
482 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
483 before raising an exception.
485 awakenBlockedExceptionQueue() will wake up all the threads in the
486 queue, but not perform any throwTo() immediately. This might be
487 more appropriate when the target thread is the one actually running
490 Returns: non-zero if an exception was raised, zero otherwise.
491 -------------------------------------------------------------------------- */
// If `tso` has pending blocked exceptions and is not masking them,
// dequeue the first message and raise its exception in the target
// immediately; wake the sender afterwards.  Returns non-zero if an
// exception was raised (see the comment block above).
494 maybePerformBlockedException (Capability *cap, StgTSO *tso)
497 const StgInfoTable *i;
// NOTE(review): `ThreadFinished` looks inconsistent -- every other
// dead-thread test in this file uses ThreadComplete || ThreadKilled
// (see throwToSingleThreaded__, throwToMsg, raiseAsync).  Probably
// ThreadKilled was intended here; confirm against rts/Constants.h
// before changing.
500 if (tso->what_next == ThreadComplete || tso->what_next == ThreadFinished) {
501 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
502 awakenBlockedExceptionQueue(cap,tso);
509 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE &&
510 (tso->flags & TSO_BLOCKEX) != 0) {
511 debugTraceCap(DEBUG_sched, cap, "throwTo: thread %lu has blocked exceptions but is inside block", (unsigned long)tso->id);
// Deliver only when the queue is non-empty and the thread is either
// unmasked or interruptibly masked.
514 if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE
515 && ((tso->flags & TSO_BLOCKEX) == 0
516 || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {
518 // We unblock just the first thread on the queue, and perform
519 // its throw immediately.
521 msg = tso->blocked_exceptions;
522 if (msg == END_BLOCKED_EXCEPTIONS_QUEUE) return 0;
523 i = lockClosure((StgClosure*)msg);
524 tso->blocked_exceptions = (MessageThrowTo*)msg->link;
// A MSG_NULL here means the message was already revoked; just drop it.
525 if (i == &stg_MSG_NULL_info) {
526 unlockClosure((StgClosure*)msg,i);
530 throwToSingleThreaded(cap, msg->target, msg->exception);
531 source = msg->source;
532 doneWithMsgThrowTo(msg);
533 tryWakeupThread(cap, source);
539 // awakenBlockedExceptionQueue(): Just wake up the whole queue of
540 // blocked exceptions.
// No throwTo() is performed for any of them; each sender is simply
// made runnable again (see the comment block above this section).
543 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
546 const StgInfoTable *i;
549 for (msg = tso->blocked_exceptions; msg != END_BLOCKED_EXCEPTIONS_QUEUE;
550 msg = (MessageThrowTo*)msg->link) {
551 i = lockClosure((StgClosure *)msg);
// Skip messages already revoked (MSG_NULL); wake the sender of every
// live MSG_THROWTO.
552 if (i != &stg_MSG_NULL_info) {
553 source = msg->source;
554 doneWithMsgThrowTo(msg);
555 tryWakeupThread(cap, source);
557 unlockClosure((StgClosure *)msg,i);
// The whole queue has been drained.
560 tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
563 /* -----------------------------------------------------------------------------
564 Remove a thread from blocking queues.
566 This is for use when we raise an exception in another thread, which
569 Precondition: we have exclusive access to the TSO, via the same set
570 of conditions as throwToSingleThreaded() (c.f.).
571 -------------------------------------------------------------------------- */
// Detach `tso` from the queue of the MVar it is blocked on, by
// overwriting its MVAR_TSO_QUEUE cell rather than unlinking it (the
// queue is singly-linked).  See the head/tail comments below for why
// the overwrite must sometimes be MSG_NULL instead of IND.
574 removeFromMVarBlockedQueue (StgTSO *tso)
576 StgMVar *mvar = (StgMVar*)tso->block_info.closure;
577 StgMVarTSOQueue *q = (StgMVarTSOQueue*)tso->_link;
579 if (q == (StgMVarTSOQueue*)END_TSO_QUEUE) {
580 // already removed from this MVar
584 // Assume the MVar is locked. (not assertable; sometimes it isn't
585 // actually WHITEHOLE'd).
587 // We want to remove the MVAR_TSO_QUEUE object from the queue. It
588 // isn't doubly-linked so we can't actually remove it; instead we
589 // just overwrite it with an IND if possible and let the GC short
590 // it out. However, we have to be careful to maintain the deque
593 if (mvar->head == q) {
594 mvar->head = q->link;
595 OVERWRITE_INFO(q, &stg_IND_info);
596 if (mvar->tail == q) {
597 mvar->tail = (StgMVarTSOQueue*)END_TSO_QUEUE;
600 else if (mvar->tail == q) {
601 // we can't replace it with an IND in this case, because then
602 // we lose the tail pointer when the GC shorts out the IND.
603 // So we use MSG_NULL as a kind of non-dupable indirection;
604 // these are ignored by takeMVar/putMVar.
605 OVERWRITE_INFO(q, &stg_MSG_NULL_info);
// Middle of the queue: safe to IND it and let the GC short it out.
608 OVERWRITE_INFO(q, &stg_IND_info);
611 // revoke the MVar operation
612 tso->_link = END_TSO_QUEUE;
// Remove `tso` from whatever blocking queue it is on, then mark it
// NotBlocked and put it back on the run queue.  Precondition: we have
// exclusive access to the TSO (see the comment block above).
616 removeFromQueues(Capability *cap, StgTSO *tso)
618 switch (tso->why_blocked) {
621 case ThreadMigrating:
625 // Be careful: nothing to do here! We tell the scheduler that the
626 // thread is runnable and we leave it to the stack-walking code to
627 // abort the transaction while unwinding the stack. We should
628 // perhaps have a debugging test to make sure that this really
629 // happens and that the 'zombie' transaction does not get
634 removeFromMVarBlockedQueue(tso);
637 case BlockedOnBlackHole:
641 case BlockedOnMsgThrowTo:
643 MessageThrowTo *m = tso->block_info.throwto;
644 // The message is locked by us, unless we got here via
645 // deleteAllThreads(), in which case we own all the
647 // ASSERT(m->header.info == &stg_WHITEHOLE_info);
649 // unlock and revoke it at the same time
650 doneWithMsgThrowTo(m);
654 #if !defined(THREADED_RTS)
657 #if defined(mingw32_HOST_OS)
658 case BlockedOnDoProc:
660 removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
661 #if defined(mingw32_HOST_OS)
662 /* (Cooperatively) signal that the worker thread should abort
665 abandonWorkRequest(tso->block_info.async_result->reqID);
670 removeThreadFromQueue(cap, &sleeping_queue, tso);
// Unknown blocking reason: RTS invariant violated.
675 barf("removeFromQueues: %d", tso->why_blocked);
// Common exit: the thread is runnable again.
679 tso->why_blocked = NotBlocked;
680 appendToRunQueue(cap, tso);
683 /* -----------------------------------------------------------------------------
686 * The following function implements the magic for raising an
687 * asynchronous exception in an existing thread.
689 * We first remove the thread from any queue on which it might be
690 * blocked. The possible blockages are MVARs, BLOCKING_QUEUESs, and
691 * TSO blocked_exception queues.
693 * We strip the stack down to the innermost CATCH_FRAME, building
694 * thunks in the heap for all the active computations, so they can
695 * be restarted if necessary. When we reach a CATCH_FRAME, we build
696 * an application of the handler to the exception, and push it on
697 * the top of the stack.
699 * How exactly do we save all the active computations? We create an
700 * AP_STACK for every UpdateFrame on the stack. Entering one of these
701 * AP_STACKs pushes everything from the corresponding update frame
702 * upwards onto the stack. (Actually, it pushes everything up to the
703 * next update frame plus a pointer to the next AP_STACK object.
704 * Entering the next AP_STACK object pushes more onto the stack until we
705 * reach the last AP_STACK object - at which point the stack should look
706 * exactly as it did when we killed the TSO and we can continue
707 * execution by entering the closure on top of the stack.
709 * We can also kill a thread entirely - this happens if either (a) the
710 * exception passed to raiseAsync is NULL, or (b) there's no
711 * CATCH_FRAME on the stack. In either case, we strip the entire
712 * stack and replace the thread with a zombie.
714 * ToDo: in THREADED_RTS mode, this function is only safe if either
715 * (a) we hold all the Capabilities (eg. in GC, or if there is only
716 * one Capability), or (b) we own the Capability that the TSO is
717 * currently blocked on or on the run queue of.
719 * -------------------------------------------------------------------------- */
722 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
723 rtsBool stop_at_atomically, StgUpdateFrame *stop_here)
725 StgRetInfoTable *info;
731 debugTraceCap(DEBUG_sched, cap,
732 "raising exception in thread %ld.", (long)tso->id);
734 #if defined(PROFILING)
736 * Debugging tool: on raising an exception, show where we are.
737 * See also Exception.cmm:stg_raisezh.
738 * This wasn't done for asynchronous exceptions originally; see #1450
740 if (RtsFlags.ProfFlags.showCCSOnException)
742 fprintCCS_stderr(tso->prof.CCCS);
745 // ASSUMES: the thread is not already complete or dead
746 // Upper layers should deal with that.
747 ASSERT(tso->what_next != ThreadComplete &&
748 tso->what_next != ThreadKilled);
750 // only if we own this TSO (except that deleteThread() calls this
751 ASSERT(tso->cap == cap);
753 stack = tso->stackobj;
755 // mark it dirty; we're about to change its stack.
757 dirty_STACK(cap, stack);
761 if (stop_here != NULL) {
762 updatee = stop_here->updatee;
767 // The stack freezing code assumes there's a closure pointer on
768 // the top of the stack, so we have to arrange that this is the case...
770 if (sp[0] == (W_)&stg_enter_info) {
774 sp[0] = (W_)&stg_dummy_ret_closure;
778 while (stop_here == NULL || frame < (StgPtr)stop_here) {
780 // 1. Let the top of the stack be the "current closure"
782 // 2. Walk up the stack until we find either an UPDATE_FRAME or a
785 // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
786 // current closure applied to the chunk of stack up to (but not
787 // including) the update frame. This closure becomes the "current
788 // closure". Go back to step 2.
790 // 4. If it's a CATCH_FRAME, then leave the exception handler on
791 // top of the stack applied to the exception.
793 // 5. If it's a STOP_FRAME, then kill the thread.
795 // 6. If it's an UNDERFLOW_FRAME, then continue with the next
798 // NB: if we pass an ATOMICALLY_FRAME then abort the associated
801 info = get_ret_itbl((StgClosure *)frame);
803 switch (info->i.type) {
810 // First build an AP_STACK consisting of the stack chunk above the
811 // current update frame, with the top word on the stack as the
814 words = frame - sp - 1;
815 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
818 ap->fun = (StgClosure *)sp[0];
820 for(i=0; i < (nat)words; ++i) {
821 ap->payload[i] = (StgClosure *)*sp++;
824 SET_HDR(ap,&stg_AP_STACK_info,
825 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
826 TICK_ALLOC_UP_THK(words+1,0);
828 //IF_DEBUG(scheduler,
829 // debugBelch("sched: Updating ");
830 // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
831 // debugBelch(" with ");
832 // printObj((StgClosure *)ap);
835 if (((StgUpdateFrame *)frame)->updatee == updatee) {
836 // If this update frame points to the same closure as
837 // the update frame further down the stack
838 // (stop_here), then don't perform the update. We
839 // want to keep the blackhole in this case, so we can
840 // detect and report the loop (#2783).
841 ap = (StgAP_STACK*)updatee;
843 // Perform the update
844 // TODO: this may waste some work, if the thunk has
845 // already been updated by another thread.
846 updateThunk(cap, tso,
847 ((StgUpdateFrame *)frame)->updatee, (StgClosure *)ap);
850 sp += sizeofW(StgUpdateFrame) - 1;
851 sp[0] = (W_)ap; // push onto stack
853 continue; //no need to bump frame
856 case UNDERFLOW_FRAME:
861 // First build an AP_STACK consisting of the stack chunk above the
862 // current update frame, with the top word on the stack as the
865 words = frame - sp - 1;
866 ap = (StgAP_STACK *)allocate(cap,AP_STACK_sizeW(words));
869 ap->fun = (StgClosure *)sp[0];
871 for(i=0; i < (nat)words; ++i) {
872 ap->payload[i] = (StgClosure *)*sp++;
875 SET_HDR(ap,&stg_AP_STACK_NOUPD_info,
876 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
877 TICK_ALLOC_SE_THK(words+1,0);
880 threadStackUnderflow(cap,tso);
881 stack = tso->stackobj;
892 // We've stripped the entire stack, the thread is now dead.
893 tso->what_next = ThreadKilled;
894 stack->sp = frame + sizeofW(StgStopFrame);
899 // If we find a CATCH_FRAME, and we've got an exception to raise,
900 // then build the THUNK raise(exception), and leave it on
901 // top of the CATCH_FRAME ready to enter.
904 StgCatchFrame *cf = (StgCatchFrame *)frame;
907 if (exception == NULL) break;
909 // we've got an exception to raise, so let's pass it to the
910 // handler in this frame.
912 raise = (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
913 TICK_ALLOC_SE_THK(1,0);
914 SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
915 raise->payload[0] = exception;
917 // throw away the stack from Sp up to the CATCH_FRAME.
921 /* Ensure that async exceptions are blocked now, so we don't get
922 * a surprise exception before we get around to executing the
925 tso->flags |= TSO_BLOCKEX;
926 if ((cf->exceptions_blocked & TSO_INTERRUPTIBLE) == 0) {
927 tso->flags &= ~TSO_INTERRUPTIBLE;
929 tso->flags |= TSO_INTERRUPTIBLE;
932 /* Put the newly-built THUNK on top of the stack, ready to execute
933 * when the thread restarts.
936 sp[-1] = (W_)&stg_enter_info;
938 tso->what_next = ThreadRunGHC;
942 case ATOMICALLY_FRAME:
943 if (stop_at_atomically) {
944 ASSERT(tso->trec->enclosing_trec == NO_TREC);
945 stmCondemnTransaction(cap, tso -> trec);
946 stack->sp = frame - 2;
947 // The ATOMICALLY_FRAME expects to be returned a
948 // result from the transaction, which it stores in the
949 // stack frame. Hence we arrange to return a dummy
950 // result, so that the GC doesn't get upset (#3578).
951 // Perhaps a better way would be to have a different
952 // ATOMICALLY_FRAME instance for condemned
953 // transactions, but I don't fully understand the
954 // interaction with STM invariants.
955 stack->sp[1] = (W_)&stg_NO_TREC_closure;
956 stack->sp[0] = (W_)&stg_gc_unpt_r1_info;
957 tso->what_next = ThreadRunGHC;
960 // Not stop_at_atomically... fall through and abort the
963 case CATCH_STM_FRAME:
964 case CATCH_RETRY_FRAME:
965 // IF we find an ATOMICALLY_FRAME then we abort the
966 // current transaction and propagate the exception. In
967 // this case (unlike ordinary exceptions) we do not care
968 // whether the transaction is valid or not because its
969 // possible validity cannot have caused the exception
970 // and will not be visible after the abort.
973 StgTRecHeader *trec = tso -> trec;
974 StgTRecHeader *outer = trec -> enclosing_trec;
975 debugTraceCap(DEBUG_stm, cap,
976 "found atomically block delivering async exception");
977 stmAbortTransaction(cap, trec);
978 stmFreeAbortedTRec(cap, trec);
987 // move on to the next stack frame
988 frame += stack_frame_sizeW((StgClosure *)frame);
992 IF_DEBUG(sanity, checkTSO(tso));
995 if (tso->why_blocked != NotBlocked) {
996 tso->why_blocked = NotBlocked;
997 appendToRunQueue(cap,tso);