1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Asynchronous exceptions
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
13 #include "RaiseAsync.h"
/* Forward declarations for the static helpers defined later in this
 * file. */

/* NOTE(review): the remaining parameters of this prototype (the target
 * TSO and the StgPtr stop_here used by the definition below) are elided
 * in the visible source — confirm against the definition of raiseAsync. */
static void raiseAsync (Capability *cap,
                        StgClosure *exception,
                        rtsBool stop_at_atomically,

static void removeFromQueues(Capability *cap, StgTSO *tso);

static void blockedThrowTo (StgTSO *source, StgTSO *target);

static void performBlockedException (Capability *cap,
                                     StgTSO *source, StgTSO *target);
34 /* -----------------------------------------------------------------------------
37 This version of throwTo is safe to use if and only if one of the
42 - all the other threads in the system are stopped (eg. during GC).
44 - we surely own the target TSO (eg. we just took it from the
45 run queue of the current capability, or we are running it).
It doesn't cater for blocking the source thread until the exception
has been raised.
49 -------------------------------------------------------------------------- */
/*
 * Raise 'exception' asynchronously in 'tso'.  Only safe when no other
 * thread can be operating on the target (see the comment block above).
 * Equivalent to throwToSingleThreaded_ with stop_at_atomically =
 * rtsFalse and no stop_here address.
 */
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
    throwToSingleThreaded_(cap, tso, exception, rtsFalse, NULL);
/*
 * Worker shared by throwToSingleThreaded() and suspendComputation().
 *
 * exception may be NULL, meaning "just strip the stack" (see
 * raiseAsync); stop_at_atomically and stop_here bound how far down the
 * stack raiseAsync() strips frames.
 */
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically, StgPtr stop_here)
    // Thread already dead?  Nothing to do in that case.
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
/*
 * Strip the target thread's stack back to 'stop_here' without raising
 * any exception (passes NULL as the exception to raiseAsync).
 */
suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here)
    // Thread already dead?
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
86 /* -----------------------------------------------------------------------------
89 This function may be used to throw an exception from one thread to
90 another, during the course of normal execution. This is a tricky
91 task: the target thread might be running on another CPU, or it
92 may be blocked and could be woken up at any point by another CPU.
93 We have some delicate synchronisation to do.
95 There is a completely safe fallback scheme: it is always possible
96 to just block the source TSO on the target TSO's blocked_exceptions
97 queue. This queue is locked using lockTSO()/unlockTSO(). It is
98 checked at regular intervals: before and after running a thread
99 (schedule() and threadPaused() respectively), and just before GC
100 (scheduleDoGC()). Activating a thread on this queue should be done
101 using maybePerformBlockedException(): this is done in the context
102 of the target thread, so the exception can be raised eagerly.
104 This fallback scheme works even if the target thread is complete or
killed: scheduleDoGC() will discover the blocked thread before the
target is GC'd.
108 Blocking the source thread on the target thread's blocked_exception
109 queue is also employed when the target thread is currently blocking
110 exceptions (ie. inside Control.Exception.block).
112 We could use the safe fallback scheme exclusively, but that
113 wouldn't be ideal: most calls to throwTo would block immediately,
114 possibly until the next GC, which might require the deadlock
detection mechanism to kick in.  So we try to provide promptness
wherever possible.
118 We can promptly deliver the exception if the target thread is:
120 - runnable, on the same Capability as the source thread (because
121 we own the run queue and therefore the target thread).
123 - blocked, and we can obtain exclusive access to it. Obtaining
124 exclusive access to the thread depends on how it is blocked.
126 We must also be careful to not trip over threadStackOverflow(),
127 which might be moving the TSO to enlarge its stack.
128 lockTSO()/unlockTSO() are used here too.
132 THROWTO_SUCCESS exception was raised, ok to continue
134 THROWTO_BLOCKED exception was not raised; block the source
135 thread then call throwToReleaseTarget() when
136 the source thread is properly tidied away.
138 -------------------------------------------------------------------------- */
141 throwTo (Capability *cap, // the Capability we hold
142 StgTSO *source, // the TSO sending the exception
143 StgTSO *target, // the TSO receiving the exception
144 StgClosure *exception, // the exception closure
145 /*[out]*/ void **out USED_IF_THREADS)
149 // follow ThreadRelocated links in the target first
150 while (target->what_next == ThreadRelocated) {
151 target = target->link;
152 // No, it might be a WHITEHOLE:
153 // ASSERT(get_itbl(target)->type == TSO);
156 debugTrace(DEBUG_sched, "throwTo: from thread %d to thread %d",
157 source->id, target->id);
160 if (traceClass(DEBUG_sched)) {
161 debugTraceBegin("throwTo: target");
162 printThreadStatus(target);
169 debugTrace(DEBUG_sched, "throwTo: retrying...");
172 // Thread already dead?
173 if (target->what_next == ThreadComplete
174 || target->what_next == ThreadKilled) {
175 return THROWTO_SUCCESS;
178 status = target->why_blocked;
182 /* if status==NotBlocked, and target->cap == cap, then
183 we own this TSO and can raise the exception.
185 How do we establish this condition? Very carefully.
188 P = (status == NotBlocked)
189 Q = (tso->cap == cap)
191 Now, if P & Q are true, then the TSO is locked and owned by
192 this capability. No other OS thread can steal it.
194 If P==0 and Q==1: the TSO is blocked, but attached to this
195 capabilty, and it can be stolen by another capability.
197 If P==1 and Q==0: the TSO is runnable on another
198 capability. At any time, the TSO may change from runnable
199 to blocked and vice versa, while it remains owned by
202 Suppose we test like this:
208 this is defeated by another capability stealing a blocked
209 TSO from us to wake it up (Schedule.c:unblockOne()). The
210 other thread is doing
215 assuming arbitrary reordering, we could see this
225 so we need a memory barrier:
232 this avoids the problematic case. There are other cases
233 to consider, but this is the tricky one.
235 Note that we must be sure that unblockOne() does the
236 writes in the correct order: Q before P. The memory
237 barrier ensures that if we have seen the write to P, we
238 have also seen the write to Q.
241 Capability *target_cap;
244 target_cap = target->cap;
245 if (target_cap == cap && (target->flags & TSO_BLOCKEX) == 0) {
246 // It's on our run queue and not blocking exceptions
247 raiseAsync(cap, target, exception, rtsFalse, NULL);
248 return THROWTO_SUCCESS;
250 // Otherwise, just block on the blocked_exceptions queue
251 // of the target thread. The queue will get looked at
252 // soon enough: it is checked before and after running a
253 // thread, and during GC.
256 // Avoid race with threadStackOverflow, which may have
257 // just moved this TSO.
258 if (target->what_next == ThreadRelocated) {
260 target = target->link;
263 blockedThrowTo(source,target);
265 return THROWTO_BLOCKED;
272 To establish ownership of this TSO, we need to acquire a
273 lock on the MVar that it is blocked on.
276 StgInfoTable *info USED_IF_THREADS;
278 mvar = (StgMVar *)target->block_info.closure;
280 // ASSUMPTION: tso->block_info must always point to a
281 // closure. In the threaded RTS it does.
282 if (get_itbl(mvar)->type != MVAR) goto retry;
284 info = lockClosure((StgClosure *)mvar);
286 if (target->what_next == ThreadRelocated) {
287 target = target->link;
288 unlockClosure((StgClosure *)mvar,info);
291 // we have the MVar, let's check whether the thread
292 // is still blocked on the same MVar.
293 if (target->why_blocked != BlockedOnMVar
294 || (StgMVar *)target->block_info.closure != mvar) {
295 unlockClosure((StgClosure *)mvar, info);
299 if ((target->flags & TSO_BLOCKEX) &&
300 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
301 lockClosure((StgClosure *)target);
302 blockedThrowTo(source,target);
303 unlockClosure((StgClosure *)mvar, info);
305 return THROWTO_BLOCKED; // caller releases TSO
307 removeThreadFromMVarQueue(mvar, target);
308 raiseAsync(cap, target, exception, rtsFalse, NULL);
309 unblockOne(cap, target);
310 unlockClosure((StgClosure *)mvar, info);
311 return THROWTO_SUCCESS;
315 case BlockedOnBlackHole:
317 ACQUIRE_LOCK(&sched_mutex);
318 // double checking the status after the memory barrier:
319 if (target->why_blocked != BlockedOnBlackHole) {
320 RELEASE_LOCK(&sched_mutex);
324 if (target->flags & TSO_BLOCKEX) {
326 blockedThrowTo(source,target);
327 RELEASE_LOCK(&sched_mutex);
329 return THROWTO_BLOCKED; // caller releases TSO
331 removeThreadFromQueue(&blackhole_queue, target);
332 raiseAsync(cap, target, exception, rtsFalse, NULL);
333 unblockOne(cap, target);
334 RELEASE_LOCK(&sched_mutex);
335 return THROWTO_SUCCESS;
339 case BlockedOnException:
345 To obtain exclusive access to a BlockedOnException thread,
346 we must call lockClosure() on the TSO on which it is blocked.
347 Since the TSO might change underneath our feet, after we
348 call lockClosure() we must check that
350 (a) the closure we locked is actually a TSO
351 (b) the original thread is still BlockedOnException,
352 (c) the original thread is still blocked on the TSO we locked
353 and (d) the target thread has not been relocated.
355 We synchronise with threadStackOverflow() (which relocates
356 threads) using lockClosure()/unlockClosure().
358 target2 = target->block_info.tso;
360 info = lockClosure((StgClosure *)target2);
361 if (info != &stg_TSO_info) {
362 unlockClosure((StgClosure *)target2, info);
365 if (target->what_next == ThreadRelocated) {
366 target = target->link;
370 if (target2->what_next == ThreadRelocated) {
371 target->block_info.tso = target2->link;
375 if (target->why_blocked != BlockedOnException
376 || target->block_info.tso != target2) {
382 Now we have exclusive rights to the target TSO...
384 If it is blocking exceptions, add the source TSO to its
385 blocked_exceptions queue. Otherwise, raise the exception.
387 if ((target->flags & TSO_BLOCKEX) &&
388 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
390 blockedThrowTo(source,target);
393 return THROWTO_BLOCKED;
395 removeThreadFromQueue(&target2->blocked_exceptions, target);
396 raiseAsync(cap, target, exception, rtsFalse, NULL);
397 unblockOne(cap, target);
399 return THROWTO_SUCCESS;
407 case BlockedOnCCall_NoUnblockExc:
408 // I don't think it's possible to acquire ownership of a
409 // BlockedOnCCall thread. We just assume that the target
410 // thread is blocking exceptions, and block on its
411 // blocked_exception queue.
413 blockedThrowTo(source,target);
415 return THROWTO_BLOCKED;
417 #ifndef THREADEDED_RTS
421 if ((target->flags & TSO_BLOCKEX) &&
422 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
423 blockedThrowTo(source,target);
424 return THROWTO_BLOCKED;
426 removeFromQueues(cap,target);
427 raiseAsync(cap, target, exception, rtsFalse, NULL);
428 return THROWTO_SUCCESS;
433 barf("throwTo: unrecognised why_blocked value");
// Block a TSO on another TSO's blocked_exceptions queue.
// Precondition: we hold an exclusive lock on the target TSO (this is
// complex to achieve as there's no single lock on a TSO; see
// throwTo() above for how the lock is taken in each blocking case).
blockedThrowTo (StgTSO *source, StgTSO *target)
    debugTrace(DEBUG_sched, "throwTo: blocking on thread %d", target->id);
    // Push the source on the front of the target's blocked_exceptions
    // list.
    source->link = target->blocked_exceptions;
    target->blocked_exceptions = source;
    dirtyTSO(target); // we modified the blocked_exceptions queue

    source->block_info.tso = target;
    wb(); // throwTo_exception *must* be visible if BlockedOnException is.
    source->why_blocked = BlockedOnException;
/*
 * Release the lock held on the target TSO; per the THROWTO_BLOCKED
 * contract described above, the caller of throwTo() invokes this once
 * the source thread has been tidied away.
 */
throwToReleaseTarget (void *tso)
    unlockTSO((StgTSO *)tso);
464 /* -----------------------------------------------------------------------------
465 Waking up threads blocked in throwTo
467 There are two ways to do this: maybePerformBlockedException() will
468 perform the throwTo() for the thread at the head of the queue
469 immediately, and leave the other threads on the queue.
470 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
471 before raising an exception.
473 awakenBlockedExceptionQueue() will wake up all the threads in the
474 queue, but not perform any throwTo() immediately. This might be
475 more appropriate when the target thread is the one actually running
477 -------------------------------------------------------------------------- */
/*
 * If 'tso' has pending blocked exceptions and is not currently
 * blocking them (TSO_BLOCKEX clear, or TSO_INTERRUPTIBLE set and the
 * thread is actually interruptible right now), unblock the first
 * thread on the queue and perform its throwTo immediately; the
 * remaining threads stay on the queue.
 */
maybePerformBlockedException (Capability *cap, StgTSO *tso)
    if (tso->blocked_exceptions != END_TSO_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // Lock the TSO, this gives us exclusive access to the queue

        // Check the queue again; it might have changed before we
        // took the lock.
        if (tso->blocked_exceptions == END_TSO_QUEUE) {

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
        source = tso->blocked_exceptions;
        performBlockedException(cap, source, tso);
        tso->blocked_exceptions = unblockOne_(cap, source,
                                              rtsFalse/*no migrate*/);
/*
 * Wake up every thread on tso's blocked_exceptions queue without
 * performing any throwTo immediately (contrast with
 * maybePerformBlockedException above).
 */
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
    if (tso->blocked_exceptions != END_TSO_QUEUE) {

        awakenBlockedQueue(cap, tso->blocked_exceptions);
        tso->blocked_exceptions = END_TSO_QUEUE;
/*
 * Perform the throwTo that 'source' is blocked on: fetch the exception
 * closure out of source's stg_block_throwto stack frame and raise it
 * in 'target'.
 * Precondition: source is BlockedOnException on target, with the
 * stg_block_throwto frame on top of its stack (asserted below).
 */
performBlockedException (Capability *cap, StgTSO *source, StgTSO *target)
    StgClosure *exception;

    ASSERT(source->why_blocked == BlockedOnException);
    ASSERT(source->block_info.tso->id == target->id);
    ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
    ASSERT(((StgTSO *)source->sp[1])->id == target->id);
    // check ids not pointers, because the thread might be relocated

    // Third word of the frame is the exception closure itself.
    exception = (StgClosure *)source->sp[2];
    throwToSingleThreaded(cap, target, exception);
535 /* -----------------------------------------------------------------------------
536 Remove a thread from blocking queues.
538 This is for use when we raise an exception in another thread, which
540 This has nothing to do with the UnblockThread event in GranSim. -- HWL
541 -------------------------------------------------------------------------- */
543 #if defined(GRAN) || defined(PARALLEL_HASKELL)
545 NB: only the type of the blocking queue is different in GranSim and GUM
546 the operations on the queue-elements are the same
547 long live polymorphism!
549 Locks: sched_mutex is held upon entry and exit.
553 removeFromQueues(Capability *cap, StgTSO *tso)
555 StgBlockingQueueElement *t, **last;
557 switch (tso->why_blocked) {
560 return; /* not blocked */
563 // Be careful: nothing to do here! We tell the scheduler that the thread
564 // is runnable and we leave it to the stack-walking code to abort the
565 // transaction while unwinding the stack. We should perhaps have a debugging
566 // test to make sure that this really happens and that the 'zombie' transaction
567 // does not get committed.
571 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
573 StgBlockingQueueElement *last_tso = END_BQ_QUEUE;
574 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
576 last = (StgBlockingQueueElement **)&mvar->head;
577 for (t = (StgBlockingQueueElement *)mvar->head;
579 last = &t->link, last_tso = t, t = t->link) {
580 if (t == (StgBlockingQueueElement *)tso) {
581 *last = (StgBlockingQueueElement *)tso->link;
582 if (mvar->tail == tso) {
583 mvar->tail = (StgTSO *)last_tso;
588 barf("removeFromQueues (MVAR): TSO not found");
591 case BlockedOnBlackHole:
592 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
594 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
596 last = &bq->blocking_queue;
597 for (t = bq->blocking_queue;
599 last = &t->link, t = t->link) {
600 if (t == (StgBlockingQueueElement *)tso) {
601 *last = (StgBlockingQueueElement *)tso->link;
605 barf("removeFromQueues (BLACKHOLE): TSO not found");
608 case BlockedOnException:
610 StgTSO *target = tso->block_info.tso;
612 ASSERT(get_itbl(target)->type == TSO);
614 while (target->what_next == ThreadRelocated) {
615 target = target2->link;
616 ASSERT(get_itbl(target)->type == TSO);
619 last = (StgBlockingQueueElement **)&target->blocked_exceptions;
620 for (t = (StgBlockingQueueElement *)target->blocked_exceptions;
622 last = &t->link, t = t->link) {
623 ASSERT(get_itbl(t)->type == TSO);
624 if (t == (StgBlockingQueueElement *)tso) {
625 *last = (StgBlockingQueueElement *)tso->link;
629 barf("removeFromQueues (Exception): TSO not found");
634 #if defined(mingw32_HOST_OS)
635 case BlockedOnDoProc:
638 /* take TSO off blocked_queue */
639 StgBlockingQueueElement *prev = NULL;
640 for (t = (StgBlockingQueueElement *)blocked_queue_hd; t != END_BQ_QUEUE;
641 prev = t, t = t->link) {
642 if (t == (StgBlockingQueueElement *)tso) {
644 blocked_queue_hd = (StgTSO *)t->link;
645 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
646 blocked_queue_tl = END_TSO_QUEUE;
649 prev->link = t->link;
650 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
651 blocked_queue_tl = (StgTSO *)prev;
654 #if defined(mingw32_HOST_OS)
655 /* (Cooperatively) signal that the worker thread should abort
658 abandonWorkRequest(tso->block_info.async_result->reqID);
663 barf("removeFromQueues (I/O): TSO not found");
668 /* take TSO off sleeping_queue */
669 StgBlockingQueueElement *prev = NULL;
670 for (t = (StgBlockingQueueElement *)sleeping_queue; t != END_BQ_QUEUE;
671 prev = t, t = t->link) {
672 if (t == (StgBlockingQueueElement *)tso) {
674 sleeping_queue = (StgTSO *)t->link;
676 prev->link = t->link;
681 barf("removeFromQueues (delay): TSO not found");
685 barf("removeFromQueues");
689 tso->link = END_TSO_QUEUE;
690 tso->why_blocked = NotBlocked;
691 tso->block_info.closure = NULL;
692 pushOnRunQueue(cap,tso);
/*
 * removeFromQueues: take 'tso' off whatever queue it is blocked on
 * (MVar, black-hole, blocked_exceptions, I/O or sleeping queue) and
 * make it runnable again on this Capability.
 *
 * NOTE(review): several lines of this function (case labels, braces,
 * #endif lines) are elided in the visible source; only the visible
 * text appears below.
 */
removeFromQueues(Capability *cap, StgTSO *tso)
    switch (tso->why_blocked) {

    // Be careful: nothing to do here! We tell the scheduler that the
    // thread is runnable and we leave it to the stack-walking code to
    // abort the transaction while unwinding the stack. We should
    // perhaps have a debugging test to make sure that this really
    // happens and that the 'zombie' transaction does not get

    removeThreadFromMVarQueue((StgMVar *)tso->block_info.closure, tso);

    case BlockedOnBlackHole:
        removeThreadFromQueue(&blackhole_queue, tso);

    case BlockedOnException:
        StgTSO *target = tso->block_info.tso;

        // NO: when called by threadPaused(), we probably have this
        // TSO already locked (WHITEHOLEd) because we just placed
        // ourselves on its queue.
        // ASSERT(get_itbl(target)->type == TSO);

        // Follow relocation links to find the live copy of the TSO we
        // are blocked on.
        while (target->what_next == ThreadRelocated) {
            target = target->link;

        removeThreadFromQueue(&target->blocked_exceptions, tso);

#if !defined(THREADED_RTS)
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:

        removeThreadFromDeQueue(&blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
           the request that it is processing. */
        abandonWorkRequest(tso->block_info.async_result->reqID);

        removeThreadFromQueue(&sleeping_queue, tso);

        barf("removeFromQueues");

    // Unblocked: reset the blocking fields and put the thread on this
    // Capability's run queue.
    tso->link = END_TSO_QUEUE;
    tso->why_blocked = NotBlocked;
    tso->block_info.closure = NULL;
    appendToRunQueue(cap,tso);

    // We might have just migrated this TSO to our Capability:

        tso->bound->cap = cap;
775 /* -----------------------------------------------------------------------------
778 * The following function implements the magic for raising an
779 * asynchronous exception in an existing thread.
781 * We first remove the thread from any queue on which it might be
782 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
784 * We strip the stack down to the innermost CATCH_FRAME, building
785 * thunks in the heap for all the active computations, so they can
786 * be restarted if necessary. When we reach a CATCH_FRAME, we build
787 * an application of the handler to the exception, and push it on
788 * the top of the stack.
790 * How exactly do we save all the active computations? We create an
791 * AP_STACK for every UpdateFrame on the stack. Entering one of these
792 * AP_STACKs pushes everything from the corresponding update frame
793 * upwards onto the stack. (Actually, it pushes everything up to the
794 * next update frame plus a pointer to the next AP_STACK object.
795 * Entering the next AP_STACK object pushes more onto the stack until we
796 * reach the last AP_STACK object - at which point the stack should look
797 * exactly as it did when we killed the TSO and we can continue
798 * execution by entering the closure on top of the stack.
800 * We can also kill a thread entirely - this happens if either (a) the
801 * exception passed to raiseAsync is NULL, or (b) there's no
802 * CATCH_FRAME on the stack. In either case, we strip the entire
803 * stack and replace the thread with a zombie.
805 * ToDo: in THREADED_RTS mode, this function is only safe if either
806 * (a) we hold all the Capabilities (eg. in GC, or if there is only
807 * one Capability), or (b) we own the Capability that the TSO is
808 * currently blocked on or on the run queue of.
810 * -------------------------------------------------------------------------- */
/*
 * raiseAsync: the workhorse (see the long comment block above).
 * Walks the target TSO's stack from the top, freezing the computation
 * under each update frame into an AP_STACK closure, until it reaches a
 * CATCH_FRAME (raise the exception there), a STOP_FRAME (kill the
 * thread), 'stop_here', or — with stop_at_atomically set — an
 * ATOMICALLY_FRAME.  exception == NULL means "strip only" (used by
 * suspendComputation).
 *
 * NOTE(review): portions of this function (case labels, braces, the
 * tail of the function) are elided in the visible source; only the
 * visible text appears below.
 */
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgPtr stop_here)
    StgRetInfoTable *info;

    debugTrace(DEBUG_sched,
               "raising exception in thread %ld.", (long)tso->id);

    // mark it dirty; we're about to change its stack.

    // ASSUMES: the thread is not already complete or dead. Upper
    // layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...

    if (sp[0] == (W_)&stg_enter_info) {

        sp[0] = (W_)&stg_dummy_ret_closure;

    while (stop_here == NULL || frame < stop_here) {

        // 1. Let the top of the stack be the "current closure"

        // 2. Walk up the stack until we find either an UPDATE_FRAME or a

        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame. This closure becomes the "current
        //    closure". Go back to step 2.

        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.

        // 5. If it's a STOP_FRAME, then kill the thread.

        // NB: if we pass an ATOMICALLY_FRAME then abort the associated

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the

            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocateLocal(cap,AP_STACK_sizeW(words));

            ap->fun = (StgClosure *)sp[0];

            // Copy the frozen chunk of stack into the AP_STACK payload.
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);

            // Replace the updatee with an indirection

            // Warning: if we're in a loop, more than one update frame on
            // the stack may point to the same object. Be careful not to
            // overwrite an IND_OLDGEN in this case, because we'll screw
            // up the mutable lists. To be on the safe side, don't
            // overwrite any kind of indirection at all. See also
            // threadSqueezeStack in GC.c, where we have to make a similar

            if (!closure_IND(((StgUpdateFrame *)frame)->updatee)) {
                // revert the black hole
                UPD_IND_NOLOCK(((StgUpdateFrame *)frame)->updatee,

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack

            continue; //no need to bump frame

            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);

            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.

            StgCatchFrame *cf = (StgCatchFrame *)frame;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.

            raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             * handler. */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[-1] = (W_)&stg_enter_info;

            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);

                // R1 is not a register: the return convention for IO in
                // this case puts the return value on the stack, so we
                // need to set up the stack to return to the atomically

                tso->sp[1] = (StgWord) &stg_NO_FINALIZER_closure; // why not?
                tso->sp[0] = (StgWord) &stg_ut_1_0_unreg_info;

                tso->what_next = ThreadRunGHC;

            // Not stop_at_atomically... fall through and abort the

        case CATCH_RETRY_FRAME:
            // IF we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception. In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.
            debugTrace(DEBUG_stm,
                       "found atomically block delivering async exception");

            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = stmGetEnclosingTRec(trec);
            stmAbortTransaction(cap, trec);
            tso -> trec = outer;

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);