1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Asynchronous exceptions
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
13 #include "RaiseAsync.h"
21 static void raiseAsync (Capability *cap,
23 StgClosure *exception,
24 rtsBool stop_at_atomically,
27 static void removeFromQueues(Capability *cap, StgTSO *tso);
29 static void blockedThrowTo (StgTSO *source, StgTSO *target);
31 static void performBlockedException (Capability *cap,
32 StgTSO *source, StgTSO *target);
34 /* -----------------------------------------------------------------------------
37 This version of throwTo is safe to use if and only if one of the
42 - all the other threads in the system are stopped (eg. during GC).
44 - we surely own the target TSO (eg. we just took it from the
45 run queue of the current capability, or we are running it).
   It doesn't cater for blocking the source thread until the exception
   has been raised.
49 -------------------------------------------------------------------------- */
// Raise 'exception' asynchronously in 'tso'.  Convenience wrapper
// around throwToSingleThreaded_ with no atomically-stop and no
// stop_here address.  Safe only under the ownership conditions
// described in the comment above.
throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
    throwToSingleThreaded_(cap, tso, exception, rtsFalse, NULL);
// Worker for throwToSingleThreaded/suspendComputation: unblock 'tso'
// from whatever queue it is on, then raise 'exception' in it.
// stop_at_atomically / stop_here are passed straight to raiseAsync.
throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
                       rtsBool stop_at_atomically, StgPtr stop_here)
    // Thread already dead?  Then there is nothing to raise into.
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
// Suspend the computation in 'tso' up to 'stop_here': like
// throwToSingleThreaded_ but passes a NULL exception to raiseAsync
// (which, per the raiseAsync commentary below, strips the stack
// rather than running a handler).
suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here)
    // Thread already dead?  Nothing to suspend.
    if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {

    // Remove it from any blocking queues
    removeFromQueues(cap,tso);

    raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
86 /* -----------------------------------------------------------------------------
89 This function may be used to throw an exception from one thread to
90 another, during the course of normal execution. This is a tricky
91 task: the target thread might be running on another CPU, or it
92 may be blocked and could be woken up at any point by another CPU.
93 We have some delicate synchronisation to do.
95 There is a completely safe fallback scheme: it is always possible
96 to just block the source TSO on the target TSO's blocked_exceptions
97 queue. This queue is locked using lockTSO()/unlockTSO(). It is
98 checked at regular intervals: before and after running a thread
99 (schedule() and threadPaused() respectively), and just before GC
100 (scheduleDoGC()). Activating a thread on this queue should be done
101 using maybePerformBlockedException(): this is done in the context
102 of the target thread, so the exception can be raised eagerly.
104 This fallback scheme works even if the target thread is complete or
105 killed: scheduleDoGC() will discover the blocked thread before the
108 Blocking the source thread on the target thread's blocked_exception
109 queue is also employed when the target thread is currently blocking
110 exceptions (ie. inside Control.Exception.block).
112 We could use the safe fallback scheme exclusively, but that
113 wouldn't be ideal: most calls to throwTo would block immediately,
114 possibly until the next GC, which might require the deadlock
   detection mechanism to kick in.  So we try to provide promptness
   wherever possible.
118 We can promptly deliver the exception if the target thread is:
120 - runnable, on the same Capability as the source thread (because
121 we own the run queue and therefore the target thread).
123 - blocked, and we can obtain exclusive access to it. Obtaining
124 exclusive access to the thread depends on how it is blocked.
126 We must also be careful to not trip over threadStackOverflow(),
127 which might be moving the TSO to enlarge its stack.
128 lockTSO()/unlockTSO() are used here too.
132 THROWTO_SUCCESS exception was raised, ok to continue
134 THROWTO_BLOCKED exception was not raised; block the source
135 thread then call throwToReleaseTarget() when
136 the source thread is properly tidied away.
138 -------------------------------------------------------------------------- */
141 throwTo (Capability *cap, // the Capability we hold
142 StgTSO *source, // the TSO sending the exception
143 StgTSO *target, // the TSO receiving the exception
144 StgClosure *exception, // the exception closure
145 /*[out]*/ void **out USED_IF_THREADS)
149 // follow ThreadRelocated links in the target first
150 while (target->what_next == ThreadRelocated) {
151 target = target->link;
152 // No, it might be a WHITEHOLE:
153 // ASSERT(get_itbl(target)->type == TSO);
156 debugTrace(DEBUG_sched, "throwTo: from thread %d to thread %d",
157 source->id, target->id);
160 if (traceClass(DEBUG_sched)) {
161 debugTraceBegin("throwTo: target");
162 printThreadStatus(target);
169 debugTrace(DEBUG_sched, "throwTo: retrying...");
172 // Thread already dead?
173 if (target->what_next == ThreadComplete
174 || target->what_next == ThreadKilled) {
175 return THROWTO_SUCCESS;
178 status = target->why_blocked;
182 /* if status==NotBlocked, and target->cap == cap, then
183 we own this TSO and can raise the exception.
185 How do we establish this condition? Very carefully.
188 P = (status == NotBlocked)
189 Q = (tso->cap == cap)
191 Now, if P & Q are true, then the TSO is locked and owned by
192 this capability. No other OS thread can steal it.
194 If P==0 and Q==1: the TSO is blocked, but attached to this
195 capabilty, and it can be stolen by another capability.
197 If P==1 and Q==0: the TSO is runnable on another
198 capability. At any time, the TSO may change from runnable
199 to blocked and vice versa, while it remains owned by
202 Suppose we test like this:
208 this is defeated by another capability stealing a blocked
209 TSO from us to wake it up (Schedule.c:unblockOne()). The
210 other thread is doing
215 assuming arbitrary reordering, we could see this
225 so we need a memory barrier:
232 this avoids the problematic case. There are other cases
233 to consider, but this is the tricky one.
235 Note that we must be sure that unblockOne() does the
236 writes in the correct order: Q before P. The memory
237 barrier ensures that if we have seen the write to P, we
238 have also seen the write to Q.
241 Capability *target_cap;
244 target_cap = target->cap;
245 if (target_cap == cap && (target->flags & TSO_BLOCKEX) == 0) {
246 // It's on our run queue and not blocking exceptions
247 raiseAsync(cap, target, exception, rtsFalse, NULL);
248 return THROWTO_SUCCESS;
250 // Otherwise, just block on the blocked_exceptions queue
251 // of the target thread. The queue will get looked at
252 // soon enough: it is checked before and after running a
253 // thread, and during GC.
256 // Avoid race with threadStackOverflow, which may have
257 // just moved this TSO.
258 if (target->what_next == ThreadRelocated) {
260 target = target->link;
263 blockedThrowTo(source,target);
265 return THROWTO_BLOCKED;
272 To establish ownership of this TSO, we need to acquire a
273 lock on the MVar that it is blocked on.
276 StgInfoTable *info USED_IF_THREADS;
278 mvar = (StgMVar *)target->block_info.closure;
280 // ASSUMPTION: tso->block_info must always point to a
281 // closure. In the threaded RTS it does.
282 if (get_itbl(mvar)->type != MVAR) goto retry;
284 info = lockClosure((StgClosure *)mvar);
286 if (target->what_next == ThreadRelocated) {
287 target = target->link;
288 unlockClosure((StgClosure *)mvar,info);
291 // we have the MVar, let's check whether the thread
292 // is still blocked on the same MVar.
293 if (target->why_blocked != BlockedOnMVar
294 || (StgMVar *)target->block_info.closure != mvar) {
295 unlockClosure((StgClosure *)mvar, info);
299 if ((target->flags & TSO_BLOCKEX) &&
300 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
301 lockClosure((StgClosure *)target);
302 blockedThrowTo(source,target);
303 unlockClosure((StgClosure *)mvar, info);
305 return THROWTO_BLOCKED; // caller releases TSO
307 removeThreadFromMVarQueue(mvar, target);
308 raiseAsync(cap, target, exception, rtsFalse, NULL);
309 unblockOne(cap, target);
310 unlockClosure((StgClosure *)mvar, info);
311 return THROWTO_SUCCESS;
315 case BlockedOnBlackHole:
317 ACQUIRE_LOCK(&sched_mutex);
318 // double checking the status after the memory barrier:
319 if (target->why_blocked != BlockedOnBlackHole) {
320 RELEASE_LOCK(&sched_mutex);
324 if (target->flags & TSO_BLOCKEX) {
326 blockedThrowTo(source,target);
327 RELEASE_LOCK(&sched_mutex);
329 return THROWTO_BLOCKED; // caller releases TSO
331 removeThreadFromQueue(&blackhole_queue, target);
332 raiseAsync(cap, target, exception, rtsFalse, NULL);
333 unblockOne(cap, target);
334 RELEASE_LOCK(&sched_mutex);
335 return THROWTO_SUCCESS;
339 case BlockedOnException:
345 To obtain exclusive access to a BlockedOnException thread,
346 we must call lockClosure() on the TSO on which it is blocked.
347 Since the TSO might change underneath our feet, after we
348 call lockClosure() we must check that
350 (a) the closure we locked is actually a TSO
351 (b) the original thread is still BlockedOnException,
352 (c) the original thread is still blocked on the TSO we locked
353 and (d) the target thread has not been relocated.
355 We synchronise with threadStackOverflow() (which relocates
356 threads) using lockClosure()/unlockClosure().
358 target2 = target->block_info.tso;
360 info = lockClosure((StgClosure *)target2);
361 if (info != &stg_TSO_info) {
362 unlockClosure((StgClosure *)target2, info);
365 if (target->what_next == ThreadRelocated) {
366 target = target->link;
370 if (target2->what_next == ThreadRelocated) {
371 target->block_info.tso = target2->link;
375 if (target->why_blocked != BlockedOnException
376 || target->block_info.tso != target2) {
382 Now we have exclusive rights to the target TSO...
384 If it is blocking exceptions, add the source TSO to its
385 blocked_exceptions queue. Otherwise, raise the exception.
387 if ((target->flags & TSO_BLOCKEX) &&
388 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
390 blockedThrowTo(source,target);
393 return THROWTO_BLOCKED;
395 removeThreadFromQueue(&target2->blocked_exceptions, target);
396 raiseAsync(cap, target, exception, rtsFalse, NULL);
397 unblockOne(cap, target);
399 return THROWTO_SUCCESS;
405 // Unblocking BlockedOnSTM threads requires the TSO to be
406 // locked; see STM.c:unpark_tso().
407 if (target->why_blocked != BlockedOnSTM) {
410 if ((target->flags & TSO_BLOCKEX) &&
411 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
412 blockedThrowTo(source,target);
414 return THROWTO_BLOCKED;
416 raiseAsync(cap, target, exception, rtsFalse, NULL);
417 unblockOne(cap, target);
419 return THROWTO_SUCCESS;
423 case BlockedOnCCall_NoUnblockExc:
424 // I don't think it's possible to acquire ownership of a
425 // BlockedOnCCall thread. We just assume that the target
426 // thread is blocking exceptions, and block on its
427 // blocked_exception queue.
429 blockedThrowTo(source,target);
431 return THROWTO_BLOCKED;
433 #ifndef THREADEDED_RTS
437 if ((target->flags & TSO_BLOCKEX) &&
438 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
439 blockedThrowTo(source,target);
440 return THROWTO_BLOCKED;
442 removeFromQueues(cap,target);
443 raiseAsync(cap, target, exception, rtsFalse, NULL);
444 return THROWTO_SUCCESS;
449 barf("throwTo: unrecognised why_blocked value");
// Block a TSO on another TSO's blocked_exceptions queue.
// Precondition: we hold an exclusive lock on the target TSO (this is
// complex to achieve as there's no single lock on a TSO; see
// throwTo() above, which acquires the appropriate lock per
// blocking-state before calling here).
blockedThrowTo (StgTSO *source, StgTSO *target)
    debugTrace(DEBUG_sched, "throwTo: blocking on thread %d", target->id);
    // cons 'source' onto the head of target's blocked_exceptions queue
    source->link = target->blocked_exceptions;
    target->blocked_exceptions = source;
    dirtyTSO(target); // we modified the blocked_exceptions queue

    // Publish block_info before why_blocked: a reader that observes
    // BlockedOnException must also see the target TSO pointer.
    source->block_info.tso = target;
    write_barrier(); // throwTo_exception *must* be visible if BlockedOnException is.
    source->why_blocked = BlockedOnException;
// Callback for the THROWTO_BLOCKED protocol (see throwTo above):
// invoked once the source thread has been tidied away; releases the
// lock held on the target TSO.  'tso' is passed as void* to match
// the generic callback signature.
throwToReleaseTarget (void *tso)
    unlockTSO((StgTSO *)tso);
480 /* -----------------------------------------------------------------------------
481 Waking up threads blocked in throwTo
483 There are two ways to do this: maybePerformBlockedException() will
484 perform the throwTo() for the thread at the head of the queue
485 immediately, and leave the other threads on the queue.
486 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
487 before raising an exception.
489 awakenBlockedExceptionQueue() will wake up all the threads in the
490 queue, but not perform any throwTo() immediately. This might be
491 more appropriate when the target thread is the one actually running
493 -------------------------------------------------------------------------- */
// If 'tso' has pending throwTo requests and is not currently blocking
// exceptions (or is blocked interruptibly), raise the first pending
// exception immediately.  Run in the context of the target thread.
maybePerformBlockedException (Capability *cap, StgTSO *tso)
    if (tso->blocked_exceptions != END_TSO_QUEUE
        && ((tso->flags & TSO_BLOCKEX) == 0
            || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {

        // Lock the TSO, this gives us exclusive access to the queue

        // Check the queue again; it might have changed before we
        if (tso->blocked_exceptions == END_TSO_QUEUE) {

        // We unblock just the first thread on the queue, and perform
        // its throw immediately.
        source = tso->blocked_exceptions;
        performBlockedException(cap, source, tso);
        // unblockOne_ returns the rest of the queue, which becomes
        // the new blocked_exceptions list.
        tso->blocked_exceptions = unblockOne_(cap, source,
                                              rtsFalse/*no migrate*/);
// Wake up every thread blocked on tso's blocked_exceptions queue
// without performing any throwTo here; each awakened thread will
// retry its throwTo itself (see the commentary above).
awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
    if (tso->blocked_exceptions != END_TSO_QUEUE) {
        awakenBlockedQueue(cap, tso->blocked_exceptions);
        tso->blocked_exceptions = END_TSO_QUEUE;
// Raise the exception that blocked thread 'source' was trying to
// throw to 'target'.  The exception closure is recovered from the
// stack layout that stg_block_throwto left on source's stack:
// sp[0] = stg_block_throwto_info, sp[1] = target TSO, sp[2] = exception.
performBlockedException (Capability *cap, StgTSO *source, StgTSO *target)
    StgClosure *exception;

    ASSERT(source->why_blocked == BlockedOnException);
    ASSERT(source->block_info.tso->id == target->id);
    ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
    ASSERT(((StgTSO *)source->sp[1])->id == target->id);
    // check ids not pointers, because the thread might be relocated

    exception = (StgClosure *)source->sp[2];
    throwToSingleThreaded(cap, target, exception);
551 /* -----------------------------------------------------------------------------
552 Remove a thread from blocking queues.
554 This is for use when we raise an exception in another thread, which
556 This has nothing to do with the UnblockThread event in GranSim. -- HWL
557 -------------------------------------------------------------------------- */
559 #if defined(GRAN) || defined(PARALLEL_HASKELL)
561 NB: only the type of the blocking queue is different in GranSim and GUM
562 the operations on the queue-elements are the same
563 long live polymorphism!
565 Locks: sched_mutex is held upon entry and exit.
569 removeFromQueues(Capability *cap, StgTSO *tso)
571 StgBlockingQueueElement *t, **last;
573 switch (tso->why_blocked) {
576 return; /* not blocked */
579 // Be careful: nothing to do here! We tell the scheduler that the thread
580 // is runnable and we leave it to the stack-walking code to abort the
581 // transaction while unwinding the stack. We should perhaps have a debugging
582 // test to make sure that this really happens and that the 'zombie' transaction
583 // does not get committed.
587 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
589 StgBlockingQueueElement *last_tso = END_BQ_QUEUE;
590 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
592 last = (StgBlockingQueueElement **)&mvar->head;
593 for (t = (StgBlockingQueueElement *)mvar->head;
595 last = &t->link, last_tso = t, t = t->link) {
596 if (t == (StgBlockingQueueElement *)tso) {
597 *last = (StgBlockingQueueElement *)tso->link;
598 if (mvar->tail == tso) {
599 mvar->tail = (StgTSO *)last_tso;
604 barf("removeFromQueues (MVAR): TSO not found");
607 case BlockedOnBlackHole:
608 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
610 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
612 last = &bq->blocking_queue;
613 for (t = bq->blocking_queue;
615 last = &t->link, t = t->link) {
616 if (t == (StgBlockingQueueElement *)tso) {
617 *last = (StgBlockingQueueElement *)tso->link;
621 barf("removeFromQueues (BLACKHOLE): TSO not found");
624 case BlockedOnException:
626 StgTSO *target = tso->block_info.tso;
628 ASSERT(get_itbl(target)->type == TSO);
630 while (target->what_next == ThreadRelocated) {
631 target = target2->link;
632 ASSERT(get_itbl(target)->type == TSO);
635 last = (StgBlockingQueueElement **)&target->blocked_exceptions;
636 for (t = (StgBlockingQueueElement *)target->blocked_exceptions;
638 last = &t->link, t = t->link) {
639 ASSERT(get_itbl(t)->type == TSO);
640 if (t == (StgBlockingQueueElement *)tso) {
641 *last = (StgBlockingQueueElement *)tso->link;
645 barf("removeFromQueues (Exception): TSO not found");
650 #if defined(mingw32_HOST_OS)
651 case BlockedOnDoProc:
654 /* take TSO off blocked_queue */
655 StgBlockingQueueElement *prev = NULL;
656 for (t = (StgBlockingQueueElement *)blocked_queue_hd; t != END_BQ_QUEUE;
657 prev = t, t = t->link) {
658 if (t == (StgBlockingQueueElement *)tso) {
660 blocked_queue_hd = (StgTSO *)t->link;
661 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
662 blocked_queue_tl = END_TSO_QUEUE;
665 prev->link = t->link;
666 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
667 blocked_queue_tl = (StgTSO *)prev;
670 #if defined(mingw32_HOST_OS)
671 /* (Cooperatively) signal that the worker thread should abort
674 abandonWorkRequest(tso->block_info.async_result->reqID);
679 barf("removeFromQueues (I/O): TSO not found");
684 /* take TSO off sleeping_queue */
685 StgBlockingQueueElement *prev = NULL;
686 for (t = (StgBlockingQueueElement *)sleeping_queue; t != END_BQ_QUEUE;
687 prev = t, t = t->link) {
688 if (t == (StgBlockingQueueElement *)tso) {
690 sleeping_queue = (StgTSO *)t->link;
692 prev->link = t->link;
697 barf("removeFromQueues (delay): TSO not found");
701 barf("removeFromQueues");
705 tso->link = END_TSO_QUEUE;
706 tso->why_blocked = NotBlocked;
707 tso->block_info.closure = NULL;
708 pushOnRunQueue(cap,tso);
// Standard (non-GranSim) variant: unlink 'tso' from whatever queue it
// is blocked on, mark it runnable, and append it to cap's run queue.
removeFromQueues(Capability *cap, StgTSO *tso)
    switch (tso->why_blocked) {

        // Be careful: nothing to do here!  We tell the scheduler that the
        // thread is runnable and we leave it to the stack-walking code to
        // abort the transaction while unwinding the stack.  We should
        // perhaps have a debugging test to make sure that this really
        // happens and that the 'zombie' transaction does not get

        removeThreadFromMVarQueue((StgMVar *)tso->block_info.closure, tso);

    case BlockedOnBlackHole:
        removeThreadFromQueue(&blackhole_queue, tso);

    case BlockedOnException:
        StgTSO *target = tso->block_info.tso;

        // NO: when called by threadPaused(), we probably have this
        // TSO already locked (WHITEHOLEd) because we just placed
        // ourselves on its queue.
        // ASSERT(get_itbl(target)->type == TSO);

        // chase relocation links to find the current incarnation of
        // the thread we are blocked on
        while (target->what_next == ThreadRelocated) {
            target = target->link;

        removeThreadFromQueue(&target->blocked_exceptions, tso);

#if !defined(THREADED_RTS)
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
        removeThreadFromDeQueue(&blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
        /* (Cooperatively) signal that the worker thread should abort
         */
        abandonWorkRequest(tso->block_info.async_result->reqID);

        removeThreadFromQueue(&sleeping_queue, tso);

        barf("removeFromQueues");

    // tso is off every queue now; make it runnable on this capability.
    tso->link = END_TSO_QUEUE;
    tso->why_blocked = NotBlocked;
    tso->block_info.closure = NULL;
    appendToRunQueue(cap,tso);

    // We might have just migrated this TSO to our Capability:
    tso->bound->cap = cap;
791 /* -----------------------------------------------------------------------------
794 * The following function implements the magic for raising an
795 * asynchronous exception in an existing thread.
797 * We first remove the thread from any queue on which it might be
798 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
800 * We strip the stack down to the innermost CATCH_FRAME, building
801 * thunks in the heap for all the active computations, so they can
802 * be restarted if necessary. When we reach a CATCH_FRAME, we build
803 * an application of the handler to the exception, and push it on
804 * the top of the stack.
806 * How exactly do we save all the active computations? We create an
807 * AP_STACK for every UpdateFrame on the stack. Entering one of these
808 * AP_STACKs pushes everything from the corresponding update frame
809 * upwards onto the stack. (Actually, it pushes everything up to the
810 * next update frame plus a pointer to the next AP_STACK object.
811 * Entering the next AP_STACK object pushes more onto the stack until we
812 * reach the last AP_STACK object - at which point the stack should look
813 * exactly as it did when we killed the TSO and we can continue
814 * execution by entering the closure on top of the stack.
816 * We can also kill a thread entirely - this happens if either (a) the
817 * exception passed to raiseAsync is NULL, or (b) there's no
818 * CATCH_FRAME on the stack. In either case, we strip the entire
819 * stack and replace the thread with a zombie.
821 * ToDo: in THREADED_RTS mode, this function is only safe if either
822 * (a) we hold all the Capabilities (eg. in GC, or if there is only
823 * one Capability), or (b) we own the Capability that the TSO is
824 * currently blocked on or on the run queue of.
826 * -------------------------------------------------------------------------- */
// Core of asynchronous exception delivery (see the long commentary
// above): walk tso's stack from the top, freezing each update frame's
// computation into an AP_STACK, until a CATCH_FRAME (raise into the
// handler), STOP_FRAME (kill the thread), ATOMICALLY_FRAME /
// CATCH_RETRY_FRAME (condemn or abort the transaction), or stop_here
// is reached.  exception == NULL means strip/kill only.
raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
           rtsBool stop_at_atomically, StgPtr stop_here)
    StgRetInfoTable *info;

    debugTrace(DEBUG_sched,
               "raising exception in thread %ld.", (long)tso->id);

    // mark it dirty; we're about to change its stack.

    // ASSUMES: the thread is not already complete or dead.  Upper
    // layers should deal with that.
    ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);

    // The stack freezing code assumes there's a closure pointer on
    // the top of the stack, so we have to arrange that this is the case...
    if (sp[0] == (W_)&stg_enter_info) {
        sp[0] = (W_)&stg_dummy_ret_closure;

    // Walk frames until we hit stop_here (if given) or a terminating frame.
    while (stop_here == NULL || frame < stop_here) {

        // 1. Let the top of the stack be the "current closure"
        //
        // 2. Walk up the stack until we find either an UPDATE_FRAME or a
        //
        // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
        //    current closure applied to the chunk of stack up to (but not
        //    including) the update frame.  This closure becomes the "current
        //    closure".  Go back to step 2.
        //
        // 4. If it's a CATCH_FRAME, then leave the exception handler on
        //    top of the stack applied to the exception.
        //
        // 5. If it's a STOP_FRAME, then kill the thread.
        //
        // NB: if we pass an ATOMICALLY_FRAME then abort the associated

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

            // First build an AP_STACK consisting of the stack chunk above the
            // current update frame, with the top word on the stack as the

            words = frame - sp - 1;
            ap = (StgAP_STACK *)allocateLocal(cap,AP_STACK_sizeW(words));

            // copy the stack chunk into the AP_STACK's payload
            ap->fun = (StgClosure *)sp[0];
            for(i=0; i < (nat)words; ++i) {
                ap->payload[i] = (StgClosure *)*sp++;

            SET_HDR(ap,&stg_AP_STACK_info,
                    ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);

            //IF_DEBUG(scheduler,
            //         debugBelch("sched: Updating ");
            //         printPtr((P_)((StgUpdateFrame *)frame)->updatee);
            //         debugBelch(" with ");
            //         printObj((StgClosure *)ap);

            // Replace the updatee with an indirection

            // Warning: if we're in a loop, more than one update frame on
            // the stack may point to the same object.  Be careful not to
            // overwrite an IND_OLDGEN in this case, because we'll screw
            // up the mutable lists.  To be on the safe side, don't
            // overwrite any kind of indirection at all.  See also
            // threadSqueezeStack in GC.c, where we have to make a similar

            if (!closure_IND(((StgUpdateFrame *)frame)->updatee)) {
                // revert the black hole
                UPD_IND_NOLOCK(((StgUpdateFrame *)frame)->updatee,

            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; // push onto stack
            continue; //no need to bump frame

            // We've stripped the entire stack, the thread is now dead.
            tso->what_next = ThreadKilled;
            tso->sp = frame + sizeofW(StgStopFrame);

            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
            // top of the CATCH_FRAME ready to enter.

            StgCatchFrame *cf = (StgCatchFrame *)frame;

            if (exception == NULL) break;

            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.

            raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;

            // throw away the stack from Sp up to the CATCH_FRAME.

            /* Ensure that async exceptions are blocked now, so we don't get
             * a surprise exception before we get around to executing the
             */
            tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;

            /* Put the newly-built THUNK on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[-1] = (W_)&stg_enter_info;

            tso->what_next = ThreadRunGHC;
            IF_DEBUG(sanity, checkTSO(tso));

        case ATOMICALLY_FRAME:
            if (stop_at_atomically) {
                ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
                stmCondemnTransaction(cap, tso -> trec);

                // R1 is not a register: the return convention for IO in
                // this case puts the return value on the stack, so we
                // need to set up the stack to return to the atomically

                tso->sp[1] = (StgWord) &stg_NO_FINALIZER_closure; // why not?
                tso->sp[0] = (StgWord) &stg_ut_1_0_unreg_info;

                tso->what_next = ThreadRunGHC;

            // Not stop_at_atomically... fall through and abort the

        case CATCH_RETRY_FRAME:
            // IF we find an ATOMICALLY_FRAME then we abort the
            // current transaction and propagate the exception.  In
            // this case (unlike ordinary exceptions) we do not care
            // whether the transaction is valid or not because its
            // possible validity cannot have caused the exception
            // and will not be visible after the abort.
            debugTrace(DEBUG_stm,
                       "found atomically block delivering async exception");

            StgTRecHeader *trec = tso -> trec;
            StgTRecHeader *outer = stmGetEnclosingTRec(trec);
            stmAbortTransaction(cap, trec);
            tso -> trec = outer;

        // move on to the next stack frame
        frame += stack_frame_sizeW((StgClosure *)frame);

    // if we got here, then we stopped at stop_here
    ASSERT(stop_here != NULL);