1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Asynchronous exceptions
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
13 #include "RaiseAsync.h"
17 #include "LdvProfile.h"
// Forward declarations for the static helpers defined later in this file.
// NOTE(review): the raiseAsync prototype is visibly truncated in this
// excerpt — the StgTSO * parameter and the trailing StgPtr stop_here
// parameter (seen at the definition, original line ~830) are missing here.
22 static void raiseAsync (Capability *cap,
24 StgClosure *exception,
25 rtsBool stop_at_atomically,
// Detach 'tso' from whatever blocking queue it currently sits on, and make
// it runnable again (see the two removeFromQueues definitions below).
28 static void removeFromQueues(Capability *cap, StgTSO *tso);
// Enqueue 'source' on 'target''s blocked_exceptions queue and mark 'source'
// as BlockedOnException.
30 static void blockedThrowTo (StgTSO *source, StgTSO *target);
// Raise the exception carried on 'source''s stack in 'target' (used when
// draining a blocked_exceptions queue).
32 static void performBlockedException (Capability *cap,
33 StgTSO *source, StgTSO *target);
35 /* -----------------------------------------------------------------------------
38 This version of throwTo is safe to use if and only if one of the
43 - all the other threads in the system are stopped (eg. during GC).
45 - we surely own the target TSO (eg. we just took it from the
46 run queue of the current capability, or we are running it).
48 It doesn't cater for blocking the source thread until the exception
50 -------------------------------------------------------------------------- */
// Raise 'exception' in 'tso', assuming we already own the target (see the
// preconditions in the comment above). Thin wrapper: delegates to the
// general worker with stop_at_atomically=rtsFalse and no stop_here address.
// NOTE(review): the function's braces are not visible in this excerpt.
53 throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
55 throwToSingleThreaded_(cap, tso, exception, rtsFalse, NULL);
// Worker for throwToSingleThreaded()/suspendComputation(): unhook the target
// from any blocking queue, then strip its stack via raiseAsync().
//   stop_at_atomically - stop unwinding at an ATOMICALLY_FRAME (STM support)
//   stop_here          - stop unwinding at this stack address, or NULL
59 throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
60 rtsBool stop_at_atomically, StgPtr stop_here)
62 // Thread already dead?
// A finished/killed thread needs no exception; the early-return body is
// not visible in this excerpt (original lines 64-65).
63 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
67 // Remove it from any blocking queues
68 removeFromQueues(cap,tso);
70 raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
// Suspend a computation by stripping the target's stack down to 'stop_here'
// WITHOUT raising any exception: note the NULL exception argument, which
// raiseAsync() treats as "freeze the stack into AP_STACKs only".
74 suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here)
76 // Thread already dead?
// Early return for finished/killed threads (return body not visible here).
77 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
81 // Remove it from any blocking queues
82 removeFromQueues(cap,tso);
84 raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
87 /* -----------------------------------------------------------------------------
90 This function may be used to throw an exception from one thread to
91 another, during the course of normal execution. This is a tricky
92 task: the target thread might be running on another CPU, or it
93 may be blocked and could be woken up at any point by another CPU.
94 We have some delicate synchronisation to do.
96 There is a completely safe fallback scheme: it is always possible
97 to just block the source TSO on the target TSO's blocked_exceptions
98 queue. This queue is locked using lockTSO()/unlockTSO(). It is
99 checked at regular intervals: before and after running a thread
100 (schedule() and threadPaused() respectively), and just before GC
101 (scheduleDoGC()). Activating a thread on this queue should be done
102 using maybePerformBlockedException(): this is done in the context
103 of the target thread, so the exception can be raised eagerly.
105 This fallback scheme works even if the target thread is complete or
106 killed: scheduleDoGC() will discover the blocked thread before the
109 Blocking the source thread on the target thread's blocked_exceptions
110 queue is also employed when the target thread is currently blocking
111 exceptions (ie. inside Control.Exception.block).
113 We could use the safe fallback scheme exclusively, but that
114 wouldn't be ideal: most calls to throwTo would block immediately,
115 possibly until the next GC, which might require the deadlock
116 detection mechanism to kick in. So we try to provide promptness
119 We can promptly deliver the exception if the target thread is:
121 - runnable, on the same Capability as the source thread (because
122 we own the run queue and therefore the target thread).
124 - blocked, and we can obtain exclusive access to it. Obtaining
125 exclusive access to the thread depends on how it is blocked.
127 We must also be careful to not trip over threadStackOverflow(),
128 which might be moving the TSO to enlarge its stack.
129 lockTSO()/unlockTSO() are used here too.
133 THROWTO_SUCCESS exception was raised, ok to continue
135 THROWTO_BLOCKED exception was not raised; block the source
136 thread then call throwToReleaseTarget() when
137 the source thread is properly tidied away.
139 -------------------------------------------------------------------------- */
// throwTo: deliver an asynchronous exception from 'source' to 'target',
// possibly across Capabilities. Returns THROWTO_SUCCESS if the exception
// was raised immediately, or THROWTO_BLOCKED if 'source' was queued on
// 'target''s blocked_exceptions queue (caller must then tidy 'source' away
// and call throwToReleaseTarget() via *out where applicable).
//
// NOTE(review): this excerpt is heavily sampled — several case labels
// (e.g. the NotBlocked/BlockedOnMVar/BlockedOnSTM labels), 'goto retry'
// statements, local declarations ('status', 'mvar', 'target2', 'retry:')
// and closing braces fall in the gaps between the visible lines.
142 throwTo (Capability *cap, // the Capability we hold
143 StgTSO *source, // the TSO sending the exception
144 StgTSO *target, // the TSO receiving the exception
145 StgClosure *exception, // the exception closure
146 /*[out]*/ void **out USED_IF_THREADS)
150 // follow ThreadRelocated links in the target first
151 while (target->what_next == ThreadRelocated) {
152 target = target->link;
153 // No, it might be a WHITEHOLE:
154 // ASSERT(get_itbl(target)->type == TSO);
157 debugTrace(DEBUG_sched, "throwTo: from thread %lu to thread %lu",
158 (unsigned long)source->id, (unsigned long)target->id);
161 if (traceClass(DEBUG_sched)) {
162 debugTraceBegin("throwTo: target");
163 printThreadStatus(target);
// retry label lives in the gap above this trace (original line ~169).
170 debugTrace(DEBUG_sched, "throwTo: retrying...");
173 // Thread already dead?
174 if (target->what_next == ThreadComplete
175 || target->what_next == ThreadKilled) {
176 return THROWTO_SUCCESS;
// Snapshot why_blocked once; the switch over it follows.
179 status = target->why_blocked;
183 /* if status==NotBlocked, and target->cap == cap, then
184 we own this TSO and can raise the exception.
186 How do we establish this condition? Very carefully.
189 P = (status == NotBlocked)
190 Q = (tso->cap == cap)
192 Now, if P & Q are true, then the TSO is locked and owned by
193 this capability. No other OS thread can steal it.
195 If P==0 and Q==1: the TSO is blocked, but attached to this
196 capability, and it can be stolen by another capability.
198 If P==1 and Q==0: the TSO is runnable on another
199 capability. At any time, the TSO may change from runnable
200 to blocked and vice versa, while it remains owned by
203 Suppose we test like this:
209 this is defeated by another capability stealing a blocked
210 TSO from us to wake it up (Schedule.c:unblockOne()). The
211 other thread is doing
216 assuming arbitrary reordering, we could see this
226 so we need a memory barrier:
233 this avoids the problematic case. There are other cases
234 to consider, but this is the tricky one.
236 Note that we must be sure that unblockOne() does the
237 writes in the correct order: Q before P. The memory
238 barrier ensures that if we have seen the write to P, we
239 have also seen the write to Q.
242 Capability *target_cap;
245 target_cap = target->cap;
246 if (target_cap == cap && (target->flags & TSO_BLOCKEX) == 0) {
247 // It's on our run queue and not blocking exceptions
248 raiseAsync(cap, target, exception, rtsFalse, NULL);
249 return THROWTO_SUCCESS;
251 // Otherwise, just block on the blocked_exceptions queue
252 // of the target thread. The queue will get looked at
253 // soon enough: it is checked before and after running a
254 // thread, and during GC.
257 // Avoid race with threadStackOverflow, which may have
258 // just moved this TSO.
259 if (target->what_next == ThreadRelocated) {
261 target = target->link;
264 blockedThrowTo(source,target);
266 return THROWTO_BLOCKED;
// ---- BlockedOnMVar case (label in the gap above) ----
273 To establish ownership of this TSO, we need to acquire a
274 lock on the MVar that it is blocked on.
277 StgInfoTable *info USED_IF_THREADS;
279 mvar = (StgMVar *)target->block_info.closure;
281 // ASSUMPTION: tso->block_info must always point to a
282 // closure. In the threaded RTS it does.
283 if (get_itbl(mvar)->type != MVAR) goto retry;
285 info = lockClosure((StgClosure *)mvar);
// Re-check relocation after taking the lock; must release before retrying.
287 if (target->what_next == ThreadRelocated) {
288 target = target->link;
289 unlockClosure((StgClosure *)mvar,info);
292 // we have the MVar, let's check whether the thread
293 // is still blocked on the same MVar.
294 if (target->why_blocked != BlockedOnMVar
295 || (StgMVar *)target->block_info.closure != mvar) {
296 unlockClosure((StgClosure *)mvar, info);
// Target is blocking async exceptions and not interruptible: queue up.
300 if ((target->flags & TSO_BLOCKEX) &&
301 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
302 lockClosure((StgClosure *)target);
303 blockedThrowTo(source,target);
304 unlockClosure((StgClosure *)mvar, info);
306 return THROWTO_BLOCKED; // caller releases TSO
// We own the target: pull it off the MVar, raise, and wake it.
308 removeThreadFromMVarQueue(mvar, target);
309 raiseAsync(cap, target, exception, rtsFalse, NULL);
310 unblockOne(cap, target);
311 unlockClosure((StgClosure *)mvar, info);
312 return THROWTO_SUCCESS;
316 case BlockedOnBlackHole:
// The global blackhole_queue is protected by sched_mutex.
318 ACQUIRE_LOCK(&sched_mutex);
319 // double checking the status after the memory barrier:
320 if (target->why_blocked != BlockedOnBlackHole) {
321 RELEASE_LOCK(&sched_mutex);
325 if (target->flags & TSO_BLOCKEX) {
327 blockedThrowTo(source,target);
328 RELEASE_LOCK(&sched_mutex);
330 return THROWTO_BLOCKED; // caller releases TSO
332 removeThreadFromQueue(&blackhole_queue, target);
333 raiseAsync(cap, target, exception, rtsFalse, NULL);
334 unblockOne(cap, target);
335 RELEASE_LOCK(&sched_mutex);
336 return THROWTO_SUCCESS;
340 case BlockedOnException:
346 To obtain exclusive access to a BlockedOnException thread,
347 we must call lockClosure() on the TSO on which it is blocked.
348 Since the TSO might change underneath our feet, after we
349 call lockClosure() we must check that
351 (a) the closure we locked is actually a TSO
352 (b) the original thread is still BlockedOnException,
353 (c) the original thread is still blocked on the TSO we locked
354 and (d) the target thread has not been relocated.
356 We synchronise with threadStackOverflow() (which relocates
357 threads) using lockClosure()/unlockClosure().
359 target2 = target->block_info.tso;
361 info = lockClosure((StgClosure *)target2);
362 if (info != &stg_TSO_info) {
363 unlockClosure((StgClosure *)target2, info);
366 if (target->what_next == ThreadRelocated) {
367 target = target->link;
371 if (target2->what_next == ThreadRelocated) {
372 target->block_info.tso = target2->link;
376 if (target->why_blocked != BlockedOnException
377 || target->block_info.tso != target2) {
383 Now we have exclusive rights to the target TSO...
385 If it is blocking exceptions, add the source TSO to its
386 blocked_exceptions queue. Otherwise, raise the exception.
388 if ((target->flags & TSO_BLOCKEX) &&
389 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
391 blockedThrowTo(source,target);
394 return THROWTO_BLOCKED;
396 removeThreadFromQueue(&target2->blocked_exceptions, target);
397 raiseAsync(cap, target, exception, rtsFalse, NULL);
398 unblockOne(cap, target);
400 return THROWTO_SUCCESS;
// ---- BlockedOnSTM case (label in the gap above) ----
406 // Unblocking BlockedOnSTM threads requires the TSO to be
407 // locked; see STM.c:unpark_tso().
408 if (target->why_blocked != BlockedOnSTM) {
411 if ((target->flags & TSO_BLOCKEX) &&
412 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
413 blockedThrowTo(source,target);
415 return THROWTO_BLOCKED;
417 raiseAsync(cap, target, exception, rtsFalse, NULL);
418 unblockOne(cap, target);
420 return THROWTO_SUCCESS;
424 case BlockedOnCCall_NoUnblockExc:
425 // I don't think it's possible to acquire ownership of a
426 // BlockedOnCCall thread. We just assume that the target
427 // thread is blocking exceptions, and block on its
428 // blocked_exceptions queue.
430 blockedThrowTo(source,target);
432 return THROWTO_BLOCKED;
// NOTE(review): "THREADEDED_RTS" below is a misspelling of THREADED_RTS,
// so this #ifndef is always true (the macro is never defined). The fix
// cannot be made safely here because the surrounding case structure is
// truncated in this excerpt — flagging for follow-up.
434 #ifndef THREADEDED_RTS
438 if ((target->flags & TSO_BLOCKEX) &&
439 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
440 blockedThrowTo(source,target);
441 return THROWTO_BLOCKED;
443 removeFromQueues(cap,target);
444 raiseAsync(cap, target, exception, rtsFalse, NULL);
445 return THROWTO_SUCCESS;
// Defensive default: an unknown why_blocked value is an RTS invariant
// violation, so abort hard.
450 barf("throwTo: unrecognised why_blocked value");
455 // Block a TSO on another TSO's blocked_exceptions queue.
456 // Precondition: we hold an exclusive lock on the target TSO (this is
457 // complex to achieve as there's no single lock on a TSO; see
// Push 'source' onto the front of 'target''s blocked_exceptions queue and
// mark 'source' as BlockedOnException on 'target'.
// Precondition (per the comment above): caller holds exclusive access to
// 'target' (lockTSO or equivalent).
460 blockedThrowTo (StgTSO *source, StgTSO *target)
462 debugTrace(DEBUG_sched, "throwTo: blocking on thread %lu", (unsigned long)target->id);
// Cons 'source' onto the head of the queue.
463 source->link = target->blocked_exceptions;
464 target->blocked_exceptions = source;
465 dirtyTSO(target); // we modified the blocked_exceptions queue
// Record whom we are blocked on BEFORE publishing the blocked state; the
// write barrier orders the two stores for observers of why_blocked.
467 source->block_info.tso = target;
468 write_barrier(); // throwTo_exception *must* be visible if BlockedOnException is.
469 source->why_blocked = BlockedOnException;
// Callback handed to the scheduler (via throwTo's *out parameter) to drop
// the lock on the target TSO after the source thread has been tidied away.
// Takes void* so it can be stored as a generic continuation.
475 throwToReleaseTarget (void *tso)
477 unlockTSO((StgTSO *)tso);
481 /* -----------------------------------------------------------------------------
482 Waking up threads blocked in throwTo
484 There are two ways to do this: maybePerformBlockedException() will
485 perform the throwTo() for the thread at the head of the queue
486 immediately, and leave the other threads on the queue.
487 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
488 before raising an exception.
490 awakenBlockedExceptionQueue() will wake up all the threads in the
491 queue, but not perform any throwTo() immediately. This might be
492 more appropriate when the target thread is the one actually running
494 -------------------------------------------------------------------------- */
// If 'tso' has pending blocked exceptions AND is currently receptive to
// them — either not blocking exceptions at all, or blocking interruptibly
// while in an interruptible state — raise the first one immediately.
// Only the head of the queue is processed; the rest stay queued.
497 maybePerformBlockedException (Capability *cap, StgTSO *tso)
501 if (tso->blocked_exceptions != END_TSO_QUEUE
502 && ((tso->flags & TSO_BLOCKEX) == 0
503 || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {
505 // Lock the TSO, this gives us exclusive access to the queue
// (the lockTSO call itself falls in a gap of this excerpt)
508 // Check the queue again; it might have changed before we
510 if (tso->blocked_exceptions == END_TSO_QUEUE) {
515 // We unblock just the first thread on the queue, and perform
516 // its throw immediately.
517 source = tso->blocked_exceptions;
518 performBlockedException(cap, source, tso);
// unblockOne_ returns the next queue entry; no migration of 'source'.
519 tso->blocked_exceptions = unblockOne_(cap, source,
520 rtsFalse/*no migrate*/);
// Wake every thread waiting on 'tso''s blocked_exceptions queue without
// raising any of the exceptions now; each woken thread will re-attempt its
// throwTo itself (see the comment block above).
526 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
528 if (tso->blocked_exceptions != END_TSO_QUEUE) {
530 awakenBlockedQueue(cap, tso->blocked_exceptions);
531 tso->blocked_exceptions = END_TSO_QUEUE;
// Raise, in 'target', the exception that 'source' parked on its own stack
// when it blocked in throwTo. The stack layout relied on here is the one
// pushed by stg_block_throwto: sp[0] = frame info, sp[1] = target TSO,
// sp[2] = the exception closure.
537 performBlockedException (Capability *cap, StgTSO *source, StgTSO *target)
539 StgClosure *exception;
// Sanity-check that 'source' really is blocked throwing to 'target'.
541 ASSERT(source->why_blocked == BlockedOnException);
542 ASSERT(source->block_info.tso->id == target->id);
543 ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
544 ASSERT(((StgTSO *)source->sp[1])->id == target->id);
545 // check ids not pointers, because the thread might be relocated
547 exception = (StgClosure *)source->sp[2];
// We are running in the target's context here, so single-threaded throw
// is safe (we own the target).
548 throwToSingleThreaded(cap, target, exception);
552 /* -----------------------------------------------------------------------------
553 Remove a thread from blocking queues.
555 This is for use when we raise an exception in another thread, which
557 This has nothing to do with the UnblockThread event in GranSim. -- HWL
558 -------------------------------------------------------------------------- */
560 #if defined(GRAN) || defined(PARALLEL_HASKELL)
562 NB: only the type of the blocking queue is different in GranSim and GUM
563 the operations on the queue-elements are the same
564 long live polymorphism!
566 Locks: sched_mutex is held upon entry and exit.
// GranSim/GUM variant of removeFromQueues (compiled only under GRAN or
// PARALLEL_HASKELL — see the #if above): detach 'tso' from whichever
// blocking queue its why_blocked indicates, then make it runnable.
// NOTE(review): several case labels, braces and 'goto done' statements fall
// in the gaps of this sampled excerpt.
570 removeFromQueues(Capability *cap, StgTSO *tso)
572 StgBlockingQueueElement *t, **last;
574 switch (tso->why_blocked) {
576 return; /* not blocked */
580 // Be careful: nothing to do here! We tell the scheduler that the thread
581 // is runnable and we leave it to the stack-walking code to abort the
582 // transaction while unwinding the stack. We should perhaps have a debugging
583 // test to make sure that this really happens and that the 'zombie' transaction
584 // does not get committed.
// ---- BlockedOnMVar: unlink from the MVar's singly-linked wait queue,
// tracking the predecessor so the tail pointer can be patched. ----
588 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
590 StgBlockingQueueElement *last_tso = END_BQ_QUEUE;
591 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
593 last = (StgBlockingQueueElement **)&mvar->head;
594 for (t = (StgBlockingQueueElement *)mvar->head;
596 last = &t->link, last_tso = t, t = t->link) {
597 if (t == (StgBlockingQueueElement *)tso) {
598 *last = (StgBlockingQueueElement *)tso->link;
599 if (mvar->tail == tso) {
600 mvar->tail = (StgTSO *)last_tso;
// Reaching here means the TSO claimed to be on this MVar but wasn't.
605 barf("removeFromQueues (MVAR): TSO not found");
608 case BlockedOnBlackHole:
609 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
611 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
613 last = &bq->blocking_queue;
614 for (t = bq->blocking_queue;
616 last = &t->link, t = t->link) {
617 if (t == (StgBlockingQueueElement *)tso) {
618 *last = (StgBlockingQueueElement *)tso->link;
622 barf("removeFromQueues (BLACKHOLE): TSO not found");
625 case BlockedOnException:
627 StgTSO *target = tso->block_info.tso;
629 ASSERT(get_itbl(target)->type == TSO);
631 while (target->what_next == ThreadRelocated) {
// NOTE(review): BUG — 'target2' is not declared in this function; this
// line almost certainly should read 'target = target->link;' (compare
// the threaded variant below). This GranSim path appears bit-rotted.
632 target = target2->link;
633 ASSERT(get_itbl(target)->type == TSO);
636 last = (StgBlockingQueueElement **)&target->blocked_exceptions;
637 for (t = (StgBlockingQueueElement *)target->blocked_exceptions;
639 last = &t->link, t = t->link) {
640 ASSERT(get_itbl(t)->type == TSO);
641 if (t == (StgBlockingQueueElement *)tso) {
642 *last = (StgBlockingQueueElement *)tso->link;
646 barf("removeFromQueues (Exception): TSO not found");
// ---- I/O blocking (BlockedOnRead/Write, and BlockedOnDoProc on
// Windows): unlink from the global blocked_queue deque. ----
651 #if defined(mingw32_HOST_OS)
652 case BlockedOnDoProc:
655 /* take TSO off blocked_queue */
656 StgBlockingQueueElement *prev = NULL;
657 for (t = (StgBlockingQueueElement *)blocked_queue_hd; t != END_BQ_QUEUE;
658 prev = t, t = t->link) {
659 if (t == (StgBlockingQueueElement *)tso) {
// Head removal vs. interior removal: patch head or prev->link, then
// fix the tail pointer if we removed the last element.
661 blocked_queue_hd = (StgTSO *)t->link;
662 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
663 blocked_queue_tl = END_TSO_QUEUE;
666 prev->link = t->link;
667 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
668 blocked_queue_tl = (StgTSO *)prev;
671 #if defined(mingw32_HOST_OS)
672 /* (Cooperatively) signal that the worker thread should abort
675 abandonWorkRequest(tso->block_info.async_result->reqID);
680 barf("removeFromQueues (I/O): TSO not found");
// ---- BlockedOnDelay: unlink from the sleeping_queue. ----
685 /* take TSO off sleeping_queue */
686 StgBlockingQueueElement *prev = NULL;
687 for (t = (StgBlockingQueueElement *)sleeping_queue; t != END_BQ_QUEUE;
688 prev = t, t = t->link) {
689 if (t == (StgBlockingQueueElement *)tso) {
691 sleeping_queue = (StgTSO *)t->link;
693 prev->link = t->link;
698 barf("removeFromQueues (delay): TSO not found");
702 barf("removeFromQueues");
// done: reset the TSO's blocking state and make it runnable again.
706 tso->link = END_TSO_QUEUE;
707 tso->why_blocked = NotBlocked;
708 tso->block_info.closure = NULL;
709 pushOnRunQueue(cap,tso);
// Standard (non-GranSim) removeFromQueues: detach 'tso' from whatever
// blocking queue its why_blocked indicates, then put it on this
// Capability's run queue. Case labels for NotBlocked/BlockedOnSTM/
// BlockedOnMVar etc. fall in the gaps of this sampled excerpt.
713 removeFromQueues(Capability *cap, StgTSO *tso)
715 switch (tso->why_blocked) {
721 // Be careful: nothing to do here! We tell the scheduler that the
722 // thread is runnable and we leave it to the stack-walking code to
723 // abort the transaction while unwinding the stack. We should
724 // perhaps have a debugging test to make sure that this really
725 // happens and that the 'zombie' transaction does not get
730 removeThreadFromMVarQueue((StgMVar *)tso->block_info.closure, tso);
733 case BlockedOnBlackHole:
734 removeThreadFromQueue(&blackhole_queue, tso);
737 case BlockedOnException:
739 StgTSO *target = tso->block_info.tso;
741 // NO: when called by threadPaused(), we probably have this
742 // TSO already locked (WHITEHOLEd) because we just placed
743 // ourselves on its queue.
744 // ASSERT(get_itbl(target)->type == TSO);
// Chase relocation links so we unlink from the live copy of the TSO.
746 while (target->what_next == ThreadRelocated) {
747 target = target->link;
750 removeThreadFromQueue(&target->blocked_exceptions, tso);
// I/O and delay blocking only exist in the non-threaded RTS.
754 #if !defined(THREADED_RTS)
757 #if defined(mingw32_HOST_OS)
758 case BlockedOnDoProc:
760 removeThreadFromDeQueue(&blocked_queue_hd, &blocked_queue_tl, tso);
761 #if defined(mingw32_HOST_OS)
762 /* (Cooperatively) signal that the worker thread should abort
765 abandonWorkRequest(tso->block_info.async_result->reqID);
770 removeThreadFromQueue(&sleeping_queue, tso);
775 barf("removeFromQueues");
// done: reset blocking state and make the thread runnable on this cap.
779 tso->link = END_TSO_QUEUE;
780 tso->why_blocked = NotBlocked;
781 tso->block_info.closure = NULL;
782 appendToRunQueue(cap,tso);
784 // We might have just migrated this TSO to our Capability:
// (guarded by a bound-thread check that falls in the gap above)
786 tso->bound->cap = cap;
792 /* -----------------------------------------------------------------------------
795 * The following function implements the magic for raising an
796 * asynchronous exception in an existing thread.
798 * We first remove the thread from any queue on which it might be
799 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
801 * We strip the stack down to the innermost CATCH_FRAME, building
802 * thunks in the heap for all the active computations, so they can
803 * be restarted if necessary. When we reach a CATCH_FRAME, we build
804 * an application of the handler to the exception, and push it on
805 * the top of the stack.
807 * How exactly do we save all the active computations? We create an
808 * AP_STACK for every UpdateFrame on the stack. Entering one of these
809 * AP_STACKs pushes everything from the corresponding update frame
810 * upwards onto the stack. (Actually, it pushes everything up to the
811 * next update frame plus a pointer to the next AP_STACK object.
812 * Entering the next AP_STACK object pushes more onto the stack until we
813 * reach the last AP_STACK object - at which point the stack should look
814 * exactly as it did when we killed the TSO and we can continue
815 * execution by entering the closure on top of the stack.
817 * We can also kill a thread entirely - this happens if either (a) the
818 * exception passed to raiseAsync is NULL, or (b) there's no
819 * CATCH_FRAME on the stack. In either case, we strip the entire
820 * stack and replace the thread with a zombie.
822 * ToDo: in THREADED_RTS mode, this function is only safe if either
823 * (a) we hold all the Capabilities (eg. in GC, or if there is only
824 * one Capability), or (b) we own the Capability that the TSO is
825 * currently blocked on or on the run queue of.
827 * -------------------------------------------------------------------------- */
// The core stack-stripping machinery described in the comment block above:
// walk 'tso''s stack from tso->sp upward, freezing each update-frame
// section into an AP_STACK closure, until we hit a CATCH_FRAME (raise the
// exception into its handler), a STOP_FRAME (kill the thread), an
// ATOMICALLY_FRAME / CATCH_RETRY_FRAME (condemn or abort the transaction),
// or the 'stop_here' address. exception == NULL means "suspend only".
// NOTE(review): local declarations (sp, frame, ap, words, i, raise), some
// case labels and closing braces fall in the gaps of this sampled excerpt.
830 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
831 rtsBool stop_at_atomically, StgPtr stop_here)
833 StgRetInfoTable *info;
837 debugTrace(DEBUG_sched,
838 "raising exception in thread %ld.", (long)tso->id);
840 // mark it dirty; we're about to change its stack.
845 // ASSUMES: the thread is not already complete or dead. Upper
846 // layers should deal with that.
847 ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);
849 // The stack freezing code assumes there's a closure pointer on
850 // the top of the stack, so we have to arrange that this is the case...
// A pending stg_enter leaves its argument underneath; otherwise push a
// dummy return closure so sp[0] is always a closure pointer.
852 if (sp[0] == (W_)&stg_enter_info) {
856 sp[0] = (W_)&stg_dummy_ret_closure;
// Main frame-walking loop; terminates via return/break inside, or when
// 'frame' reaches the caller-supplied stop_here address.
860 while (stop_here == NULL || frame < stop_here) {
862 // 1. Let the top of the stack be the "current closure"
864 // 2. Walk up the stack until we find either an UPDATE_FRAME or a
867 // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
868 // current closure applied to the chunk of stack up to (but not
869 // including) the update frame. This closure becomes the "current
870 // closure". Go back to step 2.
872 // 4. If it's a CATCH_FRAME, then leave the exception handler on
873 // top of the stack applied to the exception.
875 // 5. If it's a STOP_FRAME, then kill the thread.
877 // NB: if we pass an ATOMICALLY_FRAME then abort the associated
880 info = get_ret_itbl((StgClosure *)frame);
882 switch (info->i.type) {
// ---- UPDATE_FRAME (label in the gap above) ----
889 // First build an AP_STACK consisting of the stack chunk above the
890 // current update frame, with the top word on the stack as the
893 words = frame - sp - 1;
894 ap = (StgAP_STACK *)allocateLocal(cap,AP_STACK_sizeW(words));
897 ap->fun = (StgClosure *)sp[0];
// Copy the stack chunk into the AP_STACK's payload.
899 for(i=0; i < (nat)words; ++i) {
900 ap->payload[i] = (StgClosure *)*sp++;
903 SET_HDR(ap,&stg_AP_STACK_info,
904 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
905 TICK_ALLOC_UP_THK(words+1,0);
907 //IF_DEBUG(scheduler,
908 // debugBelch("sched: Updating ");
909 // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
910 // debugBelch(" with ");
911 // printObj((StgClosure *)ap);
914 // Replace the updatee with an indirection
916 // Warning: if we're in a loop, more than one update frame on
917 // the stack may point to the same object. Be careful not to
918 // overwrite an IND_OLDGEN in this case, because we'll screw
919 // up the mutable lists. To be on the safe side, don't
920 // overwrite any kind of indirection at all. See also
921 // threadSqueezeStack in GC.c, where we have to make a similar
924 if (!closure_IND(((StgUpdateFrame *)frame)->updatee)) {
925 // revert the black hole
926 UPD_IND_NOLOCK(((StgUpdateFrame *)frame)->updatee,
// Pop the update frame, leaving the new AP_STACK as the current closure.
929 sp += sizeofW(StgUpdateFrame) - 1;
930 sp[0] = (W_)ap; // push onto stack
932 continue; //no need to bump frame
// ---- STOP_FRAME (label in the gap above) ----
936 // We've stripped the entire stack, the thread is now dead.
937 tso->what_next = ThreadKilled;
938 tso->sp = frame + sizeofW(StgStopFrame);
// ---- CATCH_FRAME (label in the gap above) ----
942 // If we find a CATCH_FRAME, and we've got an exception to raise,
943 // then build the THUNK raise(exception), and leave it on
944 // top of the CATCH_FRAME ready to enter.
948 StgCatchFrame *cf = (StgCatchFrame *)frame;
// exception == NULL means suspendComputation(): keep unwinding.
952 if (exception == NULL) break;
954 // we've got an exception to raise, so let's pass it to the
955 // handler in this frame.
957 raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
958 TICK_ALLOC_SE_THK(1,0);
959 SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
960 raise->payload[0] = exception;
962 // throw away the stack from Sp up to the CATCH_FRAME.
966 /* Ensure that async exceptions are blocked now, so we don't get
967 * a surprise exception before we get around to executing the
970 tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
972 /* Put the newly-built THUNK on top of the stack, ready to execute
973 * when the thread restarts.
976 sp[-1] = (W_)&stg_enter_info;
978 tso->what_next = ThreadRunGHC;
979 IF_DEBUG(sanity, checkTSO(tso));
983 case ATOMICALLY_FRAME:
984 if (stop_at_atomically) {
// Condemn (rather than abort) so the transaction is re-run later.
985 ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
986 stmCondemnTransaction(cap, tso -> trec);
990 // R1 is not a register: the return convention for IO in
991 // this case puts the return value on the stack, so we
992 // need to set up the stack to return to the atomically
995 tso->sp[1] = (StgWord) &stg_NO_FINALIZER_closure; // why not?
996 tso->sp[0] = (StgWord) &stg_ut_1_0_unreg_info;
998 tso->what_next = ThreadRunGHC;
1001 // Not stop_at_atomically... fall through and abort the
1004 case CATCH_RETRY_FRAME:
1005 // IF we find an ATOMICALLY_FRAME then we abort the
1006 // current transaction and propagate the exception. In
1007 // this case (unlike ordinary exceptions) we do not care
1008 // whether the transaction is valid or not because its
1009 // possible validity cannot have caused the exception
1010 // and will not be visible after the abort.
1011 debugTrace(DEBUG_stm,
1012 "found atomically block delivering async exception");
// Abort the innermost transaction and pop back to its enclosing trec.
1014 StgTRecHeader *trec = tso -> trec;
1015 StgTRecHeader *outer = stmGetEnclosingTRec(trec);
1016 stmAbortTransaction(cap, trec);
1017 tso -> trec = outer;
1024 // move on to the next stack frame
1025 frame += stack_frame_sizeW((StgClosure *)frame);
1028 // if we got here, then we stopped at stop_here
1029 ASSERT(stop_here != NULL);