1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * Asynchronous exceptions
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
13 #include "RaiseAsync.h"
16 #include "LdvProfile.h"
20 #if defined(mingw32_HOST_OS)
21 #include "win32/IOManager.h"
// Forward declarations for the file-local helpers defined below.
// NOTE(review): the raiseAsync prototype here appears to be missing its
// StgTSO* and stop_here (StgPtr) parameter lines relative to the
// definition later in this file — lines look elided from this view;
// confirm against the original file.
24 static void raiseAsync (Capability *cap,
26 StgClosure *exception,
27 rtsBool stop_at_atomically,
// Unhook `tso` from whatever blocking queue it sits on (MVar, black
// hole, blocked_exceptions, I/O, sleep) and make it runnable again.
30 static void removeFromQueues(Capability *cap, StgTSO *tso);
// Enqueue `source` on `target`'s blocked_exceptions queue.
32 static void blockedThrowTo (StgTSO *source, StgTSO *target);
// Raise, in `target`, the exception carried on `source`'s stack
// (source is a thread that is BlockedOnException waiting for target).
34 static void performBlockedException (Capability *cap,
35 StgTSO *source, StgTSO *target);
37 /* -----------------------------------------------------------------------------
40 This version of throwTo is safe to use if and only if one of the
45 - all the other threads in the system are stopped (eg. during GC).
47 - we surely own the target TSO (eg. we just took it from the
48 run queue of the current capability, or we are running it).
50 It doesn't cater for blocking the source thread until the exception
52 -------------------------------------------------------------------------- */
// Raise `exception` asynchronously in `tso`.  Safe only under the
// conditions in the block comment above (all threads stopped, or we
// surely own the target TSO).  Thin wrapper over throwToSingleThreaded_
// with stop_at_atomically=rtsFalse and no stop_here.
55 throwToSingleThreaded(Capability *cap, StgTSO *tso, StgClosure *exception)
57 throwToSingleThreaded_(cap, tso, exception, rtsFalse, NULL);
// Worker shared by throwToSingleThreaded and suspendComputation: do
// nothing if the thread has already finished, otherwise unhook it from
// any blocking queue and hand off to raiseAsync (which strips the stack
// down to stop_here, raising `exception` if non-NULL).
61 throwToSingleThreaded_(Capability *cap, StgTSO *tso, StgClosure *exception,
62 rtsBool stop_at_atomically, StgPtr stop_here)
64 // Thread already dead?
65 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
69 // Remove it from any blocking queues
70 removeFromQueues(cap,tso);
72 raiseAsync(cap, tso, exception, stop_at_atomically, stop_here);
// Suspend a computation: like throwToSingleThreaded but with a NULL
// exception closure, so raiseAsync only freezes the stack (building
// AP_STACKs) down to `stop_here` without raising anything.
76 suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here)
78 // Thread already dead?
79 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
83 // Remove it from any blocking queues
84 removeFromQueues(cap,tso);
86 raiseAsync(cap, tso, NULL, rtsFalse, stop_here);
89 /* -----------------------------------------------------------------------------
92 This function may be used to throw an exception from one thread to
93 another, during the course of normal execution. This is a tricky
94 task: the target thread might be running on another CPU, or it
95 may be blocked and could be woken up at any point by another CPU.
96 We have some delicate synchronisation to do.
98 There is a completely safe fallback scheme: it is always possible
99 to just block the source TSO on the target TSO's blocked_exceptions
100 queue. This queue is locked using lockTSO()/unlockTSO(). It is
101 checked at regular intervals: before and after running a thread
102 (schedule() and threadPaused() respectively), and just before GC
103 (scheduleDoGC()). Activating a thread on this queue should be done
104 using maybePerformBlockedException(): this is done in the context
105 of the target thread, so the exception can be raised eagerly.
107 This fallback scheme works even if the target thread is complete or
108 killed: scheduleDoGC() will discover the blocked thread before the
111 Blocking the source thread on the target thread's blocked_exceptions
112 queue is also employed when the target thread is currently blocking
113 exceptions (ie. inside Control.Exception.block).
115 We could use the safe fallback scheme exclusively, but that
116 wouldn't be ideal: most calls to throwTo would block immediately,
117 possibly until the next GC, which might require the deadlock
118 detection mechanism to kick in. So we try to provide promptness
121 We can promptly deliver the exception if the target thread is:
123 - runnable, on the same Capability as the source thread (because
124 we own the run queue and therefore the target thread).
126 - blocked, and we can obtain exclusive access to it. Obtaining
127 exclusive access to the thread depends on how it is blocked.
129 We must also be careful to not trip over threadStackOverflow(),
130 which might be moving the TSO to enlarge its stack.
131 lockTSO()/unlockTSO() are used here too.
135 THROWTO_SUCCESS exception was raised, ok to continue
137 THROWTO_BLOCKED exception was not raised; block the source
138 thread then call throwToReleaseTarget() when
139 the source thread is properly tidied away.
141 -------------------------------------------------------------------------- */
// Throw an asynchronous exception from `source` to `target` while both
// may be running.  Returns THROWTO_SUCCESS if the exception was raised
// eagerly, or THROWTO_BLOCKED if `source` was parked on the target's
// blocked_exceptions queue — in that case the caller must tidy the
// source thread away and then call throwToReleaseTarget().  See the
// long comment above for the full synchronisation story.
144 throwTo (Capability *cap, // the Capability we hold
145 StgTSO *source, // the TSO sending the exception
146 StgTSO *target, // the TSO receiving the exception
147 StgClosure *exception, // the exception closure
148 /*[out]*/ void **out USED_IF_THREADS)
152 // follow ThreadRelocated links in the target first
153 while (target->what_next == ThreadRelocated) {
154 target = target->link;
155 // No, it might be a WHITEHOLE:
156 // ASSERT(get_itbl(target)->type == TSO);
159 debugTrace(DEBUG_sched, "throwTo: from thread %lu to thread %lu",
160 (unsigned long)source->id, (unsigned long)target->id);
163 if (traceClass(DEBUG_sched)) {
164 debugTraceBegin("throwTo: target");
165 printThreadStatus(target);
// Re-entry point: the cases below jump back here whenever the target's
// state changed under our feet and why_blocked must be re-read.
172 debugTrace(DEBUG_sched, "throwTo: retrying...");
175 // Thread already dead?
176 if (target->what_next == ThreadComplete
177 || target->what_next == ThreadKilled) {
178 return THROWTO_SUCCESS;
// Snapshot why_blocked once; each case re-validates it after locking.
181 status = target->why_blocked;
185 /* if status==NotBlocked, and target->cap == cap, then
186 we own this TSO and can raise the exception.
188 How do we establish this condition? Very carefully.
191 P = (status == NotBlocked)
192 Q = (tso->cap == cap)
194 Now, if P & Q are true, then the TSO is locked and owned by
195 this capability. No other OS thread can steal it.
197 If P==0 and Q==1: the TSO is blocked, but attached to this
198 capability, and it can be stolen by another capability.
200 If P==1 and Q==0: the TSO is runnable on another
201 capability. At any time, the TSO may change from runnable
202 to blocked and vice versa, while it remains owned by
205 Suppose we test like this:
211 this is defeated by another capability stealing a blocked
212 TSO from us to wake it up (Schedule.c:unblockOne()). The
213 other thread is doing
218 assuming arbitrary reordering, we could see this
228 so we need a memory barrier:
235 this avoids the problematic case. There are other cases
236 to consider, but this is the tricky one.
238 Note that we must be sure that unblockOne() does the
239 writes in the correct order: Q before P. The memory
240 barrier ensures that if we have seen the write to P, we
241 have also seen the write to Q.
244 Capability *target_cap;
247 target_cap = target->cap;
248 if (target_cap == cap && (target->flags & TSO_BLOCKEX) == 0) {
249 // It's on our run queue and not blocking exceptions
250 raiseAsync(cap, target, exception, rtsFalse, NULL);
251 return THROWTO_SUCCESS;
253 // Otherwise, just block on the blocked_exceptions queue
254 // of the target thread. The queue will get looked at
255 // soon enough: it is checked before and after running a
256 // thread, and during GC.
259 // Avoid race with threadStackOverflow, which may have
260 // just moved this TSO.
261 if (target->what_next == ThreadRelocated) {
263 target = target->link;
266 blockedThrowTo(source,target);
268 return THROWTO_BLOCKED;
275 To establish ownership of this TSO, we need to acquire a
276 lock on the MVar that it is blocked on.
279 StgInfoTable *info USED_IF_THREADS;
281 mvar = (StgMVar *)target->block_info.closure;
283 // ASSUMPTION: tso->block_info must always point to a
284 // closure. In the threaded RTS it does.
285 if (get_itbl(mvar)->type != MVAR) goto retry;
287 info = lockClosure((StgClosure *)mvar);
// With the MVar locked, re-check that the target hasn't been relocated
// and is still blocked on this very MVar.
289 if (target->what_next == ThreadRelocated) {
290 target = target->link;
291 unlockClosure((StgClosure *)mvar,info);
294 // we have the MVar, let's check whether the thread
295 // is still blocked on the same MVar.
296 if (target->why_blocked != BlockedOnMVar
297 || (StgMVar *)target->block_info.closure != mvar) {
298 unlockClosure((StgClosure *)mvar, info);
// Target is blocking exceptions (and not interruptible): fall back to
// queuing on its blocked_exceptions queue.
302 if ((target->flags & TSO_BLOCKEX) &&
303 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
304 lockClosure((StgClosure *)target);
305 blockedThrowTo(source,target);
306 unlockClosure((StgClosure *)mvar, info);
308 return THROWTO_BLOCKED; // caller releases TSO
// We own it: unhook from the MVar, raise eagerly, and wake it up.
310 removeThreadFromMVarQueue(mvar, target);
311 raiseAsync(cap, target, exception, rtsFalse, NULL);
312 unblockOne(cap, target);
313 unlockClosure((StgClosure *)mvar, info);
314 return THROWTO_SUCCESS;
318 case BlockedOnBlackHole:
// The black-hole queue is protected by sched_mutex, not by a
// per-closure lock.
320 ACQUIRE_LOCK(&sched_mutex);
321 // double checking the status after the memory barrier:
322 if (target->why_blocked != BlockedOnBlackHole) {
323 RELEASE_LOCK(&sched_mutex);
327 if (target->flags & TSO_BLOCKEX) {
329 blockedThrowTo(source,target);
330 RELEASE_LOCK(&sched_mutex);
332 return THROWTO_BLOCKED; // caller releases TSO
334 removeThreadFromQueue(&blackhole_queue, target);
335 raiseAsync(cap, target, exception, rtsFalse, NULL);
336 unblockOne(cap, target);
337 RELEASE_LOCK(&sched_mutex);
338 return THROWTO_SUCCESS;
342 case BlockedOnException:
348 To obtain exclusive access to a BlockedOnException thread,
349 we must call lockClosure() on the TSO on which it is blocked.
350 Since the TSO might change underneath our feet, after we
351 call lockClosure() we must check that
353 (a) the closure we locked is actually a TSO
354 (b) the original thread is still BlockedOnException,
355 (c) the original thread is still blocked on the TSO we locked
356 and (d) the target thread has not been relocated.
358 We synchronise with threadStackOverflow() (which relocates
359 threads) using lockClosure()/unlockClosure().
361 target2 = target->block_info.tso;
363 info = lockClosure((StgClosure *)target2);
364 if (info != &stg_TSO_info) {
365 unlockClosure((StgClosure *)target2, info);
368 if (target->what_next == ThreadRelocated) {
369 target = target->link;
373 if (target2->what_next == ThreadRelocated) {
374 target->block_info.tso = target2->link;
378 if (target->why_blocked != BlockedOnException
379 || target->block_info.tso != target2) {
385 Now we have exclusive rights to the target TSO...
387 If it is blocking exceptions, add the source TSO to its
388 blocked_exceptions queue. Otherwise, raise the exception.
390 if ((target->flags & TSO_BLOCKEX) &&
391 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
393 blockedThrowTo(source,target);
396 return THROWTO_BLOCKED;
398 removeThreadFromQueue(&target2->blocked_exceptions, target);
399 raiseAsync(cap, target, exception, rtsFalse, NULL);
400 unblockOne(cap, target);
402 return THROWTO_SUCCESS;
408 // Unblocking BlockedOnSTM threads requires the TSO to be
409 // locked; see STM.c:unpark_tso().
410 if (target->why_blocked != BlockedOnSTM) {
413 if ((target->flags & TSO_BLOCKEX) &&
414 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
415 blockedThrowTo(source,target);
417 return THROWTO_BLOCKED;
419 raiseAsync(cap, target, exception, rtsFalse, NULL);
420 unblockOne(cap, target);
422 return THROWTO_SUCCESS;
426 case BlockedOnCCall_NoUnblockExc:
427 // I don't think it's possible to acquire ownership of a
428 // BlockedOnCCall thread. We just assume that the target
429 // thread is blocking exceptions, and block on its
430 // blocked_exceptions queue.
432 blockedThrowTo(source,target);
434 return THROWTO_BLOCKED;
// NOTE(review): "THREADEDED_RTS" below is a typo for "THREADED_RTS",
// which makes this #ifndef unconditionally true regardless of the
// build mode.  Should be fixed upstream.
436 #ifndef THREADEDED_RTS
440 #if defined(mingw32_HOST_OS)
441 case BlockedOnDoProc:
443 if ((target->flags & TSO_BLOCKEX) &&
444 ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
445 blockedThrowTo(source,target);
446 return THROWTO_BLOCKED;
448 removeFromQueues(cap,target);
449 raiseAsync(cap, target, exception, rtsFalse, NULL);
450 return THROWTO_SUCCESS;
455 barf("throwTo: unrecognised why_blocked value");
460 // Block a TSO on another TSO's blocked_exceptions queue.
461 // Precondition: we hold an exclusive lock on the target TSO (this is
462 // complex to achieve as there's no single lock on a TSO; see
// Park `source` on `target`'s blocked_exceptions queue and mark it as
// BlockedOnException.  The write barrier guarantees that any thread
// observing why_blocked == BlockedOnException also sees the fields
// written before it.
465 blockedThrowTo (StgTSO *source, StgTSO *target)
467 debugTrace(DEBUG_sched, "throwTo: blocking on thread %lu", (unsigned long)target->id);
468 source->link = target->blocked_exceptions;
469 target->blocked_exceptions = source;
470 dirtyTSO(target); // we modified the blocked_exceptions queue
472 source->block_info.tso = target;
473 write_barrier(); // throwTo_exception *must* be visible if BlockedOnException is.
474 source->why_blocked = BlockedOnException;
// Callback for the scheduler after throwTo returned THROWTO_BLOCKED:
// releases the lock that throwTo left held on the target TSO.  Takes
// void* so it can be passed around as an opaque continuation.
480 throwToReleaseTarget (void *tso)
482 unlockTSO((StgTSO *)tso);
486 /* -----------------------------------------------------------------------------
487 Waking up threads blocked in throwTo
489 There are two ways to do this: maybePerformBlockedException() will
490 perform the throwTo() for the thread at the head of the queue
491 immediately, and leave the other threads on the queue.
492 maybePerformBlockedException() also checks the TSO_BLOCKEX flag
493 before raising an exception.
495 awakenBlockedExceptionQueue() will wake up all the threads in the
496 queue, but not perform any throwTo() immediately. This might be
497 more appropriate when the target thread is the one actually running
500 Returns: non-zero if an exception was raised, zero otherwise.
501 -------------------------------------------------------------------------- */
// If `tso` has pending blocked exceptions and is currently willing to
// receive them (not blocking exceptions, or blocking interruptibly
// while in an interruptible state), raise the exception from the first
// thread on the queue and wake that thread.  Returns non-zero iff an
// exception was raised (per the block comment above).
504 maybePerformBlockedException (Capability *cap, StgTSO *tso)
508 if (tso->blocked_exceptions != END_TSO_QUEUE
509 && ((tso->flags & TSO_BLOCKEX) == 0
510 || ((tso->flags & TSO_INTERRUPTIBLE) && interruptible(tso)))) {
512 // Lock the TSO, this gives us exclusive access to the queue
515 // Check the queue again; it might have changed before we
517 if (tso->blocked_exceptions == END_TSO_QUEUE) {
522 // We unblock just the first thread on the queue, and perform
523 // its throw immediately.
524 source = tso->blocked_exceptions;
525 performBlockedException(cap, source, tso);
526 tso->blocked_exceptions = unblockOne_(cap, source,
527 rtsFalse/*no migrate*/);
// Wake every thread on `tso`'s blocked_exceptions queue without
// performing any of their throwTo()s; the woken threads will retry
// their throws themselves.  Clears the queue.
535 awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
537 if (tso->blocked_exceptions != END_TSO_QUEUE) {
539 awakenBlockedQueue(cap, tso->blocked_exceptions);
540 tso->blocked_exceptions = END_TSO_QUEUE;
// Raise, in `target`, the exception that the blocked thread `source`
// was trying to throw.  `source` must be BlockedOnException on
// `target`, with a stg_block_throwto frame on top of its stack; the
// exception closure is retrieved from that frame (sp[2]).
546 performBlockedException (Capability *cap, StgTSO *source, StgTSO *target)
548 StgClosure *exception;
550 ASSERT(source->why_blocked == BlockedOnException);
551 ASSERT(source->block_info.tso->id == target->id);
552 ASSERT(source->sp[0] == (StgWord)&stg_block_throwto_info);
553 ASSERT(((StgTSO *)source->sp[1])->id == target->id);
554 // check ids not pointers, because the thread might be relocated
556 exception = (StgClosure *)source->sp[2];
557 throwToSingleThreaded(cap, target, exception);
561 /* -----------------------------------------------------------------------------
562 Remove a thread from blocking queues.
564 This is for use when we raise an exception in another thread, which
566 This has nothing to do with the UnblockThread event in GranSim. -- HWL
567 -------------------------------------------------------------------------- */
569 #if defined(GRAN) || defined(PARALLEL_HASKELL)
571 NB: only the type of the blocking queue is different in GranSim and GUM
572 the operations on the queue-elements are the same
573 long live polymorphism!
575 Locks: sched_mutex is held upon entry and exit.
// GranSim/GUM variant of removeFromQueues (see #if at the top of this
// section): walk the relevant blocking queue as a list of
// StgBlockingQueueElement, unlink `tso`, then mark it runnable and push
// it on the run queue.  sched_mutex is held on entry and exit (per the
// comment above).
579 removeFromQueues(Capability *cap, StgTSO *tso)
581 StgBlockingQueueElement *t, **last;
583 switch (tso->why_blocked) {
586 return; /* not blocked */
589 // Be careful: nothing to do here! We tell the scheduler that the thread
590 // is runnable and we leave it to the stack-walking code to abort the
591 // transaction while unwinding the stack. We should perhaps have a debugging
592 // test to make sure that this really happens and that the 'zombie' transaction
593 // does not get committed.
597 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
599 StgBlockingQueueElement *last_tso = END_BQ_QUEUE;
600 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
// Unlink tso from the MVar's singly-linked queue, fixing up the tail
// pointer if tso was the last element.
602 last = (StgBlockingQueueElement **)&mvar->head;
603 for (t = (StgBlockingQueueElement *)mvar->head;
605 last = &t->link, last_tso = t, t = t->link) {
606 if (t == (StgBlockingQueueElement *)tso) {
607 *last = (StgBlockingQueueElement *)tso->link;
608 if (mvar->tail == tso) {
609 mvar->tail = (StgTSO *)last_tso;
614 barf("removeFromQueues (MVAR): TSO not found");
617 case BlockedOnBlackHole:
618 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
620 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
622 last = &bq->blocking_queue;
623 for (t = bq->blocking_queue;
625 last = &t->link, t = t->link) {
626 if (t == (StgBlockingQueueElement *)tso) {
627 *last = (StgBlockingQueueElement *)tso->link;
631 barf("removeFromQueues (BLACKHOLE): TSO not found");
634 case BlockedOnException:
636 StgTSO *target = tso->block_info.tso;
638 ASSERT(get_itbl(target)->type == TSO);
640 while (target->what_next == ThreadRelocated) {
// NOTE(review): 'target2' is not declared anywhere in this scope —
// this looks like a typo for 'target->link' (compare the threaded
// variant of this loop later in the file, which uses target->link).
// As written this would not compile when this variant is built.
641 target = target2->link;
642 ASSERT(get_itbl(target)->type == TSO);
645 last = (StgBlockingQueueElement **)&target->blocked_exceptions;
646 for (t = (StgBlockingQueueElement *)target->blocked_exceptions;
648 last = &t->link, t = t->link) {
649 ASSERT(get_itbl(t)->type == TSO);
650 if (t == (StgBlockingQueueElement *)tso) {
651 *last = (StgBlockingQueueElement *)tso->link;
655 barf("removeFromQueues (Exception): TSO not found");
660 #if defined(mingw32_HOST_OS)
661 case BlockedOnDoProc:
664 /* take TSO off blocked_queue */
665 StgBlockingQueueElement *prev = NULL;
666 for (t = (StgBlockingQueueElement *)blocked_queue_hd; t != END_BQ_QUEUE;
667 prev = t, t = t->link) {
668 if (t == (StgBlockingQueueElement *)tso) {
670 blocked_queue_hd = (StgTSO *)t->link;
671 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
672 blocked_queue_tl = END_TSO_QUEUE;
675 prev->link = t->link;
676 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
677 blocked_queue_tl = (StgTSO *)prev;
680 #if defined(mingw32_HOST_OS)
681 /* (Cooperatively) signal that the worker thread should abort
684 abandonWorkRequest(tso->block_info.async_result->reqID);
689 barf("removeFromQueues (I/O): TSO not found");
694 /* take TSO off sleeping_queue */
695 StgBlockingQueueElement *prev = NULL;
696 for (t = (StgBlockingQueueElement *)sleeping_queue; t != END_BQ_QUEUE;
697 prev = t, t = t->link) {
698 if (t == (StgBlockingQueueElement *)tso) {
700 sleeping_queue = (StgTSO *)t->link;
702 prev->link = t->link;
707 barf("removeFromQueues (delay): TSO not found");
711 barf("removeFromQueues");
// Common exit: detach tso from any queue bookkeeping and make it
// runnable again.
715 tso->link = END_TSO_QUEUE;
716 tso->why_blocked = NotBlocked;
717 tso->block_info.closure = NULL;
718 pushOnRunQueue(cap,tso);
// Standard (non-GranSim) variant of removeFromQueues: dispatch on
// why_blocked, unlink `tso` from the corresponding queue via the
// removeThreadFrom*Queue helpers, then mark it runnable and append it
// to this capability's run queue.
722 removeFromQueues(Capability *cap, StgTSO *tso)
724 switch (tso->why_blocked) {
730 // Be careful: nothing to do here! We tell the scheduler that the
731 // thread is runnable and we leave it to the stack-walking code to
732 // abort the transaction while unwinding the stack. We should
733 // perhaps have a debugging test to make sure that this really
734 // happens and that the 'zombie' transaction does not get
739 removeThreadFromMVarQueue((StgMVar *)tso->block_info.closure, tso);
742 case BlockedOnBlackHole:
743 removeThreadFromQueue(&blackhole_queue, tso);
746 case BlockedOnException:
748 StgTSO *target = tso->block_info.tso;
750 // NO: when called by threadPaused(), we probably have this
751 // TSO already locked (WHITEHOLEd) because we just placed
752 // ourselves on its queue.
753 // ASSERT(get_itbl(target)->type == TSO);
755 while (target->what_next == ThreadRelocated) {
756 target = target->link;
759 removeThreadFromQueue(&target->blocked_exceptions, tso);
763 #if !defined(THREADED_RTS)
766 #if defined(mingw32_HOST_OS)
767 case BlockedOnDoProc:
769 removeThreadFromDeQueue(&blocked_queue_hd, &blocked_queue_tl, tso);
770 #if defined(mingw32_HOST_OS)
771 /* (Cooperatively) signal that the worker thread should abort
774 abandonWorkRequest(tso->block_info.async_result->reqID);
779 removeThreadFromQueue(&sleeping_queue, tso);
784 barf("removeFromQueues");
// Common exit: clear the blocking state and make the thread runnable
// on this capability.
788 tso->link = END_TSO_QUEUE;
789 tso->why_blocked = NotBlocked;
790 tso->block_info.closure = NULL;
791 appendToRunQueue(cap,tso);
793 // We might have just migrated this TSO to our Capability:
795 tso->bound->cap = cap;
801 /* -----------------------------------------------------------------------------
804 * The following function implements the magic for raising an
805 * asynchronous exception in an existing thread.
807 * We first remove the thread from any queue on which it might be
808 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
810 * We strip the stack down to the innermost CATCH_FRAME, building
811 * thunks in the heap for all the active computations, so they can
812 * be restarted if necessary. When we reach a CATCH_FRAME, we build
813 * an application of the handler to the exception, and push it on
814 * the top of the stack.
816 * How exactly do we save all the active computations? We create an
817 * AP_STACK for every UpdateFrame on the stack. Entering one of these
818 * AP_STACKs pushes everything from the corresponding update frame
819 * upwards onto the stack. (Actually, it pushes everything up to the
820 next update frame plus a pointer to the next AP_STACK object.)
821 * Entering the next AP_STACK object pushes more onto the stack until we
822 * reach the last AP_STACK object - at which point the stack should look
823 * exactly as it did when we killed the TSO and we can continue
824 * execution by entering the closure on top of the stack.
826 * We can also kill a thread entirely - this happens if either (a) the
827 * exception passed to raiseAsync is NULL, or (b) there's no
828 * CATCH_FRAME on the stack. In either case, we strip the entire
829 * stack and replace the thread with a zombie.
831 * ToDo: in THREADED_RTS mode, this function is only safe if either
832 * (a) we hold all the Capabilities (eg. in GC, or if there is only
833 * one Capability), or (b) we own the Capability that the TSO is
834 * currently blocked on or on the run queue of.
836 * -------------------------------------------------------------------------- */
839 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
840 rtsBool stop_at_atomically, StgPtr stop_here)
842 StgRetInfoTable *info;
846 debugTrace(DEBUG_sched,
847 "raising exception in thread %ld.", (long)tso->id);
849 // mark it dirty; we're about to change its stack.
854 // ASSUMES: the thread is not already complete or dead. Upper
855 // layers should deal with that.
856 ASSERT(tso->what_next != ThreadComplete && tso->what_next != ThreadKilled);
858 // The stack freezing code assumes there's a closure pointer on
859 // the top of the stack, so we have to arrange that this is the case...
861 if (sp[0] == (W_)&stg_enter_info) {
865 sp[0] = (W_)&stg_dummy_ret_closure;
869 while (stop_here == NULL || frame < stop_here) {
871 // 1. Let the top of the stack be the "current closure"
873 // 2. Walk up the stack until we find either an UPDATE_FRAME or a
876 // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
877 // current closure applied to the chunk of stack up to (but not
878 // including) the update frame. This closure becomes the "current
879 // closure". Go back to step 2.
881 // 4. If it's a CATCH_FRAME, then leave the exception handler on
882 // top of the stack applied to the exception.
884 // 5. If it's a STOP_FRAME, then kill the thread.
886 // NB: if we pass an ATOMICALLY_FRAME then abort the associated
889 info = get_ret_itbl((StgClosure *)frame);
891 switch (info->i.type) {
898 // First build an AP_STACK consisting of the stack chunk above the
899 // current update frame, with the top word on the stack as the
902 words = frame - sp - 1;
903 ap = (StgAP_STACK *)allocateLocal(cap,AP_STACK_sizeW(words));
906 ap->fun = (StgClosure *)sp[0];
908 for(i=0; i < (nat)words; ++i) {
909 ap->payload[i] = (StgClosure *)*sp++;
912 SET_HDR(ap,&stg_AP_STACK_info,
913 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
914 TICK_ALLOC_UP_THK(words+1,0);
916 //IF_DEBUG(scheduler,
917 // debugBelch("sched: Updating ");
918 // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
919 // debugBelch(" with ");
920 // printObj((StgClosure *)ap);
923 // Replace the updatee with an indirection
925 // Warning: if we're in a loop, more than one update frame on
926 // the stack may point to the same object. Be careful not to
927 // overwrite an IND_OLDGEN in this case, because we'll screw
928 // up the mutable lists. To be on the safe side, don't
929 // overwrite any kind of indirection at all. See also
930 // threadSqueezeStack in GC.c, where we have to make a similar
933 if (!closure_IND(((StgUpdateFrame *)frame)->updatee)) {
934 // revert the black hole
935 UPD_IND_NOLOCK(((StgUpdateFrame *)frame)->updatee,
938 sp += sizeofW(StgUpdateFrame) - 1;
939 sp[0] = (W_)ap; // push onto stack
941 continue; //no need to bump frame
946 // We've stripped the entire stack, the thread is now dead.
947 tso->what_next = ThreadKilled;
948 tso->sp = frame + sizeofW(StgStopFrame);
953 // If we find a CATCH_FRAME, and we've got an exception to raise,
954 // then build the THUNK raise(exception), and leave it on
955 // top of the CATCH_FRAME ready to enter.
959 StgCatchFrame *cf = (StgCatchFrame *)frame;
963 if (exception == NULL) break;
965 // we've got an exception to raise, so let's pass it to the
966 // handler in this frame.
968 raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
969 TICK_ALLOC_SE_THK(1,0);
970 SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
971 raise->payload[0] = exception;
973 // throw away the stack from Sp up to the CATCH_FRAME.
977 /* Ensure that async exceptions are blocked now, so we don't get
978 * a surprise exception before we get around to executing the
981 tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
983 /* Put the newly-built THUNK on top of the stack, ready to execute
984 * when the thread restarts.
987 sp[-1] = (W_)&stg_enter_info;
989 tso->what_next = ThreadRunGHC;
990 IF_DEBUG(sanity, checkTSO(tso));
994 case ATOMICALLY_FRAME:
995 if (stop_at_atomically) {
996 ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
997 stmCondemnTransaction(cap, tso -> trec);
1001 // R1 is not a register: the return convention for IO in
1002 // this case puts the return value on the stack, so we
1003 // need to set up the stack to return to the atomically
1004 // frame properly...
1005 tso->sp = frame - 2;
1006 tso->sp[1] = (StgWord) &stg_NO_FINALIZER_closure; // why not?
1007 tso->sp[0] = (StgWord) &stg_ut_1_0_unreg_info;
1009 tso->what_next = ThreadRunGHC;
1012 // Not stop_at_atomically... fall through and abort the
1015 case CATCH_RETRY_FRAME:
1016 // IF we find an ATOMICALLY_FRAME then we abort the
1017 // current transaction and propagate the exception. In
1018 // this case (unlike ordinary exceptions) we do not care
1019 // whether the transaction is valid or not because its
1020 // possible validity cannot have caused the exception
1021 // and will not be visible after the abort.
1024 StgTRecHeader *trec = tso -> trec;
1025 StgTRecHeader *outer = stmGetEnclosingTRec(trec);
1026 debugTrace(DEBUG_stm,
1027 "found atomically block delivering async exception");
1028 stmAbortTransaction(cap, trec);
1029 stmFreeAbortedTRec(cap, trec);
1030 tso -> trec = outer;
1038 // move on to the next stack frame
1039 frame += stack_frame_sizeW((StgClosure *)frame);
1042 // if we got here, then we stopped at stop_here
1043 ASSERT(stop_here != NULL);