1 /* -----------------------------------------------------------------------------
2 * $Id: Exception.hc,v 1.7 2000/02/04 11:15:04 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
11 #include "Exception.h"
21 /* -----------------------------------------------------------------------------
24 A thread can request that asynchronous exceptions not be delivered
25 ("blocked") for the duration of an I/O computation. The primitive
27 blockAsyncExceptions# :: IO a -> IO a
29 is used for this purpose. During a blocked section, asynchronous
30 exceptions may be unblocked again temporarily:
32 unblockAsyncExceptions# :: IO a -> IO a
34 Furthermore, asynchronous exceptions are blocked automatically during
35 the execution of an exception handler. Both of these primitives
36 leave a continuation on the stack which reverts to the previous
37 state (blocked or unblocked) on exit.
39 A thread which wants to raise an exception in another thread (using
40 killThread#) must block until the target thread is ready to receive
41 it. The action of unblocking exceptions in a thread will release all
42 the threads waiting to deliver exceptions to that thread.
44 -------------------------------------------------------------------------- */
/* blockAsyncExceptionszh_fast: implements the blockAsyncExceptions#
 * primop.  Puts the current thread into the "async exceptions
 * blocked" state, then enters the IO action in R1.
 */
FN_(blockAsyncExceptionszh_fast)
    /* Args: R1 :: IO a */
    STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );
    /* blocked_exceptions == NULL means exceptions are *not* blocked;
     * a non-NULL value (END_TSO_QUEUE, possibly with waiting TSOs
     * linked on) means they are.  Only act if not already blocked. */
    if (CurrentTSO->blocked_exceptions == NULL) {
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
      /* avoid growing the stack unnecessarily */
      /* NOTE(review): adjacent block-revert/unblock-revert frames
       * cancel out here; parts of this branch (the else arm and stack
       * adjustments) are elided in this view — confirm against the
       * full source. */
      if (Sp[0] == (W_)&blockAsyncExceptionszh_ret_info) {
        Sp[0] = (W_)&unblockAsyncExceptionszh_ret_info;
    /* tail-call the IO action's entry code */
    JMP_(GET_ENTRY(R1.cl));
/* Stack frame left by blockAsyncExceptions#: on return, revert the
 * thread to the "exceptions unblocked" state and wake up any threads
 * that were waiting to deliver an exception to us. */
INFO_TABLE_SRT_BITMAP(unblockAsyncExceptionszh_ret_info, unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(unblockAsyncExceptionszh_ret_entry)
    ASSERT(CurrentTSO->blocked_exceptions != NULL);
    /* NOTE(review): the three awakenBlockedQueue calls below are
     * GRAN / PAR / default alternatives selected by preprocessor
     * conditionals elided in this view — only one is compiled in;
     * confirm against the full source. */
    awakenBlockedQueue(CurrentTSO->blocked_exceptions,
                       CurrentTSO->block_info.closure);
    // is CurrentTSO->block_info.closure always set to the node
    // holding the blocking queue !? -- HWL
    awakenBlockedQueue(CurrentTSO->blocked_exceptions,
                       CurrentTSO->block_info.closure);
    awakenBlockedQueue(CurrentTSO->blocked_exceptions);
    /* NULL == exceptions unblocked again */
    CurrentTSO->blocked_exceptions = NULL;
    JMP_(ENTRY_CODE(Sp[0]));
/* unblockAsyncExceptionszh_fast: implements the unblockAsyncExceptions#
 * primop.  Wakes up any threads waiting to deliver an exception to us,
 * marks exceptions as unblocked, then enters the IO action in R1.
 */
FN_(unblockAsyncExceptionszh_fast)
    /* Args: R1 :: IO a */
    STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );
    /* only act if exceptions are currently blocked */
    if (CurrentTSO->blocked_exceptions != NULL) {
      /* NOTE(review): the three awakenBlockedQueue calls below are
       * GRAN / PAR / default alternatives selected by preprocessor
       * conditionals elided in this view — only one is compiled in;
       * confirm against the full source. */
      awakenBlockedQueue(CurrentTSO->blocked_exceptions,
                         CurrentTSO->block_info.closure);
      // is CurrentTSO->block_info.closure always set to the node
      // holding the blocking queue !? -- HWL
      awakenBlockedQueue(CurrentTSO->blocked_exceptions,
                         CurrentTSO->block_info.closure);
      awakenBlockedQueue(CurrentTSO->blocked_exceptions);
      /* NULL == exceptions unblocked */
      CurrentTSO->blocked_exceptions = NULL;
      /* avoid growing the stack unnecessarily */
      /* NOTE(review): adjacent unblock-revert/block-revert frames
       * cancel out here; parts of this branch are elided in this
       * view — confirm against the full source. */
      if (Sp[0] == (W_)&unblockAsyncExceptionszh_ret_info) {
        Sp[0] = (W_)&blockAsyncExceptionszh_ret_info;
    /* tail-call the IO action's entry code */
    JMP_(GET_ENTRY(R1.cl));
/* Stack frame left by unblockAsyncExceptions#: on return, revert the
 * thread to the "exceptions blocked" state. */
INFO_TABLE_SRT_BITMAP(blockAsyncExceptionszh_ret_info, blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(blockAsyncExceptionszh_ret_entry)
    ASSERT(CurrentTSO->blocked_exceptions == NULL);
    /* END_TSO_QUEUE (non-NULL) == exceptions blocked */
    CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
    JMP_(ENTRY_CODE(Sp[0]));
/* killThreadzh_fast: implements killThread# — raise an asynchronous
 * exception (R2) in a target thread (R1).  If the target currently
 * has async exceptions blocked, the current thread enqueues itself on
 * the target's blocked_exceptions list and blocks until the target
 * unblocks. */
FN_(killThreadzh_fast)
    /* args: R1.p = TSO to kill, R2.p = Exception */

    /* This thread may have been relocated.
     * (see Schedule.c:threadStackOverflow) */
    while (R1.t->whatNext == ThreadRelocated) {

    /* If the target thread is currently blocking async exceptions,
     * we'll have to block until it's ready to accept them. */
    if (R1.t->blocked_exceptions != NULL) {
      /* ToDo (SMP): locking if destination thread is currently
       * ... (comment truncated in this view) */
      /* push ourselves onto the target's blocked_exceptions queue
       * and block on it */
      CurrentTSO->link = R1.t->blocked_exceptions;
      R1.t->blocked_exceptions = CurrentTSO;
      CurrentTSO->why_blocked = BlockedOnException;
      CurrentTSO->block_info.tso = R1.t;
      BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );

    /* Killed threads turn into zombies, which might be garbage
     * collected at a later date. That's why we don't have to
     * explicitly remove them from any queues they might be on. */

    /* We might have killed ourselves. In which case, better be *very*
     * careful. If the exception killed us, then return to the scheduler.
     * If the exception went to a catch frame, we'll just continue from
     * where we were. */
    if (R1.t == CurrentTSO) {
      SaveThreadState();	/* inline! */
      STGCALL2(raiseAsync, R1.t, R2.cl);
      if (CurrentTSO->whatNext == ThreadKilled) {
	R1.w = ThreadYielding;
      if (CurrentTSO->whatNext == ThreadEnterGHC) {
      JMP_(GET_ENTRY(R1.cl));
      /* raiseAsync left us in a state we can't resume from */
      barf("killThreadzh_fast");

    /* killing another thread: deliver the exception directly */
    STGCALL2(raiseAsync, R1.t, R2.cl);
    JMP_(ENTRY_CODE(Sp[0]));
/* -----------------------------------------------------------------------------
   Catch frames
   -------------------------------------------------------------------------- */
/* Template for the entry code of a catch frame: pop the frame off the
 * stack, restore Su to the enclosing update frame, and return to the
 * activation record underneath via `ret'.
 *
 * NOTE(review): two variants of this macro (and of CATCH_FRAME_BITMAP
 * further down) appear below; they are alternatives selected by
 * preprocessor conditionals (registerised vs. unregisterised build)
 * elided in this view — confirm against the full source. */
#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret) \
   Su = ((StgCatchFrame *)Sp)->link; \
   Sp += sizeofW(StgCatchFrame); \

#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret) \
   Su = ((StgCatchFrame *)Sp)->link; \
   Sp += sizeofW(StgCatchFrame) - 1; \

/* one entry point per vectored-return slot, plus the direct return */
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_entry,ENTRY_CODE(Sp[SP_OFF]));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_0_entry,RET_VEC(Sp[SP_OFF],0));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_1_entry,RET_VEC(Sp[SP_OFF],1));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_2_entry,RET_VEC(Sp[SP_OFF],2));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_3_entry,RET_VEC(Sp[SP_OFF],3));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_4_entry,RET_VEC(Sp[SP_OFF],4));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_5_entry,RET_VEC(Sp[SP_OFF],5));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_6_entry,RET_VEC(Sp[SP_OFF],6));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_7_entry,RET_VEC(Sp[SP_OFF],7));

#define CATCH_FRAME_BITMAP 7
#define CATCH_FRAME_BITMAP 3

/* Catch frames are very similar to update frames, but when entering
 * one we just pop the frame off the stack and perform the correct
 * kind of return to the activation record underneath us on the stack. */
VEC_POLY_INFO_TABLE(catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);
259 /* -----------------------------------------------------------------------------
260 * The catch infotable
262 * This should be exactly the same as would be generated by this STG code
264 * catch = {x,h} \n {} -> catch#{x,h}
266 * It is used in deleteThread when reverting blackholes.
267 * -------------------------------------------------------------------------- */
INFO_TABLE(catch_info,catch_entry,2,0,FUN,,EF_,0,0);
/* catch_entry: unpack the two free variables of a `catch' closure —
 * the handler into R2 and the action into R1. */
  R2.cl = payloadCPtr(R1.cl,1); /* h */
  R1.cl = payloadCPtr(R1.cl,0); /* x */
  /* catchzh_fast: implements catch# — push a catch frame recording
   * the handler and the current async-exception state, then enter the
   * action in R1. */
  /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
  STK_CHK_GEN(sizeofW(StgCatchFrame) + 1, R1_PTR | R2_PTR, catchzh_fast, );

  /* Set up the catch frame */
  Sp -= sizeofW(StgCatchFrame);
  fp = (StgCatchFrame *)Sp;
  SET_HDR(fp,(StgInfoTable *)&catch_frame_info,CCCS);
  fp -> handler = R2.cl;
  /* remember whether async exceptions were blocked here, so the state
   * can be restored when the handler is invoked (see raisezh_fast) */
  fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
  /* the catch frame joins the Su chain like an update frame */
  Su = (StgUpdateFrame *)fp;
  TICK_CATCHF_PUSHED();

  /* Push realworld token and enter R1. */
  JMP_(GET_ENTRY(R1.cl));
306 /* -----------------------------------------------------------------------------
307 * The raise infotable
309 * This should be exactly the same as would be generated by this STG code
311 * raise = {err} \n {} -> raise#{err}
313 * It is used in raisezh_fast to update thunks on the update list
314 * -------------------------------------------------------------------------- */
INFO_TABLE(raise_info,raise_entry,1,0,FUN,,EF_,0,0);
/* raise_entry: extract the exception value stored in a `raise'
 * closure into R1. */
  R1.cl = R1.cl->payload[0];
  /* raisezh_fast: implements raise# — walk the activation-record
   * chain looking for a CATCH_FRAME, overwriting thunks under
   * evaluation with a `raise' closure on the way, then invoke the
   * handler with async exceptions blocked. */
  StgClosure *raise_closure;

  /* args : R1 = error */

#if defined(PROFILING)

  /* Debugging tool: on raising an exception, show where we are. */

  /* ToDo: currently this is a hack. Would be much better if
   * the info was only displayed for an *uncaught* exception. */
  if (RtsFlags.ProfFlags.showCCSOnException) {
    STGCALL2(print_ccs,stderr,CCCS);

  /* This closure represents the expression 'raise# E' where E
   * is the exception raised. It is used to overwrite all the
   * thunks which are currently under evaluation.
   */
  raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
                                             sizeofW(StgClosure)+1);
  raise_closure->header.info = &raise_info;
  raise_closure->payload[0] = R1.cl;

  /* dispatch on the type of the current activation record, p */
  switch (get_itbl(p)->type) {
    /* update frame: overwrite the updatee with the raise closure */
    UPD_IND(p->updatee,raise_closure);
    /* seq frame: skip to the enclosing frame */
    p = ((StgSeqFrame *)p)->link;
    /* top of stack with no catch frame: uncaught exception */
    barf("raisezh_fast: STOP_FRAME");
    barf("raisezh_fast: weird activation record");

  /* Ok, p points to the enclosing CATCH_FRAME. Pop everything down to
   * and including this frame, update Su, push R1, and enter the handler.
   */
  Su = ((StgCatchFrame *)p)->link;
  handler = ((StgCatchFrame *)p)->handler;

  Sp = (P_)p + sizeofW(StgCatchFrame);

  /* Restore the blocked/unblocked state for asynchronous exceptions
   * at the CATCH_FRAME.
   *
   * If exceptions were unblocked, arrange that they are unblocked
   * again after executing the handler by pushing an
   * unblockAsyncExceptions_ret stack frame.
   */
  if (! ((StgCatchFrame *)p)->exceptions_blocked) {
    *(--Sp) = (W_)&unblockAsyncExceptionszh_ret_info;

  /* Ensure that async exceptions are blocked when running the handler.
   */
  if (CurrentTSO->blocked_exceptions == NULL) {
    CurrentTSO->blocked_exceptions = END_TSO_QUEUE;

  /* Enter the handler, passing the exception value and a realworld
   * token as arguments.
   */
  JMP_(GET_ENTRY(R1.cl));