1 /* -----------------------------------------------------------------------------
2 * $Id: Exception.hc,v 1.12 2000/04/14 16:47:43 panne Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
11 #include "Exception.h"
20 #if defined(PROFILING)
21 # include "Profiling.h"
24 /* -----------------------------------------------------------------------------
27 A thread can request that asynchronous exceptions not be delivered
28 ("blocked") for the duration of an I/O computation. The primitive
30 blockAsyncExceptions# :: IO a -> IO a
32 is used for this purpose. During a blocked section, asynchronous
33 exceptions may be unblocked again temporarily:
35 unblockAsyncExceptions# :: IO a -> IO a
37 Furthermore, asynchronous exceptions are blocked automatically during
38 the execution of an exception handler. Both of these primitives
39 leave a continuation on the stack which reverts to the previous
40 state (blocked or unblocked) on exit.
42 A thread which wants to raise an exception in another thread (using
43 killThread#) must block until the target thread is ready to receive
44 it. The action of unblocking exceptions in a thread will release all
45 the threads waiting to deliver exceptions to that thread.
47 -------------------------------------------------------------------------- */
/* blockAsyncExceptionszh_fast
 * Entry code for the blockAsyncExceptions# primop
 *    (blockAsyncExceptions# :: IO a -> IO a, see comment block above).
 * Puts the current TSO into the "exceptions blocked" state, arranges for
 * the previous state to be restored on exit, then tail-calls into the
 * IO action in R1.
 * NOTE(review): the original line numbers embedded in this text skip, so
 * closing braces / else-branches of this function are elided from this view.
 */
49 FN_(blockAsyncExceptionszh_fast)
52 /* Args: R1 :: IO a */
53 STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );
/* blocked_exceptions == NULL means "not currently blocked"; enter the
 * blocked state by installing the END_TSO_QUEUE sentinel (an empty queue
 * of threads waiting to deliver exceptions to us). */
55 if (CurrentTSO->blocked_exceptions == NULL) {
56 CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
57 /* avoid growing the stack unnecessarily */
58 if (Sp[0] == (W_)&blockAsyncExceptionszh_ret_info) {
/* NOTE(review): presumably, if the frame on top is the matching
 * block...ret frame we just cancel it, otherwise (this branch) a
 * frame is installed that reverts to the unblocked state on return
 * -- the elided lines would confirm the Sp adjustment. */
62 Sp[0] = (W_)&unblockAsyncExceptionszh_ret_info;
/* Tail-call the IO action's entry code. */
67 JMP_(GET_ENTRY(R1.cl));
/* unblockAsyncExceptionszh_ret
 * Return frame pushed by blockAsyncExceptionszh_fast (and by raisezh_fast):
 * on return it reverts the TSO to the "exceptions unblocked" state and
 * wakes every thread queued up to deliver an exception to this TSO.
 */
71 INFO_TABLE_SRT_BITMAP(unblockAsyncExceptionszh_ret_info, unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
72 FN_(unblockAsyncExceptionszh_ret_entry)
/* We can only be entered from the blocked state. */
75 ASSERT(CurrentTSO->blocked_exceptions != NULL);
/* NOTE(review): the three awakenBlockedQueue call shapes below are
 * presumably alternative GRAN / PAR / default build variants whose
 * #if/#elif/#else guards were elided from this view -- only one is
 * compiled in the full file; do not read them as three sequential calls. */
77 awakenBlockedQueue(CurrentTSO->blocked_exceptions,
78 CurrentTSO->block_info.closure);
80 // is CurrentTSO->block_info.closure always set to the node
81 // holding the blocking queue !? -- HWL
82 awakenBlockedQueue(CurrentTSO->blocked_exceptions,
83 CurrentTSO->block_info.closure);
85 awakenBlockedQueue(CurrentTSO->blocked_exceptions);
/* Back to the unblocked state. */
87 CurrentTSO->blocked_exceptions = NULL;
/* NOTE(review): the two JMP_s are presumably alternatives under an elided
 * #ifdef (different frame layouts); only one return is taken. */
90 JMP_(ENTRY_CODE(Sp[0]));
94 JMP_(ENTRY_CODE(Sp[1]));
/* unblockAsyncExceptionszh_fast
 * Entry code for the unblockAsyncExceptions# primop
 *    (unblockAsyncExceptions# :: IO a -> IO a).
 * Dual of blockAsyncExceptionszh_fast: leaves the blocked state, wakes any
 * threads waiting to deliver exceptions to us, pushes a frame that restores
 * the blocked state on exit, then enters the IO action in R1.
 */
99 FN_(unblockAsyncExceptionszh_fast)
102 /* Args: R1 :: IO a */
103 STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );
/* Only do anything if we are actually in the blocked state. */
105 if (CurrentTSO->blocked_exceptions != NULL) {
/* NOTE(review): as in unblockAsyncExceptionszh_ret_entry, the three
 * awakenBlockedQueue call shapes below are alternative GRAN / PAR /
 * default build variants; the #if guards are elided from this view. */
107 awakenBlockedQueue(CurrentTSO->blocked_exceptions,
108 CurrentTSO->block_info.closure);
110 // is CurrentTSO->block_info.closure always set to the node
111 // holding the blocking queue !? -- HWL
112 awakenBlockedQueue(CurrentTSO->blocked_exceptions,
113 CurrentTSO->block_info.closure);
115 awakenBlockedQueue(CurrentTSO->blocked_exceptions);
/* NULL == unblocked. */
117 CurrentTSO->blocked_exceptions = NULL;
119 /* avoid growing the stack unnecessarily */
120 if (Sp[0] == (W_)&unblockAsyncExceptionszh_ret_info) {
/* Install the frame that re-enters the blocked state on return. */
124 Sp[0] = (W_)&blockAsyncExceptionszh_ret_info;
/* Tail-call the IO action's entry code. */
129 JMP_(GET_ENTRY(R1.cl));
/* blockAsyncExceptionszh_ret
 * Return frame pushed by unblockAsyncExceptionszh_fast: on return it
 * reverts the TSO to the "exceptions blocked" state.
 */
133 INFO_TABLE_SRT_BITMAP(blockAsyncExceptionszh_ret_info, blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
134 FN_(blockAsyncExceptionszh_ret_entry)
/* We can only be entered from the unblocked state. */
137 ASSERT(CurrentTSO->blocked_exceptions == NULL);
138 CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
/* NOTE(review): the two JMP_s are presumably alternatives under an elided
 * #ifdef (different frame layouts); only one return is taken. */
141 JMP_(ENTRY_CODE(Sp[0]));
145 JMP_(ENTRY_CODE(Sp[1]));
/* killThreadzh_fast
 * Entry code for killThread#: deliver the exception in R2 asynchronously
 * to the TSO in R1.  If the target currently has async exceptions blocked
 * (and is not in an interruptible blocking state), the calling thread
 * queues itself on the target's blocked_exceptions list and blocks until
 * the target unblocks.  Killing oneself needs special care: the raise may
 * either kill this thread (return to the scheduler) or land in a catch
 * frame (continue running).
 */
150 FN_(killThreadzh_fast)
153 /* args: R1.p = TSO to kill, R2.p = Exception */
155 /* This thread may have been relocated.
156 * (see Schedule.c:threadStackOverflow)
/* Chase the forwarding link(s) to the TSO's current incarnation. */
158 while (R1.t->what_next == ThreadRelocated) {
162 /* If the target thread is currently blocking async exceptions,
163 * we'll have to block until it's ready to accept them. The
164 * exception is interruptible threads - ie. those that are blocked
167 if (R1.t->blocked_exceptions != NULL && !interruptible(R1.t) ) {
169 /* ToDo (SMP): locking if destination thread is currently
/* Queue ourselves on the target's blocked_exceptions list ... */
172 CurrentTSO->link = R1.t->blocked_exceptions;
173 R1.t->blocked_exceptions = CurrentTSO;
/* ... record why/on whom we are blocked, and yield to the scheduler.
 * BLOCK re-enters killThreadzh_fast when we are woken. */
175 CurrentTSO->why_blocked = BlockedOnException;
176 CurrentTSO->block_info.tso = R1.t;
178 BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
181 /* Killed threads turn into zombies, which might be garbage
182 * collected at a later date. That's why we don't have to
183 * explicitly remove them from any queues they might be on.
186 /* We might have killed ourselves. In which case, better be *very*
187 * careful. If the exception killed us, then return to the scheduler.
188 * If the exception went to a catch frame, we'll just continue from
191 if (R1.t == CurrentTSO) {
192 SaveThreadState(); /* inline! */
/* raiseAsync unwinds our own stack; afterwards inspect what_next to
 * see whether we survived. */
193 STGCALL2(raiseAsync, R1.t, R2.cl);
194 if (CurrentTSO->what_next == ThreadKilled) {
/* We are dead: hand control back to the scheduler. */
195 R1.w = ThreadYielding;
/* A catch frame caught the exception: resume by entering the
 * continuation closure left in R1. */
199 if (CurrentTSO->what_next == ThreadEnterGHC) {
202 JMP_(GET_ENTRY(R1.cl));
/* what_next in any other state here is an RTS invariant violation. */
204 barf("killThreadzh_fast");
/* Killing some other thread: raise on its stack and return normally. */
207 STGCALL2(raiseAsync, R1.t, R2.cl);
210 JMP_(ENTRY_CODE(Sp[0]));
214 /* -----------------------------------------------------------------------------
216 -------------------------------------------------------------------------- */
/* CATCH_FRAME_ENTRY_TEMPLATE(label,ret)
 * Generates the entry code for one return vector of a CATCH_FRAME: restore
 * Su from the frame's saved link, pop the frame off the stack, and perform
 * the given return into the activation record underneath.
 * NOTE(review): two alternative #define bodies follow; presumably they are
 * the #ifdef REG_R1 / #else variants (register- vs. stack-returned result
 * -- note the "- 1" keeping the result word) with the guards elided from
 * this view; only one is compiled in the full file.
 */
219 #define CATCH_FRAME_ENTRY_TEMPLATE(label,ret) \
224 Su = ((StgCatchFrame *)Sp)->link; \
225 Sp += sizeofW(StgCatchFrame); \
230 #define CATCH_FRAME_ENTRY_TEMPLATE(label,ret) \
238 Su = ((StgCatchFrame *)Sp)->link; \
239 Sp += sizeofW(StgCatchFrame) - 1; \
252 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_entry,ENTRY_CODE(Sp[SP_OFF]));
253 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_0_entry,RET_VEC(Sp[SP_OFF],0));
254 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_1_entry,RET_VEC(Sp[SP_OFF],1));
255 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_2_entry,RET_VEC(Sp[SP_OFF],2));
256 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_3_entry,RET_VEC(Sp[SP_OFF],3));
257 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_4_entry,RET_VEC(Sp[SP_OFF],4));
258 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_5_entry,RET_VEC(Sp[SP_OFF],5));
259 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_6_entry,RET_VEC(Sp[SP_OFF],6));
260 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_7_entry,RET_VEC(Sp[SP_OFF],7));
/* NOTE(review): the two CATCH_FRAME_BITMAP values are presumably the
 * #ifdef REG_R1 / #else pair (guards elided); the bitmap describes which
 * words of the frame are pointers for the GC. */
263 #define CATCH_FRAME_BITMAP 7
265 #define CATCH_FRAME_BITMAP 3
268 /* Catch frames are very similar to update frames, but when entering
269 * one we just pop the frame off the stack and perform the correct
270 * kind of return to the activation record underneath us on the stack.
273 VEC_POLY_INFO_TABLE(catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);
275 /* -----------------------------------------------------------------------------
276 * The catch infotable
278 * This should be exactly the same as would be generated by this STG code
280 * catch = {x,h} \n {} -> catch#{x,h}
282 * It is used in deleteThread when reverting blackholes.
283 * -------------------------------------------------------------------------- */
/* Static closure of arity 2 (the two payload words x and h). */
285 INFO_TABLE(catch_info,catch_entry,2,0,FUN,,EF_,0,0);
/* Entry code: unpack the two payload words into the argument registers and
 * (in lines elided from this view) fall through to catchzh_fast. */
289 R2.cl = R1.cl->payload[1]; /* h */
290 R1.cl = R1.cl->payload[0]; /* x */
/* catchzh_fast (its FN_ header line is elided from this view)
 * Entry code for the catch# primop: push a CATCH_FRAME recording the
 * handler and the current async-exception blocking state, make it the
 * top of the Su chain, then enter the IO action m.
 */
300 /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
301 STK_CHK_GEN(sizeofW(StgCatchFrame) + 1, R1_PTR | R2_PTR, catchzh_fast, );
303 /* Set up the catch frame */
304 Sp -= sizeofW(StgCatchFrame);
305 fp = (StgCatchFrame *)Sp;
306 SET_HDR(fp,(StgInfoTable *)&catch_frame_info,CCCS);
307 fp -> handler = R2.cl;
/* Remember whether exceptions were blocked at the catch, so raisezh_fast
 * can restore that state before running the handler. */
308 fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
/* The new frame becomes the head of the update/catch frame chain. */
310 Su = (StgUpdateFrame *)fp;
311 TICK_CATCHF_PUSHED();
313 /* Push realworld token and enter R1. */
317 JMP_(GET_ENTRY(R1.cl));
322 /* -----------------------------------------------------------------------------
323 * The raise infotable
325 * This should be exactly the same as would be generated by this STG code
327 * raise = {err} \n {} -> raise#{err}
329 * It is used in raisezh_fast to update thunks on the update list
330 * -------------------------------------------------------------------------- */
/* Closure of arity 1: payload[0] is the exception value. */
332 INFO_TABLE(raise_info,raise_entry,1,0,FUN,,EF_,0,0);
/* Entry code: load the exception and (in lines elided from this view)
 * fall through to raisezh_fast. */
336 R1.cl = R1.cl->payload[0];
/* raisezh_fast (its FN_ header line is elided from this view)
 * Entry code for raise#: walk the Su chain from the innermost frame,
 * overwriting every thunk under evaluation with a "raise# E" closure,
 * until a CATCH_FRAME is found; then pop down to it, restore the
 * async-exception state it recorded, and enter its handler with the
 * exception value.
 */
345 StgClosure *raise_closure;
347 /* args : R1 = error */
350 #if defined(PROFILING)
352 /* Debugging tool: on raising an exception, show where we are. */
354 /* ToDo: currently this is a hack. Would be much better if
355 * the info was only displayed for an *uncaught* exception.
357 if (RtsFlags.ProfFlags.showCCSOnException) {
358 STGCALL2(print_ccs,stderr,CCCS);
365 /* This closure represents the expression 'raise# E' where E
366 * is the exception raised. It is used to overwrite all the
367 * thunks which are currently under evaluation.
/* One header word plus one payload word for the exception. */
369 raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
370 sizeofW(StgClosure)+1);
371 raise_closure->header.info = &raise_info;
372 raise_closure->payload[0] = R1.cl;
/* Walk the frame chain (loop header elided from this view): update
 * frames get their updatee overwritten with the raise closure, seq
 * frames are skipped, a bare STOP_FRAME means the exception escaped
 * the top of the stack. */
376 switch (get_itbl(p)->type) {
379 UPD_IND(p->updatee,raise_closure);
384 p = ((StgSeqFrame *)p)->link;
392 barf("raisezh_fast: STOP_FRAME");
395 barf("raisezh_fast: weird activation record");
402 /* Ok, p points to the enclosing CATCH_FRAME. Pop everything down to
403 * and including this frame, update Su, push R1, and enter the handler.
405 Su = ((StgCatchFrame *)p)->link;
406 handler = ((StgCatchFrame *)p)->handler;
408 Sp = (P_)p + sizeofW(StgCatchFrame);
410 /* Restore the blocked/unblocked state for asynchronous exceptions
411 * at the CATCH_FRAME.
413 * If exceptions were unblocked, arrange that they are unblocked
414 * again after executing the handler by pushing an
415 * unblockAsyncExceptions_ret stack frame.
417 if (! ((StgCatchFrame *)p)->exceptions_blocked) {
418 *(--Sp) = (W_)&unblockAsyncExceptionszh_ret_info;
421 /* Ensure that async exceptions are blocked when running the handler.
423 if (CurrentTSO->blocked_exceptions == NULL) {
424 CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
427 /* Enter the handler, passing the exception value and a realworld
428 * token as arguments.
435 JMP_(GET_ENTRY(R1.cl));