1 /* -----------------------------------------------------------------------------
2 * $Id: Exception.hc,v 1.23 2001/12/05 17:35:15 sewardj Exp $
4 * (c) The GHC Team, 1998-2000
8 * ---------------------------------------------------------------------------*/
12 #include "Exception.h"
21 #if defined(PROFILING)
22 # include "Profiling.h"
25 /* -----------------------------------------------------------------------------
28 A thread can request that asynchronous exceptions not be delivered
29 ("blocked") for the duration of an I/O computation. The primitive
31 blockAsyncExceptions# :: IO a -> IO a
33 is used for this purpose. During a blocked section, asynchronous
34 exceptions may be unblocked again temporarily:
36 unblockAsyncExceptions# :: IO a -> IO a
38 Furthermore, asynchronous exceptions are blocked automatically during
39 the execution of an exception handler. Both of these primitives
40 leave a continuation on the stack which reverts to the previous
41 state (blocked or unblocked) on exit.
43 A thread which wants to raise an exception in another thread (using
44 killThread#) must block until the target thread is ready to receive
45 it. The action of unblocking exceptions in a thread will release all
46 the threads waiting to deliver exceptions to that thread.
48 -------------------------------------------------------------------------- */
/* blockAsyncExceptions# :: IO a -> IO a
 *
 * Enters the "blocked" state for asynchronous exceptions: flips the
 * current TSO's blocked_exceptions field from NULL (unblocked) to
 * END_TSO_QUEUE (blocked, nobody waiting yet), then tail-calls the IO
 * action in R1.  If the frame on top of the stack is already the
 * matching unblock return frame, the two cancel out instead of pushing
 * a new frame ("avoid growing the stack unnecessarily").
 *
 * NOTE(review): the embedded line numbers jump (50, 53, 54, 56, ...),
 * so this extract is missing interior lines (FB_/FE_ brackets, closing
 * braces, the else-branch that pushes the return frame).  Code is left
 * byte-for-byte as found; do not treat it as complete.
 */
50 FN_(blockAsyncExceptionszh_fast)
53 /* Args: R1 :: IO a */
54 STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );
56 if (CurrentTSO->blocked_exceptions == NULL) {
57 CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
58 /* avoid growing the stack unnecessarily */
59 if (Sp[0] == (W_)&stg_blockAsyncExceptionszh_ret_info) {
63 Sp[0] = (W_)&stg_unblockAsyncExceptionszh_ret_info;
68 JMP_(GET_ENTRY(R1.cl));
/* Return frame pushed by blockAsyncExceptions#: on return it reverts
 * the TSO to the unblocked state, waking every thread queued on
 * blocked_exceptions that is waiting to deliver an exception here,
 * then resets blocked_exceptions to NULL and returns to the frame
 * underneath.
 *
 * NOTE(review): three alternative awakenBlockedQueue calls and two
 * alternative JMP_ lines appear below — in the complete file these
 * presumably sit in different preprocessor branches (GRAN / PAR /
 * default) whose #if lines fell into the gaps of this extract.
 */
72 INFO_TABLE_SRT_BITMAP(stg_unblockAsyncExceptionszh_ret_info, stg_unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
73 FN_(stg_unblockAsyncExceptionszh_ret_entry)
76 ASSERT(CurrentTSO->blocked_exceptions != NULL);
78 awakenBlockedQueue(CurrentTSO->blocked_exceptions,
81 /* we don't need node info (2nd arg) in this case
82 (note that CurrentTSO->block_info.closure isn't always set) */
83 awakenBlockedQueue(CurrentTSO->blocked_exceptions,
86 awakenBlockedQueue(CurrentTSO->blocked_exceptions);
88 CurrentTSO->blocked_exceptions = NULL;
91 JMP_(ENTRY_CODE(Sp[0]));
95 JMP_(ENTRY_CODE(Sp[1]));
/* unblockAsyncExceptions# :: IO a -> IO a
 *
 * Mirror image of blockAsyncExceptions#: if the TSO currently blocks
 * async exceptions, wake all threads queued to deliver one, reset
 * blocked_exceptions to NULL (unblocked), and enter the IO action in
 * R1.  The stack-growth optimisation cancels a directly-adjacent
 * unblock return frame instead of pushing a block frame on top of it.
 *
 * NOTE(review): lossy extract — duplicated awakenBlockedQueue calls
 * below presumably belong to stripped GRAN / PAR / default
 * preprocessor branches; closing braces and the else-branch that
 * pushes the return frame are missing from view.
 */
100 FN_(unblockAsyncExceptionszh_fast)
103 /* Args: R1 :: IO a */
104 STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );
106 if (CurrentTSO->blocked_exceptions != NULL) {
108 awakenBlockedQueue(CurrentTSO->blocked_exceptions,
109 CurrentTSO->block_info.closure);
111 // is CurrentTSO->block_info.closure always set to the node
112 // holding the blocking queue !? -- HWL
113 awakenBlockedQueue(CurrentTSO->blocked_exceptions,
114 CurrentTSO->block_info.closure);
116 awakenBlockedQueue(CurrentTSO->blocked_exceptions);
118 CurrentTSO->blocked_exceptions = NULL;
120 /* avoid growing the stack unnecessarily */
121 if (Sp[0] == (W_)&stg_unblockAsyncExceptionszh_ret_info) {
125 Sp[0] = (W_)&stg_blockAsyncExceptionszh_ret_info;
130 JMP_(GET_ENTRY(R1.cl));
/* Return frame pushed by unblockAsyncExceptions#: on return it
 * re-enters the blocked state (blocked_exceptions := END_TSO_QUEUE)
 * and returns to the frame underneath.
 *
 * NOTE(review): the two JMP_ alternatives (Sp[0] vs Sp[1]) presumably
 * belong to stripped preprocessor branches; this extract omits the
 * surrounding #if lines and the FB_/FE_ brackets.
 */
134 INFO_TABLE_SRT_BITMAP(stg_blockAsyncExceptionszh_ret_info, stg_blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
135 FN_(stg_blockAsyncExceptionszh_ret_entry)
138 ASSERT(CurrentTSO->blocked_exceptions == NULL);
139 CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
142 JMP_(ENTRY_CODE(Sp[0]));
146 JMP_(ENTRY_CODE(Sp[1]));
/* killThread# : raise an asynchronous exception (R2) in the target
 * TSO (R1).  If the target is blocking async exceptions and is not in
 * an interruptible blocked state, the *calling* thread is queued on
 * the target's blocked_exceptions list and blocks (BlockedOnException)
 * until the target unblocks.  Killing oneself is handled specially:
 * thread state is saved, raiseAsync is called, and depending on the
 * outcome we either return ThreadFinished to the scheduler or resume
 * at whatever continuation the exception unwound to.
 *
 * NOTE(review): lossy extract — loop/branch bodies, closing braces and
 * several return paths fall in the line-number gaps; code kept
 * byte-for-byte.
 */
151 FN_(killThreadzh_fast)
154 /* args: R1.p = TSO to kill, R2.p = Exception */
156 /* This thread may have been relocated.
157 * (see Schedule.c:threadStackOverflow)
159 while (R1.t->what_next == ThreadRelocated) {
163 /* If the target thread is currently blocking async exceptions,
164 * we'll have to block until it's ready to accept them. The
165 * exception is interruptible threads - ie. those that are blocked
168 if (R1.t->blocked_exceptions != NULL && !interruptible(R1.t) ) {
170 /* ToDo (SMP): locking if destination thread is currently
173 CurrentTSO->link = R1.t->blocked_exceptions;
174 R1.t->blocked_exceptions = CurrentTSO;
176 CurrentTSO->why_blocked = BlockedOnException;
177 CurrentTSO->block_info.tso = R1.t;
179 BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
182 /* Killed threads turn into zombies, which might be garbage
183 * collected at a later date. That's why we don't have to
184 * explicitly remove them from any queues they might be on.
187 /* We might have killed ourselves. In which case, better be *very*
188 * careful. If the exception killed us, then return to the scheduler.
189 * If the exception went to a catch frame, we'll just continue from
192 if (R1.t == CurrentTSO) {
193 SaveThreadState(); /* inline! */
194 STGCALL2(raiseAsync, R1.t, R2.cl);
195 if (CurrentTSO->what_next == ThreadKilled) {
196 R1.w = ThreadFinished;
200 if (CurrentTSO->what_next == ThreadEnterGHC) {
203 JMP_(GET_ENTRY(R1.cl));
205 barf("killThreadzh_fast");
208 STGCALL2(raiseAsync, R1.t, R2.cl);
211 JMP_(ENTRY_CODE(Sp[0]));
/* myThreadId# : return the current TSO (as the ThreadId# value) in R1
 * and return to the continuation on top of the stack.
 * NOTE(review): FB_/FE_ brackets fall in the extract's gaps.
 */
216 FN_(myThreadIdzh_fast)
220 R1.p = (P_)CurrentTSO;
221 JMP_(ENTRY_CODE(Sp[0]));
226 /* -----------------------------------------------------------------------------
228 -------------------------------------------------------------------------- */
/* Template generating the entry code for a CATCH_FRAME return: pop Su
 * back to the frame's saved link, pop the frame itself off the stack,
 * and perform the given return ("ret") to the activation record below.
 *
 * NOTE(review): TWO #define versions appear below — in the complete
 * file they presumably sit in opposite arms of a register-convention
 * #if (note the "sizeofW(StgCatchFrame) - 1" variant, which leaves one
 * word, likely the returned value, on the stack).  The #if/#else/#endif
 * lines and most of each macro body fall in the extract's gaps.
 */
231 #define CATCH_FRAME_ENTRY_TEMPLATE(label,ret) \
236 Su = ((StgCatchFrame *)Sp)->link; \
237 Sp += sizeofW(StgCatchFrame); \
242 #define CATCH_FRAME_ENTRY_TEMPLATE(label,ret) \
250 Su = ((StgCatchFrame *)Sp)->link; \
251 Sp += sizeofW(StgCatchFrame) - 1; \
/* One direct-return entry plus the eight vectored-return entries
 * required by the polymorphic return convention. */
264 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_entry,ENTRY_CODE(Sp[SP_OFF]));
265 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_0_entry,RET_VEC(Sp[SP_OFF],0));
266 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_1_entry,RET_VEC(Sp[SP_OFF],1));
267 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_2_entry,RET_VEC(Sp[SP_OFF],2));
268 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_3_entry,RET_VEC(Sp[SP_OFF],3));
269 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_4_entry,RET_VEC(Sp[SP_OFF],4));
270 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_5_entry,RET_VEC(Sp[SP_OFF],5));
271 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_6_entry,RET_VEC(Sp[SP_OFF],6));
272 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_7_entry,RET_VEC(Sp[SP_OFF],7));
/* Layout bitmap for catch frames: profiling builds carry extra header
 * words, hence the wider bitmap (15 vs 3).  NOTE(review): the #else
 * and #endif between the two #defines fall in the extract's gaps.
 */
274 #if defined(PROFILING)
275 #define CATCH_FRAME_BITMAP 15
277 #define CATCH_FRAME_BITMAP 3
280 /* Catch frames are very similar to update frames, but when entering
281 * one we just pop the frame off the stack and perform the correct
282 * kind of return to the activation record underneath us on the stack.
285 VEC_POLY_INFO_TABLE(stg_catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);
287 /* -----------------------------------------------------------------------------
288 * The catch infotable
290 * This should be exactly the same as would be generated by this STG code
292 * catch = {x,h} \n {} -> catch#{x,h}
294 * It is used in deleteThread when reverting blackholes.
295 * -------------------------------------------------------------------------- */
297 INFO_TABLE(stg_catch_info,stg_catch_entry,2,0,FUN,,EF_,0,0);
298 STGFUN(stg_catch_entry)
301 R2.cl = R1.cl->payload[1]; /* h */
302 R1.cl = R1.cl->payload[0]; /* x */
/* Body of catch# (the FN_(catchzh_fast) header line falls in a gap of
 * this extract): pushes a StgCatchFrame recording the handler and the
 * current async-exception blocking state, makes it the topmost update
 * frame (Su), then enters the protected computation in R1.
 */
312 /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
313 STK_CHK_GEN(sizeofW(StgCatchFrame) + 1, R1_PTR | R2_PTR, catchzh_fast, );
315 /* Set up the catch frame */
316 Sp -= sizeofW(StgCatchFrame);
317 fp = (StgCatchFrame *)Sp;
318 SET_HDR(fp,(StgInfoTable *)&stg_catch_frame_info,CCCS);
319 fp -> handler = R2.cl;
/* Remember whether async exceptions were blocked when the frame was
 * pushed, so the handler path can restore that state (see
 * raisezh_fast below). */
320 fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
322 Su = (StgUpdateFrame *)fp;
323 TICK_CATCHF_PUSHED();
325 /* Push realworld token and enter R1. */
329 JMP_(GET_ENTRY(R1.cl));
334 /* -----------------------------------------------------------------------------
335 * The raise infotable
337 * This should be exactly the same as would be generated by this STG code
339 * raise = {err} \n {} -> raise#{err}
341 * It is used in raisezh_fast to update thunks on the update list
342 * -------------------------------------------------------------------------- */
344 INFO_TABLE(stg_raise_info,stg_raise_entry,1,0,THUNK,,EF_,0,0);
345 STGFUN(stg_raise_entry)
348 R1.cl = R1.cl->payload[0];
/* Body of raise# (the FN_(raisezh_fast) header line falls in a gap of
 * this extract).  Walks the Su chain, overwriting every updatable
 * thunk with a "raise# E" closure, until it reaches a CATCH_FRAME (run
 * the handler), a STOP_FRAME (thread dies), or an unexpected frame
 * (barf).  NOTE(review): the switch cases, loop structure and several
 * #ifdef arms are missing from view; code kept byte-for-byte.
 */
357 StgClosure *raise_closure;
359 /* args : R1 = exception */
362 #if defined(PROFILING)
363 /* Debugging tool: on raising an exception, show where we are. */
365 /* ToDo: currently this is a hack. Would be much better if
366 * the info was only displayed for an *uncaught* exception.
368 if (RtsFlags.ProfFlags.showCCSOnException) {
369 STGCALL2(fprintCCS,stderr,CCCS);
375 /* This closure represents the expression 'raise# E' where E
376 * is the exception raised. It is used to overwrite all the
377 * thunks which are currently under evaluation.
/* NOTE(review): two alternative allocate calls appear below
 * (payload of 1 word vs MIN_UPD_SIZE words); they presumably sit in
 * opposite arms of a stripped #ifdef — see the rationale comment. */
381 // stg_raise_info has THUNK as its closure type. Since a THUNK takes at least
382 // MIN_UPD_SIZE words in its payload, MIN_UPD_SIZE is more appropriate than 1.
383 // It seems that 1 does not cause any problem unless profiling is performed.
384 // However, when LDV profiling goes on, we need to linearly scan small object pool,
385 // where raise_closure is stored, so we should use MIN_UPD_SIZE.
386 raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
387 sizeofW(StgClosure)+1);
389 raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
390 sizeofW(StgClosure)+MIN_UPD_SIZE);
391 SET_HDR(raise_closure, &stg_raise_info, CCCS);
392 raise_closure->payload[0] = R1.cl;
/* Walk the frame chain: overwrite updatees of update frames with the
 * raise closure, skip seq frames, stop at catch/stop frames. */
396 switch (get_itbl(p)->type) {
399 UPD_IND(p->updatee,raise_closure);
404 p = ((StgSeqFrame *)p)->link;
412 /* We've stripped the entire stack, the thread is now dead. */
413 Sp = CurrentTSO->stack + CurrentTSO->stack_size - 1;
414 Sp[0] = R1.w; /* save the exception */
415 Su = (StgUpdateFrame *)(Sp+1);
416 CurrentTSO->what_next = ThreadKilled;
417 SaveThreadState(); /* inline! */
418 R1.w = ThreadFinished;
422 barf("raisezh_fast: weird activation record");
429 /* Ok, p points to the enclosing CATCH_FRAME. Pop everything down to
430 * and including this frame, update Su, push R1, and enter the handler.
432 Su = ((StgCatchFrame *)p)->link;
433 handler = ((StgCatchFrame *)p)->handler;
435 Sp = (P_)p + sizeofW(StgCatchFrame);
437 /* Restore the blocked/unblocked state for asynchronous exceptions
438 * at the CATCH_FRAME.
440 * If exceptions were unblocked, arrange that they are unblocked
441 * again after executing the handler by pushing an
442 * unblockAsyncExceptions_ret stack frame.
444 if (! ((StgCatchFrame *)p)->exceptions_blocked) {
445 *(--Sp) = (W_)&stg_unblockAsyncExceptionszh_ret_info;
448 /* Ensure that async exceptions are blocked when running the handler.
450 if (CurrentTSO->blocked_exceptions == NULL) {
451 CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
454 /* Enter the handler, passing the exception value and a realworld
455 * token as arguments.
462 JMP_(GET_ENTRY(R1.cl));