d647013fd1e4bad64878010eccd7a386c7d54449
[ghc-hetmet.git] / ghc / rts / Exception.hc
1 /* -----------------------------------------------------------------------------
2  * $Id: Exception.hc,v 1.19 2001/03/22 03:51:10 hwloidl Exp $
3  *
4  * (c) The GHC Team, 1998-2000
5  *
6  * Exception support
7  *
8  * ---------------------------------------------------------------------------*/
9
10 #include "Rts.h"
11 #include "Exception.h"
12 #include "Schedule.h"
13 #include "StgRun.h"
14 #include "Storage.h"
15 #include "RtsUtils.h"
16 #include "RtsFlags.h"
17 #if defined(PAR)
18 # include "FetchMe.h"
19 #endif
20 #if defined(PROFILING)
21 # include "Profiling.h"
22 #endif
23
24 /* -----------------------------------------------------------------------------
25    Exception Primitives
26
27    A thread can request that asynchronous exceptions not be delivered
28    ("blocked") for the duration of an I/O computation.  The primitive
29    
30         blockAsyncExceptions# :: IO a -> IO a
31
32    is used for this purpose.  During a blocked section, asynchronous
33    exceptions may be unblocked again temporarily:
34
35         unblockAsyncExceptions# :: IO a -> IO a
36
37    Furthermore, asynchronous exceptions are blocked automatically during
38    the execution of an exception handler.  Both of these primitives
39    leave a continuation on the stack which reverts to the previous
40    state (blocked or unblocked) on exit.
41
42    A thread which wants to raise an exception in another thread (using
43    killThread#) must block until the target thread is ready to receive
44    it.  The action of unblocking exceptions in a thread will release all
45    the threads waiting to deliver exceptions to that thread.
46
47    -------------------------------------------------------------------------- */
48
/* Entry code for blockAsyncExceptions# :: IO a -> IO a.
 *
 * Marks the current thread as blocking asynchronous exceptions, then
 * enters the IO action in R1.  A stg_unblockAsyncExceptionszh_ret
 * frame is left underneath so the previous (unblocked) state is
 * restored when the action returns.
 */
FN_(blockAsyncExceptionszh_fast)
{
  FB_
    /* Args: R1 :: IO a */
    STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );

    /* blocked_exceptions == NULL means "not blocking"; any non-NULL
     * value (END_TSO_QUEUE, or a queue of waiting TSOs) means
     * "blocking".  If we are already blocking, there is nothing to do
     * and no restore frame to push.
     */
    if (CurrentTSO->blocked_exceptions == NULL) {
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
      /* avoid growing the stack unnecessarily: if the frame on top
       * would re-block exceptions on return, pop it instead of pushing
       * a matching unblock frame on top of it.
       */
      if (Sp[0] == (W_)&stg_blockAsyncExceptionszh_ret_info) {
        Sp++;
      } else {
        Sp--;
        Sp[0] = (W_)&stg_unblockAsyncExceptionszh_ret_info;
      }
    }
    /* Push the realworld token and tail-call the IO action. */
    Sp--;
    Sp[0] = ARG_TAG(0);
    JMP_(GET_ENTRY(R1.cl));
  FE_
}
70
/* Return frame pushed by blockAsyncExceptionszh_fast (and by
 * raisezh_fast when entering a handler): executed when the enclosed
 * computation returns, it reverts the thread to the "unblocked" state.
 * Waking the blocked_exceptions queue releases any threads waiting in
 * killThread# to deliver an exception to us.
 */
INFO_TABLE_SRT_BITMAP(stg_unblockAsyncExceptionszh_ret_info, stg_unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(stg_unblockAsyncExceptionszh_ret_entry)
{
  FB_
    ASSERT(CurrentTSO->blocked_exceptions != NULL);
#if defined(GRAN)
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         (StgClosure*)NULL); 
#elif defined(PAR)
      /* we don't need node info (2nd arg) in this case
         (note that CurrentTSO->block_info.closure isn't always set) */
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         (StgClosure*)NULL); 
#else
    awakenBlockedQueue(CurrentTSO->blocked_exceptions);
#endif
    CurrentTSO->blocked_exceptions = NULL;  /* NULL == "unblocked" */
#ifdef REG_R1
    /* Registerised: return value is in R1; just pop this frame. */
    Sp++;
    JMP_(ENTRY_CODE(Sp[0]));
#else
    /* Unregisterised: the return value sits on top of the stack, so
     * shift it up one word as the frame is popped.
     */
    Sp[1] = Sp[0];
    Sp++;
    JMP_(ENTRY_CODE(Sp[1]));
#endif
  FE_
}
98
/* Entry code for unblockAsyncExceptions# :: IO a -> IO a.
 *
 * Reverts the current thread to the "unblocked" state (releasing any
 * threads queued up to deliver exceptions to us), then enters the IO
 * action in R1.  A stg_blockAsyncExceptionszh_ret frame is left
 * underneath so the previous (blocked) state is restored on return.
 */
FN_(unblockAsyncExceptionszh_fast)
{
  FB_
    /* Args: R1 :: IO a */
    STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );

    /* Only do the work if we are currently blocking (non-NULL). */
    if (CurrentTSO->blocked_exceptions != NULL) {
#if defined(GRAN)
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         CurrentTSO->block_info.closure);
#elif defined(PAR)
      // is CurrentTSO->block_info.closure always set to the node
      // holding the blocking queue !? -- HWL
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         CurrentTSO->block_info.closure);
#else
      awakenBlockedQueue(CurrentTSO->blocked_exceptions);
#endif
      CurrentTSO->blocked_exceptions = NULL;

      /* avoid growing the stack unnecessarily: if the frame on top
       * would re-unblock exceptions on return, pop it instead of
       * pushing a matching block frame on top of it.
       */
      if (Sp[0] == (W_)&stg_unblockAsyncExceptionszh_ret_info) {
        Sp++;
      } else {
        Sp--;   
        Sp[0] = (W_)&stg_blockAsyncExceptionszh_ret_info;
      }
    }
    /* Push the realworld token and tail-call the IO action. */
    Sp--;
    Sp[0] = ARG_TAG(0);
    JMP_(GET_ENTRY(R1.cl));
  FE_
}
132
/* Return frame pushed by unblockAsyncExceptionszh_fast: executed when
 * the enclosed computation returns, it reverts the thread to the
 * "blocked" state.  No queue to wake here: the assert guarantees we
 * were unblocked, so no thread can be waiting on us.
 */
INFO_TABLE_SRT_BITMAP(stg_blockAsyncExceptionszh_ret_info, stg_blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(stg_blockAsyncExceptionszh_ret_entry)
{
  FB_
    ASSERT(CurrentTSO->blocked_exceptions == NULL);
    CurrentTSO->blocked_exceptions = END_TSO_QUEUE;  /* non-NULL == "blocked" */
#ifdef REG_R1
    /* Registerised: return value is in R1; just pop this frame. */
    Sp++;
    JMP_(ENTRY_CODE(Sp[0]));
#else
    /* Unregisterised: return value is on the stack; shift it up one
     * word as the frame is popped.
     */
    Sp[1] = Sp[0];
    Sp++;
    JMP_(ENTRY_CODE(Sp[1]));
#endif
  FE_
}
149
/* Entry code for killThread# :: ThreadId# -> Exception -> IO ().
 *
 * Raises the exception in R2 asynchronously in the target thread R1.
 * May block the *current* thread if the target is not ready to
 * receive exceptions.  Special care is needed when the target is
 * ourselves, because raiseAsync unwinds our own stack.
 */
FN_(killThreadzh_fast)
{
  FB_
  /* args: R1.p = TSO to kill, R2.p = Exception */

  /* This thread may have been relocated.
   * (see Schedule.c:threadStackOverflow)
   */
  while (R1.t->what_next == ThreadRelocated) {
    R1.t = R1.t->link;
  }

  /* If the target thread is currently blocking async exceptions,
   * we'll have to block until it's ready to accept them.  The
   * exception is interruptible threads - ie. those that are blocked
   * on some resource.
   */
  if (R1.t->blocked_exceptions != NULL && !interruptible(R1.t) ) {
    
    /* ToDo (SMP): locking if destination thread is currently
     * running...
     */
    /* Push ourselves onto the target's queue of threads waiting to
     * deliver an exception; we are woken when it unblocks exceptions
     * (see stg_unblockAsyncExceptionszh_ret_entry above).
     */
    CurrentTSO->link = R1.t->blocked_exceptions;
    R1.t->blocked_exceptions = CurrentTSO;

    CurrentTSO->why_blocked = BlockedOnException;
    CurrentTSO->block_info.tso = R1.t;
    
    /* Return to the scheduler; this primop is retried from the top
     * when we are woken (hence the ThreadRelocated re-check above).
     */
    BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
  }

  /* Killed threads turn into zombies, which might be garbage
   * collected at a later date.  That's why we don't have to
   * explicitly remove them from any queues they might be on.
   */

  /* We might have killed ourselves.  In which case, better be *very*
   * careful.  If the exception killed us, then return to the scheduler.
   * If the exception went to a catch frame, we'll just continue from
   * the handler.
   */
  if (R1.t == CurrentTSO) {
        SaveThreadState();      /* inline! */
        STGCALL2(raiseAsync, R1.t, R2.cl);
        /* Exception stripped the whole stack: report thread death. */
        if (CurrentTSO->what_next == ThreadKilled) {
                R1.w = ThreadFinished;
                JMP_(StgReturn);
        }
        /* Otherwise a catch frame caught it; raiseAsync left the
         * closure to enter on top of the stack.
         */
        LoadThreadState();
        if (CurrentTSO->what_next == ThreadEnterGHC) {
                R1.w = Sp[0];
                Sp++;
                JMP_(GET_ENTRY(R1.cl));
        } else {
                barf("killThreadzh_fast");
        }
  } else {
        /* Killing another thread: deliver the exception and carry on. */
        STGCALL2(raiseAsync, R1.t, R2.cl);
  }

  JMP_(ENTRY_CODE(Sp[0]));
  FE_
}
213
214 /* -----------------------------------------------------------------------------
215    Catch frames
216    -------------------------------------------------------------------------- */
217
/* CATCH_FRAME_ENTRY_TEMPLATE(label, ret)
 *
 * Generates the entry code run when a CATCH_FRAME is returned to
 * normally (no exception was raised): pop the frame, restore Su to
 * the enclosing update frame, and perform `ret` — the appropriate
 * direct or vectored return to the frame underneath.
 *
 * Registerised (REG_R1): the return value is in R1, so the frame is
 * simply popped.  Unregisterised: the return value is the top stack
 * word, so it is saved in `rval` and re-pushed after the frame is
 * removed.
 */
#ifdef REG_R1
#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
   FN_(label);                                  \
   FN_(label)                                   \
   {                                            \
      FB_                                       \
      Su = ((StgCatchFrame *)Sp)->link;         \
      Sp += sizeofW(StgCatchFrame);             \
      JMP_(ret);                                \
      FE_                                       \
   }
#else
#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
   FN_(label);                                  \
   FN_(label)                                   \
   {                                            \
      StgWord rval;                             \
      FB_                                       \
      rval = Sp[0];                             \
      Sp++;                                     \
      Su = ((StgCatchFrame *)Sp)->link;         \
      Sp += sizeofW(StgCatchFrame) - 1;         \
      Sp[0] = rval;                             \
      JMP_(ret);                                \
      FE_                                       \
   }
#endif
245
/* Offset (in words) of the return-address word relative to Sp on
 * entry: in the unregisterised build the return value occupies Sp[0],
 * pushing the return address to Sp[1].
 */
#ifdef REG_R1
#define SP_OFF 0
#else
#define SP_OFF 1
#endif

/* One entry point for the direct return and one per vectored-return
 * slot (vectors 0..7), all instances of the template above.
 */
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_entry,ENTRY_CODE(Sp[SP_OFF]));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_0_entry,RET_VEC(Sp[SP_OFF],0));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_1_entry,RET_VEC(Sp[SP_OFF],1));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_2_entry,RET_VEC(Sp[SP_OFF],2));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_3_entry,RET_VEC(Sp[SP_OFF],3));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_4_entry,RET_VEC(Sp[SP_OFF],4));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_5_entry,RET_VEC(Sp[SP_OFF],5));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_6_entry,RET_VEC(Sp[SP_OFF],6));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_7_entry,RET_VEC(Sp[SP_OFF],7));
261
/* Liveness bitmap for the catch frame's non-header words.  NOTE(review):
 * the PROFILING build appears to carry one extra frame word (widening
 * the bitmap from 3 to 7) — confirm against the StgCatchFrame layout
 * in the profiled RTS.
 */
#ifdef PROFILING
#define CATCH_FRAME_BITMAP 7
#else
#define CATCH_FRAME_BITMAP 3
#endif

/* Catch frames are very similar to update frames, but when entering
 * one we just pop the frame off the stack and perform the correct
 * kind of return to the activation record underneath us on the stack.
 */

/* Info table tying the direct and vectored entry points above together. */
VEC_POLY_INFO_TABLE(stg_catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);
274
275 /* -----------------------------------------------------------------------------
276  * The catch infotable
277  *
278  * This should be exactly the same as would be generated by this STG code
279  *
280  * catch = {x,h} \n {} -> catch#{x,h}
281  *
282  * It is used in deleteThread when reverting blackholes.
283  * -------------------------------------------------------------------------- */
284
/* Entry code for the stg_catch closure (see header comment above):
 * unpack the two free variables (action x, handler h) into R1/R2 and
 * tail-call the catch# primop.
 */
INFO_TABLE(stg_catch_info,stg_catch_entry,2,0,FUN,,EF_,0,0);
STGFUN(stg_catch_entry)
{
  FB_
  R2.cl = R1.cl->payload[1]; /* h */
  R1.cl = R1.cl->payload[0]; /* x */
  JMP_(catchzh_fast);
  FE_
}
294
/* Entry code for catch# :: IO a -> (Exception -> IO a) -> IO a.
 *
 * Pushes a CATCH_FRAME recording the handler, the current
 * async-exception blocking state (so raisezh_fast can restore it),
 * and the enclosing update frame, then enters the protected action.
 */
FN_(catchzh_fast)
{
  StgCatchFrame *fp;
  FB_

    /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
    STK_CHK_GEN(sizeofW(StgCatchFrame) + 1, R1_PTR | R2_PTR, catchzh_fast, );
  
    /* Set up the catch frame */
    Sp -= sizeofW(StgCatchFrame);
    fp = (StgCatchFrame *)Sp;
    SET_HDR(fp,(StgInfoTable *)&stg_catch_frame_info,CCCS);
    fp -> handler = R2.cl;
    fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
    fp -> link = Su;
    /* The catch frame joins the Su chain so raise can find it. */
    Su = (StgUpdateFrame *)fp;
    TICK_CATCHF_PUSHED();

    /* Push realworld token and enter R1. */
    Sp--;
    Sp[0] = ARG_TAG(0);
    TICK_ENT_VIA_NODE();
    JMP_(GET_ENTRY(R1.cl));
    
  FE_
}      
321
322 /* -----------------------------------------------------------------------------
323  * The raise infotable
324  * 
325  * This should be exactly the same as would be generated by this STG code
326  *
327  *   raise = {err} \n {} -> raise#{err}
328  *
329  * It is used in raisezh_fast to update thunks on the update list
330  * -------------------------------------------------------------------------- */
331
/* Entry code for a stg_raise thunk (see header comment above): when a
 * thunk overwritten with 'raise# E' during a previous raise is entered
 * again, re-raise the stored exception.
 */
INFO_TABLE(stg_raise_info,stg_raise_entry,1,0,THUNK,,EF_,0,0);
STGFUN(stg_raise_entry)
{
  FB_
  R1.cl = R1.cl->payload[0];  /* the saved exception value */
  JMP_(raisezh_fast);
  FE_
}
340
/* Entry code for raise# :: Exception -> a.
 *
 * Walks the Su chain of update/seq/catch frames: every thunk under
 * evaluation is overwritten with a 'raise# E' closure (so re-entering
 * it re-raises), until a CATCH_FRAME is found, whose handler is then
 * entered with the exception.  If the stack is stripped to the
 * STOP_FRAME instead, the thread dies.
 */
FN_(raisezh_fast)
{
  StgClosure *handler;
  StgUpdateFrame *p;
  StgClosure *raise_closure;
  FB_
    /* args : R1 = exception */


#if defined(PROFILING)

    /* Debugging tool: on raising an  exception, show where we are. */

    /* ToDo: currently this is a hack.  Would be much better if
     * the info was only displayed for an *uncaught* exception.
     */
    if (RtsFlags.ProfFlags.showCCSOnException) {
      STGCALL2(print_ccs,stderr,CCCS);
    }

#endif

    p = Su;

    /* This closure represents the expression 'raise# E' where E
     * is the exception raised.  It is used to overwrite all the
     * thunks which are currently under evaluation.
     */
    raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
                                               sizeofW(StgClosure)+1);  /* header + 1 payload word */
    raise_closure->header.info = &stg_raise_info;
    raise_closure->payload[0] = R1.cl;

    /* Walk the frame chain.  The CATCH_FRAME case breaks out of the
     * switch, and the unconditional 'break' below then exits the loop.
     */
    while (1) {

      switch (get_itbl(p)->type) {

      case UPDATE_FRAME:
        /* Overwrite the updatee with 'raise# E' and keep walking. */
        UPD_IND(p->updatee,raise_closure);
        p = p->link;
        continue;

      case SEQ_FRAME:
        /* Nothing to update; just skip over it. */
        p = ((StgSeqFrame *)p)->link;
        continue;

      case CATCH_FRAME:
        /* found it! */
        break;

      case STOP_FRAME:
        /* We've stripped the entire stack, the thread is now dead. */
        Sp = CurrentTSO->stack + CurrentTSO->stack_size - 1;
        Sp[0] = R1.w;           /* save the exception */
        Su = (StgUpdateFrame *)(Sp+1);
        CurrentTSO->what_next = ThreadKilled;
        SaveThreadState();      /* inline! */
        R1.w = ThreadFinished;
        JMP_(StgReturn);
      
      default:
        barf("raisezh_fast: weird activation record");
      }
      
      break;

    }
    
    /* Ok, p points to the enclosing CATCH_FRAME.  Pop everything down to
     * and including this frame, update Su, push R1, and enter the handler.
     */
    Su = ((StgCatchFrame *)p)->link; 
    handler = ((StgCatchFrame *)p)->handler;
    
    Sp = (P_)p + sizeofW(StgCatchFrame);

    /* Restore the blocked/unblocked state for asynchronous exceptions
     * at the CATCH_FRAME.  
     *
     * If exceptions were unblocked, arrange that they are unblocked
     * again after executing the handler by pushing an
     * unblockAsyncExceptions_ret stack frame.
     */
    if (! ((StgCatchFrame *)p)->exceptions_blocked) {
      *(--Sp) = (W_)&stg_unblockAsyncExceptionszh_ret_info;
    }

    /* Ensure that async exceptions are blocked when running the handler.
    */
    if (CurrentTSO->blocked_exceptions == NULL) {
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
    }

    /* Enter the handler, passing the exception value and a realworld
     * token as arguments.
     */
    Sp -= 2;
    Sp[0] = R1.w;
    Sp[1] = ARG_TAG(0);
    TICK_ENT_VIA_NODE();
    R1.cl = handler;
    JMP_(GET_ENTRY(R1.cl));

  FE_
}