[project @ 2001-03-23 16:36:20 by simonmar]
[ghc-hetmet.git] / ghc / rts / Exception.hc
/* -----------------------------------------------------------------------------
 * $Id: Exception.hc,v 1.20 2001/03/23 16:36:21 simonmar Exp $
 *
 * (c) The GHC Team, 1998-2000
 *
 * Exception support
 *
 * ---------------------------------------------------------------------------*/

10 #include "Stg.h"
11 #include "Rts.h"
12 #include "Exception.h"
13 #include "Schedule.h"
14 #include "StgRun.h"
15 #include "Storage.h"
16 #include "RtsUtils.h"
17 #include "RtsFlags.h"
18 #if defined(PAR)
19 # include "FetchMe.h"
20 #endif
21 #if defined(PROFILING)
22 # include "Profiling.h"
23 #endif
24
/* -----------------------------------------------------------------------------
   Exception Primitives

   A thread can request that asynchronous exceptions not be delivered
   ("blocked") for the duration of an I/O computation.  The primitive

        blockAsyncExceptions# :: IO a -> IO a

   is used for this purpose.  During a blocked section, asynchronous
   exceptions may be unblocked again temporarily:

        unblockAsyncExceptions# :: IO a -> IO a

   Furthermore, asynchronous exceptions are blocked automatically during
   the execution of an exception handler.  Both of these primitives
   leave a continuation on the stack which reverts to the previous
   state (blocked or unblocked) on exit.

   A thread which wants to raise an exception in another thread (using
   killThread#) must block until the target thread is ready to receive
   it.  The action of unblocking exceptions in a thread will release all
   the threads waiting to deliver exceptions to that thread.

   -------------------------------------------------------------------------- */
49
FN_(blockAsyncExceptionszh_fast)
{
  FB_
    /* Args: R1 :: IO a
     *
     * Implements blockAsyncExceptions#: put the current thread into the
     * "async exceptions blocked" state, then run the IO action in R1.
     * Convention: blocked_exceptions == NULL means unblocked; any other
     * value (END_TSO_QUEUE, or a queue of TSOs waiting to deliver an
     * exception) means blocked.
     */
    STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );

    if (CurrentTSO->blocked_exceptions == NULL) {
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
      /* avoid growing the stack unnecessarily */
      if (Sp[0] == (W_)&stg_blockAsyncExceptionszh_ret_info) {
        /* The frame on top would re-enter the blocked state on return
         * anyway, so popping it has the same net effect as pushing an
         * unblock frame on top of it. */
        Sp++;
      } else {
        /* Push a continuation that reverts to the unblocked state when
         * the IO action returns. */
        Sp--;
        Sp[0] = (W_)&stg_unblockAsyncExceptionszh_ret_info;
      }
    }
    /* Push the realworld token and enter the IO action. */
    Sp--;
    Sp[0] = ARG_TAG(0);
    JMP_(GET_ENTRY(R1.cl));
  FE_
}
71
/* Return continuation which reverts the thread to the "exceptions
 * unblocked" state, releasing any threads that queued themselves on
 * blocked_exceptions while waiting to deliver an exception to this
 * thread (see killThreadzh_fast).
 */
INFO_TABLE_SRT_BITMAP(stg_unblockAsyncExceptionszh_ret_info, stg_unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(stg_unblockAsyncExceptionszh_ret_entry)
{
  FB_
    ASSERT(CurrentTSO->blocked_exceptions != NULL);
#if defined(GRAN)
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         (StgClosure*)NULL); 
#elif defined(PAR)
      /* we don't need node info (2nd arg) in this case
         (note that CurrentTSO->block_info.closure isn't always set) */
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         (StgClosure*)NULL); 
#else
    awakenBlockedQueue(CurrentTSO->blocked_exceptions);
#endif
    CurrentTSO->blocked_exceptions = NULL; /* NULL == unblocked */
#ifdef REG_R1
    /* Return value is in R1: just pop this frame and return to the
     * frame underneath. */
    Sp++;
    JMP_(ENTRY_CODE(Sp[0]));
#else
    /* Unregisterised: the return value is on the stack at Sp[0]; slide
     * it up over this frame before returning. */
    Sp[1] = Sp[0];
    Sp++;
    JMP_(ENTRY_CODE(Sp[1]));
#endif
  FE_
}
99
FN_(unblockAsyncExceptionszh_fast)
{
  FB_
    /* Args: R1 :: IO a
     *
     * Implements unblockAsyncExceptions#: leave the "async exceptions
     * blocked" state (waking any threads waiting to deliver an
     * exception to us), then run the IO action in R1.
     */
    STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );

    if (CurrentTSO->blocked_exceptions != NULL) {
#if defined(GRAN)
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         CurrentTSO->block_info.closure);
#elif defined(PAR)
      // is CurrentTSO->block_info.closure always set to the node
      // holding the blocking queue !? -- HWL
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         CurrentTSO->block_info.closure);
#else
      /* Release the threads queued up waiting to deliver an exception
       * to this thread (see killThreadzh_fast). */
      awakenBlockedQueue(CurrentTSO->blocked_exceptions);
#endif
      CurrentTSO->blocked_exceptions = NULL;

      /* avoid growing the stack unnecessarily */
      if (Sp[0] == (W_)&stg_unblockAsyncExceptionszh_ret_info) {
        /* The frame on top would re-enter the unblocked state on return
         * anyway, so popping it has the same net effect as pushing a
         * block frame on top of it. */
        Sp++;
      } else {
        /* Push a continuation that reverts to the blocked state when
         * the IO action returns. */
        Sp--;
        Sp[0] = (W_)&stg_blockAsyncExceptionszh_ret_info;
      }
    }
    /* Push the realworld token and enter the IO action. */
    Sp--;
    Sp[0] = ARG_TAG(0);
    JMP_(GET_ENTRY(R1.cl));
  FE_
}
133
/* Return continuation which reverts the thread to the "exceptions
 * blocked" state (blocked_exceptions != NULL).
 */
INFO_TABLE_SRT_BITMAP(stg_blockAsyncExceptionszh_ret_info, stg_blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(stg_blockAsyncExceptionszh_ret_entry)
{
  FB_
    ASSERT(CurrentTSO->blocked_exceptions == NULL);
    CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
#ifdef REG_R1
    /* Return value is in R1: just pop this frame and return to the
     * frame underneath. */
    Sp++;
    JMP_(ENTRY_CODE(Sp[0]));
#else
    /* Unregisterised: the return value is on the stack at Sp[0]; slide
     * it up over this frame before returning. */
    Sp[1] = Sp[0];
    Sp++;
    JMP_(ENTRY_CODE(Sp[1]));
#endif
  FE_
}
150
/* killThread# :: ThreadId# -> Exception -> IO ()
 *
 * Raise an asynchronous exception in the target thread.  If the target
 * currently has async exceptions blocked (and is not interruptibly
 * blocked), the calling thread queues itself on the target's
 * blocked_exceptions list and blocks until delivery becomes possible.
 */
FN_(killThreadzh_fast)
{
  FB_
  /* args: R1.p = TSO to kill, R2.p = Exception */

  /* This thread may have been relocated.
   * (see Schedule.c:threadStackOverflow)
   */
  while (R1.t->what_next == ThreadRelocated) {
    R1.t = R1.t->link;
  }

  /* If the target thread is currently blocking async exceptions,
   * we'll have to block until it's ready to accept them.  The
   * exception to this is interruptible threads - ie. those that are
   * blocked on some resource.
   */
  if (R1.t->blocked_exceptions != NULL && !interruptible(R1.t) ) {
    
    /* ToDo (SMP): locking if destination thread is currently
     * running...
     */
    /* Push ourselves onto the target's queue of would-be throwers. */
    CurrentTSO->link = R1.t->blocked_exceptions;
    R1.t->blocked_exceptions = CurrentTSO;

    CurrentTSO->why_blocked = BlockedOnException;
    CurrentTSO->block_info.tso = R1.t;
    
    /* Suspend; when woken we re-enter killThreadzh_fast from the top,
     * so the delivery attempt is retried. */
    BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
  }

  /* Killed threads turn into zombies, which might be garbage
   * collected at a later date.  That's why we don't have to
   * explicitly remove them from any queues they might be on.
   */

  /* We might have killed ourselves.  In which case, better be *very*
   * careful.  If the exception killed us, then return to the scheduler.
   * If the exception went to a catch frame, we'll just continue from
   * the handler.
   */
  if (R1.t == CurrentTSO) {
        SaveThreadState();      /* inline! */
        STGCALL2(raiseAsync, R1.t, R2.cl);
        if (CurrentTSO->what_next == ThreadKilled) {
                /* The exception stripped our whole stack: hand control
                 * back to the scheduler. */
                R1.w = ThreadFinished;
                JMP_(StgReturn);
        }
        LoadThreadState();
        if (CurrentTSO->what_next == ThreadEnterGHC) {
                /* raiseAsync left a closure to enter on top of our
                 * (possibly rearranged) stack; enter it. */
                R1.w = Sp[0];
                Sp++;
                JMP_(GET_ENTRY(R1.cl));
        } else {
                barf("killThreadzh_fast");
        }
  } else {
        STGCALL2(raiseAsync, R1.t, R2.cl);
  }

  /* Normal case: return () to our caller. */
  JMP_(ENTRY_CODE(Sp[0]));
  FE_
}
214
215 /* -----------------------------------------------------------------------------
216    Catch frames
217    -------------------------------------------------------------------------- */
218
/* CATCH_FRAME_ENTRY_TEMPLATE(label, ret) expands to the entry code for
 * a catch frame return: pop the frame (restoring Su to the enclosing
 * frame recorded in its link field) and perform return 'ret' to the
 * activation record underneath.
 */
#ifdef REG_R1
#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
   FN_(label);                                  \
   FN_(label)                                   \
   {                                            \
      FB_                                       \
      Su = ((StgCatchFrame *)Sp)->link;         \
      Sp += sizeofW(StgCatchFrame);             \
      JMP_(ret);                                \
      FE_                                       \
   }
#else
/* Unregisterised variant: the return value sits on the stack above the
 * frame, so it is saved and re-pushed after the frame is popped.
 */
#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
   FN_(label);                                  \
   FN_(label)                                   \
   {                                            \
      StgWord rval;                             \
      FB_                                       \
      rval = Sp[0];                             \
      Sp++;                                     \
      Su = ((StgCatchFrame *)Sp)->link;         \
      Sp += sizeofW(StgCatchFrame) - 1;         \
      Sp[0] = rval;                             \
      JMP_(ret);                                \
      FE_                                       \
   }
#endif

/* Offset of the return address relative to Sp on entry (in the
 * unregisterised case Sp[0] holds the return value instead). */
#ifdef REG_R1
#define SP_OFF 0
#else
#define SP_OFF 1
#endif

/* One entry point for the direct return plus one per vectored return. */
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_entry,ENTRY_CODE(Sp[SP_OFF]));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_0_entry,RET_VEC(Sp[SP_OFF],0));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_1_entry,RET_VEC(Sp[SP_OFF],1));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_2_entry,RET_VEC(Sp[SP_OFF],2));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_3_entry,RET_VEC(Sp[SP_OFF],3));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_4_entry,RET_VEC(Sp[SP_OFF],4));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_5_entry,RET_VEC(Sp[SP_OFF],5));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_6_entry,RET_VEC(Sp[SP_OFF],6));
CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_7_entry,RET_VEC(Sp[SP_OFF],7));

/* Frame bitmap: 3 normally, 7 when a profiling cost-centre word is
 * present.  NOTE(review): presumably this marks the catch frame's
 * handler/link (plus CCS) words for the stack walker -- confirm against
 * the RTS bitmap convention. */
#ifdef PROFILING
#define CATCH_FRAME_BITMAP 7
#else
#define CATCH_FRAME_BITMAP 3
#endif

/* Catch frames are very similar to update frames, but when entering
 * one we just pop the frame off the stack and perform the correct
 * kind of return to the activation record underneath us on the stack.
 */

VEC_POLY_INFO_TABLE(stg_catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);
275
276 /* -----------------------------------------------------------------------------
277  * The catch infotable
278  *
279  * This should be exactly the same as would be generated by this STG code
280  *
281  * catch = {x,h} \n {} -> catch#{x,h}
282  *
283  * It is used in deleteThread when reverting blackholes.
284  * -------------------------------------------------------------------------- */
285
INFO_TABLE(stg_catch_info,stg_catch_entry,2,0,FUN,,EF_,0,0);
STGFUN(stg_catch_entry)
{
  FB_
  /* Unpack the two free variables of the 'catch' closure into the
   * argument registers and tail-call the catch# primitive. */
  R2.cl = R1.cl->payload[1]; /* h */
  R1.cl = R1.cl->payload[0]; /* x */
  JMP_(catchzh_fast);
  FE_
}
295
/* catch# :: IO a -> (Exception -> IO a) -> IO a
 *
 * Push a catch frame recording the handler and the current async
 * exception state, then enter the IO action.  raisezh_fast unwinds to
 * this frame when an exception is raised.
 */
FN_(catchzh_fast)
{
  StgCatchFrame *fp;
  FB_

    /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
    STK_CHK_GEN(sizeofW(StgCatchFrame) + 1, R1_PTR | R2_PTR, catchzh_fast, );
  
    /* Set up the catch frame */
    Sp -= sizeofW(StgCatchFrame);
    fp = (StgCatchFrame *)Sp;
    SET_HDR(fp,(StgInfoTable *)&stg_catch_frame_info,CCCS);
    fp -> handler = R2.cl;
    /* Remember whether async exceptions were blocked at this point, so
     * raisezh_fast can restore the right state around the handler. */
    fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
    /* Link the frame into the Su chain so stack walks can find it. */
    fp -> link = Su;
    Su = (StgUpdateFrame *)fp;
    TICK_CATCHF_PUSHED();

    /* Push realworld token and enter R1. */
    Sp--;
    Sp[0] = ARG_TAG(0);
    TICK_ENT_VIA_NODE();
    JMP_(GET_ENTRY(R1.cl));
    
  FE_
}      
322
323 /* -----------------------------------------------------------------------------
324  * The raise infotable
325  * 
326  * This should be exactly the same as would be generated by this STG code
327  *
328  *   raise = {err} \n {} -> raise#{err}
329  *
330  * It is used in raisezh_fast to update thunks on the update list
331  * -------------------------------------------------------------------------- */
332
INFO_TABLE(stg_raise_info,stg_raise_entry,1,0,THUNK,,EF_,0,0);
STGFUN(stg_raise_entry)
{
  FB_
  /* payload[0] holds the exception value; re-raise it. */
  R1.cl = R1.cl->payload[0];
  JMP_(raisezh_fast);
  FE_
}
341
/* raise# :: Exception -> a
 *
 * Unwind the stack along the Su chain to the nearest CATCH_FRAME,
 * overwriting each update frame's updatee with a 'raise# E' thunk along
 * the way, then enter the handler.  If no catch frame is found the
 * thread is killed and control returns to the scheduler.
 */
FN_(raisezh_fast)
{
  StgClosure *handler;
  StgUpdateFrame *p;
  StgClosure *raise_closure;
  FB_
    /* args : R1 = exception */


#if defined(PROFILING)

    /* Debugging tool: on raising an  exception, show where we are. */

    /* ToDo: currently this is a hack.  Would be much better if
     * the info was only displayed for an *uncaught* exception.
     */
    if (RtsFlags.ProfFlags.showCCSOnException) {
      STGCALL2(print_ccs,stderr,CCCS);
    }

#endif

    p = Su;

    /* This closure represents the expression 'raise# E' where E
     * is the exception raised.  It is used to overwrite all the
     * thunks which are currently under evaluation.
     */
    raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
                                               sizeofW(StgClosure)+1);
    raise_closure->header.info = &stg_raise_info;
    raise_closure->payload[0] = R1.cl;

    /* Walk the frame chain looking for a CATCH_FRAME. */
    while (1) {

      switch (get_itbl(p)->type) {

      case UPDATE_FRAME:
        /* Anything waiting on this thunk now sees 'raise# E' when it
         * enters it. */
        UPD_IND(p->updatee,raise_closure);
        p = p->link;
        continue;

      case SEQ_FRAME:
        /* seq frames are simply skipped over. */
        p = ((StgSeqFrame *)p)->link;
        continue;

      case CATCH_FRAME:
        /* found it! */
        break;

      case STOP_FRAME:
        /* We've stripped the entire stack, the thread is now dead. */
        Sp = CurrentTSO->stack + CurrentTSO->stack_size - 1;
        Sp[0] = R1.w;           /* save the exception */
        Su = (StgUpdateFrame *)(Sp+1);
        CurrentTSO->what_next = ThreadKilled;
        SaveThreadState();      /* inline! */
        R1.w = ThreadFinished;
        JMP_(StgReturn);
      
      default:
        barf("raisezh_fast: weird activation record");
      }
      
      break;

    }
    
    /* Ok, p points to the enclosing CATCH_FRAME.  Pop everything down to
     * and including this frame, update Su, push R1, and enter the handler.
     */
    Su = ((StgCatchFrame *)p)->link; 
    handler = ((StgCatchFrame *)p)->handler;
    
    Sp = (P_)p + sizeofW(StgCatchFrame);

    /* Restore the blocked/unblocked state for asynchronous exceptions
     * at the CATCH_FRAME.  
     *
     * If exceptions were unblocked, arrange that they are unblocked
     * again after executing the handler by pushing an
     * unblockAsyncExceptions_ret stack frame.
     */
    if (! ((StgCatchFrame *)p)->exceptions_blocked) {
      *(--Sp) = (W_)&stg_unblockAsyncExceptionszh_ret_info;
    }

    /* Ensure that async exceptions are blocked when running the handler.
    */
    if (CurrentTSO->blocked_exceptions == NULL) {
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
    }

    /* Enter the handler, passing the exception value and a realworld
     * token as arguments.
     */
    Sp -= 2;
    Sp[0] = R1.w;
    Sp[1] = ARG_TAG(0);
    TICK_ENT_VIA_NODE();
    R1.cl = handler;
    JMP_(GET_ENTRY(R1.cl));

  FE_
}