[project @ 2000-01-22 18:00:03 by simonmar]
[ghc-hetmet.git] / ghc / rts / Exception.hc
1 /* -----------------------------------------------------------------------------
2  * $Id: Exception.hc,v 1.5 2000/01/22 18:00:03 simonmar Exp $
3  *
4  * (c) The GHC Team, 1998-1999
5  *
6  * Exception support
7  *
8  * ---------------------------------------------------------------------------*/
9
10 #include "Rts.h"
11 #include "Exception.h"
12 #include "Schedule.h"
13 #include "StgRun.h"
14 #include "Storage.h"
15 #include "RtsUtils.h"
16 #include "RtsFlags.h"
17 #if defined(PAR)
18 # include "FetchMe.h"
19 #endif
20
21 /* -----------------------------------------------------------------------------
22    Exception Primitives
23
24    A thread can request that asynchronous exceptions not be delivered
25    ("blocked") for the duration of an I/O computation.  The primitive
26    
27         blockAsyncExceptions# :: IO a -> IO a
28
29    is used for this purpose.  During a blocked section, asynchronous
30    exceptions may be unblocked again temporarily:
31
32         unblockAsyncExceptions# :: IO a -> IO a
33
34    Furthermore, asynchronous exceptions are blocked automatically during
35    the execution of an exception handler.  Both of these primitives
36    leave a continuation on the stack which reverts to the previous
37    state (blocked or unblocked) on exit.
38
39    A thread which wants to raise an exception in another thread (using
40    killThread#) must block until the target thread is ready to receive
41    it.  The action of unblocking exceptions in a thread will release all
42    the threads waiting to deliver exceptions to that thread.
43
44    -------------------------------------------------------------------------- */
45
46 FN_(blockAsyncExceptionszh_fast)
47 {
48   FB_
49     /* Args: R1 :: IO a */
50     STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );
51
52     if (CurrentTSO->blocked_exceptions == NULL) {
53       CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
54       /* avoid growing the stack unnecessarily */
55       if (Sp[0] != (W_)&blockAsyncExceptionszh_ret_info) {
56         Sp--;
57         Sp[0] = (W_)&blockAsyncExceptionszh_ret_info;
58       }
59     }
60     Sp--;
61     Sp[0] = ARG_TAG(0);
62     JMP_(GET_ENTRY(R1.cl));
63   FE_
64 }
65
INFO_TABLE_SRT_BITMAP(unblockAsyncExceptionszh_ret_info, unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(unblockAsyncExceptionszh_ret_entry)
{
  FB_
    /* Return continuation that reverts the TSO to the "unblocked"
     * state: wake every thread queued up waiting to throw us an
     * exception, mark exceptions unblocked (blocked_exceptions ==
     * NULL), then pop this frame and return to the one underneath.
     */
    ASSERT(CurrentTSO->blocked_exceptions != NULL);
#if defined(GRAN)
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         CurrentTSO->block_info.closure);
#elif defined(PAR)
      // is CurrentTSO->block_info.closure always set to the node
      // holding the blocking queue !? -- HWL
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         CurrentTSO->block_info.closure);
#else
    awakenBlockedQueue(CurrentTSO->blocked_exceptions);
#endif
    CurrentTSO->blocked_exceptions = NULL;
    Sp++;
    JMP_(ENTRY_CODE(Sp[0]));
  FE_
}
87
88 FN_(unblockAsyncExceptionszh_fast)
89 {
90   FB_
91     /* Args: R1 :: IO a */
92     STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );
93
94     if (CurrentTSO->blocked_exceptions != NULL) {
95 #if defined(GRAN)
96       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
97                          CurrentTSO->block_info.closure);
98 #elif defined(PAR)
99       // is CurrentTSO->block_info.closure always set to the node
100       // holding the blocking queue !? -- HWL
101       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
102                          CurrentTSO->block_info.closure);
103 #else
104       awakenBlockedQueue(CurrentTSO->blocked_exceptions);
105 #endif
106       CurrentTSO->blocked_exceptions = NULL;
107
108       /* avoid growing the stack unnecessarily */
109       if (Sp[0] != (W_)&blockAsyncExceptionszh_ret_info) {
110         Sp--;   
111         Sp[0] = (W_)&blockAsyncExceptionszh_ret_info;
112       }
113     }
114     Sp--;
115     Sp[0] = ARG_TAG(0);
116     JMP_(GET_ENTRY(R1.cl));
117   FE_
118 }
119
INFO_TABLE_SRT_BITMAP(blockAsyncExceptionszh_ret_info, blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(blockAsyncExceptionszh_ret_entry)
{
  FB_
    /* Return continuation that reverts the TSO to the "blocked"
     * state (END_TSO_QUEUE == blocked, no waiting throwers yet),
     * then pops itself and returns to the frame underneath.
     */
    ASSERT(CurrentTSO->blocked_exceptions == NULL);
    CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
    Sp++;
    JMP_(ENTRY_CODE(Sp[0]));
  FE_
}
130
131
FN_(killThreadzh_fast)
{
  FB_
  /* killThread# -- raise an asynchronous exception in another thread.
   *
   * args: R1.p = TSO to kill, R2.p = Exception
   *
   * If the target has async exceptions blocked, the current thread
   * queues itself on the target's blocked_exceptions list and blocks
   * until the target unblocks them; otherwise the exception is
   * delivered immediately via raiseAsync.
   */

  /* This thread may have been relocated.
   * (see Schedule.c:threadStackOverflow)
   */
  while (R1.t->whatNext == ThreadRelocated) {
    R1.t = R1.t->link;
  }

  /* If the target thread is currently blocking async exceptions,
   * we'll have to block until it's ready to accept them.
   */
  if (R1.t->blocked_exceptions != NULL) {

        /* ToDo (SMP): locking if destination thread is currently
         * running...
         */
        /* add ourselves to the front of the target's queue of
         * threads waiting to deliver an exception */
        CurrentTSO->link = R1.t->blocked_exceptions;
        R1.t->blocked_exceptions = CurrentTSO;

        CurrentTSO->why_blocked = BlockedOnException;
        CurrentTSO->block_info.tso = R1.t;

        /* return to the scheduler; when woken we re-enter
         * killThreadzh_fast from the top (R1 and R2 are live ptrs) */
        BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
  }

  /* Killed threads turn into zombies, which might be garbage
   * collected at a later date.  That's why we don't have to
   * explicitly remove them from any queues they might be on.
   */

  /* We might have killed ourselves.  In which case, better be *very*
   * careful.  If the exception killed us, then return to the scheduler.
   * If the exception went to a catch frame, we'll just continue from
   * the handler.
   */
  if (R1.t == CurrentTSO) {
        /* raiseAsync rewrites our own stack, so flush the STG
         * registers into the TSO first, and reload them afterwards */
        SaveThreadState();      /* inline! */
        STGCALL2(raiseAsync, R1.t, R2.cl);
        if (CurrentTSO->whatNext == ThreadKilled) {
                R1.w = ThreadYielding;
                JMP_(StgReturn);
        }
        LoadThreadState();
        if (CurrentTSO->whatNext == ThreadEnterGHC) {
                /* the exception was caught: resume by entering the
                 * closure left on top of our (rewritten) stack */
                R1.w = Sp[0];
                Sp++;
                JMP_(GET_ENTRY(R1.cl));
        } else {
                barf("killThreadzh_fast");
        }
  } else {
        /* delivering to another thread: its state is already saved */
        STGCALL2(raiseAsync, R1.t, R2.cl);
  }

  JMP_(ENTRY_CODE(Sp[0]));
  FE_
}
193
194 /* -----------------------------------------------------------------------------
195    Catch frames
196    -------------------------------------------------------------------------- */
197
/* Entering a catch frame normally (no exception raised) just pops the
 * frame, unlinks it from the Su chain, and performs the same kind of
 * return -- direct or vectored -- that the frame was entered with.
 * This template generates one such entry point; `ret' is the return
 * instruction appropriate for that convention.
 */
#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
   FN_(label);                                  \
   FN_(label)                                   \
   {                                            \
      FB_                                       \
      Su = ((StgCatchFrame *)Sp)->link;         \
      Sp += sizeofW(StgCatchFrame);             \
      JMP_(ret);                                \
      FE_                                       \
   }

/* one direct-return entry, plus one entry per vectored-return slot */
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_entry,ENTRY_CODE(Sp[0]));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_0_entry,RET_VEC(Sp[0],0));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_1_entry,RET_VEC(Sp[0],1));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_2_entry,RET_VEC(Sp[0],2));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_3_entry,RET_VEC(Sp[0],3));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_4_entry,RET_VEC(Sp[0],4));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_5_entry,RET_VEC(Sp[0],5));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_6_entry,RET_VEC(Sp[0],6));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_7_entry,RET_VEC(Sp[0],7));
218
/* GC layout bitmap for the catch frame's payload.
 * NOTE(review): presumably profiling adds one extra (cost-centre)
 * word to the frame, hence the wider bitmap -- confirm against the
 * StgCatchFrame definition in the headers.
 */
#ifdef PROFILING
#define CATCH_FRAME_BITMAP 7
#else
#define CATCH_FRAME_BITMAP 3
#endif

/* Catch frames are very similar to update frames, but when entering
 * one we just pop the frame off the stack and perform the correct
 * kind of return to the activation record underneath us on the stack.
 */

VEC_POLY_INFO_TABLE(catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);
231
232 /* -----------------------------------------------------------------------------
233  * The catch infotable
234  *
235  * This should be exactly the same as would be generated by this STG code
236  *
237  * catch = {x,h} \n {} -> catch#{x,h}
238  *
239  * It is used in deleteThread when reverting blackholes.
240  * -------------------------------------------------------------------------- */
241
INFO_TABLE(catch_info,catch_entry,2,0,FUN,,EF_,0,0);
STGFUN(catch_entry)
{
  FB_
  /* Unpack the two free variables of the `catch' closure (the
   * computation and its handler) and tail-call the catch# primitive.
   */
  R2.cl = payloadCPtr(R1.cl,1); /* h */
  R1.cl = payloadCPtr(R1.cl,0); /* x */
  JMP_(catchzh_fast);
  FE_
}
251
FN_(catchzh_fast)
{
  StgCatchFrame *fp;
  FB_

    /* catch# -- install an exception handler.
     *
     * args: R1 = m, R2 = handler
     *
     * Pushes a CATCH_FRAME recording the handler and the current
     * async-exception state, links it into the update-frame chain
     * (Su), then tail-calls the computation m.
     */
    STK_CHK_GEN(sizeofW(StgCatchFrame), R1_PTR | R2_PTR, catchzh_fast, );
    Sp -= sizeofW(StgCatchFrame);
    fp = (StgCatchFrame *)Sp;
    SET_HDR(fp,(StgInfoTable *)&catch_frame_info,CCCS);
    fp -> handler = R2.cl;
    /* remember whether async exceptions were blocked here, so
     * raisezh_fast can restore that state when unwinding to this frame */
    fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
    fp -> link = Su;
    Su = (StgUpdateFrame *)fp;
    TICK_CATCHF_PUSHED();
    TICK_ENT_VIA_NODE();
    JMP_(GET_ENTRY(R1.cl));
    
  FE_
}      
272
273 /* -----------------------------------------------------------------------------
274  * The raise infotable
275  * 
276  * This should be exactly the same as would be generated by this STG code
277  *
278  *   raise = {err} \n {} -> raise#{err}
279  *
280  * It is used in raisezh_fast to update thunks on the update list
281  * -------------------------------------------------------------------------- */
282
283 INFO_TABLE(raise_info,raise_entry,1,0,FUN,,EF_,0,0);
284 STGFUN(raise_entry)
285 {
286   FB_
287   R1.cl = R1.cl->payload[0];
288   JMP_(raisezh_fast);
289   FE_
290 }
291
FN_(raisezh_fast)
{
  StgClosure *handler;
  StgUpdateFrame *p;
  StgClosure *raise_closure;
  FB_
    /* raise# -- raise an exception.
     *
     * args : R1 = error
     *
     * Unwinds the Su chain to the innermost CATCH_FRAME, updating
     * every thunk under evaluation on the way with a closure that
     * re-raises the exception, then enters the handler with the
     * exception value as argument.
     */

#if defined(PROFILING)

    /* Debugging tool: on raising an  exception, show where we are. */

    /* ToDo: currently this is a hack.  Would be much better if
     * the info was only displayed for an *uncaught* exception.
     */
    if (RtsFlags.ProfFlags.showCCSOnException) {
      STGCALL2(print_ccs,stderr,CCCS);
    }

#endif

    /* start from the innermost update/seq/catch frame */
    p = Su;

    /* This closure represents the expression 'raise# E' where E
     * is the exception raised.  It is used to overwrite all the
     * thunks which are currently under evaluation.
     */
    raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
                                               sizeofW(StgClosure)+1);  /* header + 1 payload word */
    raise_closure->header.info = &raise_info;
    raise_closure->payload[0] = R1.cl;

    /* walk the frame chain; each update frame's updatee becomes an
     * indirection to the raise closure */
    while (1) {

      switch (get_itbl(p)->type) {

      case UPDATE_FRAME:
        UPD_IND(p->updatee,raise_closure);
        p = p->link;
        continue;

      case SEQ_FRAME:
        p = ((StgSeqFrame *)p)->link;
        continue;

      case CATCH_FRAME:
        /* found it! */
        break;

      case STOP_FRAME:
        /* bottom of the stack: the exception is uncaught */
        barf("raisezh_fast: STOP_FRAME");

      default:
        barf("raisezh_fast: weird activation record");
      }
      
      break;

    }
    
    /* Ok, p points to the enclosing CATCH_FRAME.  Pop everything down to
     * and including this frame, update Su, push R1, and enter the handler.
     */
    Su = ((StgCatchFrame *)p)->link; 
    handler = ((StgCatchFrame *)p)->handler;
    
    /* reuse the frame's last word as the slot for the exception arg */
    Sp = (P_)p + sizeofW(StgCatchFrame) - 1;

    /* Restore the blocked/unblocked state for asynchronous exceptions
     * at the CATCH_FRAME.  
     *
     * If exceptions were unblocked, arrange that they are unblocked
     * again after executing the handler by pushing an
     * unblockAsyncExceptions_ret stack frame.
     */
    if (! ((StgCatchFrame *)p)->exceptions_blocked) {
      *(Sp--) = (W_)&unblockAsyncExceptionszh_ret_info;
    }

    /* Ensure that async exceptions are blocked when running the handler.
    */
    if (CurrentTSO->blocked_exceptions == NULL) {
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
    }

    /* Enter the handler, passing the exception value as an argument.
     */
    *Sp = R1.w;
    TICK_ENT_VIA_NODE();
    R1.cl = handler;
    JMP_(GET_ENTRY(R1.cl));

  FE_
}