[project @ 1999-12-02 09:52:41 by simonmar]
[ghc-hetmet.git] / ghc / rts / Exception.hc
1 /* -----------------------------------------------------------------------------
2  * $Id: Exception.hc,v 1.2 1999/12/02 09:52:41 simonmar Exp $
3  *
4  * (c) The GHC Team, 1998-1999
5  *
6  * Exception support
7  *
8  * ---------------------------------------------------------------------------*/
9
10 #include "Rts.h"
11 #include "Exception.h"
12 #include "Schedule.h"
13 #include "StgRun.h"
14 #include "Storage.h"
15 #include "RtsUtils.h"
16 #include "RtsFlags.h"
17
18 /* -----------------------------------------------------------------------------
19    Exception Primitives
20
21    A thread can request that asynchronous exceptions not be delivered
22    ("blocked") for the duration of an I/O computation.  The primitive
23    
24         blockAsyncExceptions# :: IO a -> IO a
25
26    is used for this purpose.  During a blocked section, asynchronous
27    exceptions may be unblocked again temporarily:
28
29         unblockAsyncExceptions# :: IO a -> IO a
30
31    Furthermore, asynchronous exceptions are blocked automatically during
32    the execution of an exception handler.  Both of these primitives
33    leave a continuation on the stack which reverts to the previous
34    state (blocked or unblocked) on exit.
35
36    A thread which wants to raise an exception in another thread (using
37    killThread#) must block until the target thread is ready to receive
38    it.  The action of unblocking exceptions in a thread will release all
39    the threads waiting to deliver exceptions to that thread.
40
41    -------------------------------------------------------------------------- */
42
/* blockAsyncExceptions# :: IO a -> IO a
 *
 * Start a section in which asynchronous exceptions are blocked for
 * the current thread, then enter the IO action in R1.
 *
 * Blocking state is encoded in CurrentTSO->blocked_exceptions:
 *   NULL          => exceptions not blocked
 *   END_TSO_QUEUE => blocked, no threads waiting to deliver
 *   (a TSO queue) => blocked, with waiting throwers queued
 *
 * If we were previously unblocked, we also push a continuation frame
 * (unblockAsyncExceptionszh_ret) that reverts to the unblocked state
 * when the IO action returns.
 */
FN_(blockAsyncExceptionszh_fast)
{
  FB_
    /* Args: R1 :: IO a */
    /* worst case = 2 words: the ret frame plus the ARG_TAG word */
    STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );

    if (CurrentTSO->blocked_exceptions == NULL) {
      /* transition: unblocked -> blocked (empty waiter queue) */
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
      /* arrange to unblock again on exit from the IO action */
      Sp--;
      Sp[0] = (W_)&unblockAsyncExceptionszh_ret_info;
    }
    /* NOTE(review): ARG_TAG(0) appears to mark a zero-word argument
     * block for the closure entry convention — confirm */
    Sp--;
    Sp[0] = ARG_TAG(0);
    JMP_(GET_ENTRY(R1.cl));
  FE_
}
59
/* Return continuation pushed by blockAsyncExceptionszh_fast (and by
 * raisezh_fast): on return from the protected computation, revert to
 * the unblocked state.  Waking the blocked_exceptions queue releases
 * any threads that blocked in killThread# while trying to deliver an
 * exception to this thread.
 */
INFO_TABLE_SRT_BITMAP(unblockAsyncExceptionszh_ret_info, unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(unblockAsyncExceptionszh_ret_entry)
{
  FB_
    /* we can only be here if exceptions are currently blocked */
    ASSERT(CurrentTSO->blocked_exceptions != NULL);
    /* release all threads waiting to throw to us */
    awakenBlockedQueue(CurrentTSO->blocked_exceptions);
    CurrentTSO->blocked_exceptions = NULL;  /* NULL == unblocked */
    Sp++;                                   /* pop this frame */
    JMP_(ENTRY_CODE(Sp[0]));                /* return to frame below */
  FE_
}
71
/* unblockAsyncExceptions# :: IO a -> IO a
 *
 * Mirror image of blockAsyncExceptionszh_fast: temporarily unblock
 * asynchronous exceptions for the duration of the IO action in R1.
 * If we were previously blocked, wake any queued throwers, mark the
 * thread unblocked, and push a continuation (blockAsyncExceptionszh_ret)
 * that re-blocks on exit.
 */
FN_(unblockAsyncExceptionszh_fast)
{
  FB_
    /* Args: R1 :: IO a */
    /* 2 words: the ret frame plus the ARG_TAG word */
    STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );

    if (CurrentTSO->blocked_exceptions != NULL) {
      /* release all threads waiting to throw to us */
      awakenBlockedQueue(CurrentTSO->blocked_exceptions);
      CurrentTSO->blocked_exceptions = NULL;  /* NULL == unblocked */
      /* arrange to re-block on exit from the IO action */
      Sp--;
      Sp[0] = (W_)&blockAsyncExceptionszh_ret_info;
    }
    /* NOTE(review): ARG_TAG(0) appears to mark a zero-word argument
     * block for the closure entry convention — confirm */
    Sp--;
    Sp[0] = ARG_TAG(0);
    JMP_(GET_ENTRY(R1.cl));
  FE_
}
89
/* Return continuation pushed by unblockAsyncExceptionszh_fast: on
 * return from the protected computation, re-block asynchronous
 * exceptions (empty waiter queue).
 */
INFO_TABLE_SRT_BITMAP(blockAsyncExceptionszh_ret_info, blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(blockAsyncExceptionszh_ret_entry)
{
  FB_
    /* we can only be here if exceptions are currently unblocked */
    ASSERT(CurrentTSO->blocked_exceptions == NULL);
    CurrentTSO->blocked_exceptions = END_TSO_QUEUE;  /* blocked, no waiters */
    Sp++;                                            /* pop this frame */
    JMP_(ENTRY_CODE(Sp[0]));                         /* return to frame below */
  FE_
}
100
101
/* killThread# :: ThreadId# -> Exception -> IO ()
 *
 * Raise an asynchronous exception in another thread (possibly
 * ourselves).  If the target currently has async exceptions blocked,
 * the calling thread enqueues itself on the target's
 * blocked_exceptions queue and blocks until the target unblocks
 * (see unblockAsyncExceptionszh_ret_entry, which wakes that queue).
 */
FN_(killThreadzh_fast)
{
  FB_
  /* args: R1.p = TSO to kill, R2.p = Exception */

  /* If the target thread is currently blocking async exceptions,
   * we'll have to block until it's ready to accept them.
   */
  if (R1.t->blocked_exceptions != NULL) {

        /* ToDo (SMP): locking if destination thread is currently
         * running...
         */
        /* cons ourselves onto the target's queue of waiting throwers */
        CurrentTSO->link = R1.t->blocked_exceptions;
        R1.t->blocked_exceptions = CurrentTSO;

        CurrentTSO->why_blocked = BlockedOnException;
        CurrentTSO->block_info.tso = R1.t;

        /* suspend; killThreadzh_fast is retried when we are woken */
        BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
  }

  /* Killed threads turn into zombies, which might be garbage
   * collected at a later date.  That's why we don't have to
   * explicitly remove them from any queues they might be on.
   */

  /* We might have killed ourselves.  In which case, better be *very*
   * careful.  If the exception killed us, then return to the scheduler.
   * If the exception went to a catch frame, we'll just continue from
   * the handler.
   */
  if (R1.t == CurrentTSO) {
        SaveThreadState();      /* inline! */
        STGCALL2(raiseAsync, R1.t, R2.cl);
        if (CurrentTSO->whatNext == ThreadKilled) {
                /* the exception killed us: hand control back to the
                 * scheduler */
                R1.w = ThreadYielding;
                JMP_(StgReturn);
        }
        /* exception was caught: reload the state raiseAsync left us */
        LoadThreadState();
        if (CurrentTSO->whatNext == ThreadEnterGHC) {
                /* NOTE(review): ThreadEnterGHC appears to mean the top
                 * of stack holds a closure to enter — confirm against
                 * raiseAsync */
                R1.w = Sp[0];
                Sp++;
                JMP_(GET_ENTRY(R1.cl));
        } else {
                barf("killThreadzh_fast");
        }
  } else {
        /* raising in another thread: no state save/reload needed */
        STGCALL2(raiseAsync, R1.t, R2.cl);
  }

  JMP_(ENTRY_CODE(Sp[0]));
  FE_
}
156
157 /* -----------------------------------------------------------------------------
158    Catch frames
159    -------------------------------------------------------------------------- */
160
/* Template for the entry code of a CATCH_FRAME reached normally,
 * i.e. the protected computation finished without raising: pop the
 * frame, restore Su from the frame's link, and perform the given
 * kind of return ('ret' — direct or vectored) to the activation
 * record underneath.
 */
#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
   FN_(label);                                  \
   FN_(label)                                   \
   {                                            \
      FB_                                       \
      Su = ((StgCatchFrame *)Sp)->link;         \
      Sp += sizeofW(StgCatchFrame);             \
      JMP_(ret);                                \
      FE_                                       \
   }
171
/* One direct-return entry point plus eight vectored-return entry
 * points for CATCH_FRAME, all generated from the template above. */
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_entry,ENTRY_CODE(Sp[0]));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_0_entry,RET_VEC(Sp[0],0));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_1_entry,RET_VEC(Sp[0],1));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_2_entry,RET_VEC(Sp[0],2));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_3_entry,RET_VEC(Sp[0],3));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_4_entry,RET_VEC(Sp[0],4));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_5_entry,RET_VEC(Sp[0],5));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_6_entry,RET_VEC(Sp[0],6));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_7_entry,RET_VEC(Sp[0],7));
181
/* Layout bitmap for the frame's fields.  NOTE(review): profiling
 * builds use a wider bitmap (7 vs 3), presumably because the header
 * carries an extra cost-centre word — confirm against StgCatchFrame
 * in the headers. */
#ifdef PROFILING
#define CATCH_FRAME_BITMAP 7
#else
#define CATCH_FRAME_BITMAP 3
#endif

/* Catch frames are very similar to update frames, but when entering
 * one we just pop the frame off the stack and perform the correct
 * kind of return to the activation record underneath us on the stack.
 */

VEC_POLY_INFO_TABLE(catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);
194
195 /* -----------------------------------------------------------------------------
196  * The catch infotable
197  *
198  * This should be exactly the same as would be generated by this STG code
199  *
200  * catch = {x,h} \n {} -> catch#{x,h}
201  *
202  * It is used in deleteThread when reverting blackholes.
203  * -------------------------------------------------------------------------- */
204
/* Entry code for the 'catch' closure: unpack its two free variables
 * (the action and the handler) into registers and tail-call the
 * catch# primitive. */
INFO_TABLE(catch_info,catch_entry,2,0,FUN,,EF_,0,0);
STGFUN(catch_entry)
{
  FB_
  R2.cl = payloadCPtr(R1.cl,1); /* h  (the handler)   */
  R1.cl = payloadCPtr(R1.cl,0); /* x  (the IO action) */
  JMP_(catchzh_fast);
  FE_
}
214
/* catch# :: IO a -> (Exception -> IO a) -> IO a
 *
 * Push a CATCH_FRAME recording the handler and the current
 * async-exception blocking state, link it into the update-frame
 * chain via Su, and enter the IO action in R1.  The frame is either
 * popped normally (catch_frame_entry above) or found by
 * raisezh_fast during unwinding.
 */
FN_(catchzh_fast)
{
  StgCatchFrame *fp;
  FB_

    /* args: R1 = m, R2 = handler */
    STK_CHK_GEN(sizeofW(StgCatchFrame), R1_PTR | R2_PTR, catchzh_fast, );
    Sp -= sizeofW(StgCatchFrame);
    fp = (StgCatchFrame *)Sp;
    SET_HDR(fp,(StgInfoTable *)&catch_frame_info,CCCS);
    fp -> handler = R2.cl;
    /* record the blocking state so raisezh_fast can restore it when
     * it unwinds to this frame */
    fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
    fp -> link = Su;
    Su = (StgUpdateFrame *)fp;
    TICK_CATCHF_PUSHED();
    TICK_ENT_VIA_NODE();
    JMP_(GET_ENTRY(R1.cl));
    
  FE_
}      
235
236 /* -----------------------------------------------------------------------------
237  * The raise infotable
238  * 
239  * This should be exactly the same as would be generated by this STG code
240  *
241  *   raise = {err} \n {} -> raise#{err}
242  *
243  * It is used in raisezh_fast to update thunks on the update list
244  * -------------------------------------------------------------------------- */
245
/* Entry code for the 'raise' closure built in raisezh_fast: fetch
 * the stored exception from the payload and re-raise it.  Entering a
 * thunk that was overwritten with this closure therefore re-raises
 * the original exception. */
INFO_TABLE(raise_info,raise_entry,1,0,FUN,,EF_,0,0);
STGFUN(raise_entry)
{
  FB_
  R1.cl = R1.cl->payload[0];  /* the saved exception */
  JMP_(raisezh_fast);
  FE_
}
254
/* raise# :: Exception -> a
 *
 * Walk the update-frame chain (Su) looking for the innermost
 * CATCH_FRAME.  Along the way, overwrite each update frame's updatee
 * with a 'raise# E' closure (see raise_info above), since those
 * thunks will never be evaluated normally.  Then unwind the stack to
 * the catch frame, restore the async-exception blocking state it
 * recorded, block exceptions for the handler, and enter the handler
 * with the exception value as its argument.
 */
FN_(raisezh_fast)
{
  StgClosure *handler;
  StgUpdateFrame *p;
  StgClosure *raise_closure;
  FB_
    /* args : R1 = error */


#if defined(PROFILING)

    /* Debugging tool: on raising an exception, show where we are. */

    /* ToDo: currently this is a hack.  Would be much better if
     * the info was only displayed for an *uncaught* exception.
     */
    if (RtsFlags.ProfFlags.showCCSOnException) {
      STGCALL2(print_ccs,stderr,CCCS);
    }

#endif

    p = Su;

    /* This closure represents the expression 'raise# E' where E
     * is the exception raised.  It is used to overwrite all the
     * thunks which are currently under evaluation.
     */
    raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
                                               sizeofW(StgClosure)+1);
    raise_closure->header.info = &raise_info;
    raise_closure->payload[0] = R1.cl;

    while (1) {

      switch (get_itbl(p)->type) {

      case UPDATE_FRAME:
        /* this thunk will never be updated normally; make entering it
         * re-raise the exception */
        UPD_IND(p->updatee,raise_closure);
        p = p->link;
        continue;

      case SEQ_FRAME:
        /* nothing to update; just keep walking */
        p = ((StgSeqFrame *)p)->link;
        continue;

      case CATCH_FRAME:
        /* found it!  (this break leaves the switch; the break below
         * then leaves the while loop) */
        break;

      case STOP_FRAME:
        /* bottom of the stack: the exception is uncaught */
        barf("raisezh_fast: STOP_FRAME");

      default:
        barf("raisezh_fast: weird activation record");
      }
      
      break;

    }
    
    /* Ok, p points to the enclosing CATCH_FRAME.  Pop everything down to
     * and including this frame, update Su, push R1, and enter the handler.
     */
    Su = ((StgCatchFrame *)p)->link; 
    handler = ((StgCatchFrame *)p)->handler;
    
    /* pop the frame but leave one word on top for the exception value
     * (pushed just before entering the handler, below) */
    Sp = (P_)p + sizeofW(StgCatchFrame) - 1;

    /* Restore the blocked/unblocked state for asynchronous exceptions
     * at the CATCH_FRAME.  
     *
     * If exceptions were unblocked, arrange that they are unblocked
     * again after executing the handler by pushing an
     * unblockAsyncExceptions_ret stack frame.
     */
    if (! ((StgCatchFrame *)p)->exceptions_blocked) {
      *(Sp--) = (W_)&unblockAsyncExceptionszh_ret_info;
    }

    /* Ensure that async exceptions are blocked when running the handler.
    */
    if (CurrentTSO->blocked_exceptions == NULL) {
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
    }

    /* Enter the handler, passing the exception value as an argument.
     */
    *Sp = R1.w;
    TICK_ENT_VIA_NODE();
    R1.cl = handler;
    JMP_(GET_ENTRY(R1.cl));

  FE_
}