[project @ 1999-12-01 14:34:38 by simonmar]
/* -----------------------------------------------------------------------------
 * $Id: Exception.hc,v 1.1 1999/12/01 14:34:38 simonmar Exp $
 *
 * (c) The GHC Team, 1998-1999
 *
 * Exception support
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "Exception.h"
#include "Schedule.h"
#include "StgRun.h"
#include "Storage.h"
#include "RtsUtils.h"

/* -----------------------------------------------------------------------------
   Exception Primitives

   A thread can request that asynchronous exceptions not be delivered
   ("blocked") for the duration of an I/O computation.  The primitive

        blockAsyncExceptions# :: IO a -> IO a

   is used for this purpose.  During a blocked section, asynchronous
   exceptions may be unblocked again temporarily:

        unblockAsyncExceptions# :: IO a -> IO a

   Furthermore, asynchronous exceptions are blocked automatically during
   the execution of an exception handler.  Both of these primitives
   leave a continuation on the stack which reverts to the previous
   state (blocked or unblocked) on exit.

   A thread which wants to raise an exception in another thread (using
   killThread#) must block until the target thread is ready to receive
   it.  The action of unblocking exceptions in a thread will release all
   the threads waiting to deliver exceptions to that thread.

   -------------------------------------------------------------------------- */

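/* A Haskell-level sketch (illustration only, not part of this module) of how
 * thin wrappers might sit on top of these primitives.  The wrapper names and
 * the unwrapping of the IO newtype are assumptions; the primop names and
 * types are the ones given above.
 *
 *      block, unblock :: IO a -> IO a
 *      block   (IO io) = IO (blockAsyncExceptions# io)
 *      unblock (IO io) = IO (unblockAsyncExceptions# io)
 *
 * In this reading, "block (... unblock m ...)" runs m with delivery enabled
 * and reverts to the blocked state when m finishes, which is exactly the job
 * of the continuations pushed by the two primitives below.
 */
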
FN_(blockAsyncExceptionszh_fast)
{
  FB_
    /* Args: R1 :: IO a */
    STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );

    /* If we aren't blocking async exceptions yet, start doing so and push a
     * return frame that reverts to the unblocked state afterwards.
     */
    if (CurrentTSO->blocked_exceptions == NULL) {
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
      Sp--;
      Sp[0] = (W_)&unblockAsyncExceptionszh_ret_info;
    }
    Sp--;
    Sp[0] = ARG_TAG(0);
    JMP_(GET_ENTRY(R1.cl));
  FE_
}

/* The return frame pushed by blockAsyncExceptionszh_fast: wake up any
 * threads waiting to throw to us, and revert to the unblocked state.
 */
INFO_TABLE_SRT_BITMAP(unblockAsyncExceptionszh_ret_info, unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(unblockAsyncExceptionszh_ret_entry)
{
  FB_
    ASSERT(CurrentTSO->blocked_exceptions != NULL);
    awakenBlockedQueue(CurrentTSO->blocked_exceptions);
    CurrentTSO->blocked_exceptions = NULL;
    Sp++;
    JMP_(ENTRY_CODE(Sp[0]));
  FE_
}

FN_(unblockAsyncExceptionszh_fast)
{
  FB_
    /* Args: R1 :: IO a */
    STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );

    /* If we are currently blocking async exceptions, wake up the threads
     * waiting to throw to us, stop blocking, and push a return frame that
     * re-blocks afterwards.
     */
    if (CurrentTSO->blocked_exceptions != NULL) {
      awakenBlockedQueue(CurrentTSO->blocked_exceptions);
      CurrentTSO->blocked_exceptions = NULL;
      Sp--;
      Sp[0] = (W_)&blockAsyncExceptionszh_ret_info;
    }
    Sp--;
    Sp[0] = ARG_TAG(0);
    JMP_(GET_ENTRY(R1.cl));
  FE_
}

/* The return frame pushed by unblockAsyncExceptionszh_fast: revert to the
 * blocked state.
 */
INFO_TABLE_SRT_BITMAP(blockAsyncExceptionszh_ret_info, blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
FN_(blockAsyncExceptionszh_ret_entry)
{
  FB_
    ASSERT(CurrentTSO->blocked_exceptions == NULL);
    CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
    Sp++;
    JMP_(ENTRY_CODE(Sp[0]));
  FE_
}


FN_(killThreadzh_fast)
{
  FB_
  /* args: R1.p = TSO to kill, R2.p = Exception */

  /* If the target thread is currently blocking async exceptions, we'll
   * have to block until it's ready to accept them.  (A Haskell-level
   * sketch of this primitive follows the function.)
   */
  if (R1.t->blocked_exceptions != NULL) {

        /* ToDo (SMP): locking if destination thread is currently
         * running...
         */
        CurrentTSO->link = R1.t->blocked_exceptions;
        R1.t->blocked_exceptions = CurrentTSO;

        CurrentTSO->why_blocked = BlockedOnException;
        CurrentTSO->block_info.tso = R1.t;

        BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
  }

  /* Killed threads turn into zombies, which might be garbage
   * collected at a later date.  That's why we don't have to
   * explicitly remove them from any queues they might be on.
   */

  /* We might have killed ourselves, in which case we have to be *very*
   * careful: if the exception killed us, return to the scheduler; if the
   * exception went to a catch frame, just continue from the handler.
   */
  if (R1.t == CurrentTSO) {
        SaveThreadState();      /* inline! */
        STGCALL2(raiseAsync, R1.t, R2.cl);
        if (CurrentTSO->whatNext == ThreadKilled) {
                R1.w = ThreadYielding;
                JMP_(StgReturn);
        }
        LoadThreadState();
        if (CurrentTSO->whatNext == ThreadEnterGHC) {
                R1.w = Sp[0];
                Sp++;
                JMP_(GET_ENTRY(R1.cl));
        } else {
                barf("killThreadzh_fast");
        }
  } else {
        STGCALL2(raiseAsync, R1.t, R2.cl);
  }

  JMP_(ENTRY_CODE(Sp[0]));
  FE_
}

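/* A Haskell-level sketch (illustration only) of the primitive implemented
 * above.  The wrapper name, the ThreadId and Exception types, and the IO
 * unwrapping are assumptions; the primop arguments match the comment at the
 * top of killThreadzh_fast.
 *
 *      killThread :: ThreadId -> Exception -> IO ()
 *      killThread (ThreadId tso) e = IO $ \ s ->
 *          case killThread# tso e s of s' -> (# s', () #)
 *
 * The call does not return until the target thread is accepting asynchronous
 * exceptions; until then the caller sits on the target's blocked_exceptions
 * queue, exactly as the code above arranges.
 */
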
/* -----------------------------------------------------------------------------
   Catch frames
   -------------------------------------------------------------------------- */

#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
   FN_(label);                                  \
   FN_(label)                                   \
   {                                            \
      FB_                                       \
      Su = ((StgCatchFrame *)Sp)->link;         \
      Sp += sizeofW(StgCatchFrame);             \
      JMP_(ret);                                \
      FE_                                       \
   }

CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_entry,ENTRY_CODE(Sp[0]));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_0_entry,RET_VEC(Sp[0],0));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_1_entry,RET_VEC(Sp[0],1));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_2_entry,RET_VEC(Sp[0],2));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_3_entry,RET_VEC(Sp[0],3));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_4_entry,RET_VEC(Sp[0],4));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_5_entry,RET_VEC(Sp[0],5));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_6_entry,RET_VEC(Sp[0],6));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_7_entry,RET_VEC(Sp[0],7));

#ifdef PROFILING
#define CATCH_FRAME_BITMAP 7
#else
#define CATCH_FRAME_BITMAP 3
#endif

/* Catch frames are very similar to update frames, but when entering
 * one we just pop the frame off the stack and perform the correct
 * kind of return to the activation record underneath us on the stack.
 */

VEC_POLY_INFO_TABLE(catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);

/* -----------------------------------------------------------------------------
 * The catch infotable
 *
 * This should be exactly the same as would be generated by this STG code
 *
 * catch = {x,h} \n {} -> catch#{x,h}
 *
 * It is used in deleteThread when reverting blackholes.
 * -------------------------------------------------------------------------- */

INFO_TABLE(catch_info,catch_entry,2,0,FUN,,EF_,0,0);
STGFUN(catch_entry)
{
  FB_
  R2.cl = payloadCPtr(R1.cl,1); /* h */
  R1.cl = payloadCPtr(R1.cl,0); /* x */
  JMP_(catchzh_fast);
  FE_
}

FN_(catchzh_fast)
{
  StgCatchFrame *fp;
  FB_

    /* args: R1 = m, R2 = handler */
    STK_CHK_GEN(sizeofW(StgCatchFrame), R1_PTR | R2_PTR, catchzh_fast, );
    Sp -= sizeofW(StgCatchFrame);
    fp = (StgCatchFrame *)Sp;
    SET_HDR(fp,(StgInfoTable *)&catch_frame_info,CCCS);
    fp -> handler = R2.cl;
    fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
    fp -> link = Su;
    Su = (StgUpdateFrame *)fp;
    TICK_CATCHF_PUSHED();
    TICK_ENT_VIA_NODE();
    JMP_(GET_ENTRY(R1.cl));

  FE_
}

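/* A Haskell-level sketch (illustration only) of how 'catch' might reach
 * catchzh_fast.  The wrapper, the Exception type, and the local unIO helper
 * are assumptions; the argument order (computation first, handler second)
 * matches catch_entry and catchzh_fast above.
 *
 *      catch :: IO a -> (Exception -> IO a) -> IO a
 *      catch (IO m) k = IO (catch# m (\ e -> unIO (k e)))
 *        where unIO (IO m') = m'
 *
 * Note that the frame built by catchzh_fast records whether asynchronous
 * exceptions were blocked at the time of the call, so that state can be
 * restored when an exception reaches the frame (see raisezh_fast below).
 */
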
/* -----------------------------------------------------------------------------
 * The raise infotable
 *
 * This should be exactly the same as would be generated by this STG code
 *
 *   raise = {err} \n {} -> raise#{err}
 *
 * It is used in raisezh_fast to update thunks on the update list.
 * -------------------------------------------------------------------------- */

INFO_TABLE(raise_info,raise_entry,1,0,FUN,,EF_,0,0);
STGFUN(raise_entry)
{
  FB_
  R1.cl = R1.cl->payload[0];
  JMP_(raisezh_fast);
  FE_
}

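/* A Haskell-level sketch (illustration only) of the user-facing view of
 * raise#.  The wrapper name 'throw' and the Exception type are assumptions;
 * since raise# never returns normally, its result can be given any type.
 *
 *      throw :: Exception -> a
 *      throw e = raise# e
 */
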
FN_(raisezh_fast)
{
  StgClosure *handler;
  StgUpdateFrame *p;
  StgClosure *raise_closure;
  FB_
    /* args : R1 = error */

#if defined(PROFILING)

    /* Debugging tool: on raising an exception, show where we are. */

    /* ToDo: currently this is a hack.  Would be much better if
     * the info was only displayed for an *uncaught* exception.
     */
    if (RtsFlags.ProfFlags.showCCSOnException) {
      STGCALL2(print_ccs,stderr,CCCS);
    }

#endif

    p = Su;

    /* This closure represents the expression 'raise# E' where E
     * is the exception raised.  It is used to overwrite all the
     * thunks which are currently under evaluation.
     */
    raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
                                               sizeofW(StgClosure)+1);
    raise_closure->header.info = &raise_info;
    raise_closure->payload[0] = R1.cl;

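    /* For example (illustration only, not code from this module):
     *
     *      let x = 1 `div` 0 in (x, x)
     *
     * The first demand on x raises while x's update frame is on the stack,
     * so x is overwritten with the 'raise# e' closure by the loop below; a
     * second demand on x then re-raises the same exception instead of
     * re-entering the thunk.
     */
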
    while (1) {

      switch (get_itbl(p)->type) {

      case UPDATE_FRAME:
        UPD_IND(p->updatee,raise_closure);
        p = p->link;
        continue;

      case SEQ_FRAME:
        p = ((StgSeqFrame *)p)->link;
        continue;

      case CATCH_FRAME:
        /* found it! */
        break;

      case STOP_FRAME:
        barf("raisezh_fast: STOP_FRAME");

      default:
        barf("raisezh_fast: weird activation record");
      }

      /* only the CATCH_FRAME case reaches this point */
      break;

    }

    /* Ok, p points to the enclosing CATCH_FRAME.  Pop everything down to
     * and including this frame, update Su, push R1, and enter the handler.
     */
    Su = ((StgCatchFrame *)p)->link;
    handler = ((StgCatchFrame *)p)->handler;

    Sp = (P_)p + sizeofW(StgCatchFrame) - 1;

    /* Restore the blocked/unblocked state for asynchronous exceptions
     * at the CATCH_FRAME.
     *
     * If exceptions were unblocked, arrange that they are unblocked
     * again after executing the handler by pushing an
     * unblockAsyncExceptions_ret stack frame.
     */
    if (! ((StgCatchFrame *)p)->exceptions_blocked) {
      *(Sp--) = (W_)&unblockAsyncExceptionszh_ret_info;
    }

    /* Ensure that async exceptions are blocked when running the handler.
     */
    if (CurrentTSO->blocked_exceptions == NULL) {
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
    }

    /* Enter the handler, passing the exception value as an argument.
     */
    *Sp = R1.w;
    TICK_ENT_VIA_NODE();
    R1.cl = handler;
    JMP_(GET_ENTRY(R1.cl));

  FE_
}