[project @ 2000-01-30 10:25:27 by simonmar]
[ghc-hetmet.git] / ghc / rts / Exception.hc
1 /* -----------------------------------------------------------------------------
2  * $Id: Exception.hc,v 1.6 2000/01/30 10:25:28 simonmar Exp $
3  *
4  * (c) The GHC Team, 1998-1999
5  *
6  * Exception support
7  *
8  * ---------------------------------------------------------------------------*/
9
10 #include "Rts.h"
11 #include "Exception.h"
12 #include "Schedule.h"
13 #include "StgRun.h"
14 #include "Storage.h"
15 #include "RtsUtils.h"
16 #include "RtsFlags.h"
17 #if defined(PAR)
18 # include "FetchMe.h"
19 #endif
20
21 /* -----------------------------------------------------------------------------
22    Exception Primitives
23
24    A thread can request that asynchronous exceptions not be delivered
25    ("blocked") for the duration of an I/O computation.  The primitive
26    
27         blockAsyncExceptions# :: IO a -> IO a
28
29    is used for this purpose.  During a blocked section, asynchronous
30    exceptions may be unblocked again temporarily:
31
32         unblockAsyncExceptions# :: IO a -> IO a
33
34    Furthermore, asynchronous exceptions are blocked automatically during
35    the execution of an exception handler.  Both of these primitives
36    leave a continuation on the stack which reverts to the previous
37    state (blocked or unblocked) on exit.
38
39    A thread which wants to raise an exception in another thread (using
40    killThread#) must block until the target thread is ready to receive
41    it.  The action of unblocking exceptions in a thread will release all
42    the threads waiting to deliver exceptions to that thread.
43
44    -------------------------------------------------------------------------- */
45
/* blockAsyncExceptions# :: IO a -> IO a
 *
 * Marks the current TSO as blocking asynchronous exceptions and then
 * tail-calls the IO action in R1, leaving a continuation frame that
 * reverts to the unblocked state when the action returns.
 *
 * Encoding of the blocked/unblocked state (per the code below):
 *   blocked_exceptions == NULL          -> exceptions unblocked
 *   blocked_exceptions == END_TSO_QUEUE -> blocked, no waiters queued
 *   otherwise                           -> blocked, queue of waiting TSOs
 */
46 FN_(blockAsyncExceptionszh_fast)
47 {
48   FB_
49     /* Args: R1 :: IO a */
50     STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );
51
    /* Only act if exceptions are currently unblocked; if already blocked
     * this primitive is a no-op apart from entering the action. */
52     if (CurrentTSO->blocked_exceptions == NULL) {
53       CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
54       /* avoid growing the stack unnecessarily */
      /* If the frame on top would re-block on return, popping it has the
       * same net effect as pushing an unblock frame on top of it
       * (unblock-then-block == stay blocked). */
55       if (Sp[0] == (W_)&blockAsyncExceptionszh_ret_info) {
56         Sp++;
57       } else {
58         Sp--;
59         Sp[0] = (W_)&unblockAsyncExceptionszh_ret_info;
60       }
61     }
    /* Push the realworld token and enter the IO action in R1. */
62     Sp--;
63     Sp[0] = ARG_TAG(0);
64     JMP_(GET_ENTRY(R1.cl));
65   FE_
66 }
67
/* Return-continuation frame: reverts the current TSO to the UNBLOCKED
 * state on the way out of a blocked section.  Any threads that queued up
 * (via killThread#) to deliver an exception to this TSO are woken here.
 */
68 INFO_TABLE_SRT_BITMAP(unblockAsyncExceptionszh_ret_info, unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
69 FN_(unblockAsyncExceptionszh_ret_entry)
70 {
71   FB_
    /* We must currently be in the blocked state (non-NULL queue). */
72     ASSERT(CurrentTSO->blocked_exceptions != NULL);
73 #if defined(GRAN)
74       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
75                          CurrentTSO->block_info.closure);
76 #elif defined(PAR)
77       // is CurrentTSO->block_info.closure always set to the node
78       // holding the blocking queue !? -- HWL
79       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
80                          CurrentTSO->block_info.closure);
81 #else
82     awakenBlockedQueue(CurrentTSO->blocked_exceptions);
83 #endif
84     CurrentTSO->blocked_exceptions = NULL;  /* NULL == unblocked */
    /* Pop this frame and return to the one underneath. */
85     Sp++;
86     JMP_(ENTRY_CODE(Sp[0]));
87   FE_
88 }
89
/* unblockAsyncExceptions# :: IO a -> IO a
 *
 * Dual of blockAsyncExceptions#: temporarily unblocks asynchronous
 * exceptions for the duration of the IO action in R1, waking any
 * threads waiting to deliver an exception, and leaves a continuation
 * that re-blocks on exit.
 */
90 FN_(unblockAsyncExceptionszh_fast)
91 {
92   FB_
93     /* Args: R1 :: IO a */
94     STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );
95
    /* Only act if exceptions are currently blocked (non-NULL queue). */
96     if (CurrentTSO->blocked_exceptions != NULL) {
97 #if defined(GRAN)
98       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
99                          CurrentTSO->block_info.closure);
100 #elif defined(PAR)
101       // is CurrentTSO->block_info.closure always set to the node
102       // holding the blocking queue !? -- HWL
103       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
104                          CurrentTSO->block_info.closure);
105 #else
106       awakenBlockedQueue(CurrentTSO->blocked_exceptions);
107 #endif
108       CurrentTSO->blocked_exceptions = NULL;  /* NULL == unblocked */
109
110       /* avoid growing the stack unnecessarily */
      /* Mirror image of the trick in blockAsyncExceptionszh_fast: if the
       * top frame would re-unblock on return, pop it instead of pushing
       * a re-block frame on top. */
111       if (Sp[0] == (W_)&unblockAsyncExceptionszh_ret_info) {
112         Sp++;
113       } else {
114         Sp--;   
115         Sp[0] = (W_)&blockAsyncExceptionszh_ret_info;
116       }
117     }
    /* Push the realworld token and enter the IO action in R1. */
118     Sp--;
119     Sp[0] = ARG_TAG(0);
120     JMP_(GET_ENTRY(R1.cl));
121   FE_
122 }
123
/* Return-continuation frame: reverts the current TSO to the BLOCKED
 * state on the way out of an unblocked section.  No waiters can exist
 * here, because the state being reverted from is "unblocked" (NULL).
 */
124 INFO_TABLE_SRT_BITMAP(blockAsyncExceptionszh_ret_info, blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
125 FN_(blockAsyncExceptionszh_ret_entry)
126 {
127   FB_
    /* We must currently be unblocked. */
128     ASSERT(CurrentTSO->blocked_exceptions == NULL);
129     CurrentTSO->blocked_exceptions = END_TSO_QUEUE;  /* blocked, empty queue */
    /* Pop this frame and return to the one underneath. */
130     Sp++;
131     JMP_(ENTRY_CODE(Sp[0]));
132   FE_
133 }
134
135
/* killThread# :: ThreadId# -> Exception -> IO ()
 *
 * Delivers an asynchronous exception (R2) to the target TSO (R1).  If
 * the target has async exceptions blocked, the calling thread enqueues
 * itself on the target's blocked_exceptions list and blocks; the BLOCK
 * macro names killThreadzh_fast, so delivery is presumably retried from
 * the top when this thread is woken -- TODO confirm BLOCK semantics.
 */
136 FN_(killThreadzh_fast)
137 {
138   FB_
139   /* args: R1.p = TSO to kill, R2.p = Exception */
140
141   /* This thread may have been relocated.
142    * (see Schedule.c:threadStackOverflow)
143    */
  /* Follow the ThreadRelocated forwarding chain to the live TSO. */
144   while (R1.t->whatNext == ThreadRelocated) {
145     R1.t = R1.t->link;
146   }
147
148   /* If the target thread is currently blocking async exceptions,
149    * we'll have to block until it's ready to accept them.
150    */
151   if (R1.t->blocked_exceptions != NULL) {
152
153         /* ToDo (SMP): locking if destination thread is currently
154          * running...
155          */
        /* Push ourselves onto the front of the target's waiter queue. */
156         CurrentTSO->link = R1.t->blocked_exceptions;
157         R1.t->blocked_exceptions = CurrentTSO;
158
159         CurrentTSO->why_blocked = BlockedOnException;
160         CurrentTSO->block_info.tso = R1.t;
161
        /* Suspend; R1/R2 are live pointers across the block. */
162         BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
163   }
164
165   /* Killed threads turn into zombies, which might be garbage
166    * collected at a later date.  That's why we don't have to
167    * explicitly remove them from any queues they might be on.
168    */
169
170   /* We might have killed ourselves.  In which case, better be *very*
171    * careful.  If the exception killed us, then return to the scheduler.
172    * If the exception went to a catch frame, we'll just continue from
173    * the handler.
174    */
175   if (R1.t == CurrentTSO) {
        /* raiseAsync rewrites our own stack, so flush the STG registers
         * to the TSO first and reload them afterwards. */
176         SaveThreadState();      /* inline! */
177         STGCALL2(raiseAsync, R1.t, R2.cl);
178         if (CurrentTSO->whatNext == ThreadKilled) {
                /* The exception killed us: hand control back to the
                 * scheduler with a yield return code. */
179                 R1.w = ThreadYielding;
180                 JMP_(StgReturn);
181         }
182         LoadThreadState();
183         if (CurrentTSO->whatNext == ThreadEnterGHC) {
                /* raiseAsync left the closure to enter (presumably the
                 * handler application) on top of the stack. */
184                 R1.w = Sp[0];
185                 Sp++;
186                 JMP_(GET_ENTRY(R1.cl));
187         } else {
188                 barf("killThreadzh_fast");
189         }
190   } else {
        /* Raising in another thread: no register save/restore needed,
         * as only the target's stack is rewritten. */
191         STGCALL2(raiseAsync, R1.t, R2.cl);
192   }
193
  /* Return () to our own continuation. */
194   JMP_(ENTRY_CODE(Sp[0]));
195   FE_
196 }
197
198 /* -----------------------------------------------------------------------------
199    Catch frames
200    -------------------------------------------------------------------------- */
201
/* Template for the return entry code of a CATCH_FRAME.  Returning to a
 * catch frame means no exception was raised: unlink the frame from the
 * update-frame chain (Su), pop it, and perform the given kind of return
 * ('ret') to the activation record underneath.
 */
202 #define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
203    FN_(label);                                  \
204    FN_(label)                                   \
205    {                                            \
206       FB_                                       \
207       Su = ((StgCatchFrame *)Sp)->link;         \
208       Sp += sizeofW(StgCatchFrame);             \
209       JMP_(ret);                                \
210       FE_                                       \
211    }
212
/* One direct-return entry plus the eight vectored-return entries. */
213 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_entry,ENTRY_CODE(Sp[0]));
214 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_0_entry,RET_VEC(Sp[0],0));
215 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_1_entry,RET_VEC(Sp[0],1));
216 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_2_entry,RET_VEC(Sp[0],2));
217 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_3_entry,RET_VEC(Sp[0],3));
218 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_4_entry,RET_VEC(Sp[0],4));
219 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_5_entry,RET_VEC(Sp[0],5));
220 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_6_entry,RET_VEC(Sp[0],6));
221 CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_7_entry,RET_VEC(Sp[0],7));
222
/* Stack-layout bitmap for the catch frame.  The profiling build uses a
 * larger bitmap -- presumably to cover the extra profiling header word
 * (cost centre) in the frame; confirm against StgCatchFrame's layout.
 */
223 #ifdef PROFILING
224 #define CATCH_FRAME_BITMAP 7
225 #else
226 #define CATCH_FRAME_BITMAP 3
227 #endif
228
229 /* Catch frames are very similar to update frames, but when entering
230  * one we just pop the frame off the stack and perform the correct
231  * kind of return to the activation record underneath us on the stack.
232  */
233
/* Emits the polymorphic (direct + vectored) info table for catch_frame,
 * binding the entry code instantiated from the template above. */
234 VEC_POLY_INFO_TABLE(catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);
235
236 /* -----------------------------------------------------------------------------
237  * The catch infotable
238  *
239  * This should be exactly the same as would be generated by this STG code
240  *
241  * catch = {x,h} \n {} -> catch#{x,h}
242  *
243  * It is used in deleteThread when reverting blackholes.
244  * -------------------------------------------------------------------------- */
245
/* Entry code for the 'catch' closure (see header comment above): unpacks
 * the two payload fields of the closure in R1 into the argument registers
 * expected by catchzh_fast and tail-calls it.
 */
246 INFO_TABLE(catch_info,catch_entry,2,0,FUN,,EF_,0,0);
247 STGFUN(catch_entry)
248 {
249   FB_
250   R2.cl = payloadCPtr(R1.cl,1); /* h */
251   R1.cl = payloadCPtr(R1.cl,0); /* x */
252   JMP_(catchzh_fast);
253   FE_
254 }
255
/* catch# :: IO a -> (Exception -> IO a) -> IO a
 *
 * Pushes a CATCH_FRAME recording the handler and the current async-
 * exception blocking state, links it into the update-frame chain (Su)
 * so raisezh_fast can find it, then enters the protected action.
 */
256 FN_(catchzh_fast)
257 {
258   StgCatchFrame *fp;
259   FB_
260
261     /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
262     STK_CHK_GEN(sizeofW(StgCatchFrame) + 1, R1_PTR | R2_PTR, catchzh_fast, );
263   
264     /* Set up the catch frame */
265     Sp -= sizeofW(StgCatchFrame);
266     fp = (StgCatchFrame *)Sp;
267     SET_HDR(fp,(StgInfoTable *)&catch_frame_info,CCCS);
268     fp -> handler = R2.cl;
    /* Remember the blocking state so raisezh_fast can restore it when it
     * unwinds to this frame. */
269     fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
270     fp -> link = Su;
271     Su = (StgUpdateFrame *)fp;
272     TICK_CATCHF_PUSHED();
273
274     /* Push realworld token and enter R1. */
275     Sp--;
276     Sp[0] = ARG_TAG(0);
277     TICK_ENT_VIA_NODE();
278     JMP_(GET_ENTRY(R1.cl));
279     
280   FE_
281 }      
282
283 /* -----------------------------------------------------------------------------
284  * The raise infotable
285  * 
286  * This should be exactly the same as would be generated by this STG code
287  *
288  *   raise = {err} \n {} -> raise#{err}
289  *
290  * It is used in raisezh_fast to update thunks on the update list
291  * -------------------------------------------------------------------------- */
292
/* Entry code for the 'raise' closure (see header comment above): fetches
 * the stored exception value from the closure's payload and tail-calls
 * raisezh_fast.  Entering one of these means re-raising the exception.
 */
293 INFO_TABLE(raise_info,raise_entry,1,0,FUN,,EF_,0,0);
294 STGFUN(raise_entry)
295 {
296   FB_
297   R1.cl = R1.cl->payload[0];
298   JMP_(raisezh_fast);
299   FE_
300 }
301
/* raise# :: Exception -> a
 *
 * Raises the exception in R1: walks the update-frame chain (Su),
 * overwriting every updatable thunk with a 'raise' closure (so that
 * re-entering the thunk re-raises), until the nearest CATCH_FRAME is
 * found.  Then unwinds the stack to that frame, restores the async-
 * exception blocking state it recorded, and enters its handler with
 * the exception value and a realworld token.
 */
302 FN_(raisezh_fast)
303 {
304   StgClosure *handler;
305   StgUpdateFrame *p;
306   StgClosure *raise_closure;
307   FB_
308     /* args : R1 = error */
309
310
311 #if defined(PROFILING)
312
313     /* Debugging tool: on raising an  exception, show where we are. */
314
315     /* ToDo: currently this is a hack.  Would be much better if
316      * the info was only displayed for an *uncaught* exception.
317      */
318     if (RtsFlags.ProfFlags.showCCSOnException) {
319       STGCALL2(print_ccs,stderr,CCCS);
320     }
321
322 #endif
323
324     p = Su;
325
326     /* This closure represents the expression 'raise# E' where E
327      * is the exception raised.  It is used to overwrite all the
328      * thunks which are currently under evaluation.
329      */
    /* Header plus one payload word for the exception value. */
330     raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
331                                                sizeofW(StgClosure)+1);
332     raise_closure->header.info = &raise_info;
333     raise_closure->payload[0] = R1.cl;
334
    /* Walk the frame chain until we hit a CATCH_FRAME. */
335     while (1) {
336
337       switch (get_itbl(p)->type) {
338
339       case UPDATE_FRAME:
        /* Overwrite the thunk being updated with the raise closure. */
340         UPD_IND(p->updatee,raise_closure);
341         p = p->link;
342         continue;
343
344       case SEQ_FRAME:
        /* Nothing to update; just skip over it. */
345         p = ((StgSeqFrame *)p)->link;
346         continue;
347
348       case CATCH_FRAME:
349         /* found it! */
350         break;
351
352       case STOP_FRAME:
        /* Ran off the bottom of the stack: uncaught exception. */
353         barf("raisezh_fast: STOP_FRAME");
354
355       default:
356         barf("raisezh_fast: weird activation record");
357       }
358       
359       break;
360
361     }
362     
363     /* Ok, p points to the enclosing CATCH_FRAME.  Pop everything down to
364      * and including this frame, update Su, push R1, and enter the handler.
365      */
366     Su = ((StgCatchFrame *)p)->link; 
367     handler = ((StgCatchFrame *)p)->handler;
368     
369     Sp = (P_)p + sizeofW(StgCatchFrame);
370
371     /* Restore the blocked/unblocked state for asynchronous exceptions
372      * at the CATCH_FRAME.  
373      *
374      * If exceptions were unblocked, arrange that they are unblocked
375      * again after executing the handler by pushing an
376      * unblockAsyncExceptions_ret stack frame.
377      */
378     if (! ((StgCatchFrame *)p)->exceptions_blocked) {
379       *(--Sp) = (W_)&unblockAsyncExceptionszh_ret_info;
380     }
381
382     /* Ensure that async exceptions are blocked when running the handler.
383     */
384     if (CurrentTSO->blocked_exceptions == NULL) {
385       CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
386     }
387
388     /* Enter the handler, passing the exception value and a realworld
389      * token as arguments.
390      */
391     Sp -= 2;
392     Sp[0] = R1.w;
393     Sp[1] = ARG_TAG(0);
394     TICK_ENT_VIA_NODE();
395     R1.cl = handler;
396     JMP_(GET_ENTRY(R1.cl));
397
398   FE_
399 }