[project @ 2000-02-04 11:15:04 by simonmar]
[ghc-hetmet.git] / ghc / rts / Exception.hc
1 /* -----------------------------------------------------------------------------
2  * $Id: Exception.hc,v 1.7 2000/02/04 11:15:04 simonmar Exp $
3  *
4  * (c) The GHC Team, 1998-1999
5  *
6  * Exception support
7  *
8  * ---------------------------------------------------------------------------*/
9
10 #include "Rts.h"
11 #include "Exception.h"
12 #include "Schedule.h"
13 #include "StgRun.h"
14 #include "Storage.h"
15 #include "RtsUtils.h"
16 #include "RtsFlags.h"
17 #if defined(PAR)
18 # include "FetchMe.h"
19 #endif
20
21 /* -----------------------------------------------------------------------------
22    Exception Primitives
23
24    A thread can request that asynchronous exceptions not be delivered
25    ("blocked") for the duration of an I/O computation.  The primitive
26    
27         blockAsyncExceptions# :: IO a -> IO a
28
29    is used for this purpose.  During a blocked section, asynchronous
30    exceptions may be unblocked again temporarily:
31
32         unblockAsyncExceptions# :: IO a -> IO a
33
34    Furthermore, asynchronous exceptions are blocked automatically during
35    the execution of an exception handler.  Both of these primitives
36    leave a continuation on the stack which reverts to the previous
37    state (blocked or unblocked) on exit.
38
39    A thread which wants to raise an exception in another thread (using
40    killThread#) must block until the target thread is ready to receive
41    it.  The action of unblocking exceptions in a thread will release all
42    the threads waiting to deliver exceptions to that thread.
43
44    -------------------------------------------------------------------------- */
45
/* blockAsyncExceptionszh_fast
 * Implements the primop  blockAsyncExceptions# :: IO a -> IO a.
 * A non-NULL CurrentTSO->blocked_exceptions means "async exceptions
 * blocked".  We switch to the blocked state, arrange for the previous
 * (unblocked) state to be restored when the IO action returns, then
 * tail-call the action in R1.
 */
46 FN_(blockAsyncExceptionszh_fast)
{
  FB_
    /* Args: R1 :: IO a */
    STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );

    /* Only act if exceptions are currently unblocked (field == NULL). */
    if (CurrentTSO->blocked_exceptions == NULL) {
      /* END_TSO_QUEUE is the non-NULL "blocked, empty waiter queue" value. */
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
      /* avoid growing the stack unnecessarily */
      /* If the frame on top would re-block on return
       * (blockAsyncExceptionszh_ret), it exactly cancels the unblock
       * frame we were about to push: pop it instead. */
      if (Sp[0] == (W_)&blockAsyncExceptionszh_ret_info) {
        Sp++;
      } else {
        Sp--;
        Sp[0] = (W_)&unblockAsyncExceptionszh_ret_info;
      }
    }
    /* Push the RealWorld token and enter the IO action. */
    Sp--;
    Sp[0] = ARG_TAG(0);
    JMP_(GET_ENTRY(R1.cl));
  FE_
}
67
/* Return frame pushed by blockAsyncExceptions# (and by raisezh_fast):
 * when the protected IO action returns, revert to the unblocked state
 * and wake every thread that queued itself (via killThread#) to
 * deliver an exception to this thread.
 */
68 INFO_TABLE_SRT_BITMAP(unblockAsyncExceptionszh_ret_info, unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
69 FN_(unblockAsyncExceptionszh_ret_entry)
{
  FB_
    /* We must currently be in the blocked state. */
    ASSERT(CurrentTSO->blocked_exceptions != NULL);
#if defined(GRAN)
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         CurrentTSO->block_info.closure);
#elif defined(PAR)
      // is CurrentTSO->block_info.closure always set to the node
      // holding the blocking queue !? -- HWL
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         CurrentTSO->block_info.closure);
#else
    awakenBlockedQueue(CurrentTSO->blocked_exceptions);
#endif
    /* NULL == "async exceptions unblocked". */
    CurrentTSO->blocked_exceptions = NULL;
    /* Pop this frame and return to the frame underneath. */
    Sp++;
    JMP_(ENTRY_CODE(Sp[0]));
  FE_
}
89
/* unblockAsyncExceptionszh_fast
 * Implements the primop  unblockAsyncExceptions# :: IO a -> IO a.
 * The exact dual of blockAsyncExceptionszh_fast: switch to the
 * unblocked state (waking any queued would-be killers), arrange for the
 * previous (blocked) state to be restored on return, then tail-call the
 * IO action in R1.
 */
90 FN_(unblockAsyncExceptionszh_fast)
{
  FB_
    /* Args: R1 :: IO a */
    STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );

    /* Only act if exceptions are currently blocked (field != NULL). */
    if (CurrentTSO->blocked_exceptions != NULL) {
#if defined(GRAN)
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         CurrentTSO->block_info.closure);
#elif defined(PAR)
      // is CurrentTSO->block_info.closure always set to the node
      // holding the blocking queue !? -- HWL
      awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
                         CurrentTSO->block_info.closure);
#else
      awakenBlockedQueue(CurrentTSO->blocked_exceptions);
#endif
      CurrentTSO->blocked_exceptions = NULL;

      /* avoid growing the stack unnecessarily */
      /* If the frame on top would re-unblock on return, it cancels the
       * block frame we were about to push: pop it instead. */
      if (Sp[0] == (W_)&unblockAsyncExceptionszh_ret_info) {
        Sp++;
      } else {
        Sp--;   
        Sp[0] = (W_)&blockAsyncExceptionszh_ret_info;
      }
    }
    /* Push the RealWorld token and enter the IO action. */
    Sp--;
    Sp[0] = ARG_TAG(0);
    JMP_(GET_ENTRY(R1.cl));
  FE_
}
123
/* Return frame pushed by unblockAsyncExceptions#: when the protected
 * IO action returns, revert to the blocked state.  Nobody can have
 * queued on blocked_exceptions while it was NULL, so there is no queue
 * to wake here.
 */
124 INFO_TABLE_SRT_BITMAP(blockAsyncExceptionszh_ret_info, blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
125 FN_(blockAsyncExceptionszh_ret_entry)
{
  FB_
    /* We must currently be in the unblocked state. */
    ASSERT(CurrentTSO->blocked_exceptions == NULL);
    CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
    /* Pop this frame and return to the frame underneath. */
    Sp++;
    JMP_(ENTRY_CODE(Sp[0]));
  FE_
}
134
135
/* killThreadzh_fast
 * Implements killThread#: raise the asynchronous exception in R2 in the
 * target thread in R1.  If the target is currently blocking async
 * exceptions, the current thread queues itself on the target's
 * blocked_exceptions list and blocks until the target unblocks.
 */
136 FN_(killThreadzh_fast)
{
  FB_
  /* args: R1.p = TSO to kill, R2.p = Exception */

  /* This thread may have been relocated.
   * (see Schedule.c:threadStackOverflow)
   */
  while (R1.t->whatNext == ThreadRelocated) {
    R1.t = R1.t->link;
  }

  /* If the target thread is currently blocking async exceptions,
   * we'll have to block until it's ready to accept them.
   */
  if (R1.t->blocked_exceptions != NULL) {

        /* ToDo (SMP): locking if destination thread is currently
         * running...
         */
        /* Prepend ourselves to the target's queue of waiting killers. */
        CurrentTSO->link = R1.t->blocked_exceptions;
        R1.t->blocked_exceptions = CurrentTSO;

        CurrentTSO->why_blocked = BlockedOnException;
        CurrentTSO->block_info.tso = R1.t;

        /* Return to the scheduler; the retry label passed here is this
         * same primop, so we presumably re-run it from the top when
         * awakened -- TODO confirm against the BLOCK macro. */
        BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
  }

  /* Killed threads turn into zombies, which might be garbage
   * collected at a later date.  That's why we don't have to
   * explicitly remove them from any queues they might be on.
   */

  /* We might have killed ourselves.  In which case, better be *very*
   * careful.  If the exception killed us, then return to the scheduler.
   * If the exception went to a catch frame, we'll just continue from
   * the handler.
   */
  if (R1.t == CurrentTSO) {
        SaveThreadState();      /* inline! */
        STGCALL2(raiseAsync, R1.t, R2.cl);
        /* Case 1: the exception killed us outright -- yield back to
         * the scheduler. */
        if (CurrentTSO->whatNext == ThreadKilled) {
                R1.w = ThreadYielding;
                JMP_(StgReturn);
        }
        LoadThreadState();
        /* Case 2: the exception was caught -- raiseAsync left a
         * closure to enter on top of the stack. */
        if (CurrentTSO->whatNext == ThreadEnterGHC) {
                R1.w = Sp[0];
                Sp++;
                JMP_(GET_ENTRY(R1.cl));
        } else {
                barf("killThreadzh_fast");
        }
  } else {
        /* Killing some other thread: raise the exception in it and
         * carry on ourselves. */
        STGCALL2(raiseAsync, R1.t, R2.cl);
  }

  JMP_(ENTRY_CODE(Sp[0]));
  FE_
}
197
198 /* -----------------------------------------------------------------------------
199    Catch frames
200    -------------------------------------------------------------------------- */
201
/* Entry code for CATCH_FRAME return frames.  Returning through a catch
 * frame just pops it -- restoring Su from the frame's saved link -- and
 * performs the appropriate kind of return (direct or vectored) to the
 * activation record underneath.
 *
 * With REG_R1 the returned value is in a register, so the frame can be
 * popped directly.  Without REG_R1 the returned value sits on the stack
 * above the frame and must be shuffled down past it.
 */
#ifdef REG_R1
#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
   FN_(label);                                  \
   FN_(label)                                   \
   {                                            \
      FB_                                       \
      Su = ((StgCatchFrame *)Sp)->link;         \
      Sp += sizeofW(StgCatchFrame);             \
      JMP_(ret);                                \
      FE_                                       \
   }
#else
#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
   FN_(label);                                  \
   FN_(label)                                   \
   {                                            \
      StgWord rval;                             \
      FB_                                       \
      rval = Sp[0];                             \
      Sp++;                                     \
      Su = ((StgCatchFrame *)Sp)->link;         \
      Sp += sizeofW(StgCatchFrame) - 1;         \
      Sp[0] = rval;                             \
      JMP_(ret);                                \
      FE_                                       \
   }
#endif

/* Offset of the info pointer used for the return: with REG_R1 it is at
 * Sp[0]; otherwise the on-stack return value occupies Sp[0] and the
 * info pointer is one word further up. */
#ifdef REG_R1
#define SP_OFF 0
#else
#define SP_OFF 1
#endif

/* One direct-return entry point plus one per vectored-return slot. */
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_entry,ENTRY_CODE(Sp[SP_OFF]));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_0_entry,RET_VEC(Sp[SP_OFF],0));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_1_entry,RET_VEC(Sp[SP_OFF],1));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_2_entry,RET_VEC(Sp[SP_OFF],2));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_3_entry,RET_VEC(Sp[SP_OFF],3));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_4_entry,RET_VEC(Sp[SP_OFF],4));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_5_entry,RET_VEC(Sp[SP_OFF],5));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_6_entry,RET_VEC(Sp[SP_OFF],6));
CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_7_entry,RET_VEC(Sp[SP_OFF],7));
245
/* Stack-frame layout bitmap for StgCatchFrame.  NOTE(review): under
 * PROFILING the bitmap widens from 3 to 7, presumably to cover an
 * extra profiling header word in the frame -- confirm against the
 * profiled StgCatchFrame layout. */
#ifdef PROFILING
#define CATCH_FRAME_BITMAP 7
#else
#define CATCH_FRAME_BITMAP 3
#endif

/* Catch frames are very similar to update frames, but when entering
 * one we just pop the frame off the stack and perform the correct
 * kind of return to the activation record underneath us on the stack.
 */

VEC_POLY_INFO_TABLE(catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);
258
259 /* -----------------------------------------------------------------------------
260  * The catch infotable
261  *
262  * This should be exactly the same as would be generated by this STG code
263  *
264  * catch = {x,h} \n {} -> catch#{x,h}
265  *
266  * It is used in deleteThread when reverting blackholes.
267  * -------------------------------------------------------------------------- */
268
/* Entry code for the `catch` closure (see the comment above): unpack
 * the two free variables -- the protected action x and the handler h --
 * into the argument registers expected by catchzh_fast and jump there.
 */
269 INFO_TABLE(catch_info,catch_entry,2,0,FUN,,EF_,0,0);
270 STGFUN(catch_entry)
{
  FB_
  R2.cl = payloadCPtr(R1.cl,1); /* h */
  R1.cl = payloadCPtr(R1.cl,0); /* x */
  JMP_(catchzh_fast);
  FE_
}
278
/* catchzh_fast
 * Implements catch#: push an StgCatchFrame recording the handler, the
 * current async-exception blocking state, and the current update-frame
 * chain (Su); then enter the protected IO action.  raisezh_fast later
 * unwinds to this frame.
 */
279 FN_(catchzh_fast)
{
  StgCatchFrame *fp;
  FB_

    /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
    STK_CHK_GEN(sizeofW(StgCatchFrame) + 1, R1_PTR | R2_PTR, catchzh_fast, );
  
    /* Set up the catch frame */
    Sp -= sizeofW(StgCatchFrame);
    fp = (StgCatchFrame *)Sp;
    SET_HDR(fp,(StgInfoTable *)&catch_frame_info,CCCS);
    fp -> handler = R2.cl;
    /* Remember whether async exceptions were blocked at this point, so
     * the state can be restored if an exception is caught here. */
    fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
    /* Link the frame into the Su chain so stack walks find it. */
    fp -> link = Su;
    Su = (StgUpdateFrame *)fp;
    TICK_CATCHF_PUSHED();

    /* Push realworld token and enter R1. */
    Sp--;
    Sp[0] = ARG_TAG(0);
    TICK_ENT_VIA_NODE();
    JMP_(GET_ENTRY(R1.cl));
    
  FE_
}      
305
306 /* -----------------------------------------------------------------------------
307  * The raise infotable
308  * 
309  * This should be exactly the same as would be generated by this STG code
310  *
311  *   raise = {err} \n {} -> raise#{err}
312  *
313  * It is used in raisezh_fast to update thunks on the update list
314  * -------------------------------------------------------------------------- */
315
/* Entry code for a `raise` closure (see the comment above): entering
 * one re-raises the stored exception.  Unpack the exception value from
 * the closure's payload and jump to raisezh_fast.
 */
316 INFO_TABLE(raise_info,raise_entry,1,0,FUN,,EF_,0,0);
317 STGFUN(raise_entry)
{
  FB_
  R1.cl = R1.cl->payload[0];
  JMP_(raisezh_fast);
  FE_
}
324
/* raisezh_fast
 * Implements raise#: walk the chain of frames starting at Su, updating
 * every thunk under evaluation with a `raise` closure (so re-entering
 * it re-raises this exception), until the nearest CATCH_FRAME is
 * found.  Then pop the stack down past that frame, restore the saved
 * async-exception state, and enter the handler with the exception
 * value and a RealWorld token.
 */
325 FN_(raisezh_fast)
{
  StgClosure *handler;
  StgUpdateFrame *p;
  StgClosure *raise_closure;
  FB_
    /* args : R1 = error */


#if defined(PROFILING)

    /* Debugging tool: on raising an  exception, show where we are. */

    /* ToDo: currently this is a hack.  Would be much better if
     * the info was only displayed for an *uncaught* exception.
     */
    if (RtsFlags.ProfFlags.showCCSOnException) {
      STGCALL2(print_ccs,stderr,CCCS);
    }

#endif

    p = Su;

    /* This closure represents the expression 'raise# E' where E
     * is the exception raised.  It is used to overwrite all the
     * thunks which are currently under evaluation.
     */
    /* One header word plus one payload word for the exception value. */
    raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
                                               sizeofW(StgClosure)+1);
    raise_closure->header.info = &raise_info;
    raise_closure->payload[0] = R1.cl;

    /* Walk the frame chain until we hit a CATCH_FRAME (or barf). */
    while (1) {

      switch (get_itbl(p)->type) {

      case UPDATE_FRAME:
        /* Overwrite the thunk being evaluated so it re-raises. */
        UPD_IND(p->updatee,raise_closure);
        p = p->link;
        continue;

      case SEQ_FRAME:
        /* Nothing to update; just step past it. */
        p = ((StgSeqFrame *)p)->link;
        continue;

      case CATCH_FRAME:
        /* found it! */
        break;

      case STOP_FRAME:
        /* Uncaught exception: reached the bottom of the stack. */
        barf("raisezh_fast: STOP_FRAME");

      default:
        barf("raisezh_fast: weird activation record");
      }
      
      break;

    }
    
    /* Ok, p points to the enclosing CATCH_FRAME.  Pop everything down to
     * and including this frame, update Su, push R1, and enter the handler.
     */
    Su = ((StgCatchFrame *)p)->link; 
    handler = ((StgCatchFrame *)p)->handler;
    
    Sp = (P_)p + sizeofW(StgCatchFrame);

    /* Restore the blocked/unblocked state for asynchronous exceptions
     * at the CATCH_FRAME.  
     *
     * If exceptions were unblocked, arrange that they are unblocked
     * again after executing the handler by pushing an
     * unblockAsyncExceptions_ret stack frame.
     */
    if (! ((StgCatchFrame *)p)->exceptions_blocked) {
      *(--Sp) = (W_)&unblockAsyncExceptionszh_ret_info;
    }

    /* Ensure that async exceptions are blocked when running the handler.
    */
    if (CurrentTSO->blocked_exceptions == NULL) {
      CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
    }

    /* Enter the handler, passing the exception value and a realworld
     * token as arguments.
     */
    Sp -= 2;
    Sp[0] = R1.w;
    Sp[1] = ARG_TAG(0);
    TICK_ENT_VIA_NODE();
    R1.cl = handler;
    JMP_(GET_ENTRY(R1.cl));

  FE_
}