[project @ 2004-03-22 11:06:18 by simonmar]
[ghc-hetmet.git] / ghc / rts / Exception.hc
1 /* -----------------------------------------------------------------------------
2  * $Id: Exception.hc,v 1.29 2003/06/26 20:47:08 panne Exp $
3  *
4  * (c) The GHC Team, 1998-2000
5  *
6  * Exception support
7  *
8  * ---------------------------------------------------------------------------*/
9
10 #include "Stg.h"
11 #include "Rts.h"
12 #include "Exception.h"
13 #include "Schedule.h"
14 #include "StgRun.h"
15 #include "Storage.h"
16 #include "RtsUtils.h"
17 #include "RtsFlags.h"
18 #if defined(PAR)
19 # include "FetchMe.h"
20 #endif
21 #if defined(PROFILING)
22 # include "Profiling.h"
23 #endif
24
25 /* -----------------------------------------------------------------------------
26    Exception Primitives
27
28    A thread can request that asynchronous exceptions not be delivered
29    ("blocked") for the duration of an I/O computation.  The primitive
30    
31         blockAsyncExceptions# :: IO a -> IO a
32
33    is used for this purpose.  During a blocked section, asynchronous
34    exceptions may be unblocked again temporarily:
35
36         unblockAsyncExceptions# :: IO a -> IO a
37
38    Furthermore, asynchronous exceptions are blocked automatically during
39    the execution of an exception handler.  Both of these primitives
40    leave a continuation on the stack which reverts to the previous
41    state (blocked or unblocked) on exit.
42
43    A thread which wants to raise an exception in another thread (using
44    killThread#) must block until the target thread is ready to receive
45    it.  The action of unblocking exceptions in a thread will release all
46    the threads waiting to deliver exceptions to that thread.
47
48    -------------------------------------------------------------------------- */
49
/* blockAsyncExceptions# :: IO a -> IO a
 *
 * Switch the current thread into the "exceptions blocked" state
 * (blocked_exceptions != NULL; an empty queue is END_TSO_QUEUE),
 * then apply the IO action in R1 to the real-world token.  If we
 * actually changed state, a continuation frame is left on the stack
 * that restores the unblocked state when the action returns.
 */
50 FN_(blockAsyncExceptionszh_fast)
51 {
52   FB_
53     /* Args: R1 :: IO a */
54     STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast);
55
56     if (CurrentTSO->blocked_exceptions == NULL) {
       /* Currently unblocked: mark exceptions as blocked using the
        * non-NULL sentinel END_TSO_QUEUE (empty queue of waiters). */
57       CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
58       /* avoid growing the stack unnecessarily */
       /* If the frame on top would just re-block on return, pop it
        * instead of pushing a matching unblock frame — the two cancel. */
59       if (Sp[0] == (W_)&stg_blockAsyncExceptionszh_ret_info) {
60         Sp++;
61       } else {
62         Sp--;
63         Sp[0] = (W_)&stg_unblockAsyncExceptionszh_ret_info;
64       }
65     }
     /* Apply R1 to the real-world token (void application). */
66     Sp--;
67     JMP_(stg_ap_v_ret);
68   FE_
69 }
70
/* Return frame pushed by blockAsyncExceptionszh_fast: on return it
 * re-enables async exception delivery (see the entry code below).
 * Frame size 0, empty bitmap — no payload beyond the info pointer. */
71 INFO_TABLE_RET( \
72   stg_unblockAsyncExceptionszh_ret_info, \
73   stg_unblockAsyncExceptionszh_ret_entry, \
74   MK_SMALL_BITMAP(0/*framesize*/, 0/*bitmap*/), \
75   0, 0, 0, RET_SMALL, , EF_, 0, 0 \
76 );
77
/* Entry code for the "unblock on return" frame.  Wakes up every thread
 * queued on blocked_exceptions (threads blocked in killThread# waiting
 * to deliver an exception here), marks exceptions unblocked (NULL),
 * pops this frame and returns to the frame underneath. */
78 FN_(stg_unblockAsyncExceptionszh_ret_entry)
79 {
80   FB_
81     ASSERT(CurrentTSO->blocked_exceptions != NULL);
82 #if defined(GRAN)
83       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
84                          (StgClosure*)NULL); 
85 #elif defined(PAR)
86       /* we don't need node info (2nd arg) in this case
87          (note that CurrentTSO->block_info.closure isn't always set) */
88       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
89                          (StgClosure*)NULL); 
90 #else
91     awakenBlockedQueue(CurrentTSO->blocked_exceptions);
92 #endif
93     CurrentTSO->blocked_exceptions = NULL;
94 #ifdef REG_R1
     /* Registerised: return value is in R1; just pop the frame. */
95     Sp++;
96     JMP_(ENTRY_CODE(Sp[0]));
97 #else
     /* Unregisterised: the return value sits on the stack (presumably
      * at Sp[0] when R1 is not a machine register — see SP_OFF below);
      * slide it up over the popped frame before returning. */
98     Sp[1] = Sp[0];
99     Sp++;
100     JMP_(ENTRY_CODE(Sp[1]));
101 #endif
102   FE_
103 }
104
/* unblockAsyncExceptions# :: IO a -> IO a
 *
 * The dual of blockAsyncExceptions#: if exceptions are currently
 * blocked, wake any threads waiting to deliver one, mark the thread
 * unblocked, and leave a frame that restores the blocked state on
 * return.  Then apply the IO action in R1 to the real-world token.
 */
105 FN_(unblockAsyncExceptionszh_fast)
106 {
107   FB_
108     /* Args: R1 :: IO a */
109     STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast);
110
111     if (CurrentTSO->blocked_exceptions != NULL) {
112 #if defined(GRAN)
113       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
114                          CurrentTSO->block_info.closure);
115 #elif defined(PAR)
116       // is CurrentTSO->block_info.closure always set to the node
117       // holding the blocking queue !? -- HWL
118       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
119                          CurrentTSO->block_info.closure);
120 #else
121       awakenBlockedQueue(CurrentTSO->blocked_exceptions);
122 #endif
123       CurrentTSO->blocked_exceptions = NULL;
124
125       /* avoid growing the stack unnecessarily */
       /* If the frame on top would just re-unblock on return, pop it
        * instead of pushing a matching block frame — the two cancel. */
126       if (Sp[0] == (W_)&stg_unblockAsyncExceptionszh_ret_info) {
127         Sp++;
128       } else {
129         Sp--;   
130         Sp[0] = (W_)&stg_blockAsyncExceptionszh_ret_info;
131       }
132     }
     /* Apply R1 to the real-world token (void application). */
133     Sp--;
134     JMP_(stg_ap_v_ret);
135   FE_
136 }
137
/* Return frame pushed by unblockAsyncExceptionszh_fast: on return it
 * re-blocks async exception delivery (see the entry code below).
 * Frame size 0, empty bitmap — no payload beyond the info pointer. */
138 INFO_TABLE_RET( \
139   stg_blockAsyncExceptionszh_ret_info, \
140   stg_blockAsyncExceptionszh_ret_entry, \
141   MK_SMALL_BITMAP(0/*framesize*/, 0/*bitmap*/), \
142   0, 0, 0, RET_SMALL, , EF_, 0, 0 \
143 );
144
/* Entry code for the "block on return" frame: mark exceptions blocked
 * again (empty END_TSO_QUEUE queue), pop this frame and return to the
 * frame underneath.  Mirrors stg_unblockAsyncExceptionszh_ret_entry. */
145 FN_(stg_blockAsyncExceptionszh_ret_entry)
146 {
147   FB_
148     ASSERT(CurrentTSO->blocked_exceptions == NULL);
149     CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
150 #ifdef REG_R1
     /* Registerised: return value is in R1; just pop the frame. */
151     Sp++;
152     JMP_(ENTRY_CODE(Sp[0]));
153 #else
     /* Unregisterised: slide the stacked return value up over the
      * popped frame before returning (same shuffle as the unblock
      * frame above). */
154     Sp[1] = Sp[0];
155     Sp++;
156     JMP_(ENTRY_CODE(Sp[1]));
157 #endif
158   FE_
159 }
160
/* killThread# :: ThreadId# -> Exception -> IO ()
 *
 * Deliver the asynchronous exception in R2 to the thread in R1.  If
 * the target has exceptions blocked (and is not interruptibly
 * blocked), the calling thread queues itself on the target's
 * blocked_exceptions list and blocks until the target unblocks.
 * Raising in *ourselves* needs special care because raiseAsyncWithLock
 * may tear down our own stack (see below).
 */
161 FN_(killThreadzh_fast)
162 {
163   FB_
164   /* args: R1.p = TSO to kill, R2.p = Exception */
165
166   /* This thread may have been relocated.
167    * (see Schedule.c:threadStackOverflow)
168    */
  /* Chase the forwarding links left behind when a TSO's stack was
   * moved, so R1 points at the live TSO. */
169   while (R1.t->what_next == ThreadRelocated) {
170     R1.t = R1.t->link;
171   }
172
173   /* If the target thread is currently blocking async exceptions,
174    * we'll have to block until it's ready to accept them.  The
175    * exception to this rule is interruptible threads - ie. those that
176    * are blocked on some resource.
177    */
178   if (R1.t->blocked_exceptions != NULL && !interruptible(R1.t) ) {
179     
180     /* ToDo (SMP): locking if destination thread is currently
181      * running...
182      */
    /* Push ourselves onto the target's queue of waiting exception
     * deliverers, record why we blocked, and re-enter the scheduler.
     * BLOCK restarts this primop from the top when we are woken. */
183     CurrentTSO->link = R1.t->blocked_exceptions;
184     R1.t->blocked_exceptions = CurrentTSO;
185
186     CurrentTSO->why_blocked = BlockedOnException;
187     CurrentTSO->block_info.tso = R1.t;
188     
189     BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
190   }
191
192   /* Killed threads turn into zombies, which might be garbage
193    * collected at a later date.  That's why we don't have to
194    * explicitly remove them from any queues they might be on.
195    */
196
197   /* We might have killed ourselves.  In which case, better be *very*
198    * careful.  If the exception killed us, then return to the scheduler.
199    * If the exception went to a catch frame, we'll just continue from
200    * the handler.
201    */
202   if (R1.t == CurrentTSO) {
203         SaveThreadState();      /* inline! */
204         STGCALL2(raiseAsyncWithLock, R1.t, R2.cl);
205         if (CurrentTSO->what_next == ThreadKilled) {
206                 R1.w = ThreadFinished;
207                 JMP_(StgReturn);
208         } else {
                /* Exception was caught: reload Sp etc. and resume at
                 * whatever frame raiseAsync left on top. */
209                 LoadThreadState();
210                 ASSERT(CurrentTSO->what_next == ThreadRunGHC);
211                 JMP_(ENTRY_CODE(Sp[0]));
212         }
213   } else {
        /* Raising in another thread: our own stack is untouched. */
214         STGCALL2(raiseAsyncWithLock, R1.t, R2.cl);
215   }
216
217   JMP_(ENTRY_CODE(Sp[0]));
218   FE_
219 }
220
221
222 /* -----------------------------------------------------------------------------
223    Catch frames
224    -------------------------------------------------------------------------- */
225
/* Template for the return entry points of a catch frame.  Entering one
 * simply discards the StgCatchFrame from the stack and performs the
 * given kind of return (direct or vectored) to the frame underneath.
 * Registerised build: the return value is in R1, so we can just bump Sp. */
226 #ifdef REG_R1
227 #define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
228    FN_(label);                                  \
229    FN_(label)                                   \
230    {                                            \
231       FB_                                       \
232       Sp += sizeofW(StgCatchFrame);             \
233       JMP_(ret);                                \
234       FE_                                       \
235    }
/* Unregisterised build: the return value lives on the stack at Sp[0];
 * save it, pop the frame, and put it back on top before returning. */
236 #else
237 #define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
238    FN_(label);                                  \
239    FN_(label)                                   \
240    {                                            \
241       StgWord rval;                             \
242       FB_                                       \
243       rval = Sp[0];                             \
244       Sp++;                                     \
245       Sp += sizeofW(StgCatchFrame) - 1;         \
246       Sp[0] = rval;                             \
247       JMP_(ret);                                \
248       FE_                                       \
249    }
250 #endif
251
/* Offset of the frame's info pointer relative to Sp on entry: the
 * unregisterised build has the return value at Sp[0], pushing the
 * frame one word further down. */
252 #ifdef REG_R1
253 #define SP_OFF 0
254 #else
255 #define SP_OFF 1
256 #endif
257
/* One direct-return entry plus the eight vectored-return entries. */
258 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_ret,ENTRY_CODE(Sp[SP_OFF]));
259 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_0_ret,RET_VEC(Sp[SP_OFF],0));
260 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_1_ret,RET_VEC(Sp[SP_OFF],1));
261 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_2_ret,RET_VEC(Sp[SP_OFF],2));
262 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_3_ret,RET_VEC(Sp[SP_OFF],3));
263 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_4_ret,RET_VEC(Sp[SP_OFF],4));
264 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_5_ret,RET_VEC(Sp[SP_OFF],5));
265 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_6_ret,RET_VEC(Sp[SP_OFF],6));
266 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_7_ret,RET_VEC(Sp[SP_OFF],7));
267
/* Frame layout for the info table: profiling builds carry two extra
 * (non-pointer) cost-centre words, hence the different size/bitmap. */
268 #if defined(PROFILING)
269 #define CATCH_FRAME_BITMAP 7
270 #define CATCH_FRAME_WORDS  4
271 #else
272 #define CATCH_FRAME_BITMAP 1
273 #define CATCH_FRAME_WORDS  2
274 #endif
275
276 /* Catch frames are very similar to update frames, but when entering
277  * one we just pop the frame off the stack and perform the correct
278  * kind of return to the activation record underneath us on the stack.
279  */
280
281 VEC_POLY_INFO_TABLE(stg_catch_frame, \
282         MK_SMALL_BITMAP(CATCH_FRAME_WORDS, CATCH_FRAME_BITMAP), \
283         NULL/*srt*/, 0/*srt_off*/, 0/*srt_bitmap*/, CATCH_FRAME,, EF_);
284
285 /* -----------------------------------------------------------------------------
286  * The catch infotable
287  *
288  * This should be exactly the same as would be generated by this STG code
289  *
290  * catch = {x,h} \n {} -> catch#{x,h}
291  *
292  * It is used in deleteThread when reverting blackholes.
293  * -------------------------------------------------------------------------- */
294
/* Entry code for a 'catch' closure (see comment above): unpack the
 * action and handler from the closure's payload into the argument
 * registers and tail-call the catch# primop. */
295 INFO_TABLE(stg_catch_info,stg_catch_entry,2,0,FUN,,EF_,0,0);
296 STGFUN(stg_catch_entry)
297 {
298   FB_
299   R2.cl = R1.cl->payload[1]; /* h */
300   R1.cl = R1.cl->payload[0]; /* x */
301   JMP_(catchzh_fast);
302   FE_
303 }
304
/* catch# :: IO a -> (Exception -> IO a) -> IO a
 *
 * Push a catch frame recording the handler and the current
 * blocked/unblocked state (consulted by raisezh_fast when unwinding),
 * then run the protected action by applying it to the real-world
 * token.
 */
305 FN_(catchzh_fast)
306 {
307   StgCatchFrame *fp;
308   FB_
309
310     /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
311     STK_CHK_GEN(sizeofW(StgCatchFrame) + 1, R1_PTR | R2_PTR, catchzh_fast);
312   
313     /* Set up the catch frame */
314     Sp -= sizeofW(StgCatchFrame);
315     fp = (StgCatchFrame *)Sp;
316     SET_HDR(fp,(StgInfoTable *)&stg_catch_frame_info,CCCS);
317     fp -> handler = R2.cl;
318     fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
319     TICK_CATCHF_PUSHED();
320
321
322 /* Apply R1 to the realworld token */
323     Sp--;
324     JMP_(stg_ap_v_ret);
325   FE_
326 }      
327
328 /* -----------------------------------------------------------------------------
329  * The raise infotable
330  * 
331  * This should be exactly the same as would be generated by this STG code
332  *
333  *   raise = {err} \n {} -> raise#{err}
334  *
335  * It is used in raisezh_fast to update thunks on the update list
336  * -------------------------------------------------------------------------- */
337
/* Entry code for a 'raise' thunk (see comment above): fetch the stored
 * exception from the payload and tail-call the raise# primop.  Thunks
 * under evaluation are overwritten with this closure by raisezh_fast,
 * so re-entering them re-raises the same exception. */
338 INFO_TABLE(stg_raise_info,stg_raise_entry,1,0,THUNK,,EF_,0,0);
339 STGFUN(stg_raise_entry)
340 {
341   FB_
342   R1.cl = R1.cl->payload[0];
343   JMP_(raisezh_fast);
344   FE_
345 }
346
/* raise# :: Exception -> a
 *
 * Unwind the stack looking for the nearest CATCH_FRAME.  On the way up,
 * every UPDATE_FRAME's updatee is overwritten with a 'raise' closure so
 * that re-entering a partially-evaluated thunk re-raises this exception.
 * If no catch frame exists (STOP_FRAME reached) the thread dies.
 * Otherwise the catch frame is popped, the saved async-exception state
 * is restored, exceptions are blocked for the handler's duration, and
 * the handler is applied to the exception and a real-world token.
 */
347 FN_(raisezh_fast)
348 {
349   StgClosure *handler;
350   StgPtr p;
351   StgClosure *raise_closure;
352   FB_
353     /* args : R1.p :: Exception */
354
355
356 #if defined(PROFILING)
357     /* Debugging tool: on raising an exception, show where we are. */
358
359     /* ToDo: currently this is a hack.  Would be much better if
360      * the info was only displayed for an *uncaught* exception.
361      */
362     if (RtsFlags.ProfFlags.showCCSOnException) {
363       STGCALL2(fprintCCS,stderr,CCCS);
364     }
365 #endif
366
367     /* This closure represents the expression 'raise# E' where E
368      * is the exception raised.  It is used to overwrite all the
369      * thunks which are currently under evaluation.
370      */
371     /*    
372     // @LDV profiling
373     // stg_raise_info has THUNK as its closure type. Since a THUNK takes at least
374     // MIN_UPD_SIZE words in its payload, MIN_UPD_SIZE is more appropriate than 1.
375     // It seems that 1 does not cause any problem unless profiling is performed.
376     // However, when LDV profiling goes on, we need to linearly scan small object pool,
377     // where raise_closure is stored, so we should use MIN_UPD_SIZE.
378     raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
379                                                sizeofW(StgClosure)+1);
380      */
381     raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
382                                                sizeofW(StgClosure)+MIN_UPD_SIZE);
383     SET_HDR(raise_closure, &stg_raise_info, CCCS);
384     raise_closure->payload[0] = R1.cl;
385
386     // Walk up the stack, looking for the catch frame.  On the way,
387     // we update any closures pointed to from update frames with the
388     // raise closure that we just built.
389     {           
390         StgPtr next;
391         StgRetInfoTable *info;
392
393         p = Sp;
394         while(1) {
395
396             info = get_ret_itbl((StgClosure *)p);
397             next = p + stack_frame_sizeW((StgClosure *)p);
398             switch (info->i.type) {
399
400             case UPDATE_FRAME:
                /* Short-circuit the thunk: it now re-raises on entry. */
401                 UPD_IND(((StgUpdateFrame *)p)->updatee,raise_closure);
402                 p = next;
403                 continue;
404
405             case CATCH_FRAME:
406                 /* found it! */
407                 break;
408
409             case STOP_FRAME:
410                 /* We've stripped the entire stack, the thread is now dead. */
                /* Park the exception value at the top of the (emptied)
                 * stack so the scheduler can find it, and return. */
411                 Sp = CurrentTSO->stack + CurrentTSO->stack_size - 1;
412                 Sp[0] = R1.w;           /* save the exception */
413                 CurrentTSO->what_next = ThreadKilled;
414                 SaveThreadState();      /* inline! */
415                 R1.w = ThreadFinished;
416                 JMP_(StgReturn);
417                 
418             default:
                /* Any other frame is simply discarded by the unwind. */
419                 p = next; 
420                 continue;
421             }
422       
423             break;
424         }
425     }
426     
427     /* Ok, p points to the enclosing CATCH_FRAME.  Pop everything down to
428      * and including this frame, push R1, and enter the handler.
429      */
430     handler = ((StgCatchFrame *)p)->handler;
431     
432     Sp = (P_)p + sizeofW(StgCatchFrame);
433
434     /* Restore the blocked/unblocked state for asynchronous exceptions
435      * at the CATCH_FRAME.  
436      *
437      * If exceptions were unblocked, arrange that they are unblocked
438      * again after executing the handler by pushing an
439      * unblockAsyncExceptions_ret stack frame.
440      */
441     if (! ((StgCatchFrame *)p)->exceptions_blocked) {
442       *(--Sp) = (W_)&stg_unblockAsyncExceptionszh_ret_info;
443     }
444
445     /* Ensure that async exceptions are blocked when running the handler.
446     */
447     if (CurrentTSO->blocked_exceptions == NULL) {
448       CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
449     }
450
451     /* Call the handler, passing the exception value and a realworld
452      * token as arguments.
453      */
    /* Stack layout (top to bottom) after this: exception arg, then an
     * stg_ap_v frame that supplies the real-world token to the
     * handler's result. */
454     Sp -= 2;
455     Sp[1] = (W_)&stg_ap_v_info;
456     Sp[0] = R1.w;
457     R1.cl = handler;
458     Sp--;
459     JMP_(stg_ap_p_ret);
460   FE_
461 }
462
/* raiseIO# :: Exception -> IO a
 *
 * At runtime this is identical to raise#; it simply tail-calls
 * raisezh_fast (the IO/pure distinction matters only at the source
 * level, not here).
 */
463 FN_(raiseIOzh_fast)
464 {
465   FB_
466   /* Args :: R1.p :: Exception */
467   JMP_(raisezh_fast);
468   FE_
469 }