[project @ 2002-02-28 18:44:28 by sof]
[ghc-hetmet.git] / ghc / rts / Exception.hc
1 /* -----------------------------------------------------------------------------
2  * $Id: Exception.hc,v 1.24 2002/02/28 18:44:28 sof Exp $
3  *
4  * (c) The GHC Team, 1998-2000
5  *
6  * Exception support
7  *
8  * ---------------------------------------------------------------------------*/
9
10 #include "Stg.h"
11 #include "Rts.h"
12 #include "Exception.h"
13 #include "Schedule.h"
14 #include "StgRun.h"
15 #include "Storage.h"
16 #include "RtsUtils.h"
17 #include "RtsFlags.h"
18 #if defined(PAR)
19 # include "FetchMe.h"
20 #endif
21 #if defined(PROFILING)
22 # include "Profiling.h"
23 #endif
24
25 /* -----------------------------------------------------------------------------
26    Exception Primitives
27
28    A thread can request that asynchronous exceptions not be delivered
29    ("blocked") for the duration of an I/O computation.  The primitive
30    
31         blockAsyncExceptions# :: IO a -> IO a
32
33    is used for this purpose.  During a blocked section, asynchronous
34    exceptions may be unblocked again temporarily:
35
36         unblockAsyncExceptions# :: IO a -> IO a
37
38    Furthermore, asynchronous exceptions are blocked automatically during
39    the execution of an exception handler.  Both of these primitives
40    leave a continuation on the stack which reverts to the previous
41    state (blocked or unblocked) on exit.
42
43    A thread which wants to raise an exception in another thread (using
44    killThread#) must block until the target thread is ready to receive
45    it.  The action of unblocking exceptions in a thread will release all
46    the threads waiting to deliver exceptions to that thread.
47
48    -------------------------------------------------------------------------- */
49
/* blockAsyncExceptions# :: IO a -> IO a
 *
 * Mark async exceptions as blocked for the current thread by making
 * CurrentTSO->blocked_exceptions non-NULL (END_TSO_QUEUE == "blocked,
 * nobody waiting"), then tail-call the IO action in R1.  A
 * stg_unblockAsyncExceptionszh_ret frame is pushed so that the
 * previous (unblocked) state is restored when the action returns.
 */
50 FN_(blockAsyncExceptionszh_fast)
51 {
52   FB_
53     /* Args: R1 :: IO a */
54     STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );
55
     /* Only do anything if exceptions are currently unblocked. */
56     if (CurrentTSO->blocked_exceptions == NULL) {
57       CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
58       /* avoid growing the stack unnecessarily */
       /* If the frame on top would re-block on return, the two frames
        * cancel out: pop it instead of pushing an unblock frame.  This
        * keeps repeated block/unblock from growing the stack. */
59       if (Sp[0] == (W_)&stg_blockAsyncExceptionszh_ret_info) {
60         Sp++;
61       } else {
62         Sp--;
63         Sp[0] = (W_)&stg_unblockAsyncExceptionszh_ret_info;
64       }
65     }
     /* Push the realworld token and enter the IO action. */
66     Sp--;
67     Sp[0] = ARG_TAG(0);
68     JMP_(GET_ENTRY(R1.cl));
69   FE_
70 }
71
/* Return frame pushed by blockAsyncExceptionszh_fast (and by
 * raisezh_fast): on return from the protected computation, revert to
 * the unblocked state -- wake every thread queued up to deliver an
 * exception to us, clear blocked_exceptions, and return to the frame
 * underneath.
 */
72 INFO_TABLE_SRT_BITMAP(stg_unblockAsyncExceptionszh_ret_info, stg_unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
73 FN_(stg_unblockAsyncExceptionszh_ret_entry)
74 {
75   FB_
76     ASSERT(CurrentTSO->blocked_exceptions != NULL);
77 #if defined(GRAN)
78       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
79                          (StgClosure*)NULL); 
80 #elif defined(PAR)
81       /* we don't need node info (2nd arg) in this case
82          (note that CurrentTSO->block_info.closure isn't always set) */
83       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
84                          (StgClosure*)NULL); 
85 #else
86     awakenBlockedQueue(CurrentTSO->blocked_exceptions);
87 #endif
     /* NULL means "async exceptions unblocked". */
88     CurrentTSO->blocked_exceptions = NULL;
89 #ifdef REG_R1
     /* Registerised: pop this frame and return. */
90     Sp++;
91     JMP_(ENTRY_CODE(Sp[0]));
92 #else
     /* Unregisterised: the return value sits on top of the stack above
      * this frame; shift it up one word as we pop the frame. */
93     Sp[1] = Sp[0];
94     Sp++;
95     JMP_(ENTRY_CODE(Sp[1]));
96 #endif
97   FE_
98 }
99
/* unblockAsyncExceptions# :: IO a -> IO a
 *
 * Temporarily unblock async exceptions for the duration of the IO
 * action in R1: wake all threads queued to deliver exceptions to us,
 * clear blocked_exceptions, and push a
 * stg_blockAsyncExceptionszh_ret frame so the blocked state is
 * restored when the action returns.
 */
100 FN_(unblockAsyncExceptionszh_fast)
101 {
102   FB_
103     /* Args: R1 :: IO a */
104     STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );
105
      /* Only do anything if exceptions are currently blocked. */
106     if (CurrentTSO->blocked_exceptions != NULL) {
107 #if defined(GRAN)
108       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
109                          CurrentTSO->block_info.closure);
110 #elif defined(PAR)
111       // is CurrentTSO->block_info.closure always set to the node
112       // holding the blocking queue !? -- HWL
113       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
114                          CurrentTSO->block_info.closure);
115 #else
116       awakenBlockedQueue(CurrentTSO->blocked_exceptions);
117 #endif
118       CurrentTSO->blocked_exceptions = NULL;
119
120       /* avoid growing the stack unnecessarily */
        /* Mirror of the trick in blockAsyncExceptionszh_fast: if the
         * top frame would re-unblock on return, pop it rather than
         * pushing a block frame. */
121       if (Sp[0] == (W_)&stg_unblockAsyncExceptionszh_ret_info) {
122         Sp++;
123       } else {
124         Sp--;   
125         Sp[0] = (W_)&stg_blockAsyncExceptionszh_ret_info;
126       }
127     }
      /* Push the realworld token and enter the IO action. */
128     Sp--;
129     Sp[0] = ARG_TAG(0);
130     JMP_(GET_ENTRY(R1.cl));
131   FE_
132 }
133
/* Return frame pushed by unblockAsyncExceptionszh_fast: on return
 * from the protected computation, revert to the blocked state.  No
 * queue to wake here -- we are re-entering the blocked state, so we
 * just mark blocked_exceptions non-NULL again and return.
 */
134 INFO_TABLE_SRT_BITMAP(stg_blockAsyncExceptionszh_ret_info, stg_blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
135 FN_(stg_blockAsyncExceptionszh_ret_entry)
136 {
137   FB_
138     ASSERT(CurrentTSO->blocked_exceptions == NULL);
139     CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
140 #ifdef REG_R1
      /* Registerised: pop this frame and return. */
141     Sp++;
142     JMP_(ENTRY_CODE(Sp[0]));
143 #else
      /* Unregisterised: shift the stack-top return value up one word
       * as we pop the frame (same shuffle as the unblock ret frame). */
144     Sp[1] = Sp[0];
145     Sp++;
146     JMP_(ENTRY_CODE(Sp[1]));
147 #endif
148   FE_
149 }
150
/* killThread# :: ThreadId# -> Exception -> IO ()
 *
 * Raise an asynchronous exception (R2) in the target thread (R1).  If
 * the target currently has exceptions blocked (and is not
 * interruptible), the *current* thread blocks on the target's
 * blocked_exceptions queue until the target unblocks.  Raising an
 * exception in ourselves needs special care: raiseAsync may leave the
 * thread dead, or leave the stack set up to continue in a handler.
 */
151 FN_(killThreadzh_fast)
152 {
153   FB_
154   /* args: R1.p = TSO to kill, R2.p = Exception */
155
156   /* This thread may have been relocated.
157    * (see Schedule.c:threadStackOverflow)
158    */
159   while (R1.t->what_next == ThreadRelocated) {
160     R1.t = R1.t->link;
161   }
162
163   /* If the target thread is currently blocking async exceptions,
164    * we'll have to block until it's ready to accept them.  The
165    * exception is interruptible threads - ie. those that are blocked
166    * on some resource.
167    */
168   if (R1.t->blocked_exceptions != NULL && !interruptible(R1.t) ) {
169     
170     /* ToDo (SMP): locking if destination thread is currently
171      * running...
172      */
    /* Enqueue ourselves on the target's blocked_exceptions list. */
173     CurrentTSO->link = R1.t->blocked_exceptions;
174     R1.t->blocked_exceptions = CurrentTSO;
175
176     CurrentTSO->why_blocked = BlockedOnException;
177     CurrentTSO->block_info.tso = R1.t;
178     
    /* Return to the scheduler; we re-enter here when woken. */
179     BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
180   }
181
182   /* Killed threads turn into zombies, which might be garbage
183    * collected at a later date.  That's why we don't have to
184    * explicitly remove them from any queues they might be on.
185    */
186
187   /* We might have killed ourselves.  In which case, better be *very*
188    * careful.  If the exception killed us, then return to the scheduler.
189    * If the exception went to a catch frame, we'll just continue from
190    * the handler.
191    */
192   if (R1.t == CurrentTSO) {
193         SaveThreadState();      /* inline! */
194         STGCALL2(raiseAsync, R1.t, R2.cl);
195         if (CurrentTSO->what_next == ThreadKilled) {
                /* The exception stripped our whole stack: hand control
                 * back to the scheduler as a finished thread. */
196                 R1.w = ThreadFinished;
197                 JMP_(StgReturn);
198         }
199         LoadThreadState();
200         if (CurrentTSO->what_next == ThreadEnterGHC) {
                /* raiseAsync left a closure (the handler application)
                 * on top of the stack for us to enter. */
201                 R1.w = Sp[0];
202                 Sp++;
203                 JMP_(GET_ENTRY(R1.cl));
204         } else {
205                 barf("killThreadzh_fast");
206         }
207   } else {
        /* Raising in another thread: just deliver and fall through. */
208         STGCALL2(raiseAsync, R1.t, R2.cl);
209   }
210
211   JMP_(ENTRY_CODE(Sp[0]));
212   FE_
213 }
214
215
216 /* -----------------------------------------------------------------------------
217    Catch frames
218    -------------------------------------------------------------------------- */
219
/* Template for the entry code of a catch frame: unlink the frame from
 * the update-frame chain (Su), pop it off the stack, and perform the
 * given kind of return to the activation record underneath.  Two
 * variants: with a return register (REG_R1) the frame is simply
 * popped; without one the return value lives on the stack above the
 * frame and must be shuffled down past it.
 */
220 #ifdef REG_R1
221 #define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
222    FN_(label);                                  \
223    FN_(label)                                   \
224    {                                            \
225       FB_                                       \
226       Su = ((StgCatchFrame *)Sp)->link;         \
227       Sp += sizeofW(StgCatchFrame);             \
228       JMP_(ret);                                \
229       FE_                                       \
230    }
231 #else
232 #define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
233    FN_(label);                                  \
234    FN_(label)                                   \
235    {                                            \
236       StgWord rval;                             \
237       FB_                                       \
238       rval = Sp[0];                             \
239       Sp++;                                     \
240       Su = ((StgCatchFrame *)Sp)->link;         \
241       Sp += sizeofW(StgCatchFrame) - 1;         \
242       Sp[0] = rval;                             \
243       JMP_(ret);                                \
244       FE_                                       \
245    }
246 #endif
247
/* Offset of the return address relative to Sp inside the template:
 * 0 when the return value is in R1, 1 when it is on the stack. */
248 #ifdef REG_R1
249 #define SP_OFF 0
250 #else
251 #define SP_OFF 1
252 #endif
253
/* One entry point per return convention: the direct (non-vectored)
 * return, plus the eight vectored returns. */
254 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_entry,ENTRY_CODE(Sp[SP_OFF]));
255 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_0_entry,RET_VEC(Sp[SP_OFF],0));
256 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_1_entry,RET_VEC(Sp[SP_OFF],1));
257 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_2_entry,RET_VEC(Sp[SP_OFF],2));
258 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_3_entry,RET_VEC(Sp[SP_OFF],3));
259 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_4_entry,RET_VEC(Sp[SP_OFF],4));
260 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_5_entry,RET_VEC(Sp[SP_OFF],5));
261 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_6_entry,RET_VEC(Sp[SP_OFF],6));
262 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_7_entry,RET_VEC(Sp[SP_OFF],7));
263
/* Liveness bitmap for the catch frame's pointer fields; profiling
 * builds carry an extra (CCS) word in the frame header. */
264 #if defined(PROFILING)
265 #define CATCH_FRAME_BITMAP 15
266 #else
267 #define CATCH_FRAME_BITMAP 3
268 #endif
269
270 /* Catch frames are very similar to update frames, but when entering
271  * one we just pop the frame off the stack and perform the correct
272  * kind of return to the activation record underneath us on the stack.
273  */
274
275 VEC_POLY_INFO_TABLE(stg_catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);
276
277 /* -----------------------------------------------------------------------------
278  * The catch infotable
279  *
280  * This should be exactly the same as would be generated by this STG code
281  *
282  * catch = {x,h} \n {} -> catch#{x,h}
283  *
284  * It is used in deleteThread when reverting blackholes.
285  * -------------------------------------------------------------------------- */
286
/* Entry code for a saturated application of catch: unpack the two
 * payload fields into the argument registers and jump to the
 * primitive. */
287 INFO_TABLE(stg_catch_info,stg_catch_entry,2,0,FUN,,EF_,0,0);
288 STGFUN(stg_catch_entry)
289 {
290   FB_
291   R2.cl = R1.cl->payload[1]; /* h */
292   R1.cl = R1.cl->payload[0]; /* x */
293   JMP_(catchzh_fast);
294   FE_
295 }
296
/* catch# :: IO a -> (Exception -> IO a) -> IO a
 *
 * Push a catch frame recording the handler, the current async
 * exception blocking state, and the current head of the update-frame
 * chain, then enter the IO action.  raisezh_fast unwinds to this
 * frame when an exception is raised inside the action.
 */
297 FN_(catchzh_fast)
298 {
299   StgCatchFrame *fp;
300   FB_
301
302     /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
303     STK_CHK_GEN(sizeofW(StgCatchFrame) + 1, R1_PTR | R2_PTR, catchzh_fast, );
304   
305     /* Set up the catch frame */
306     Sp -= sizeofW(StgCatchFrame);
307     fp = (StgCatchFrame *)Sp;
308     SET_HDR(fp,(StgInfoTable *)&stg_catch_frame_info,CCCS);
309     fp -> handler = R2.cl;
    /* Remember whether async exceptions were blocked at the catch, so
     * the state can be restored when unwinding to this frame. */
310     fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
311     fp -> link = Su;
312     Su = (StgUpdateFrame *)fp;
313     TICK_CATCHF_PUSHED();
314
315     /* Push realworld token and enter R1. */
316     Sp--;
317     Sp[0] = ARG_TAG(0);
318     TICK_ENT_VIA_NODE();
319     JMP_(GET_ENTRY(R1.cl));
320     
321   FE_
322 }      
323
324 /* -----------------------------------------------------------------------------
325  * The raise infotable
326  * 
327  * This should be exactly the same as would be generated by this STG code
328  *
329  *   raise = {err} \n {} -> raise#{err}
330  *
331  * It is used in raisezh_fast to update thunks on the update list
332  * -------------------------------------------------------------------------- */
333
/* Entry code for a 'raise# e' thunk (built in raisezh_fast to
 * overwrite thunks under evaluation): re-raise the stored exception. */
334 INFO_TABLE(stg_raise_info,stg_raise_entry,1,0,THUNK,,EF_,0,0);
335 STGFUN(stg_raise_entry)
336 {
337   FB_
338   R1.cl = R1.cl->payload[0];
339   JMP_(raisezh_fast);
340   FE_
341 }
342
/* raise# :: Exception -> a
 *
 * Raise a (synchronous) exception: walk the update-frame chain from
 * Su, overwriting every updatee with a 'raise# e' thunk, until we hit
 * either a CATCH_FRAME (enter its handler) or the STOP_FRAME (the
 * exception escaped -- kill the thread and return to the scheduler).
 */
343 FN_(raisezh_fast)
344 {
345   StgClosure *handler;
346   StgUpdateFrame *p;
347   StgClosure *raise_closure;
348   FB_
349     /* args : R1 = exception */
350
351
352 #if defined(PROFILING)
353     /* Debugging tool: on raising an exception, show where we are. */
354
355     /* ToDo: currently this is a hack.  Would be much better if
356      * the info was only displayed for an *uncaught* exception.
357      */
358     if (RtsFlags.ProfFlags.showCCSOnException) {
359       STGCALL2(fprintCCS,stderr,CCCS);
360     }
361 #endif
362
363     p = Su;
364
365     /* This closure represents the expression 'raise# E' where E
366      * is the exception raised.  It is used to overwrite all the
367      * thunks which are currently under evaluation.
368      */
369     /*    
370     // @LDV profiling
371     // stg_raise_info has THUNK as its closure type. Since a THUNK takes at least
372     // MIN_UPD_SIZE words in its payload, MIN_UPD_SIZE is more appropriate than 1.
373     // It seems that 1 does not cause any problem unless profiling is performed.
374     // However, when LDV profiling goes on, we need to linearly scan small object pool,
375     // where raise_closure is stored, so we should use MIN_UPD_SIZE.
376     raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
377                                                sizeofW(StgClosure)+1);
378      */
379     raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
380                                                sizeofW(StgClosure)+MIN_UPD_SIZE);
381     SET_HDR(raise_closure, &stg_raise_info, CCCS);
382     raise_closure->payload[0] = R1.cl;
383
    /* Walk the chain of update/seq/catch frames rooted at Su. */
384     while (1) {
385
386       switch (get_itbl(p)->type) {
387
388       case UPDATE_FRAME:
        /* Overwrite the thunk being updated with 'raise# e', so any
         * other consumer of it re-raises the exception. */
389         UPD_IND(p->updatee,raise_closure);
390         p = p->link;
391         continue;
392
393       case SEQ_FRAME:
394         p = ((StgSeqFrame *)p)->link;
395         continue;
396
397       case CATCH_FRAME:
398         /* found it! */
399         break;
400
401       case STOP_FRAME:
402         /* We've stripped the entire stack, the thread is now dead. */
403         Sp = CurrentTSO->stack + CurrentTSO->stack_size - 1;
404         Sp[0] = R1.w;           /* save the exception */
405         Su = (StgUpdateFrame *)(Sp+1);
406         CurrentTSO->what_next = ThreadKilled;
407         SaveThreadState();      /* inline! */
408         R1.w = ThreadFinished;
409         JMP_(StgReturn);
410       
411       default:
412         barf("raisezh_fast: weird activation record");
413       }
414       
415       break;
416
417     }
418     
419     /* Ok, p points to the enclosing CATCH_FRAME.  Pop everything down to
420      * and including this frame, update Su, push R1, and enter the handler.
421      */
422     Su = ((StgCatchFrame *)p)->link; 
423     handler = ((StgCatchFrame *)p)->handler;
424     
425     Sp = (P_)p + sizeofW(StgCatchFrame);
426
427     /* Restore the blocked/unblocked state for asynchronous exceptions
428      * at the CATCH_FRAME.  
429      *
430      * If exceptions were unblocked, arrange that they are unblocked
431      * again after executing the handler by pushing an
432      * unblockAsyncExceptions_ret stack frame.
433      */
434     if (! ((StgCatchFrame *)p)->exceptions_blocked) {
435       *(--Sp) = (W_)&stg_unblockAsyncExceptionszh_ret_info;
436     }
437
438     /* Ensure that async exceptions are blocked when running the handler.
439     */
440     if (CurrentTSO->blocked_exceptions == NULL) {
441       CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
442     }
443
444     /* Enter the handler, passing the exception value and a realworld
445      * token as arguments.
446      */
447     Sp -= 2;
448     Sp[0] = R1.w;
449     Sp[1] = ARG_TAG(0);
450     TICK_ENT_VIA_NODE();
451     R1.cl = handler;
452     JMP_(GET_ENTRY(R1.cl));
453
454   FE_
455 }