[project @ 2001-12-05 17:35:12 by sewardj]
[ghc-hetmet.git] / ghc / rts / Exception.hc
1 /* -----------------------------------------------------------------------------
2  * $Id: Exception.hc,v 1.23 2001/12/05 17:35:15 sewardj Exp $
3  *
4  * (c) The GHC Team, 1998-2000
5  *
6  * Exception support
7  *
8  * ---------------------------------------------------------------------------*/
9
10 #include "Stg.h"
11 #include "Rts.h"
12 #include "Exception.h"
13 #include "Schedule.h"
14 #include "StgRun.h"
15 #include "Storage.h"
16 #include "RtsUtils.h"
17 #include "RtsFlags.h"
18 #if defined(PAR)
19 # include "FetchMe.h"
20 #endif
21 #if defined(PROFILING)
22 # include "Profiling.h"
23 #endif
24
25 /* -----------------------------------------------------------------------------
26    Exception Primitives
27
28    A thread can request that asynchronous exceptions not be delivered
29    ("blocked") for the duration of an I/O computation.  The primitive
30    
31         blockAsyncExceptions# :: IO a -> IO a
32
33    is used for this purpose.  During a blocked section, asynchronous
34    exceptions may be unblocked again temporarily:
35
36         unblockAsyncExceptions# :: IO a -> IO a
37
38    Furthermore, asynchronous exceptions are blocked automatically during
39    the execution of an exception handler.  Both of these primitives
40    leave a continuation on the stack which reverts to the previous
41    state (blocked or unblocked) on exit.
42
43    A thread which wants to raise an exception in another thread (using
44    killThread#) must block until the target thread is ready to receive
45    it.  The action of unblocking exceptions in a thread will release all
46    the threads waiting to deliver exceptions to that thread.
47
48    -------------------------------------------------------------------------- */
49
/* Entry code for blockAsyncExceptions# :: IO a -> IO a.
 * Marks the current TSO as "async exceptions blocked", arranges for the
 * previous (unblocked) state to be restored on return, then tail-calls
 * the IO action in R1.
 */
50 FN_(blockAsyncExceptionszh_fast)
51 {
52   FB_
53     /* Args: R1 :: IO a */
    /* Worst case we push 2 words: a reverting ret frame + the realworld
     * token tag below. */
54     STK_CHK_GEN( 2/* worst case */, R1_PTR, blockAsyncExceptionszh_fast, );
55
    /* blocked_exceptions == NULL means "not blocked"; any non-NULL value
     * (END_TSO_QUEUE when empty) means "blocked", and the field doubles
     * as the queue of TSOs waiting to throw to this thread. */
56     if (CurrentTSO->blocked_exceptions == NULL) {
57       CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
58       /* avoid growing the stack unnecessarily */
59       if (Sp[0] == (W_)&stg_blockAsyncExceptionszh_ret_info) {
        /* top frame would just re-block on return: the two frames cancel,
         * so pop it instead of pushing its inverse */
60         Sp++;
61       } else {
        /* push a continuation that reverts to the unblocked state */
62         Sp--;
63         Sp[0] = (W_)&stg_unblockAsyncExceptionszh_ret_info;
64       }
65     }
    /* push the realworld token and enter the IO action */
66     Sp--;
67     Sp[0] = ARG_TAG(0);
68     JMP_(GET_ENTRY(R1.cl));
69   FE_
70 }
71
/* Return continuation pushed by blockAsyncExceptions# (and by raisezh_fast):
 * on return it reverts the TSO to the unblocked state, waking any threads
 * that queued up to deliver an exception while we were blocked.
 */
72 INFO_TABLE_SRT_BITMAP(stg_unblockAsyncExceptionszh_ret_info, stg_unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
73 FN_(stg_unblockAsyncExceptionszh_ret_entry)
74 {
75   FB_
76     ASSERT(CurrentTSO->blocked_exceptions != NULL);
77 #if defined(GRAN)
78       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
79                          (StgClosure*)NULL); 
80 #elif defined(PAR)
81       /* we don't need node info (2nd arg) in this case
82          (note that CurrentTSO->block_info.closure isn't always set) */
83       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
84                          (StgClosure*)NULL); 
85 #else
86     awakenBlockedQueue(CurrentTSO->blocked_exceptions);
87 #endif
    /* NULL == async exceptions no longer blocked */
88     CurrentTSO->blocked_exceptions = NULL;
89 #ifdef REG_R1
    /* return value lives in the R1 register: just pop this frame */
90     Sp++;
91     JMP_(ENTRY_CODE(Sp[0]));
92 #else
    /* unregisterised: return value is on the stack at Sp[0]; slide it up
     * over this frame before returning */
93     Sp[1] = Sp[0];
94     Sp++;
95     JMP_(ENTRY_CODE(Sp[1]));
96 #endif
97   FE_
98 }
99
/* Entry code for unblockAsyncExceptions# :: IO a -> IO a.
 * Temporarily unblocks async exceptions: wakes all threads queued to
 * deliver exceptions to us, arranges for the blocked state to be
 * restored on return, then tail-calls the IO action in R1.
 */
100 FN_(unblockAsyncExceptionszh_fast)
101 {
102   FB_
103     /* Args: R1 :: IO a */
104     STK_CHK_GEN(2, R1_PTR, unblockAsyncExceptionszh_fast, );
105
    /* non-NULL blocked_exceptions == currently blocked; the field holds
     * the queue of TSOs waiting to throw to this thread */
106     if (CurrentTSO->blocked_exceptions != NULL) {
107 #if defined(GRAN)
108       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
109                          CurrentTSO->block_info.closure);
110 #elif defined(PAR)
111       // is CurrentTSO->block_info.closure always set to the node
112       // holding the blocking queue !? -- HWL
113       awakenBlockedQueue(CurrentTSO->blocked_exceptions, 
114                          CurrentTSO->block_info.closure);
115 #else
116       awakenBlockedQueue(CurrentTSO->blocked_exceptions);
117 #endif
118       CurrentTSO->blocked_exceptions = NULL;
119
120       /* avoid growing the stack unnecessarily */
121       if (Sp[0] == (W_)&stg_unblockAsyncExceptionszh_ret_info) {
        /* top frame would just re-unblock on return: cancel it out */
122         Sp++;
123       } else {
        /* push a continuation that reverts to the blocked state */
124         Sp--;   
125         Sp[0] = (W_)&stg_blockAsyncExceptionszh_ret_info;
126       }
127     }
    /* push the realworld token and enter the IO action */
128     Sp--;
129     Sp[0] = ARG_TAG(0);
130     JMP_(GET_ENTRY(R1.cl));
131   FE_
132 }
133
/* Return continuation pushed by unblockAsyncExceptions#: on return it
 * re-enters the blocked state (empty waiter queue == END_TSO_QUEUE).
 */
134 INFO_TABLE_SRT_BITMAP(stg_blockAsyncExceptionszh_ret_info, stg_blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0);
135 FN_(stg_blockAsyncExceptionszh_ret_entry)
136 {
137   FB_
138     ASSERT(CurrentTSO->blocked_exceptions == NULL);
139     CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
140 #ifdef REG_R1
    /* return value is in R1: pop this frame and return */
141     Sp++;
142     JMP_(ENTRY_CODE(Sp[0]));
143 #else
    /* unregisterised: slide the stacked return value up over this frame */
144     Sp[1] = Sp[0];
145     Sp++;
146     JMP_(ENTRY_CODE(Sp[1]));
147 #endif
148   FE_
149 }
150
/* Entry code for killThread# : raise an asynchronous exception (R2) in
 * the target TSO (R1).  May block the calling thread if the target has
 * async exceptions blocked; handles the delicate case of killing
 * ourselves.
 */
151 FN_(killThreadzh_fast)
152 {
153   FB_
154   /* args: R1.p = TSO to kill, R2.p = Exception */
155
156   /* This thread may have been relocated.
157    * (see Schedule.c:threadStackOverflow)
158    */
159   while (R1.t->what_next == ThreadRelocated) {
160     R1.t = R1.t->link;
161   }
162
163   /* If the target thread is currently blocking async exceptions,
164    * we'll have to block until it's ready to accept them.  The
165    * exception to this rule is interruptible threads - ie. those that
166    * are blocked on some resource.
167    */
168   if (R1.t->blocked_exceptions != NULL && !interruptible(R1.t) ) {
169     
170     /* ToDo (SMP): locking if destination thread is currently
171      * running...
172      */
    /* enqueue ourselves on the target's blocked_exceptions queue */
173     CurrentTSO->link = R1.t->blocked_exceptions;
174     R1.t->blocked_exceptions = CurrentTSO;
175
176     CurrentTSO->why_blocked = BlockedOnException;
177     CurrentTSO->block_info.tso = R1.t;
178     
    /* suspend in the scheduler; when woken, this primop is retried
     * from the top (hence the self-reference) */
179     BLOCK( R1_PTR | R2_PTR, killThreadzh_fast );
180   }
181
182   /* Killed threads turn into zombies, which might be garbage
183    * collected at a later date.  That's why we don't have to
184    * explicitly remove them from any queues they might be on.
185    */
186
187   /* We might have killed ourselves.  In which case, better be *very*
188    * careful.  If the exception killed us, then return to the scheduler.
189    * If the exception went to a catch frame, we'll just continue from
190    * the handler.
191    */
192   if (R1.t == CurrentTSO) {
        /* raiseAsync runs outside STG world, so flush TSO state first */
193         SaveThreadState();      /* inline! */
194         STGCALL2(raiseAsync, R1.t, R2.cl);
195         if (CurrentTSO->what_next == ThreadKilled) {
        /* the exception propagated off the top of our stack: tell the
         * scheduler we are finished */
196                 R1.w = ThreadFinished;
197                 JMP_(StgReturn);
198         }
199         LoadThreadState();
200         if (CurrentTSO->what_next == ThreadEnterGHC) {
        /* raiseAsync left a closure (the handler application) on top of
         * the stack for us to enter */
201                 R1.w = Sp[0];
202                 Sp++;
203                 JMP_(GET_ENTRY(R1.cl));
204         } else {
        /* no other what_next value is expected here */
205                 barf("killThreadzh_fast");
206         }
207   } else {
        /* killing another thread: deliver the exception and carry on */
208         STGCALL2(raiseAsync, R1.t, R2.cl);
209   }
210
211   JMP_(ENTRY_CODE(Sp[0]));
212   FE_
213 }
214
215
/* Entry code for myThreadId# : returns the current TSO (the ThreadId#)
 * in R1.
 */
216 FN_(myThreadIdzh_fast)
217 {
218   /* no args. */
219   FB_
220   R1.p = (P_)CurrentTSO;
221   JMP_(ENTRY_CODE(Sp[0]));
222   FE_
223 }
224
225
226 /* -----------------------------------------------------------------------------
227    Catch frames
228    -------------------------------------------------------------------------- */
229
/* Template for the entry/return code of a CATCH_FRAME.  Returning into a
 * catch frame means no exception was raised: pop the frame (restoring Su
 * from its saved link) and perform the given return to the frame below.
 * NOTE: no extra lines may be inserted inside the #define bodies -- they
 * are backslash-continued.
 */
230 #ifdef REG_R1
/* Registerised case: the return value travels in R1, so we only need to
 * unlink the frame and retreat Sp past it. */
231 #define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
232    FN_(label);                                  \
233    FN_(label)                                   \
234    {                                            \
235       FB_                                       \
236       Su = ((StgCatchFrame *)Sp)->link;         \
237       Sp += sizeofW(StgCatchFrame);             \
238       JMP_(ret);                                \
239       FE_                                       \
240    }
241 #else
/* Unregisterised case: the return value sits on the stack above the
 * frame; save it, pop the frame, and re-push it for the next frame. */
242 #define CATCH_FRAME_ENTRY_TEMPLATE(label,ret)   \
243    FN_(label);                                  \
244    FN_(label)                                   \
245    {                                            \
246       StgWord rval;                             \
247       FB_                                       \
248       rval = Sp[0];                             \
249       Sp++;                                     \
250       Su = ((StgCatchFrame *)Sp)->link;         \
251       Sp += sizeofW(StgCatchFrame) - 1;         \
252       Sp[0] = rval;                             \
253       JMP_(ret);                                \
254       FE_                                       \
255    }
256 #endif
257
/* Offset of the return address relative to Sp on entry: 0 when the value
 * is in R1, 1 when the value occupies Sp[0] (unregisterised). */
258 #ifdef REG_R1
259 #define SP_OFF 0
260 #else
261 #define SP_OFF 1
262 #endif
263
/* Direct-return entry plus the eight vectored-return entries. */
264 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_entry,ENTRY_CODE(Sp[SP_OFF]));
265 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_0_entry,RET_VEC(Sp[SP_OFF],0));
266 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_1_entry,RET_VEC(Sp[SP_OFF],1));
267 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_2_entry,RET_VEC(Sp[SP_OFF],2));
268 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_3_entry,RET_VEC(Sp[SP_OFF],3));
269 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_4_entry,RET_VEC(Sp[SP_OFF],4));
270 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_5_entry,RET_VEC(Sp[SP_OFF],5));
271 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_6_entry,RET_VEC(Sp[SP_OFF],6));
272 CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_7_entry,RET_VEC(Sp[SP_OFF],7));
273
/* Layout bitmap for a CATCH_FRAME: marks the frame's non-pointer words.
 * NOTE(review): presumably the extra bits under PROFILING cover the
 * profiling header words added to the frame -- confirm against the
 * StgCatchFrame definition. */
274 #if defined(PROFILING)
275 #define CATCH_FRAME_BITMAP 15
276 #else
277 #define CATCH_FRAME_BITMAP 3
278 #endif
279
280 /* Catch frames are very similar to update frames, but when entering
281  * one we just pop the frame off the stack and perform the correct
282  * kind of return to the activation record underneath us on the stack.
283  */
284
285 VEC_POLY_INFO_TABLE(stg_catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_);
286
287 /* -----------------------------------------------------------------------------
288  * The catch infotable
289  *
290  * This should be exactly the same as would be generated by this STG code
291  *
292  * catch = {x,h} \n {} -> catch#{x,h}
293  *
294  * It is used in deleteThread when reverting blackholes.
295  * -------------------------------------------------------------------------- */
296
/* Closure 'catch x h': unpacks the two payload fields into the argument
 * registers expected by catchzh_fast and tail-calls it.  (See the header
 * comment above: used by deleteThread when reverting blackholes.)
 */
297 INFO_TABLE(stg_catch_info,stg_catch_entry,2,0,FUN,,EF_,0,0);
298 STGFUN(stg_catch_entry)
299 {
300   FB_
301   R2.cl = R1.cl->payload[1]; /* h */
302   R1.cl = R1.cl->payload[0]; /* x */
303   JMP_(catchzh_fast);
304   FE_
305 }
306
/* Entry code for catch# : push a CATCH_FRAME recording the handler, the
 * current async-exception blocking state, and the enclosing update
 * frame, then run the protected IO action in R1.
 */
307 FN_(catchzh_fast)
308 {
309   StgCatchFrame *fp;
310   FB_
311
312     /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
    /* frame + 1 word for the realworld token pushed below */
313     STK_CHK_GEN(sizeofW(StgCatchFrame) + 1, R1_PTR | R2_PTR, catchzh_fast, );
314   
315     /* Set up the catch frame */
316     Sp -= sizeofW(StgCatchFrame);
317     fp = (StgCatchFrame *)Sp;
318     SET_HDR(fp,(StgInfoTable *)&stg_catch_frame_info,CCCS);
319     fp -> handler = R2.cl;
    /* remember blocked/unblocked so raisezh_fast can restore it */
320     fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL);
    /* chain into the update-frame list; Su now points at this frame */
321     fp -> link = Su;
322     Su = (StgUpdateFrame *)fp;
323     TICK_CATCHF_PUSHED();
324
325     /* Push realworld token and enter R1. */
326     Sp--;
327     Sp[0] = ARG_TAG(0);
328     TICK_ENT_VIA_NODE();
329     JMP_(GET_ENTRY(R1.cl));
330     
331   FE_
332 }      
333
334 /* -----------------------------------------------------------------------------
335  * The raise infotable
336  * 
337  * This should be exactly the same as would be generated by this STG code
338  *
339  *   raise = {err} \n {} -> raise#{err}
340  *
341  * It is used in raisezh_fast to update thunks on the update list
342  * -------------------------------------------------------------------------- */
343
/* Thunk 'raise# err': entering it re-raises the stored exception.  Used
 * by raisezh_fast to overwrite thunks under evaluation (see below).
 */
344 INFO_TABLE(stg_raise_info,stg_raise_entry,1,0,THUNK,,EF_,0,0);
345 STGFUN(stg_raise_entry)
346 {
347   FB_
348   R1.cl = R1.cl->payload[0];
349   JMP_(raisezh_fast);
350   FE_
351 }
352
/* Entry code for raise# : walk the Su chain of update/seq/catch frames,
 * overwriting every thunk under evaluation with a 'raise# E' closure,
 * until a CATCH_FRAME is found (enter its handler) or the stack is
 * exhausted (the thread dies).
 */
353 FN_(raisezh_fast)
354 {
355   StgClosure *handler;
356   StgUpdateFrame *p;
357   StgClosure *raise_closure;
358   FB_
359     /* args : R1 = exception */
360
361
362 #if defined(PROFILING)
363     /* Debugging tool: on raising an  exception, show where we are. */
364
365     /* ToDo: currently this is a hack.  Would be much better if
366      * the info was only displayed for an *uncaught* exception.
367      */
368     if (RtsFlags.ProfFlags.showCCSOnException) {
369       STGCALL2(fprintCCS,stderr,CCCS);
370     }
371 #endif
372
    /* start the walk at the innermost update/catch/seq frame */
373     p = Su;
374
375     /* This closure represents the expression 'raise# E' where E
376      * is the exception raised.  It is used to overwrite all the
377      * thunks which are currently under evaluation.
378      */
379     /*    
380     // @LDV profiling
381     // stg_raise_info has THUNK as its closure type. Since a THUNK takes at least
382     // MIN_UPD_SIZE words in its payload, MIN_UPD_SIZE is more approprate than 1.
383     // It seems that 1 does not cause any problem unless profiling is performed.
384     // However, when LDV profiling goes on, we need to linearly scan small object pool,
385     // where raise_closure is stored, so we should use MIN_UPD_SIZE.
386     raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
387                                                sizeofW(StgClosure)+1);
388      */
389     raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
390                                                sizeofW(StgClosure)+MIN_UPD_SIZE);
391     SET_HDR(raise_closure, &stg_raise_info, CCCS);
392     raise_closure->payload[0] = R1.cl;
393
394     while (1) {
395
396       switch (get_itbl(p)->type) {
397
398       case UPDATE_FRAME:
        /* point the thunk being updated at 'raise# E', so re-entering
         * it re-raises the exception */
399         UPD_IND(p->updatee,raise_closure);
400         p = p->link;
401         continue;
402
403       case SEQ_FRAME:
        /* nothing to update; just skip past */
404         p = ((StgSeqFrame *)p)->link;
405         continue;
406
407       case CATCH_FRAME:
408         /* found it! */
409         break;
410
411       case STOP_FRAME:
412         /* We've stripped the entire stack, the thread is now dead. */
        /* leave the exception in the top word of the stack so the
         * scheduler (and GC) can still see it */
413         Sp = CurrentTSO->stack + CurrentTSO->stack_size - 1;
414         Sp[0] = R1.w;           /* save the exception */
415         Su = (StgUpdateFrame *)(Sp+1);
416         CurrentTSO->what_next = ThreadKilled;
417         SaveThreadState();      /* inline! */
418         R1.w = ThreadFinished;
419         JMP_(StgReturn);
420       
421       default:
422         barf("raisezh_fast: weird activation record");
423       }
424       
      /* only CATCH_FRAME falls through to here */
425       break;
426
427     }
428     
429     /* Ok, p points to the enclosing CATCH_FRAME.  Pop everything down to
430      * and including this frame, update Su, push R1, and enter the handler.
431      */
432     Su = ((StgCatchFrame *)p)->link; 
433     handler = ((StgCatchFrame *)p)->handler;
434     
435     Sp = (P_)p + sizeofW(StgCatchFrame);
436
437     /* Restore the blocked/unblocked state for asynchronous exceptions
438      * at the CATCH_FRAME.  
439      *
440      * If exceptions were unblocked, arrange that they are unblocked
441      * again after executing the handler by pushing an
442      * unblockAsyncExceptions_ret stack frame.
443      */
444     if (! ((StgCatchFrame *)p)->exceptions_blocked) {
445       *(--Sp) = (W_)&stg_unblockAsyncExceptionszh_ret_info;
446     }
447
448     /* Ensure that async exceptions are blocked when running the handler.
449     */
450     if (CurrentTSO->blocked_exceptions == NULL) {
451       CurrentTSO->blocked_exceptions = END_TSO_QUEUE;
452     }
453
454     /* Enter the handler, passing the exception value and a realworld
455      * token as arguments.
456      */
457     Sp -= 2;
458     Sp[0] = R1.w;
459     Sp[1] = ARG_TAG(0);
460     TICK_ENT_VIA_NODE();
461     R1.cl = handler;
462     JMP_(GET_ENTRY(R1.cl));
463
464   FE_
465 }