X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FException.hc;h=81c1c36c03bec5cf69b6e6c36667b182be9e4ec3;hb=97906cfcc30dd591e840921d336fdabeb1b8a315;hp=1ad991d55206d8f38bad2ef4fd96d43514dc7c64;hpb=fcc9fa26d8b6f34a646aeb570fb93fc67c49b7ab;p=ghc-hetmet.git diff --git a/ghc/rts/Exception.hc b/ghc/rts/Exception.hc index 1ad991d..81c1c36 100644 --- a/ghc/rts/Exception.hc +++ b/ghc/rts/Exception.hc @@ -1,12 +1,13 @@ /* ----------------------------------------------------------------------------- - * $Id: Exception.hc,v 1.7 2000/02/04 11:15:04 simonmar Exp $ + * $Id: Exception.hc,v 1.25 2002/04/23 06:34:26 sof Exp $ * - * (c) The GHC Team, 1998-1999 + * (c) The GHC Team, 1998-2000 * * Exception support * * ---------------------------------------------------------------------------*/ +#include "Stg.h" #include "Rts.h" #include "Exception.h" #include "Schedule.h" @@ -17,6 +18,9 @@ #if defined(PAR) # include "FetchMe.h" #endif +#if defined(PROFILING) +# include "Profiling.h" +#endif /* ----------------------------------------------------------------------------- Exception Primitives @@ -52,11 +56,11 @@ FN_(blockAsyncExceptionszh_fast) if (CurrentTSO->blocked_exceptions == NULL) { CurrentTSO->blocked_exceptions = END_TSO_QUEUE; /* avoid growing the stack unnecessarily */ - if (Sp[0] == (W_)&blockAsyncExceptionszh_ret_info) { + if (Sp[0] == (W_)&stg_blockAsyncExceptionszh_ret_info) { Sp++; } else { Sp--; - Sp[0] = (W_)&unblockAsyncExceptionszh_ret_info; + Sp[0] = (W_)&stg_unblockAsyncExceptionszh_ret_info; } } Sp--; @@ -65,25 +69,31 @@ FN_(blockAsyncExceptionszh_fast) FE_ } -INFO_TABLE_SRT_BITMAP(unblockAsyncExceptionszh_ret_info, unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0); -FN_(unblockAsyncExceptionszh_ret_entry) +INFO_TABLE_SRT_BITMAP(stg_unblockAsyncExceptionszh_ret_info, stg_unblockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0); +FN_(stg_unblockAsyncExceptionszh_ret_entry) { FB_ ASSERT(CurrentTSO->blocked_exceptions != NULL); #if defined(GRAN) awakenBlockedQueue(CurrentTSO->blocked_exceptions, - CurrentTSO->block_info.closure); + (StgClosure*)NULL); #elif defined(PAR) - // is CurrentTSO->block_info.closure always set to the node - // holding the blocking queue !? 
-- HWL + /* we don't need node info (2nd arg) in this case + (note that CurrentTSO->block_info.closure isn't always set) */ awakenBlockedQueue(CurrentTSO->blocked_exceptions, - CurrentTSO->block_info.closure); + (StgClosure*)NULL); #else awakenBlockedQueue(CurrentTSO->blocked_exceptions); #endif CurrentTSO->blocked_exceptions = NULL; +#ifdef REG_R1 Sp++; JMP_(ENTRY_CODE(Sp[0])); +#else + Sp[1] = Sp[0]; + Sp++; + JMP_(ENTRY_CODE(Sp[1])); +#endif FE_ } @@ -108,11 +118,11 @@ FN_(unblockAsyncExceptionszh_fast) CurrentTSO->blocked_exceptions = NULL; /* avoid growing the stack unnecessarily */ - if (Sp[0] == (W_)&unblockAsyncExceptionszh_ret_info) { + if (Sp[0] == (W_)&stg_unblockAsyncExceptionszh_ret_info) { Sp++; } else { Sp--; - Sp[0] = (W_)&blockAsyncExceptionszh_ret_info; + Sp[0] = (W_)&stg_blockAsyncExceptionszh_ret_info; } } Sp--; @@ -121,18 +131,23 @@ FN_(unblockAsyncExceptionszh_fast) FE_ } -INFO_TABLE_SRT_BITMAP(blockAsyncExceptionszh_ret_info, blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0); -FN_(blockAsyncExceptionszh_ret_entry) +INFO_TABLE_SRT_BITMAP(stg_blockAsyncExceptionszh_ret_info, stg_blockAsyncExceptionszh_ret_entry, 0, 0, 0, 0, RET_SMALL, , EF_, 0, 0); +FN_(stg_blockAsyncExceptionszh_ret_entry) { FB_ ASSERT(CurrentTSO->blocked_exceptions == NULL); CurrentTSO->blocked_exceptions = END_TSO_QUEUE; +#ifdef REG_R1 Sp++; JMP_(ENTRY_CODE(Sp[0])); +#else + Sp[1] = Sp[0]; + Sp++; + JMP_(ENTRY_CODE(Sp[1])); +#endif FE_ } - FN_(killThreadzh_fast) { FB_ @@ -141,25 +156,27 @@ FN_(killThreadzh_fast) /* This thread may have been relocated. * (see Schedule.c:threadStackOverflow) */ - while (R1.t->whatNext == ThreadRelocated) { + while (R1.t->what_next == ThreadRelocated) { R1.t = R1.t->link; } /* If the target thread is currently blocking async exceptions, - * we'll have to block until it's ready to accept them. + * we'll have to block until it's ready to accept them. The + * exception is interruptible threads - ie. those that are blocked + * on some resource. */ - if (R1.t->blocked_exceptions != NULL) { - - /* ToDo (SMP): locking if destination thread is currently - * running... - */ - CurrentTSO->link = R1.t->blocked_exceptions; - R1.t->blocked_exceptions = CurrentTSO; - - CurrentTSO->why_blocked = BlockedOnException; - CurrentTSO->block_info.tso = R1.t; + if (R1.t->blocked_exceptions != NULL && !interruptible(R1.t) ) { + + /* ToDo (SMP): locking if destination thread is currently + * running... + */ + CurrentTSO->link = R1.t->blocked_exceptions; + R1.t->blocked_exceptions = CurrentTSO; - BLOCK( R1_PTR | R2_PTR, killThreadzh_fast ); + CurrentTSO->why_blocked = BlockedOnException; + CurrentTSO->block_info.tso = R1.t; + + BLOCK( R1_PTR | R2_PTR, killThreadzh_fast ); } /* Killed threads turn into zombies, which might be garbage @@ -174,13 +191,13 @@ FN_(killThreadzh_fast) */ if (R1.t == CurrentTSO) { SaveThreadState(); /* inline! 
*/ - STGCALL2(raiseAsync, R1.t, R2.cl); - if (CurrentTSO->whatNext == ThreadKilled) { - R1.w = ThreadYielding; + STGCALL2(raiseAsyncWithLock, R1.t, R2.cl); + if (CurrentTSO->what_next == ThreadKilled) { + R1.w = ThreadFinished; JMP_(StgReturn); } LoadThreadState(); - if (CurrentTSO->whatNext == ThreadEnterGHC) { + if (CurrentTSO->what_next == ThreadEnterGHC) { R1.w = Sp[0]; Sp++; JMP_(GET_ENTRY(R1.cl)); @@ -188,13 +205,14 @@ FN_(killThreadzh_fast) barf("killThreadzh_fast"); } } else { - STGCALL2(raiseAsync, R1.t, R2.cl); + STGCALL2(raiseAsyncWithLock, R1.t, R2.cl); } JMP_(ENTRY_CODE(Sp[0])); FE_ } + /* ----------------------------------------------------------------------------- Catch frames -------------------------------------------------------------------------- */ @@ -233,18 +251,18 @@ FN_(killThreadzh_fast) #define SP_OFF 1 #endif -CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_entry,ENTRY_CODE(Sp[SP_OFF])); -CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_0_entry,RET_VEC(Sp[SP_OFF],0)); -CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_1_entry,RET_VEC(Sp[SP_OFF],1)); -CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_2_entry,RET_VEC(Sp[SP_OFF],2)); -CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_3_entry,RET_VEC(Sp[SP_OFF],3)); -CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_4_entry,RET_VEC(Sp[SP_OFF],4)); -CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_5_entry,RET_VEC(Sp[SP_OFF],5)); -CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_6_entry,RET_VEC(Sp[SP_OFF],6)); -CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_7_entry,RET_VEC(Sp[SP_OFF],7)); - -#ifdef PROFILING -#define CATCH_FRAME_BITMAP 7 +CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_entry,ENTRY_CODE(Sp[SP_OFF])); +CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_0_entry,RET_VEC(Sp[SP_OFF],0)); +CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_1_entry,RET_VEC(Sp[SP_OFF],1)); +CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_2_entry,RET_VEC(Sp[SP_OFF],2)); +CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_3_entry,RET_VEC(Sp[SP_OFF],3)); +CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_4_entry,RET_VEC(Sp[SP_OFF],4)); +CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_5_entry,RET_VEC(Sp[SP_OFF],5)); +CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_6_entry,RET_VEC(Sp[SP_OFF],6)); +CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_7_entry,RET_VEC(Sp[SP_OFF],7)); + +#if defined(PROFILING) +#define CATCH_FRAME_BITMAP 15 #else #define CATCH_FRAME_BITMAP 3 #endif @@ -254,7 +272,7 @@ CATCH_FRAME_ENTRY_TEMPLATE(catch_frame_7_entry,RET_VEC(Sp[SP_OFF],7)); * kind of return to the activation record underneath us on the stack. */ -VEC_POLY_INFO_TABLE(catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_); +VEC_POLY_INFO_TABLE(stg_catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, CATCH_FRAME,, EF_); /* ----------------------------------------------------------------------------- * The catch infotable @@ -266,12 +284,12 @@ VEC_POLY_INFO_TABLE(catch_frame, CATCH_FRAME_BITMAP, NULL/*srt*/, 0/*srt_off*/, * It is used in deleteThread when reverting blackholes. 
* -------------------------------------------------------------------------- */ -INFO_TABLE(catch_info,catch_entry,2,0,FUN,,EF_,0,0); -STGFUN(catch_entry) +INFO_TABLE(stg_catch_info,stg_catch_entry,2,0,FUN,,EF_,0,0); +STGFUN(stg_catch_entry) { FB_ - R2.cl = payloadCPtr(R1.cl,1); /* h */ - R1.cl = payloadCPtr(R1.cl,0); /* x */ + R2.cl = R1.cl->payload[1]; /* h */ + R1.cl = R1.cl->payload[0]; /* x */ JMP_(catchzh_fast); FE_ } @@ -287,7 +305,7 @@ FN_(catchzh_fast) /* Set up the catch frame */ Sp -= sizeofW(StgCatchFrame); fp = (StgCatchFrame *)Sp; - SET_HDR(fp,(StgInfoTable *)&catch_frame_info,CCCS); + SET_HDR(fp,(StgInfoTable *)&stg_catch_frame_info,CCCS); fp -> handler = R2.cl; fp -> exceptions_blocked = (CurrentTSO->blocked_exceptions != NULL); fp -> link = Su; @@ -313,8 +331,8 @@ FN_(catchzh_fast) * It is used in raisezh_fast to update thunks on the update list * -------------------------------------------------------------------------- */ -INFO_TABLE(raise_info,raise_entry,1,0,FUN,,EF_,0,0); -STGFUN(raise_entry) +INFO_TABLE(stg_raise_info,stg_raise_entry,1,0,THUNK,,EF_,0,0); +STGFUN(stg_raise_entry) { FB_ R1.cl = R1.cl->payload[0]; @@ -328,20 +346,18 @@ FN_(raisezh_fast) StgUpdateFrame *p; StgClosure *raise_closure; FB_ - /* args : R1 = error */ + /* args : R1 = exception */ #if defined(PROFILING) - /* Debugging tool: on raising an exception, show where we are. */ /* ToDo: currently this is a hack. Would be much better if * the info was only displayed for an *uncaught* exception. */ if (RtsFlags.ProfFlags.showCCSOnException) { - STGCALL2(print_ccs,stderr,CCCS); + STGCALL2(fprintCCS,stderr,CCCS); } - #endif p = Su; @@ -350,9 +366,19 @@ FN_(raisezh_fast) * is the exception raise. It is used to overwrite all the * thunks which are currently under evaluataion. */ + /* + // @LDV profiling + // stg_raise_info has THUNK as its closure type. Since a THUNK takes at least + // MIN_UPD_SIZE words in its payload, MIN_UPD_SIZE is more approprate than 1. + // It seems that 1 does not cause any problem unless profiling is performed. + // However, when LDV profiling goes on, we need to linearly scan small object pool, + // where raise_closure is stored, so we should use MIN_UPD_SIZE. raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate, sizeofW(StgClosure)+1); - raise_closure->header.info = &raise_info; + */ + raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate, + sizeofW(StgClosure)+MIN_UPD_SIZE); + SET_HDR(raise_closure, &stg_raise_info, CCCS); raise_closure->payload[0] = R1.cl; while (1) { @@ -373,8 +399,15 @@ FN_(raisezh_fast) break; case STOP_FRAME: - barf("raisezh_fast: STOP_FRAME"); - + /* We've stripped the entire stack, the thread is now dead. */ + Sp = CurrentTSO->stack + CurrentTSO->stack_size - 1; + Sp[0] = R1.w; /* save the exception */ + Su = (StgUpdateFrame *)(Sp+1); + CurrentTSO->what_next = ThreadKilled; + SaveThreadState(); /* inline! */ + R1.w = ThreadFinished; + JMP_(StgReturn); + default: barf("raisezh_fast: weird activation record"); } @@ -399,7 +432,7 @@ FN_(raisezh_fast) * unblockAsyncExceptions_ret stack frame. */ if (! ((StgCatchFrame *)p)->exceptions_blocked) { - *(--Sp) = (W_)&unblockAsyncExceptionszh_ret_info; + *(--Sp) = (W_)&stg_unblockAsyncExceptionszh_ret_info; } /* Ensure that async excpetions are blocked when running the handler.
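
For reference, below is a minimal Haskell-level sketch of the semantics that the RTS code in this patch implements; it is illustrative only and not part of the patch. It is written against the modern Control.Exception API (mask subsumes the block/unblock wrappers that, in the era of this patch, sat directly on blockAsyncExceptionszh_fast and unblockAsyncExceptionszh_fast), and killThread/throwTo is the library face of killThreadzh_fast. The interruptible() test added to killThreadzh_fast above is what allows the exception to land inside the masked threadDelay below even though async exceptions are blocked there.

import Control.Concurrent (forkIO, killThread, threadDelay)
import Control.Exception

main :: IO ()
main = do
  tid <- forkIO $
    (mask_ $ do
        -- async exceptions are deferred here, as after blockAsyncExceptionszh_fast
        putStrLn "masked: running with async exceptions blocked"
        -- threadDelay is an interruptible (blocked-on-a-resource) operation, so
        -- the interruptible() check in killThreadzh_fast lets ThreadKilled
        -- arrive here despite the mask
        threadDelay 1000000)
      `catch` \e -> putStrLn ("caught: " ++ show (e :: SomeException))
  threadDelay 10000     -- give the child time to enter the masked section
  killThread tid        -- blocks until the exception has actually been delivered,
                        -- mirroring the BlockedOnException queueing above
  threadDelay 10000

The library wrappers contemporary with this patch were Exception.block and Exception.unblock rather than mask; the modern names are used here only so the sketch compiles with a current GHC.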