X-Git-Url: http://git.megacz.com/?p=ghc-hetmet.git;a=blobdiff_plain;f=rts%2FException.cmm;h=24da1c690e50784451e5f144edce893029cb6f2c;hp=f0eae988fe58e0badffa33d34a7cc3d33df90348;hb=0885017a4e92fe5710d1427c214adb87b92987e5;hpb=65064489375b670ab54cde381162f6383eeb8384 diff --git a/rts/Exception.cmm b/rts/Exception.cmm index f0eae98..24da1c6 100644 --- a/rts/Exception.cmm +++ b/rts/Exception.cmm @@ -13,7 +13,7 @@ #include "Cmm.h" #include "RaiseAsync.h" -import ghczmprim_GHCziBool_True_closure; +import ghczmprim_GHCziTypes_True_closure; /* ----------------------------------------------------------------------------- Exception Primitives @@ -21,12 +21,12 @@ import ghczmprim_GHCziBool_True_closure; A thread can request that asynchronous exceptions not be delivered ("blocked") for the duration of an I/O computation. The primitive - blockAsyncExceptions# :: IO a -> IO a + maskAsyncExceptions# :: IO a -> IO a is used for this purpose. During a blocked section, asynchronous exceptions may be unblocked again temporarily: - unblockAsyncExceptions# :: IO a -> IO a + unmaskAsyncExceptions# :: IO a -> IO a Furthermore, asynchronous exceptions are blocked automatically during the execution of an exception handler. Both of these primitives @@ -39,32 +39,33 @@ import ghczmprim_GHCziBool_True_closure; the threads waiting to deliver exceptions to that thread. NB. there's a bug in here. If a thread is inside an - unsafePerformIO, and inside blockAsyncExceptions# (there is an - unblockAsyncExceptions_ret on the stack), and it is blocked in an + unsafePerformIO, and inside maskAsyncExceptions# (there is an + unmaskAsyncExceptions_ret on the stack), and it is blocked in an interruptible operation, and it receives an exception, then the unsafePerformIO thunk will be updated with a stack object - containing the unblockAsyncExceptions_ret frame. Later, when + containing the unmaskAsyncExceptions_ret frame. Later, when someone else evaluates this thunk, the blocked exception state is not restored. -------------------------------------------------------------------------- */ -INFO_TABLE_RET( stg_unblockAsyncExceptionszh_ret, RET_SMALL ) + +INFO_TABLE_RET(stg_unmaskAsyncExceptionszh_ret, RET_SMALL) { CInt r; - StgTSO_flags(CurrentTSO) = StgTSO_flags(CurrentTSO) & - ~(TSO_BLOCKEX::I32|TSO_INTERRUPTIBLE::I32); + StgTSO_flags(CurrentTSO) = %lobits32( + TO_W_(StgTSO_flags(CurrentTSO)) & ~(TSO_BLOCKEX|TSO_INTERRUPTIBLE)); /* Eagerly raise a blocked exception, if there is one */ if (StgTSO_blocked_exceptions(CurrentTSO) != END_TSO_QUEUE) { + + STK_CHK_GEN( WDS(2), R1_PTR, stg_unmaskAsyncExceptionszh_ret_info); /* * We have to be very careful here, as in killThread#, since * we are about to raise an async exception in the current * thread, which might result in the thread being killed. */ - - STK_CHK_GEN( WDS(2), R1_PTR, stg_unblockAsyncExceptionszh_ret_info); Sp_adj(-2); Sp(1) = R1; Sp(0) = stg_gc_unpt_r1_info; @@ -81,50 +82,108 @@ INFO_TABLE_RET( stg_unblockAsyncExceptionszh_ret, RET_SMALL ) jump %ENTRY_CODE(Sp(0)); } } + else { + /* + the thread might have been removed from the + blocked_exception list by someone else in the meantime. + Just restore the stack pointer and continue. 
+ */ + Sp_adj(2); + } } Sp_adj(1); jump %ENTRY_CODE(Sp(0)); } -INFO_TABLE_RET( stg_blockAsyncExceptionszh_ret, RET_SMALL ) +INFO_TABLE_RET(stg_maskAsyncExceptionszh_ret, RET_SMALL) +{ + StgTSO_flags(CurrentTSO) = + %lobits32( + TO_W_(StgTSO_flags(CurrentTSO)) + | TSO_BLOCKEX | TSO_INTERRUPTIBLE + ); + + Sp_adj(1); + jump %ENTRY_CODE(Sp(0)); +} + +INFO_TABLE_RET(stg_maskUninterruptiblezh_ret, RET_SMALL) { StgTSO_flags(CurrentTSO) = - StgTSO_flags(CurrentTSO) | TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32; + %lobits32( + (TO_W_(StgTSO_flags(CurrentTSO)) + | TSO_BLOCKEX) + & ~TSO_INTERRUPTIBLE + ); Sp_adj(1); jump %ENTRY_CODE(Sp(0)); } -blockAsyncExceptionszh_fast +stg_maskAsyncExceptionszh { /* Args: R1 :: IO a */ - STK_CHK_GEN( WDS(2)/* worst case */, R1_PTR, blockAsyncExceptionszh_fast); + STK_CHK_GEN( WDS(1)/* worst case */, R1_PTR, stg_maskAsyncExceptionszh); if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) == 0) { - - StgTSO_flags(CurrentTSO) = - StgTSO_flags(CurrentTSO) | TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32; + /* avoid growing the stack unnecessarily */ + if (Sp(0) == stg_maskAsyncExceptionszh_ret_info) { + Sp_adj(1); + } else { + Sp_adj(-1); + Sp(0) = stg_unmaskAsyncExceptionszh_ret_info; + } + } else { + if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) == 0) { + Sp_adj(-1); + Sp(0) = stg_maskUninterruptiblezh_ret_info; + } + } - /* avoid growing the stack unnecessarily */ - if (Sp(0) == stg_blockAsyncExceptionszh_ret_info) { - Sp_adj(1); - } else { - Sp_adj(-1); - Sp(0) = stg_unblockAsyncExceptionszh_ret_info; - } + StgTSO_flags(CurrentTSO) = %lobits32( + TO_W_(StgTSO_flags(CurrentTSO)) | TSO_BLOCKEX | TSO_INTERRUPTIBLE); + + TICK_UNKNOWN_CALL(); + TICK_SLOW_CALL_v(); + jump stg_ap_v_fast; +} + +stg_maskUninterruptiblezh +{ + /* Args: R1 :: IO a */ + STK_CHK_GEN( WDS(1)/* worst case */, R1_PTR, stg_maskAsyncExceptionszh); + + if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) == 0) { + /* avoid growing the stack unnecessarily */ + if (Sp(0) == stg_maskUninterruptiblezh_ret_info) { + Sp_adj(1); + } else { + Sp_adj(-1); + Sp(0) = stg_unmaskAsyncExceptionszh_ret_info; + } + } else { + if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) != 0) { + Sp_adj(-1); + Sp(0) = stg_maskAsyncExceptionszh_ret_info; + } } + + StgTSO_flags(CurrentTSO) = %lobits32( + (TO_W_(StgTSO_flags(CurrentTSO)) | TSO_BLOCKEX) & ~TSO_INTERRUPTIBLE); + TICK_UNKNOWN_CALL(); TICK_SLOW_CALL_v(); jump stg_ap_v_fast; } -unblockAsyncExceptionszh_fast +stg_unmaskAsyncExceptionszh { CInt r; + W_ level; /* Args: R1 :: IO a */ - STK_CHK_GEN( WDS(4), R1_PTR, unblockAsyncExceptionszh_fast); + STK_CHK_GEN( WDS(4), R1_PTR, stg_unmaskAsyncExceptionszh); /* 4 words: one for the unblock frame, 3 for setting up the * stack to call maybePerformBlockedException() below. 
*/ @@ -132,17 +191,21 @@ unblockAsyncExceptionszh_fast /* If exceptions are already unblocked, there's nothing to do */ if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) != 0) { - StgTSO_flags(CurrentTSO) = StgTSO_flags(CurrentTSO) & - ~(TSO_BLOCKEX::I32|TSO_INTERRUPTIBLE::I32); - /* avoid growing the stack unnecessarily */ - if (Sp(0) == stg_unblockAsyncExceptionszh_ret_info) { + if (Sp(0) == stg_unmaskAsyncExceptionszh_ret_info) { Sp_adj(1); } else { Sp_adj(-1); - Sp(0) = stg_blockAsyncExceptionszh_ret_info; + if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) != 0) { + Sp(0) = stg_maskAsyncExceptionszh_ret_info; + } else { + Sp(0) = stg_maskUninterruptiblezh_ret_info; + } } + StgTSO_flags(CurrentTSO) = %lobits32( + TO_W_(StgTSO_flags(CurrentTSO)) & ~(TSO_BLOCKEX|TSO_INTERRUPTIBLE)); + /* Eagerly raise a blocked exception, if there is one */ if (StgTSO_blocked_exceptions(CurrentTSO) != END_TSO_QUEUE) { /* @@ -185,17 +248,20 @@ unblockAsyncExceptionszh_fast jump stg_ap_v_fast; } -asyncExceptionsBlockedzh_fast + +stg_getMaskingStatezh { /* args: none */ - if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) != 0) { - RET_N(1); - } else { - RET_N(0); - } + /* + returns: 0 == unmasked, + 1 == masked, non-interruptible, + 2 == masked, interruptible + */ + RET_N(((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) != 0) + + ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) != 0)); } -killThreadzh_fast +stg_killThreadzh { /* args: R1 = TSO to kill, R2 = Exception */ @@ -207,7 +273,9 @@ killThreadzh_fast exception = R2; /* Needs 3 words because throwToSingleThreaded uses some stack */ - STK_CHK_GEN( WDS(3), R1_PTR & R2_PTR, killThreadzh_fast); + STK_CHK_GEN( WDS(3), R1_PTR & R2_PTR, stg_killThreadzh); + /* We call allocate in throwTo(), so better check for GC */ + MAYBE_GC(R1_PTR & R2_PTR, stg_killThreadzh); /* * We might have killed ourselves. In which case, better be *very* @@ -215,11 +283,6 @@ killThreadzh_fast * If the exception went to a catch frame, we'll just continue from * the handler. */ - loop: - if (StgTSO_what_next(target) == ThreadRelocated::I16) { - target = StgTSO__link(target); - goto loop; - } if (target == CurrentTSO) { /* * So what should happen if a thread calls "throwTo self" inside @@ -227,7 +290,7 @@ killThreadzh_fast * thread? Presumably it should behave as if throwTo just returned, * and then continue from there. See #3279, #3288. This is what * happens: on resumption, we will just jump to the next frame on - * the stack, which is the return point for killThreadzh_fast. + * the stack, which is the return point for stg_killThreadzh. */ SAVE_THREAD_STATE(); /* ToDo: what if the current thread is blocking exceptions? */ @@ -242,27 +305,22 @@ killThreadzh_fast } } else { W_ out; - W_ retcode; + W_ msg; out = Sp - WDS(1); /* ok to re-use stack space here */ - (retcode) = foreign "C" throwTo(MyCapability() "ptr", - CurrentTSO "ptr", - target "ptr", - exception "ptr", - out "ptr") [R1,R2]; + (msg) = foreign "C" throwTo(MyCapability() "ptr", + CurrentTSO "ptr", + target "ptr", + exception "ptr") [R1,R2]; - switch [THROWTO_SUCCESS .. 
THROWTO_BLOCKED] (retcode) { - - case THROWTO_SUCCESS: { + if (msg == NULL) { jump %ENTRY_CODE(Sp(0)); - } - - case THROWTO_BLOCKED: { - R3 = W_[out]; - // we must block, and call throwToReleaseTarget() before returning + } else { + StgTSO_why_blocked(CurrentTSO) = BlockedOnMsgThrowTo; + StgTSO_block_info(CurrentTSO) = msg; + // we must block, and unlock the message before returning jump stg_block_throwto; } - } } } @@ -301,20 +359,21 @@ INFO_TABLE(stg_catch,2,0,FUN,"catch","catch") { R2 = StgClosure_payload(R1,1); /* h */ R1 = StgClosure_payload(R1,0); /* x */ - jump catchzh_fast; + jump stg_catchzh; } -catchzh_fast +stg_catchzh { /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */ - STK_CHK_GEN(SIZEOF_StgCatchFrame + WDS(1), R1_PTR & R2_PTR, catchzh_fast); + STK_CHK_GEN(SIZEOF_StgCatchFrame + WDS(1), R1_PTR & R2_PTR, stg_catchzh); /* Set up the catch frame */ Sp = Sp - SIZEOF_StgCatchFrame; SET_HDR(Sp,stg_catch_frame_info,W_[CCCS]); StgCatchFrame_handler(Sp) = R2; - StgCatchFrame_exceptions_blocked(Sp) = TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX; + StgCatchFrame_exceptions_blocked(Sp) = + TO_W_(StgTSO_flags(CurrentTSO)) & (TSO_BLOCKEX | TSO_INTERRUPTIBLE); TICK_CATCHF_PUSHED(); /* Apply R1 to the realworld token */ @@ -330,13 +389,13 @@ catchzh_fast * * raise = {err} \n {} -> raise#{err} * - * It is used in raisezh_fast to update thunks on the update list + * It is used in stg_raisezh to update thunks on the update list * -------------------------------------------------------------------------- */ INFO_TABLE(stg_raise,1,0,THUNK_1_0,"raise","raise") { R1 = StgThunk_payload(R1,0); - jump raisezh_fast; + jump stg_raisezh; } section "data" { @@ -348,10 +407,10 @@ INFO_TABLE_RET(stg_raise_ret, RET_SMALL, P_ arg1) R1 = Sp(1); Sp = Sp + WDS(2); W_[no_break_on_exception] = 1; - jump raisezh_fast; + jump stg_raisezh; } -raisezh_fast +stg_raisezh { W_ handler; W_ frame_type; @@ -372,9 +431,9 @@ raisezh_fast #endif retry_pop_stack: - StgTSO_sp(CurrentTSO) = Sp; + SAVE_THREAD_STATE(); (frame_type) = foreign "C" raiseExceptionHelper(BaseReg "ptr", CurrentTSO "ptr", exception "ptr") []; - Sp = StgTSO_sp(CurrentTSO); + LOAD_THREAD_STATE(); if (frame_type == ATOMICALLY_FRAME) { /* The exception has reached the edge of a memory transaction. Check that * the transaction is valid. If not then perhaps the exception should @@ -388,7 +447,7 @@ retry_pop_stack: W_ r; trec = StgTSO_trec(CurrentTSO); (r) = foreign "C" stmValidateNestOfTransactions(trec "ptr") []; - ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") []; + outer = StgTRecHeader_enclosing_trec(trec); foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") []; foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") []; @@ -428,15 +487,14 @@ retry_pop_stack: // deadlock if an exception is raised in InteractiveUI, // for exmplae. Perhaps the stop_on_exception flag should // be per-thread. 
- W_[rts_stop_on_exception] = 0; + CInt[rts_stop_on_exception] = 0; ("ptr" ioAction) = foreign "C" deRefStablePtr (W_[rts_breakpoint_io_action] "ptr") []; - Sp = Sp - WDS(7); - Sp(6) = exception; - Sp(5) = stg_raise_ret_info; - Sp(4) = stg_noforceIO_info; // required for unregisterised + Sp = Sp - WDS(6); + Sp(5) = exception; + Sp(4) = stg_raise_ret_info; Sp(3) = exception; // the AP_STACK - Sp(2) = ghczmprim_GHCziBool_True_closure; // dummy breakpoint info - Sp(1) = ghczmprim_GHCziBool_True_closure; // True <=> a breakpoint + Sp(2) = ghczmprim_GHCziTypes_True_closure; // dummy breakpoint info + Sp(1) = ghczmprim_GHCziTypes_True_closure; // True <=> a breakpoint R1 = ioAction; jump RET_LBL(stg_ap_pppv); } @@ -448,8 +506,10 @@ retry_pop_stack: * We will leave the stack in a GC'able state, see the stg_stop_thread * entry code in StgStartup.cmm. */ - Sp = CurrentTSO + TSO_OFFSET_StgTSO_stack - + WDS(TO_W_(StgTSO_stack_size(CurrentTSO))) - WDS(2); + W_ stack; + stack = StgTSO_stackobj(CurrentTSO); + Sp = stack + OFFSET_StgStack_stack + + WDS(TO_W_(StgStack_stack_size(stack))) - WDS(2); Sp(1) = exception; /* save the exception */ Sp(0) = stg_enter_info; /* so that GC can traverse this stack */ StgTSO_what_next(CurrentTSO) = ThreadKilled::I16; @@ -472,7 +532,7 @@ retry_pop_stack: * * If exceptions were unblocked, arrange that they are unblocked * again after executing the handler by pushing an - * unblockAsyncExceptions_ret stack frame. + * unmaskAsyncExceptions_ret stack frame. * * If we've reached an STM catch frame then roll back the nested * transaction we were using. @@ -481,14 +541,14 @@ retry_pop_stack: frame = Sp; if (frame_type == CATCH_FRAME) { Sp = Sp + SIZEOF_StgCatchFrame; - if (StgCatchFrame_exceptions_blocked(frame) == 0) { - Sp_adj(-1); - Sp(0) = stg_unblockAsyncExceptionszh_ret_info; + if ((StgCatchFrame_exceptions_blocked(frame) & TSO_BLOCKEX) == 0) { + Sp_adj(-1); + Sp(0) = stg_unmaskAsyncExceptionszh_ret_info; } } else { W_ trec, outer; trec = StgTSO_trec(CurrentTSO); - ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") []; + outer = StgTRecHeader_enclosing_trec(trec); foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") []; foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") []; StgTSO_trec(CurrentTSO) = outer; @@ -496,9 +556,18 @@ retry_pop_stack: } /* Ensure that async excpetions are blocked when running the handler. + * The interruptible state is inherited from the context of the + * catch frame. */ - StgTSO_flags(CurrentTSO) = - StgTSO_flags(CurrentTSO) | TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32; + StgTSO_flags(CurrentTSO) = %lobits32( + TO_W_(StgTSO_flags(CurrentTSO)) | TSO_BLOCKEX); + if ((StgCatchFrame_exceptions_blocked(frame) & TSO_INTERRUPTIBLE) == 0) { + StgTSO_flags(CurrentTSO) = %lobits32( + TO_W_(StgTSO_flags(CurrentTSO)) & ~TSO_INTERRUPTIBLE); + } else { + StgTSO_flags(CurrentTSO) = %lobits32( + TO_W_(StgTSO_flags(CurrentTSO)) | TSO_INTERRUPTIBLE); + } /* Call the handler, passing the exception value and a realworld * token as arguments. @@ -512,8 +581,8 @@ retry_pop_stack: jump RET_LBL(stg_ap_pv); } -raiseIOzh_fast +stg_raiseIOzh { /* Args :: R1 :: Exception */ - jump raisezh_fast; + jump stg_raisezh; }
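
For orientation, a minimal user-level Haskell sketch of how the renamed entry points above are expected to surface through Control.Exception in GHC 7.x; the wiring from library functions to these primops is assumed here, not shown in this diff. mask and getMaskingState correspond to stg_maskAsyncExceptionszh and the 0/1/2 encoding in stg_getMaskingStatezh, and killThread reaches stg_killThreadzh via throwTo.

-- Editor's sketch, not part of the diff: user-level view of the mask/unmask
-- primitives.  The mapping onto the stg_* entry points above is assumed.
import Control.Concurrent (forkIO, killThread, threadDelay)
import Control.Exception  (MaskingState (..), getMaskingState, mask)

main :: IO ()
main = do
  s0 <- getMaskingState              -- Unmasked: neither TSO flag set (0)
  tid <- mask $ \restore -> do
    s1 <- getMaskingState            -- MaskedInterruptible: both flags set (2)
    print (s0, s1)
    -- The forked child inherits the masked state; 'restore' unmasks it,
    -- which is where stg_unmaskAsyncExceptionszh eagerly raises any
    -- exception queued on the thread's blocked_exceptions list.
    forkIO (restore (threadDelay 1000000))
  killThread tid                     -- throwTo ThreadKilled, via stg_killThreadzh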