X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FPrimOps.cmm;h=c666a4207b2a17e20cf5629ba5dc39058115d27a;hb=6cd3d0dc3021fb705fa7603f24afcb19b49f8b3b;hp=e8b216fa5cab0a7c1c5e102ebf5e2c4ef02fd870;hpb=5f923aab146d892f019ff1c1627edf20378a3aac;p=ghc-hetmet.git

diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index e8b216f..c666a42 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -49,7 +49,7 @@ import __gmpz_com;
 import pthread_mutex_lock;
 import pthread_mutex_unlock;
 #endif
-import base_GHCziIOBase_NestedAtomically_closure;
+import base_ControlziExceptionziBase_nestedAtomically_closure;
 import EnterCriticalSection;
 import LeaveCriticalSection;
@@ -193,7 +193,7 @@ newMutVarzh_fast
 
 atomicModifyMutVarzh_fast
 {
-    W_ mv, z, x, y, r;
+    W_ mv, f, z, x, y, r, h;
     /* Args: R1 :: MutVar#,  R2 :: a -> (a,b) */
 
     /* If x is the current contents of the MutVar#, then
@@ -232,19 +232,15 @@ atomicModifyMutVarzh_fast
     HP_CHK_GEN_TICKY(SIZE, R1_PTR & R2_PTR, atomicModifyMutVarzh_fast);
 
-#if defined(THREADED_RTS)
-    ACQUIRE_LOCK(atomic_modify_mutvar_mutex "ptr") [R1,R2];
-#endif
-
-    x = StgMutVar_var(R1);
+    mv = R1;
+    f = R2;
 
     TICK_ALLOC_THUNK_2();
     CCCS_ALLOC(THUNK_2_SIZE);
     z = Hp - THUNK_2_SIZE + WDS(1);
     SET_HDR(z, stg_ap_2_upd_info, W_[CCCS]);
     LDV_RECORD_CREATE(z);
-    StgThunk_payload(z,0) = R2;
-    StgThunk_payload(z,1) = x;
+    StgThunk_payload(z,0) = f;
 
     TICK_ALLOC_THUNK_1();
     CCCS_ALLOC(THUNK_1_SIZE);
@@ -253,9 +249,6 @@ atomicModifyMutVarzh_fast
     LDV_RECORD_CREATE(y);
     StgThunk_payload(y,0) = z;
 
-    StgMutVar_var(R1) = y;
-    foreign "C" dirty_MUT_VAR(BaseReg "ptr", R1 "ptr") [R1];
-
     TICK_ALLOC_THUNK_1();
     CCCS_ALLOC(THUNK_1_SIZE);
     r = y - THUNK_1_SIZE;
@@ -263,10 +256,20 @@ atomicModifyMutVarzh_fast
     LDV_RECORD_CREATE(r);
     StgThunk_payload(r,0) = z;
 
-#if defined(THREADED_RTS)
-    RELEASE_LOCK(atomic_modify_mutvar_mutex "ptr") [];
+  retry:
+    x = StgMutVar_var(mv);
+    StgThunk_payload(z,1) = x;
+#ifdef THREADED_RTS
+    (h) = foreign "C" cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var, x, y) [];
+    if (h != x) { goto retry; }
+#else
+    StgMutVar_var(mv) = y;
 #endif
 
+    if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
+        foreign "C" dirty_MUT_VAR(BaseReg "ptr", mv "ptr") [];
+    }
+
     RET_P(r);
 }
@@ -970,7 +973,7 @@ forkzh_fast
   foreign "C" scheduleThread(MyCapability() "ptr", threadid "ptr") [];
 
   // switch at the earliest opportunity
-  CInt[context_switch] = 1 :: CInt;
+  Capability_context_switch(MyCapability()) = 1 :: CInt;
 
   RET_P(threadid);
 }
@@ -999,7 +1002,7 @@ forkOnzh_fast
   foreign "C" scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr") [];
 
   // switch at the earliest opportunity
-  CInt[context_switch] = 1 :: CInt;
+  Capability_context_switch(MyCapability()) = 1 :: CInt;
 
   RET_P(threadid);
 }
@@ -1034,18 +1037,45 @@ isCurrentThreadBoundzh_fast
   RET_N(r);
 }
 
+threadStatuszh_fast
+{
+    /* args: R1 :: ThreadId# */
+    W_ tso;
+    W_ why_blocked;
+    W_ what_next;
+    W_ ret;
+
+    tso = R1;
+  loop:
+    if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
+        tso = StgTSO__link(tso);
+        goto loop;
+    }
+
+    what_next   = TO_W_(StgTSO_what_next(tso));
+    why_blocked = TO_W_(StgTSO_why_blocked(tso));
+    // Note: these two reads are not atomic, so they might end up
+    // being inconsistent.  It doesn't matter, since we
+    // only return one or the other.  If we wanted to return the
+    // contents of block_info too, then we'd have to do some synchronisation.
+
+    if (what_next == ThreadComplete) {
+        ret = 16;  // NB. magic, matches up with GHC.Conc.threadStatus
+    } else {
+        if (what_next == ThreadKilled) {
+            ret = 17;
+        } else {
+            ret = why_blocked;
+        }
+    }
+    RET_N(ret);
+}
 
 /* -----------------------------------------------------------------------------
  * TVar primitives
  * -------------------------------------------------------------------------- */
 
-#ifdef REG_R1
 #define SP_OFF 0
-#define IF_NOT_REG_R1(x)
-#else
-#define SP_OFF 1
-#define IF_NOT_REG_R1(x) x
-#endif
 
 // Catch retry frame ------------------------------------------------------------
 
@@ -1056,7 +1086,6 @@ INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
   W_ unused3, "ptr" W_ unused4, "ptr" W_ unused5)
 {
    W_ r, frame, trec, outer;
-   IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
 
    frame = Sp;
    trec = StgTSO_trec(CurrentTSO);
@@ -1066,7 +1095,6 @@ INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
      /* Succeeded (either first branch or second branch) */
      StgTSO_trec(CurrentTSO) = outer;
      Sp = Sp + SIZEOF_StgCatchRetryFrame;
-     IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
      jump %ENTRY_CODE(Sp(SP_OFF));
   } else {
      /* Did not commit: re-execute */
@@ -1092,7 +1120,6 @@ INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
   "ptr" W_ unused3, "ptr" W_ unused4)
 {
   W_ frame, trec, valid, next_invariant, q, outer;
-  IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
 
   frame = Sp;
   trec = StgTSO_trec(CurrentTSO);
@@ -1136,7 +1163,6 @@ INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
       /* Transaction was valid: commit succeeded */
       StgTSO_trec(CurrentTSO) = NO_TREC;
       Sp = Sp + SIZEOF_StgAtomicallyFrame;
-      IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
       jump %ENTRY_CODE(Sp(SP_OFF));
     } else {
       /* Transaction was not valid: try again */
@@ -1156,7 +1182,6 @@ INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
   "ptr" W_ unused3, "ptr" W_ unused4)
 {
   W_ frame, trec, valid;
-  IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
 
   frame = Sp;
 
@@ -1164,9 +1189,6 @@ INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
   (valid) = foreign "C" stmReWait(MyCapability() "ptr", CurrentTSO "ptr") [];
   if (valid != 0) {
     /* Previous attempt is still valid: no point trying again yet */
-    IF_NOT_REG_R1(Sp_adj(-2);
-                  Sp(1) = stg_NO_FINALIZER_closure;
-                  Sp(0) = stg_ut_1_0_unreg_info;)
     jump stg_block_noregs;
   } else {
     /* Previous attempt is no longer valid: try again */
@@ -1180,11 +1202,7 @@ INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
 
 // STM catch frame --------------------------------------------------------------
 
-#ifdef REG_R1
 #define SP_OFF 0
-#else
-#define SP_OFF 1
-#endif
 
 /* Catch frames are very similar to update frames, but when entering
  * one we just pop the frame off the stack and perform the correct
@@ -1197,7 +1215,6 @@ INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
 #endif
   "ptr" W_ unused3, "ptr" W_ unused4)
 {
-   IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
    W_ r, frame, trec, outer;
    frame = Sp;
    trec = StgTSO_trec(CurrentTSO);
@@ -1207,7 +1224,6 @@ INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
      /* Commit succeeded */
      StgTSO_trec(CurrentTSO) = outer;
      Sp = Sp + SIZEOF_StgCatchSTMFrame;
-     IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
      jump Sp(SP_OFF);
   } else {
      /* Commit failed */
@@ -1238,7 +1254,7 @@ atomicallyzh_fast
 
   /* Nested transactions are not allowed; raise an exception */
   if (old_trec != NO_TREC) {
-     R1 = base_GHCziIOBase_NestedAtomically_closure;
+     R1 = base_ControlziExceptionziBase_nestedAtomically_closure;
      jump raisezh_fast;
   }
 
@@ -1379,9 +1395,6 @@ retry_pop_stack:
       StgHeader_info(frame) = stg_atomically_waiting_frame_info;
       Sp = frame;
       // Fix up the stack in the unregisterised case: the return convention is different.
-      IF_NOT_REG_R1(Sp_adj(-2);
-                    Sp(1) = stg_NO_FINALIZER_closure;
-                    Sp(0) = stg_ut_1_0_unreg_info;)
       R3 = trec; // passing to stmWaitUnblock()
       jump stg_block_stmwait;
   } else {
@@ -1522,16 +1535,9 @@ newMVarzh_fast
 }
 
 
-/* If R1 isn't available, pass it on the stack */
-#ifdef REG_R1
 #define PerformTake(tso, value)                         \
     W_[StgTSO_sp(tso) + WDS(1)] = value;                \
     W_[StgTSO_sp(tso) + WDS(0)] = stg_gc_unpt_r1_info;
-#else
-#define PerformTake(tso, value)                         \
-    W_[StgTSO_sp(tso) + WDS(1)] = value;                \
-    W_[StgTSO_sp(tso) + WDS(0)] = stg_ut_1_0_unreg_info;
-#endif
 
 #define PerformPut(tso,lval)                    \
     StgTSO_sp(tso) = StgTSO_sp(tso) + WDS(3);   \
@@ -1740,7 +1746,7 @@ putMVarzh_fast
 
       /* actually perform the takeMVar */
       tso = StgMVar_head(mvar);
-      PerformTake(tso, R2);
+      PerformTake(tso, val);
       if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
          foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
       }
@@ -1763,7 +1769,7 @@ putMVarzh_fast
     else
     {
         /* No further takes, the MVar is now full. */
-        StgMVar_value(mvar) = R2;
+        StgMVar_value(mvar) = val;
 
 #if defined(THREADED_RTS)
         unlockClosure(mvar, stg_MVAR_DIRTY_info);