import pthread_mutex_lock;
import pthread_mutex_unlock;
#endif
-import base_GHCziIOBase_NestedAtomically_closure;
+import base_ControlziExceptionziBase_nestedAtomically_closure;
import EnterCriticalSection;
import LeaveCriticalSection;
RET_N(r);
}
+/* threadStatus# :: ThreadId# -> State# RealWorld -> (# State# RealWorld, Int# #)
+ *
+ * Returns an Int# encoding of a thread's status: 16 for ThreadComplete,
+ * 17 for ThreadKilled (both magic values matched by GHC.Conc.threadStatus
+ * on the Haskell side), otherwise the TSO's raw why_blocked value.
+ */
+threadStatuszh_fast
+{
+  /* args: R1 :: ThreadId# */
+  W_ tso;
+  W_ why_blocked;
+  W_ what_next;
+  W_ ret;
+
+  tso = R1;
+  loop:
+    // A TSO marked ThreadRelocated is an indirection left behind after the
+    // thread's stack was moved; chase the _link field to the live TSO.
+    if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
+      tso = StgTSO__link(tso);
+      goto loop;
+    }
+
+  what_next   = TO_W_(StgTSO_what_next(tso));
+  why_blocked = TO_W_(StgTSO_why_blocked(tso));
+  // Note: these two reads are not atomic, so they might end up
+  // being inconsistent.  It doesn't matter, since we
+  // only return one or the other.  If we wanted to return the
+  // contents of block_info too, then we'd have to do some synchronisation.
+
+  if (what_next == ThreadComplete) {
+      ret = 16;  // NB. magic, matches up with GHC.Conc.threadStatus
+  } else {
+      if (what_next == ThreadKilled) {
+          ret = 17;  // NB. magic, see above — keep in sync with GHC.Conc
+      } else {
+          // Still runnable or blocked: why_blocked itself is the status code.
+          ret = why_blocked;
+      }
+  }
+  RET_N(ret);
+}
/* -----------------------------------------------------------------------------
* TVar primitives
* -------------------------------------------------------------------------- */
-#ifdef REG_R1
#define SP_OFF 0
-#define IF_NOT_REG_R1(x)
-#else
-#define SP_OFF 1
-#define IF_NOT_REG_R1(x) x
-#endif
// Catch retry frame ------------------------------------------------------------
W_ unused3, "ptr" W_ unused4, "ptr" W_ unused5)
{
W_ r, frame, trec, outer;
- IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
frame = Sp;
trec = StgTSO_trec(CurrentTSO);
/* Succeeded (either first branch or second branch) */
StgTSO_trec(CurrentTSO) = outer;
Sp = Sp + SIZEOF_StgCatchRetryFrame;
- IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
jump %ENTRY_CODE(Sp(SP_OFF));
} else {
/* Did not commit: re-execute */
"ptr" W_ unused3, "ptr" W_ unused4)
{
W_ frame, trec, valid, next_invariant, q, outer;
- IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
frame = Sp;
trec = StgTSO_trec(CurrentTSO);
/* Transaction was valid: commit succeeded */
StgTSO_trec(CurrentTSO) = NO_TREC;
Sp = Sp + SIZEOF_StgAtomicallyFrame;
- IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
jump %ENTRY_CODE(Sp(SP_OFF));
} else {
/* Transaction was not valid: try again */
"ptr" W_ unused3, "ptr" W_ unused4)
{
W_ frame, trec, valid;
- IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
frame = Sp;
(valid) = foreign "C" stmReWait(MyCapability() "ptr", CurrentTSO "ptr") [];
if (valid != 0) {
/* Previous attempt is still valid: no point trying again yet */
- IF_NOT_REG_R1(Sp_adj(-2);
- Sp(1) = stg_NO_FINALIZER_closure;
- Sp(0) = stg_ut_1_0_unreg_info;)
jump stg_block_noregs;
} else {
/* Previous attempt is no longer valid: try again */
// STM catch frame --------------------------------------------------------------
-#ifdef REG_R1
#define SP_OFF 0
-#else
-#define SP_OFF 1
-#endif
/* Catch frames are very similar to update frames, but when entering
* one we just pop the frame off the stack and perform the correct
#endif
"ptr" W_ unused3, "ptr" W_ unused4)
{
- IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
W_ r, frame, trec, outer;
frame = Sp;
trec = StgTSO_trec(CurrentTSO);
/* Commit succeeded */
StgTSO_trec(CurrentTSO) = outer;
Sp = Sp + SIZEOF_StgCatchSTMFrame;
- IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
jump Sp(SP_OFF);
} else {
/* Commit failed */
/* Nested transactions are not allowed; raise an exception */
if (old_trec != NO_TREC) {
- R1 = base_GHCziIOBase_NestedAtomically_closure;
+ R1 = base_ControlziExceptionziBase_nestedAtomically_closure;
jump raisezh_fast;
}
StgHeader_info(frame) = stg_atomically_waiting_frame_info;
Sp = frame;
// Fix up the stack in the unregisterised case: the return convention is different.
- IF_NOT_REG_R1(Sp_adj(-2);
- Sp(1) = stg_NO_FINALIZER_closure;
- Sp(0) = stg_ut_1_0_unreg_info;)
R3 = trec; // passing to stmWaitUnblock()
jump stg_block_stmwait;
} else {
}
-/* If R1 isn't available, pass it on the stack */
-#ifdef REG_R1
#define PerformTake(tso, value) \
W_[StgTSO_sp(tso) + WDS(1)] = value; \
W_[StgTSO_sp(tso) + WDS(0)] = stg_gc_unpt_r1_info;
-#else
-#define PerformTake(tso, value) \
- W_[StgTSO_sp(tso) + WDS(1)] = value; \
- W_[StgTSO_sp(tso) + WDS(0)] = stg_ut_1_0_unreg_info;
-#endif
#define PerformPut(tso,lval) \
StgTSO_sp(tso) = StgTSO_sp(tso) + WDS(3); \
#endif
if (info == stg_MVAR_CLEAN_info) {
- foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr") [];
}
/* If the MVar is empty, put ourselves on its blocking queue,
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
- foreign "C" setTSOLink(MyCapability() "ptr", StgMVar_tail(mvar),
- CurrentTSO);
+ foreign "C" setTSOLink(MyCapability() "ptr",
+ StgMVar_tail(mvar) "ptr",
+ CurrentTSO) [];
}
StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
+ R1 = mvar;
jump stg_block_takemvar;
}
tso = StgMVar_head(mvar);
PerformPut(tso,StgMVar_value(mvar));
- if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
- foreign "C" dirty_TSO(MyCapability(), tso);
+ if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
}
-#if defined(GRAN) || defined(PAR)
- /* ToDo: check 2nd arg (mvar) is right */
- ("ptr" tso) = foreign "C" unblockOne(StgMVar_head(mvar),mvar) [];
- StgMVar_head(mvar) = tso;
-#else
("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
-#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
/* actually perform the putMVar for the thread that we just woke up */
tso = StgMVar_head(mvar);
PerformPut(tso,StgMVar_value(mvar));
- if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
- foreign "C" dirty_TSO(MyCapability(), tso);
+ if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
}
-#if defined(GRAN) || defined(PAR)
- /* ToDo: check 2nd arg (mvar) is right */
- ("ptr" tso) = foreign "C" unblockOne(StgMVar_head(mvar) "ptr", mvar "ptr") [];
- StgMVar_head(mvar) = tso;
-#else
("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
-#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
putMVarzh_fast
{
- W_ mvar, info, tso;
+ W_ mvar, val, info, tso;
/* args: R1 = MVar, R2 = value */
mvar = R1;
+ val = R2;
#if defined(THREADED_RTS)
- ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [R2];
+ ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
#else
info = GET_INFO(mvar);
#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
- foreign "C" setTSOLink(MyCapability() "ptr", StgMVar_tail(mvar),
- CurrentTSO);
+ foreign "C" setTSOLink(MyCapability() "ptr",
+ StgMVar_tail(mvar) "ptr",
+ CurrentTSO) [];
}
StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
+ R1 = mvar;
+ R2 = val;
jump stg_block_putmvar;
}
/* actually perform the takeMVar */
tso = StgMVar_head(mvar);
- PerformTake(tso, R2);
- if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
- foreign "C" dirty_TSO(MyCapability(), tso);
+ PerformTake(tso, val);
+ if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
}
-#if defined(GRAN) || defined(PAR)
- /* ToDo: check 2nd arg (mvar) is right */
- ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
- StgMVar_head(mvar) = tso;
-#else
("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
-#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
else
{
/* No further takes, the MVar is now full. */
- StgMVar_value(mvar) = R2;
+ StgMVar_value(mvar) = val;
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_MVAR_DIRTY_info);
/* actually perform the takeMVar */
tso = StgMVar_head(mvar);
PerformTake(tso, R2);
- if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
- foreign "C" dirty_TSO(MyCapability(), tso);
+ if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
}
-#if defined(GRAN) || defined(PAR)
- /* ToDo: check 2nd arg (mvar) is right */
- ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
- StgMVar_head(mvar) = tso;
-#else
("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
-#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
if (W_[blocked_queue_hd] == END_TSO_QUEUE) { \
W_[blocked_queue_hd] = tso; \
} else { \
- foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl], tso); \
+ foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl] "ptr", tso) []; \
} \
W_[blocked_queue_tl] = tso;
if (prev == NULL) {
W_[sleeping_queue] = CurrentTSO;
} else {
- foreign "C" setTSOLink(MyCapability() "ptr", prev, CurrentTSO) [];
+ foreign "C" setTSOLink(MyCapability() "ptr", prev "ptr", CurrentTSO) [];
}
jump stg_block_noregs;
#endif