#include "Cmm.h"
+#ifdef __PIC__
+#ifndef mingw32_HOST_OS
+import __gmpz_init;
+import __gmpz_add;
+import __gmpz_sub;
+import __gmpz_mul;
+import __gmpz_gcd;
+import __gmpn_gcd_1;
+import __gmpn_cmp;
+import __gmpz_tdiv_q;
+import __gmpz_tdiv_r;
+import __gmpz_tdiv_qr;
+import __gmpz_fdiv_qr;
+import __gmpz_divexact;
+import __gmpz_and;
+import __gmpz_xor;
+import __gmpz_ior;
+import __gmpz_com;
+#endif
+import pthread_mutex_lock;
+import pthread_mutex_unlock;
+#endif
+import base_ControlziExceptionziBase_nestedAtomically_closure;
+import EnterCriticalSection;
+import LeaveCriticalSection;
+
/*-----------------------------------------------------------------------------
Array Primitives
n = R1;
payload_words = ROUNDUP_BYTES_TO_WDS(n);
words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
- "ptr" p = foreign "C" allocateLocal(MyCapability() "ptr",words) [];
+ ("ptr" p) = foreign "C" allocateLocal(MyCapability() "ptr",words) [];
TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
StgArrWords_words(p) = payload_words;
words = words + 1;
}
- "ptr" p = foreign "C" allocatePinned(words) [];
+ ("ptr" p) = foreign "C" allocatePinned(words) [];
TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
// Again, if the ArrWords header isn't a multiple of 8 bytes, we
MAYBE_GC(R2_PTR,newArrayzh_fast);
words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + n;
- "ptr" arr = foreign "C" allocateLocal(MyCapability() "ptr",words) [R2];
+ ("ptr" arr) = foreign "C" allocateLocal(MyCapability() "ptr",words) [R2];
TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(n), 0);
SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, W_[CCCS]);
HP_CHK_GEN_TICKY(SIZE, R1_PTR & R2_PTR, atomicModifyMutVarzh_fast);
#if defined(THREADED_RTS)
- foreign "C" ACQUIRE_LOCK(atomic_modify_mutvar_mutex "ptr") [R1,R2];
+ ACQUIRE_LOCK(atomic_modify_mutvar_mutex "ptr") [R1,R2];
#endif
x = StgMutVar_var(R1);
StgThunk_payload(r,0) = z;
#if defined(THREADED_RTS)
- foreign "C" RELEASE_LOCK(atomic_modify_mutvar_mutex "ptr") [];
+ RELEASE_LOCK(atomic_modify_mutvar_mutex "ptr") [];
#endif
RET_P(r);
hi = TO_W_(val >> 32);
lo = TO_W_(val);
- if ( hi != 0 && hi != 0xFFFFFFFF ) {
- words_needed = 2;
- } else {
+ if ( hi == 0 || (hi == 0xFFFFFFFF && lo != 0) ) {
// minimum is one word
words_needed = 1;
+ } else {
+ words_needed = 2;
}
ALLOC_PRIM( SIZEOF_StgArrWords + WDS(words_needed),
FETCH_MP_TEMP(mp_tmp_w);
W_[mp_tmp_w] = R1;
- r = foreign "C" __gmpn_gcd_1(mp_tmp_w "ptr", 1, R2) [];
+ (r) = foreign "C" __gmpn_gcd_1(mp_tmp_w "ptr", 1, R2) [];
R1 = r;
/* Result parked in R1, return via info-pointer at TOS */
gcdIntegerIntzh_fast
{
/* R1 = s1; R2 = d1; R3 = the int */
- R1 = foreign "C" __gmpn_gcd_1( BYTE_ARR_CTS(R2) "ptr", R1, R3) [];
+ W_ s1;
+ (s1) = foreign "C" __gmpn_gcd_1( BYTE_ARR_CTS(R2) "ptr", R1, R3) [];
+ R1 = s1;
/* Result parked in R1, return via info-pointer at TOS */
jump %ENTRY_CODE(Sp(0));
up = BYTE_ARR_CTS(R2);
vp = BYTE_ARR_CTS(R4);
- cmp = foreign "C" __gmpn_cmp(up "ptr", vp "ptr", size) [];
+ (cmp) = foreign "C" __gmpn_cmp(up "ptr", vp "ptr", size) [];
if (cmp == 0 :: CInt) {
R1 = 0;
RET_NNP(W_[mp_tmp_w], TO_W_(MP_INT__mp_size(mp_tmp1)), p);
}
+/* decodeFloat_Int# :: Float# -> (# Int#, Int# #)
+ *
+ * Decodes a Float# into an Int# mantissa and an Int# exponent.
+ * The C helper __decodeFloat_Int writes the two results into the
+ * scratch words obtained via FETCH_MP_TEMP, which we return directly.
+ */
+decodeFloatzuIntzh_fast
+{
+    F_ arg;
+    FETCH_MP_TEMP(mp_tmp1);
+    FETCH_MP_TEMP(mp_tmp_w);
+
+    /* arguments: F1 = Float# */
+    arg = F1;
+
+    /* Perform the operation */
+    foreign "C" __decodeFloat_Int(mp_tmp1 "ptr", mp_tmp_w "ptr", arg) [];
+
+    /* returns: (Int# (mantissa), Int# (exponent)) */
+    RET_NN(W_[mp_tmp1], W_[mp_tmp_w]);
+}
+
#define DOUBLE_MANTISSA_SIZE SIZEOF_DOUBLE
#define ARR_SIZE (SIZEOF_StgArrWords + DOUBLE_MANTISSA_SIZE)
RET_NNP(W_[mp_tmp_w], TO_W_(MP_INT__mp_size(mp_tmp1)), p);
}
+/* decodeDouble_2Int# :: Double# -> (# Int#, Word#, Word#, Int# #)
+ *
+ * Decodes a Double# into the mantissa sign, the high and low words of
+ * the mantissa, and the exponent.  The C helper __decodeDouble_2Int
+ * writes the four results into the FETCH_MP_TEMP scratch words.
+ */
+decodeDoublezu2Intzh_fast
+{
+    D_ arg;
+    FETCH_MP_TEMP(mp_tmp1);
+    FETCH_MP_TEMP(mp_tmp2);
+    FETCH_MP_TEMP(mp_result1);
+    FETCH_MP_TEMP(mp_result2);
+
+    /* arguments: D1 = Double# */
+    arg = D1;
+
+    /* Perform the operation */
+    foreign "C" __decodeDouble_2Int(mp_tmp1 "ptr", mp_tmp2 "ptr",
+                                    mp_result1 "ptr", mp_result2 "ptr",
+                                    arg) [];
+
+    /* returns:
+       (Int# (mant sign), Word# (mant high), Word# (mant low), Int# (expn)) */
+    RET_NNNN(W_[mp_tmp1], W_[mp_tmp2], W_[mp_result1], W_[mp_result2]);
+}
+
/* -----------------------------------------------------------------------------
* Concurrency primitives
* -------------------------------------------------------------------------- */
W_ threadid;
closure = R1;
- "ptr" threadid = foreign "C" createIOThread( MyCapability() "ptr",
+ ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
RtsFlags_GcFlags_initialStkSize(RtsFlags),
closure "ptr") [];
+
+ /* start blocked if the current thread is blocked */
+ StgTSO_flags(threadid) =
+ StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
+ (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));
+
foreign "C" scheduleThread(MyCapability() "ptr", threadid "ptr") [];
// switch at the earliest opportunity
cpu = R1;
closure = R2;
- "ptr" threadid = foreign "C" createIOThread( MyCapability() "ptr",
+ ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
RtsFlags_GcFlags_initialStkSize(RtsFlags),
closure "ptr") [];
+
+ /* start blocked if the current thread is blocked */
+ StgTSO_flags(threadid) =
+ StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
+ (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));
+
foreign "C" scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr") [];
// switch at the earliest opportunity
{
/* no args */
W_ r;
- r = foreign "C" isThreadBound(CurrentTSO) [];
+ (r) = foreign "C" isThreadBound(CurrentTSO) [];
RET_N(r);
}
+/* threadStatus# :: ThreadId# -> Int#
+ *
+ * Reports the scheduler state of a thread: 16 for ThreadComplete,
+ * 17 for ThreadKilled (magic values that must match GHC.Conc.threadStatus),
+ * otherwise the TSO's why_blocked field.
+ */
+threadStatuszh_fast
+{
+    /* args: R1 :: ThreadId# */
+    W_ tso;
+    W_ why_blocked;
+    W_ what_next;
+    W_ ret;
+
+    tso = R1;
+  loop:
+    // Chase StgTSO__link past ThreadRelocated indirections until we
+    // reach the live TSO.
+    if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
+        tso = StgTSO__link(tso);
+        goto loop;
+    }
+
+    what_next = TO_W_(StgTSO_what_next(tso));
+    why_blocked = TO_W_(StgTSO_why_blocked(tso));
+    // Note: these two reads are not atomic, so they might end up
+    // being inconsistent. It doesn't matter, since we
+    // only return one or the other. If we wanted to return the
+    // contents of block_info too, then we'd have to do some synchronisation.
+
+    if (what_next == ThreadComplete) {
+        ret = 16; // NB. magic, matches up with GHC.Conc.threadStatus
+    } else {
+        if (what_next == ThreadKilled) {
+            ret = 17;
+        } else {
+            ret = why_blocked;
+        }
+    }
+    RET_N(ret);
+}
/* -----------------------------------------------------------------------------
* TVar primitives
* -------------------------------------------------------------------------- */
-#ifdef REG_R1
#define SP_OFF 0
-#define IF_NOT_REG_R1(x)
-#else
-#define SP_OFF 1
-#define IF_NOT_REG_R1(x) x
-#endif
// Catch retry frame ------------------------------------------------------------
+INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
#if defined(PROFILING)
-#define CATCH_RETRY_FRAME_BITMAP 7
-#define CATCH_RETRY_FRAME_WORDS 5
-#else
-#define CATCH_RETRY_FRAME_BITMAP 1
-#define CATCH_RETRY_FRAME_WORDS 3
+ W_ unused1, W_ unused2,
#endif
-
-INFO_TABLE_RET(stg_catch_retry_frame,
- CATCH_RETRY_FRAME_WORDS, CATCH_RETRY_FRAME_BITMAP,
- CATCH_RETRY_FRAME)
+ W_ unused3, "ptr" W_ unused4, "ptr" W_ unused5)
{
W_ r, frame, trec, outer;
- IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
frame = Sp;
trec = StgTSO_trec(CurrentTSO);
- "ptr" outer = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
- r = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
+ ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
+ (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
if (r != 0) {
/* Succeeded (either first branch or second branch) */
StgTSO_trec(CurrentTSO) = outer;
Sp = Sp + SIZEOF_StgCatchRetryFrame;
- IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
jump %ENTRY_CODE(Sp(SP_OFF));
} else {
/* Did not commit: re-execute */
W_ new_trec;
- "ptr" new_trec = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
+ ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
StgTSO_trec(CurrentTSO) = new_trec;
if (StgCatchRetryFrame_running_alt_code(frame) != 0::I32) {
R1 = StgCatchRetryFrame_alt_code(frame);
// Atomically frame ------------------------------------------------------------
+INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
#if defined(PROFILING)
-#define ATOMICALLY_FRAME_BITMAP 3
-#define ATOMICALLY_FRAME_WORDS 4
-#else
-#define ATOMICALLY_FRAME_BITMAP 0
-#define ATOMICALLY_FRAME_WORDS 2
+ W_ unused1, W_ unused2,
#endif
-
-INFO_TABLE_RET(stg_atomically_frame,
- ATOMICALLY_FRAME_WORDS, ATOMICALLY_FRAME_BITMAP,
- ATOMICALLY_FRAME)
+ "ptr" W_ unused3, "ptr" W_ unused4)
{
W_ frame, trec, valid, next_invariant, q, outer;
- IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
frame = Sp;
trec = StgTSO_trec(CurrentTSO);
- "ptr" outer = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
+ ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
if (outer == NO_TREC) {
/* First time back at the atomically frame -- pick up invariants */
- "ptr" q = foreign "C" stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr") [];
+ ("ptr" q) = foreign "C" stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr") [];
StgAtomicallyFrame_next_invariant_to_check(frame) = q;
} else {
if (q != END_INVARIANT_CHECK_QUEUE) {
/* We can't commit yet: another invariant to check */
- "ptr" trec = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [];
+ ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [];
StgTSO_trec(CurrentTSO) = trec;
next_invariant = StgInvariantCheckQueue_invariant(q);
} else {
/* We've got no more invariants to check, try to commit */
- valid = foreign "C" stmCommitTransaction(MyCapability() "ptr", trec "ptr") [];
+ (valid) = foreign "C" stmCommitTransaction(MyCapability() "ptr", trec "ptr") [];
if (valid != 0) {
/* Transaction was valid: commit succeeded */
StgTSO_trec(CurrentTSO) = NO_TREC;
Sp = Sp + SIZEOF_StgAtomicallyFrame;
- IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
jump %ENTRY_CODE(Sp(SP_OFF));
} else {
/* Transaction was not valid: try again */
- "ptr" trec = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
+ ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
StgTSO_trec(CurrentTSO) = trec;
StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
R1 = StgAtomicallyFrame_code(frame);
}
}
-INFO_TABLE_RET(stg_atomically_waiting_frame,
- ATOMICALLY_FRAME_WORDS, ATOMICALLY_FRAME_BITMAP,
- ATOMICALLY_FRAME)
+INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
+#if defined(PROFILING)
+ W_ unused1, W_ unused2,
+#endif
+ "ptr" W_ unused3, "ptr" W_ unused4)
{
W_ frame, trec, valid;
- IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
frame = Sp;
/* The TSO is currently waiting: should we stop waiting? */
- valid = foreign "C" stmReWait(MyCapability() "ptr", CurrentTSO "ptr") [];
+ (valid) = foreign "C" stmReWait(MyCapability() "ptr", CurrentTSO "ptr") [];
if (valid != 0) {
/* Previous attempt is still valid: no point trying again yet */
- IF_NOT_REG_R1(Sp_adj(-2);
- Sp(1) = stg_NO_FINALIZER_closure;
- Sp(0) = stg_ut_1_0_unreg_info;)
jump stg_block_noregs;
} else {
/* Previous attempt is no longer valid: try again */
- "ptr" trec = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
+ ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
StgTSO_trec(CurrentTSO) = trec;
StgHeader_info(frame) = stg_atomically_frame_info;
R1 = StgAtomicallyFrame_code(frame);
// STM catch frame --------------------------------------------------------------
-#ifdef REG_R1
#define SP_OFF 0
-#else
-#define SP_OFF 1
-#endif
-
-#if defined(PROFILING)
-#define CATCH_STM_FRAME_BITMAP 3
-#define CATCH_STM_FRAME_WORDS 4
-#else
-#define CATCH_STM_FRAME_BITMAP 0
-#define CATCH_STM_FRAME_WORDS 2
-#endif
/* Catch frames are very similar to update frames, but when entering
* one we just pop the frame off the stack and perform the correct
* kind of return to the activation record underneath us on the stack.
*/
-INFO_TABLE_RET(stg_catch_stm_frame,
- CATCH_STM_FRAME_WORDS, CATCH_STM_FRAME_BITMAP,
- CATCH_STM_FRAME)
+INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
+#if defined(PROFILING)
+ W_ unused1, W_ unused2,
+#endif
+ "ptr" W_ unused3, "ptr" W_ unused4)
{
- IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
W_ r, frame, trec, outer;
frame = Sp;
trec = StgTSO_trec(CurrentTSO);
- "ptr" outer = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
- r = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
+ ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
+ (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
if (r != 0) {
/* Commit succeeded */
StgTSO_trec(CurrentTSO) = outer;
Sp = Sp + SIZEOF_StgCatchSTMFrame;
- IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
jump Sp(SP_OFF);
} else {
/* Commit failed */
W_ new_trec;
- "ptr" new_trec = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
+ ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
StgTSO_trec(CurrentTSO) = new_trec;
R1 = StgCatchSTMFrame_code(frame);
jump stg_ap_v_fast;
/* Nested transactions are not allowed; raise an exception */
if (old_trec != NO_TREC) {
- R1 = base_GHCziIOBase_NestedAtomically_closure;
+ R1 = base_ControlziExceptionziBase_nestedAtomically_closure;
jump raisezh_fast;
}
StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
    /* Start the memory transaction */
- "ptr" new_trec = foreign "C" stmStartTransaction(MyCapability() "ptr", old_trec "ptr") [R1];
+ ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", old_trec "ptr") [R1];
StgTSO_trec(CurrentTSO) = new_trec;
/* Apply R1 to the realworld token */
W_ cur_trec;
W_ new_trec;
cur_trec = StgTSO_trec(CurrentTSO);
- "ptr" new_trec = foreign "C" stmStartTransaction(MyCapability() "ptr", cur_trec "ptr");
+ ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", cur_trec "ptr");
StgTSO_trec(CurrentTSO) = new_trec;
/* Apply R1 to the realworld token */
/* Start a nested transaction within which to run the first code */
trec = StgTSO_trec(CurrentTSO);
- "ptr" new_trec = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [R1,R2];
+ ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [R1,R2];
StgTSO_trec(CurrentTSO) = new_trec;
/* Set up the catch-retry frame */
// Find the enclosing ATOMICALLY_FRAME or CATCH_RETRY_FRAME
retry_pop_stack:
StgTSO_sp(CurrentTSO) = Sp;
- frame_type = foreign "C" findRetryFrameHelper(CurrentTSO "ptr") [];
+ (frame_type) = foreign "C" findRetryFrameHelper(CurrentTSO "ptr") [];
Sp = StgTSO_sp(CurrentTSO);
frame = Sp;
trec = StgTSO_trec(CurrentTSO);
- "ptr" outer = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
+ ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
if (frame_type == CATCH_RETRY_FRAME) {
// The retry reaches a CATCH_RETRY_FRAME before the atomic frame
foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
if (!StgCatchRetryFrame_running_alt_code(frame) != 0::I32) {
// Retry in the first branch: try the alternative
- "ptr" trec = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
+ ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
StgTSO_trec(CurrentTSO) = trec;
StgCatchRetryFrame_running_alt_code(frame) = 1 :: CInt; // true;
R1 = StgCatchRetryFrame_alt_code(frame);
foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
trec = outer;
- StgTSO_trec(CurrentTSO) = trec;
- "ptr" outer = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
+ StgTSO_trec(CurrentTSO) = trec;
+ ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
}
ASSERT(outer == NO_TREC);
- r = foreign "C" stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr") [];
+ (r) = foreign "C" stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr") [];
if (r != 0) {
// Transaction was valid: stmWait put us on the TVars' queues, we now block
StgHeader_info(frame) = stg_atomically_waiting_frame_info;
Sp = frame;
// Fix up the stack in the unregisterised case: the return convention is different.
- IF_NOT_REG_R1(Sp_adj(-2);
- Sp(1) = stg_NO_FINALIZER_closure;
- Sp(0) = stg_ut_1_0_unreg_info;)
R3 = trec; // passing to stmWaitUnblock()
jump stg_block_stmwait;
} else {
// Transaction was not valid: retry immediately
- "ptr" trec = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
+ ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
StgTSO_trec(CurrentTSO) = trec;
R1 = StgAtomicallyFrame_code(frame);
Sp = frame;
MAYBE_GC (R1_PTR, newTVarzh_fast);
new_value = R1;
- "ptr" tv = foreign "C" stmNewTVar(MyCapability() "ptr", new_value "ptr") [];
+ ("ptr" tv) = foreign "C" stmNewTVar(MyCapability() "ptr", new_value "ptr") [];
RET_P(tv);
}
MAYBE_GC (R1_PTR, readTVarzh_fast); // Call to stmReadTVar may allocate
trec = StgTSO_trec(CurrentTSO);
tvar = R1;
- "ptr" result = foreign "C" stmReadTVar(MyCapability() "ptr", trec "ptr", tvar "ptr") [];
+ ("ptr" result) = foreign "C" stmReadTVar(MyCapability() "ptr", trec "ptr", tvar "ptr") [];
RET_P(result);
}
{
/* args: R1 = MVar closure */
- if (GET_INFO(R1) == stg_EMPTY_MVAR_info) {
+ if (StgMVar_value(R1) == stg_END_TSO_QUEUE_closure) {
RET_N(1);
} else {
RET_N(0);
ALLOC_PRIM ( SIZEOF_StgMVar, NO_PTRS, newMVarzh_fast );
mvar = Hp - SIZEOF_StgMVar + WDS(1);
- SET_HDR(mvar,stg_EMPTY_MVAR_info,W_[CCCS]);
+ SET_HDR(mvar,stg_MVAR_DIRTY_info,W_[CCCS]);
+ // MVARs start dirty: generation 0 has no mutable list
StgMVar_head(mvar) = stg_END_TSO_QUEUE_closure;
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
}
-/* If R1 isn't available, pass it on the stack */
-#ifdef REG_R1
#define PerformTake(tso, value) \
W_[StgTSO_sp(tso) + WDS(1)] = value; \
W_[StgTSO_sp(tso) + WDS(0)] = stg_gc_unpt_r1_info;
-#else
-#define PerformTake(tso, value) \
- W_[StgTSO_sp(tso) + WDS(1)] = value; \
- W_[StgTSO_sp(tso) + WDS(0)] = stg_ut_1_0_unreg_info;
-#endif
#define PerformPut(tso,lval) \
StgTSO_sp(tso) = StgTSO_sp(tso) + WDS(3); \
mvar = R1;
#if defined(THREADED_RTS)
- "ptr" info = foreign "C" lockClosure(mvar "ptr") [];
+ ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
#else
info = GET_INFO(mvar);
#endif
+
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr") [];
+ }
/* If the MVar is empty, put ourselves on its blocking queue,
* and wait until we're woken up.
*/
- if (info == stg_EMPTY_MVAR_info) {
+ if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
- StgTSO_link(StgMVar_tail(mvar)) = CurrentTSO;
+ foreign "C" setTSOLink(MyCapability() "ptr",
+ StgMVar_tail(mvar) "ptr",
+ CurrentTSO) [];
}
- StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
+ StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
+ R1 = mvar;
jump stg_block_takemvar;
}
/* actually perform the putMVar for the thread that we just woke up */
tso = StgMVar_head(mvar);
PerformPut(tso,StgMVar_value(mvar));
- dirtyTSO(tso);
-#if defined(GRAN) || defined(PAR)
- /* ToDo: check 2nd arg (mvar) is right */
- "ptr" tso = foreign "C" unblockOne(StgMVar_head(mvar),mvar) [];
- StgMVar_head(mvar) = tso;
-#else
- "ptr" tso = foreign "C" unblockOne(MyCapability() "ptr",
- StgMVar_head(mvar) "ptr") [];
+ if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
+ }
+
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
-#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
#if defined(THREADED_RTS)
- foreign "C" unlockClosure(mvar "ptr", stg_FULL_MVAR_info) [];
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
RET_P(val);
}
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
#if defined(THREADED_RTS)
- foreign "C" unlockClosure(mvar "ptr", stg_EMPTY_MVAR_info) [];
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_EMPTY_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
RET_P(val);
mvar = R1;
#if defined(THREADED_RTS)
- "ptr" info = foreign "C" lockClosure(mvar "ptr") [];
+ ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
#else
info = GET_INFO(mvar);
#endif
- if (info == stg_EMPTY_MVAR_info) {
+ if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
- foreign "C" unlockClosure(mvar "ptr", stg_EMPTY_MVAR_info) [];
+ unlockClosure(mvar, info);
#endif
/* HACK: we need a pointer to pass back,
* so we abuse NO_FINALIZER_closure
RET_NP(0, stg_NO_FINALIZER_closure);
}
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
+ }
+
/* we got the value... */
val = StgMVar_value(mvar);
/* actually perform the putMVar for the thread that we just woke up */
tso = StgMVar_head(mvar);
PerformPut(tso,StgMVar_value(mvar));
- dirtyTSO(tso);
+ if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
+ }
-#if defined(GRAN) || defined(PAR)
- /* ToDo: check 2nd arg (mvar) is right */
- "ptr" tso = foreign "C" unblockOne(StgMVar_head(mvar) "ptr", mvar "ptr") [];
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
-#else
- "ptr" tso = foreign "C" unblockOne(MyCapability() "ptr",
- StgMVar_head(mvar) "ptr") [];
- StgMVar_head(mvar) = tso;
-#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
#if defined(THREADED_RTS)
- foreign "C" unlockClosure(mvar "ptr", stg_FULL_MVAR_info) [];
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
else
/* No further putMVars, MVar is now empty */
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
#if defined(THREADED_RTS)
- foreign "C" unlockClosure(mvar "ptr", stg_EMPTY_MVAR_info) [];
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_EMPTY_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
putMVarzh_fast
{
- W_ mvar, info, tso;
+ W_ mvar, val, info, tso;
/* args: R1 = MVar, R2 = value */
mvar = R1;
+ val = R2;
#if defined(THREADED_RTS)
- "ptr" info = foreign "C" lockClosure(mvar "ptr") [R2];
+ ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
#else
info = GET_INFO(mvar);
#endif
- if (info == stg_FULL_MVAR_info) {
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
+ }
+
+ if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
- StgTSO_link(StgMVar_tail(mvar)) = CurrentTSO;
+ foreign "C" setTSOLink(MyCapability() "ptr",
+ StgMVar_tail(mvar) "ptr",
+ CurrentTSO) [];
}
- StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
+ StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
+ R1 = mvar;
+ R2 = val;
jump stg_block_putmvar;
}
/* actually perform the takeMVar */
tso = StgMVar_head(mvar);
- PerformTake(tso, R2);
- dirtyTSO(tso);
+ PerformTake(tso, val);
+ if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
+ }
-#if defined(GRAN) || defined(PAR)
- /* ToDo: check 2nd arg (mvar) is right */
- "ptr" tso = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
- StgMVar_head(mvar) = tso;
-#else
- "ptr" tso = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr") [];
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
-#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
#if defined(THREADED_RTS)
- foreign "C" unlockClosure(mvar "ptr", stg_EMPTY_MVAR_info) [];
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
jump %ENTRY_CODE(Sp(0));
}
else
{
/* No further takes, the MVar is now full. */
- StgMVar_value(mvar) = R2;
+ StgMVar_value(mvar) = val;
#if defined(THREADED_RTS)
- foreign "C" unlockClosure(mvar "ptr", stg_FULL_MVAR_info) [];
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_FULL_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
jump %ENTRY_CODE(Sp(0));
}
mvar = R1;
#if defined(THREADED_RTS)
- "ptr" info = foreign "C" lockClosure(mvar "ptr") [R2];
+ ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [R2];
#else
info = GET_INFO(mvar);
#endif
- if (info == stg_FULL_MVAR_info) {
+ if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
- foreign "C" unlockClosure(mvar "ptr", stg_FULL_MVAR_info) [];
+ unlockClosure(mvar, info);
#endif
RET_N(0);
}
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
+ }
+
if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
/* There are takeMVar(s) waiting: wake up the first one
/* actually perform the takeMVar */
tso = StgMVar_head(mvar);
PerformTake(tso, R2);
- dirtyTSO(tso);
+ if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
+ }
-#if defined(GRAN) || defined(PAR)
- /* ToDo: check 2nd arg (mvar) is right */
- "ptr" tso = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
- StgMVar_head(mvar) = tso;
-#else
- "ptr" tso = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr") [];
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
-#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
#if defined(THREADED_RTS)
- foreign "C" unlockClosure(mvar "ptr", stg_EMPTY_MVAR_info) [];
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
else
StgMVar_value(mvar) = R2;
#if defined(THREADED_RTS)
- foreign "C" unlockClosure(mvar "ptr", stg_FULL_MVAR_info) [];
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_FULL_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
ALLOC_PRIM( SIZEOF_StgStableName, R1_PTR, makeStableNamezh_fast );
- index = foreign "C" lookupStableName(R1 "ptr") [];
+ (index) = foreign "C" lookupStableName(R1 "ptr") [];
/* Is there already a StableName for this heap object?
* stable_ptr_table is a pointer to an array of snEntry structs.
/* Args: R1 = a */
W_ sp;
MAYBE_GC(R1_PTR, makeStablePtrzh_fast);
- "ptr" sp = foreign "C" getStablePtr(R1 "ptr") [];
+ ("ptr" sp) = foreign "C" getStablePtr(R1 "ptr") [];
RET_N(sp);
}
W_ bco, bitmap_arr, bytes, words;
bitmap_arr = R5;
+
words = BYTES_TO_WDS(SIZEOF_StgBCO) + StgArrWords_words(bitmap_arr);
bytes = WDS(words);
RET_P(ap);
}
-infoPtrzh_fast
+unpackClosurezh_fast
{
/* args: R1 = closure to analyze */
-
- MAYBE_GC(R1_PTR, infoPtrzh_fast);
+// TODO: Consider the absence of ptrs or nonptrs as a special case ?
- W_ info;
- info = %GET_STD_INFO(R1);
- RET_N(info);
-}
+ W_ info, ptrs, nptrs, p, ptrs_arr, nptrs_arr;
+ info = %GET_STD_INFO(UNTAG(R1));
+
+ // Some closures have non-standard layout, so we omit those here.
+ W_ type;
+ type = TO_W_(%INFO_TYPE(info));
+ switch [0 .. N_CLOSURE_TYPES] type {
+ case THUNK_SELECTOR : {
+ ptrs = 1;
+ nptrs = 0;
+ goto out;
+ }
+ case THUNK, THUNK_1_0, THUNK_0_1, THUNK_2_0, THUNK_1_1,
+ THUNK_0_2, THUNK_STATIC, AP, PAP, AP_STACK, BCO : {
+ ptrs = 0;
+ nptrs = 0;
+ goto out;
+ }
+ default: {
+ ptrs = TO_W_(%INFO_PTRS(info));
+ nptrs = TO_W_(%INFO_NPTRS(info));
+ goto out;
+ }}
+out:
-closurePayloadzh_fast
-{
-/* args: R1 = closure to analyze */
-// TODO: Consider the absence of ptrs or nonptrs as a special case ?
+ W_ ptrs_arr_sz, nptrs_arr_sz;
+ nptrs_arr_sz = SIZEOF_StgArrWords + WDS(nptrs);
+ ptrs_arr_sz = SIZEOF_StgMutArrPtrs + WDS(ptrs);
- MAYBE_GC(R1_PTR, closurePayloadzh_fast);
+ ALLOC_PRIM (ptrs_arr_sz + nptrs_arr_sz, R1_PTR, unpackClosurezh_fast);
- W_ info, ptrs, nptrs, p, ptrs_arr, nptrs_arr;
- info = %GET_STD_INFO(R1);
- ptrs = TO_W_(%INFO_PTRS(info));
- nptrs = TO_W_(%INFO_NPTRS(info));
- p = 0;
+ W_ clos;
+ clos = UNTAG(R1);
+
+ ptrs_arr = Hp - nptrs_arr_sz - ptrs_arr_sz + WDS(1);
+ nptrs_arr = Hp - nptrs_arr_sz + WDS(1);
- ALLOC_PRIM (SIZEOF_StgMutArrPtrs + WDS(ptrs), R1_PTR, closurePayloadzh_fast);
- ptrs_arr = Hp - SIZEOF_StgMutArrPtrs - WDS(ptrs) + WDS(1);
SET_HDR(ptrs_arr, stg_MUT_ARR_PTRS_FROZEN_info, W_[CCCS]);
StgMutArrPtrs_ptrs(ptrs_arr) = ptrs;
+ p = 0;
for:
if(p < ptrs) {
- W_[ptrs_arr + SIZEOF_StgMutArrPtrs + WDS(p)] = StgClosure_payload(R1,p);
+ W_[ptrs_arr + SIZEOF_StgMutArrPtrs + WDS(p)] = StgClosure_payload(clos,p);
p = p + 1;
goto for;
}
- ALLOC_PRIM (SIZEOF_StgArrWords + WDS(nptrs), R1_PTR, closurePayloadzh_fast);
- nptrs_arr = Hp - SIZEOF_StgArrWords - WDS(nptrs) + WDS(1);
SET_HDR(nptrs_arr, stg_ARR_WORDS_info, W_[CCCS]);
StgArrWords_words(nptrs_arr) = nptrs;
p = 0;
for2:
if(p < nptrs) {
- W_[BYTE_ARR_CTS(nptrs_arr) + WDS(p)] = StgClosure_payload(R1, p+ptrs);
+ W_[BYTE_ARR_CTS(nptrs_arr) + WDS(p)] = StgClosure_payload(clos, p+ptrs);
p = p + 1;
goto for2;
}
- RET_PP(ptrs_arr, nptrs_arr);
+ RET_NPP(info, ptrs_arr, nptrs_arr);
}
/* -----------------------------------------------------------------------------
* macro in Schedule.h).
*/
#define APPEND_TO_BLOCKED_QUEUE(tso) \
- ASSERT(StgTSO_link(tso) == END_TSO_QUEUE); \
+ ASSERT(StgTSO__link(tso) == END_TSO_QUEUE); \
if (W_[blocked_queue_hd] == END_TSO_QUEUE) { \
W_[blocked_queue_hd] = tso; \
} else { \
- StgTSO_link(W_[blocked_queue_tl]) = tso; \
+ foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl] "ptr", tso) []; \
} \
W_[blocked_queue_tl] = tso;
{
/* args: R1 */
#ifdef THREADED_RTS
- foreign "C" barf("waitRead# on threaded RTS");
+ foreign "C" barf("waitRead# on threaded RTS") never returns;
#else
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
{
/* args: R1 */
#ifdef THREADED_RTS
- foreign "C" barf("waitWrite# on threaded RTS");
+ foreign "C" barf("waitWrite# on threaded RTS") never returns;
#else
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
#endif
#ifdef THREADED_RTS
- foreign "C" barf("delay# on threaded RTS");
+ foreign "C" barf("delay# on threaded RTS") never returns;
#else
/* args: R1 (microsecond delay amount) */
#ifdef mingw32_HOST_OS
/* could probably allocate this on the heap instead */
- "ptr" ares = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
+ ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
stg_delayzh_malloc_str);
- reqID = foreign "C" addDelayRequest(R1);
+ (reqID) = foreign "C" addDelayRequest(R1);
StgAsyncIOResult_reqID(ares) = reqID;
StgAsyncIOResult_len(ares) = 0;
StgAsyncIOResult_errCode(ares) = 0;
W_ time;
W_ divisor;
- time = foreign "C" getourtimeofday() [R1];
- divisor = TO_W_(RtsFlags_MiscFlags_tickInterval(RtsFlags))*1000;
+ (time) = foreign "C" getourtimeofday() [R1];
+ divisor = TO_W_(RtsFlags_MiscFlags_tickInterval(RtsFlags));
+ if (divisor == 0) {
+ divisor = 50;
+ }
+ divisor = divisor * 1000;
target = ((R1 + divisor - 1) / divisor) /* divide rounding up */
+ time + 1; /* Add 1 as getourtimeofday rounds down */
StgTSO_block_info(CurrentTSO) = target;
while:
if (t != END_TSO_QUEUE && StgTSO_block_info(t) < target) {
prev = t;
- t = StgTSO_link(t);
+ t = StgTSO__link(t);
goto while;
}
- StgTSO_link(CurrentTSO) = t;
+ StgTSO__link(CurrentTSO) = t;
if (prev == NULL) {
W_[sleeping_queue] = CurrentTSO;
} else {
- StgTSO_link(prev) = CurrentTSO;
+ foreign "C" setTSOLink(MyCapability() "ptr", prev "ptr", CurrentTSO) [];
}
jump stg_block_noregs;
#endif
CInt reqID;
#ifdef THREADED_RTS
- foreign "C" barf("asyncRead# on threaded RTS");
+ foreign "C" barf("asyncRead# on threaded RTS") never returns;
#else
/* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
/* could probably allocate this on the heap instead */
- "ptr" ares = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
+ ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
stg_asyncReadzh_malloc_str)
[R1,R2,R3,R4];
- reqID = foreign "C" addIORequest(R1, 0/*FALSE*/,R2,R3,R4 "ptr") [];
+ (reqID) = foreign "C" addIORequest(R1, 0/*FALSE*/,R2,R3,R4 "ptr") [];
StgAsyncIOResult_reqID(ares) = reqID;
StgAsyncIOResult_len(ares) = 0;
StgAsyncIOResult_errCode(ares) = 0;
CInt reqID;
#ifdef THREADED_RTS
- foreign "C" barf("asyncWrite# on threaded RTS");
+ foreign "C" barf("asyncWrite# on threaded RTS") never returns;
#else
/* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
- "ptr" ares = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
+ ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
stg_asyncWritezh_malloc_str)
[R1,R2,R3,R4];
- reqID = foreign "C" addIORequest(R1, 1/*TRUE*/,R2,R3,R4 "ptr") [];
+ (reqID) = foreign "C" addIORequest(R1, 1/*TRUE*/,R2,R3,R4 "ptr") [];
StgAsyncIOResult_reqID(ares) = reqID;
StgAsyncIOResult_len(ares) = 0;
CInt reqID;
#ifdef THREADED_RTS
- foreign "C" barf("asyncDoProc# on threaded RTS");
+ foreign "C" barf("asyncDoProc# on threaded RTS") never returns;
#else
/* args: R1 = proc, R2 = param */
StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
/* could probably allocate this on the heap instead */
- "ptr" ares = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
+ ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
stg_asyncDoProczh_malloc_str)
[R1,R2];
- reqID = foreign "C" addDoProcRequest(R1 "ptr",R2 "ptr") [];
+ (reqID) = foreign "C" addDoProcRequest(R1 "ptr",R2 "ptr") [];
StgAsyncIOResult_reqID(ares) = reqID;
StgAsyncIOResult_len(ares) = 0;
StgAsyncIOResult_errCode(ares) = 0;
#endif
}
#endif
+
+// noDuplicate# tries to ensure that none of the thunks under
+// evaluation by the current thread are also under evaluation by
+// another thread. It relies on *both* threads doing noDuplicate#;
+// the second one will get blocked if they are duplicating some work.
+noDuplicatezh_fast
+{
+    // Flush the thread's registers back to the TSO before calling into
+    // the RTS -- paired with LOAD_THREAD_STATE() on the resume path.
+    SAVE_THREAD_STATE();
+    ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
+    // NOTE(review): presumably threadPaused() claims the thunks this
+    // thread is evaluating so a duplicating thread blocks on them --
+    // confirm against the C definition of threadPaused in the RTS.
+    foreign "C" threadPaused (MyCapability() "ptr", CurrentTSO "ptr") [];
+
+    if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
+        // The thread was killed while paused: terminate it here.
+        jump stg_threadFinished;
+    } else {
+        // Still runnable: restore the saved state and resume at the
+        // return address on top of the stack.
+        LOAD_THREAD_STATE();
+        ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
+        jump %ENTRY_CODE(Sp(0));
+    }
+}
+
+/* getApStackVal# :: a -> Int# -> (# Int#, b #)
+ *
+ * Reads payload word 'offset' out of an AP_STACK closure.  Returns
+ * ok=1 together with the payload value when R1 really is an AP_STACK;
+ * otherwise returns ok=0 and hands the original closure back unchanged.
+ */
+getApStackValzh_fast
+{
+   W_ ap_stack, offset, val, ok;
+
+   /* args: R1 = AP_STACK, R2 = offset */
+   ap_stack = R1;
+   offset = R2;
+
+   if (%INFO_PTR(ap_stack) == stg_AP_STACK_info) {
+        ok = 1;
+        val = StgAP_STACK_payload(ap_stack,offset);
+   } else {
+        // Not an AP_STACK: signal failure and echo the closure back.
+        ok = 0;
+        val = R1;
+   }
+   RET_NP(ok,val);
+}