/* -----------------------------------------------------------------------------
*
- * (c) The GHC Team, 1998-2004
+ * (c) The GHC Team, 1998-2011
*
* Out-of-line primitive operations
*
import base_ControlziExceptionziBase_nestedAtomically_closure;
import EnterCriticalSection;
import LeaveCriticalSection;
-import ghczmprim_GHCziBool_False_closure;
+import ghczmprim_GHCziTypes_False_closure;
#if !defined(mingw32_HOST_OS)
import sm_mutex;
#endif
}
}
+/*
+ * COPY_CARDS: after copying n elements into a MUT_ARR_PTRS at element
+ * offset dst_start, update the destination's card table so the GC will
+ * scavenge the written range.  We conservatively mark every card
+ * covering [dst_start, dst_start+n) as dirty.
+ *
+ * (An earlier version tried to copy card bits from the source table,
+ * but it was wrong in several ways: the guard
+ * "src_start & mutArrCardMask == dst_start & mutArrCardMask" parses as
+ * "src_start & (mutArrCardMask == dst_start) & mutArrCardMask" because
+ * == binds tighter than &; the last-card update indexed the table with
+ * the relative offset mutArrPtrCardUp(n) instead of an absolute card;
+ * and the fallback memset ignored dst_start entirely.  Dirtying the
+ * whole destination range is always safe.)
+ *
+ * src_cards_start and copy are accepted but unused, so existing call
+ * sites need not change.
+ */
+#define COPY_CARDS(src_start, src_cards_start, dst_start, dst_cards_start, n, copy) \
+ if (n != 0) { \
+ foreign "C" memset((dst_cards_start + mutArrPtrCardDown(dst_start)) "ptr", 1, \
+ mutArrPtrCardDown(dst_start + n - 1) - mutArrPtrCardDown(dst_start) + 1); \
+ }
+
+/* copyArray# :: Array# e -> Int# -> MutableArray# s e -> Int# -> Int#
+ *              -> State# s -> State# s
+ *
+ * Copy n elements from an immutable array (R1, starting at element
+ * index R2) into a mutable array (R3, starting at element index R4);
+ * R5 = n.  Returns nothing.
+ */
+stg_copyArrayzh
+{
+ W_ bytes, n, src, dst, src_start, dst_start, src_start_ptr, dst_start_ptr;
+ W_ src_cards_start, dst_cards_start;
+
+ src = R1;
+ src_start = R2;
+ dst = R3;
+ dst_start = R4;
+ n = R5;
+ // A GC here re-enters the primop from the top, so the reads above
+ // are redone; only R1 and R3 are heap pointers.
+ MAYBE_GC(R1_PTR & R3_PTR, stg_copyArrayzh);
+
+ bytes = WDS(n);
+
+ // Addresses of the first source/destination elements to copy.
+ src_start_ptr = src + SIZEOF_StgMutArrPtrs + WDS(src_start);
+ dst_start_ptr = dst + SIZEOF_StgMutArrPtrs + WDS(dst_start);
+
+ // Copy data (we assume the arrays aren't overlapping since they're of different types)
+ foreign "C" memcpy(dst_start_ptr "ptr", src_start_ptr "ptr", bytes);
+
+ // The base address of both source and destination card tables
+ // (each card table sits immediately after the array's ptrs words).
+ src_cards_start = src + SIZEOF_StgMutArrPtrs + WDS(StgMutArrPtrs_ptrs(src));
+ dst_cards_start = dst + SIZEOF_StgMutArrPtrs + WDS(StgMutArrPtrs_ptrs(dst));
+
+ // Record the written range in the destination's card table so the GC
+ // scavenges those elements.
+ COPY_CARDS(src_start, src_cards_start, dst_start, dst_cards_start, n, memcpy);
+
+ jump %ENTRY_CODE(Sp(0));
+}
+
+/* copyMutableArray# :: MutableArray# s e -> Int# -> MutableArray# s e
+ *                      -> Int# -> Int# -> State# s -> State# s
+ *
+ * Copy n elements between two mutable arrays: from R1 starting at
+ * element index R2, to R3 starting at element index R4; R5 = n.
+ * Unlike copyArray#, source and destination may be the same array.
+ */
+stg_copyMutableArrayzh
+{
+ W_ bytes, n, src, dst, src_start, dst_start, src_start_ptr, dst_start_ptr;
+ W_ src_cards_start, dst_cards_start;
+
+ src = R1;
+ src_start = R2;
+ dst = R3;
+ dst_start = R4;
+ n = R5;
+ // A GC here re-enters the primop from the top, so the reads above
+ // are redone; only R1 and R3 are heap pointers.
+ MAYBE_GC(R1_PTR & R3_PTR, stg_copyMutableArrayzh);
+
+ bytes = WDS(n);
+
+ // Addresses of the first source/destination elements to copy.
+ src_start_ptr = src + SIZEOF_StgMutArrPtrs + WDS(src_start);
+ dst_start_ptr = dst + SIZEOF_StgMutArrPtrs + WDS(dst_start);
+
+ // Base addresses of the card tables (just after each array's ptrs).
+ src_cards_start = src + SIZEOF_StgMutArrPtrs + WDS(StgMutArrPtrs_ptrs(src));
+ dst_cards_start = dst + SIZEOF_StgMutArrPtrs + WDS(StgMutArrPtrs_ptrs(dst));
+
+ // The only time the memory might overlap is when the two arrays we were provided are the same array!
+ if (src == dst) {
+ // Overlapping ranges: memmove handles either copy direction.
+ foreign "C" memmove(dst_start_ptr "ptr", src_start_ptr "ptr", bytes);
+ COPY_CARDS(src_start, src_cards_start, dst_start, dst_cards_start, n, memmove);
+ } else {
+ foreign "C" memcpy(dst_start_ptr "ptr", src_start_ptr "ptr", bytes);
+ COPY_CARDS(src_start, src_cards_start, dst_start, dst_cards_start, n, memcpy);
+ }
+
+ jump %ENTRY_CODE(Sp(0));
+}
+
+/*
+ * ARRAY_CLONE(name, type): defines primop 'name', which allocates a
+ * fresh MUT_ARR_PTRS closure with info table 'type' and copies n
+ * elements into it from the source array.
+ *   R1 = source array, R2 = index of first element to copy, R3 = n.
+ * The new array's card table is zeroed via memset (all cards clean),
+ * and the new array is returned.
+ * NOTE(review): the allocate() call lists only [R2] as live, yet src
+ * (from R1) is still used afterwards -- confirm allocate() cannot
+ * move the source array here.
+ */
+#define ARRAY_CLONE(name, type) \
+ name \
+ { \
+ W_ src, src_off, words, n, init, arr, src_p, dst_p, size; \
+ \
+ src = R1; \
+ src_off = R2; \
+ n = R3; \
+ \
+ MAYBE_GC(R1_PTR, name); \
+ \
+ size = n + mutArrPtrsCardWords(n); \
+ words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + size; \
+ ("ptr" arr) = foreign "C" allocate(MyCapability() "ptr", words) [R2]; \
+ TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(n), 0); \
+ \
+ SET_HDR(arr, type, W_[CCCS]); \
+ StgMutArrPtrs_ptrs(arr) = n; \
+ StgMutArrPtrs_size(arr) = size; \
+ \
+ dst_p = arr + SIZEOF_StgMutArrPtrs; \
+ src_p = src + SIZEOF_StgMutArrPtrs + WDS(src_off); \
+ \
+ foreign "C" memcpy(dst_p "ptr", src_p "ptr", WDS(n)); \
+ \
+ foreign "C" memset(dst_p + WDS(n), 0, WDS(mutArrPtrsCardWords(n))); \
+ RET_P(arr); \
+ }
+
+// cloneArray#: immutable array -> new immutable array
+ARRAY_CLONE(stg_cloneArrayzh, stg_MUT_ARR_PTRS_FROZEN0_info)
+// cloneMutableArray#: mutable array -> new mutable array
+ARRAY_CLONE(stg_cloneMutableArrayzh, stg_MUT_ARR_PTRS_DIRTY_info)
+// freezeArray# (copying freeze): mutable array -> new immutable array
+ARRAY_CLONE(stg_freezzeArrayzh, stg_MUT_ARR_PTRS_FROZEN0_info)
+// thawArray# (copying thaw): immutable array -> new mutable array
+ARRAY_CLONE(stg_thawArrayzh, stg_MUT_ARR_PTRS_DIRTY_info)
+
+
/* -----------------------------------------------------------------------------
MutVar primitives
-------------------------------------------------------------------------- */
RET_P(mv);
}
+/* casMutVar# :: MutVar# s a -> a -> a -> State# s -> (# State# s, Int#, a #)
+ *
+ * Atomic compare-and-swap on a MutVar#.  Returns (0, old) on success
+ * and (1, current-contents) on failure.
+ */
+stg_casMutVarzh
+ /* MutVar# s a -> a -> a -> State# s -> (# State#, Int#, a #) */
+{
+ W_ mv, old, new, h;
+
+ mv = R1;
+ old = R2;
+ new = R3;
+
+ (h) = foreign "C" cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var,
+ old, new) [];
+ if (h != old) {
+ RET_NP(1,h);
+ } else {
+ // Write barrier: on success we just stored a new pointer into the
+ // MutVar, so a clean (old-generation) MutVar must be dirtied and
+ // put on the mutable list, or the GC could miss 'new'.
+ if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
+ foreign "C" dirty_MUT_VAR(BaseReg "ptr", mv "ptr") [];
+ }
+ RET_NP(0,h);
+ }
+}
+
+
stg_atomicModifyMutVarzh
{
W_ mv, f, z, x, y, r, h;
W_ tso;
W_ why_blocked;
W_ what_next;
- W_ ret;
+ W_ ret, cap, locked;
tso = R1;
- loop:
- if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
- tso = StgTSO__link(tso);
- goto loop;
- }
what_next = TO_W_(StgTSO_what_next(tso));
why_blocked = TO_W_(StgTSO_why_blocked(tso));
ret = why_blocked;
}
}
- RET_N(ret);
+
+ cap = TO_W_(Capability_no(StgTSO_cap(tso)));
+
+ if ((TO_W_(StgTSO_flags(tso)) & TSO_LOCKED) != 0) {
+ locked = 1;
+ } else {
+ locked = 0;
+ }
+
+ RET_NNN(ret,cap,locked);
}
/* -----------------------------------------------------------------------------
// Find the enclosing ATOMICALLY_FRAME or CATCH_RETRY_FRAME
retry_pop_stack:
- StgTSO_sp(CurrentTSO) = Sp;
- (frame_type) = foreign "C" findRetryFrameHelper(CurrentTSO "ptr") [];
- Sp = StgTSO_sp(CurrentTSO);
+ SAVE_THREAD_STATE();
+ (frame_type) = foreign "C" findRetryFrameHelper(MyCapability(), CurrentTSO "ptr") [];
+ LOAD_THREAD_STATE();
frame = Sp;
trec = StgTSO_trec(CurrentTSO);
outer = StgTRecHeader_enclosing_trec(trec);
}
-#define PerformTake(tso, value) \
- W_[StgTSO_sp(tso) + WDS(1)] = value; \
- W_[StgTSO_sp(tso) + WDS(0)] = stg_gc_unpt_r1_info;
+#define PerformTake(stack, value) \
+ W_ sp; \
+ sp = StgStack_sp(stack); \
+ W_[sp + WDS(1)] = value; \
+ W_[sp + WDS(0)] = stg_gc_unpt_r1_info;
-#define PerformPut(tso,lval) \
- StgTSO_sp(tso) = StgTSO_sp(tso) + WDS(3); \
- lval = W_[StgTSO_sp(tso) - WDS(1)];
+#define PerformPut(stack,lval) \
+ W_ sp; \
+ sp = StgStack_sp(stack) + WDS(3); \
+ StgStack_sp(stack) = sp; \
+ lval = W_[sp - WDS(1)];
stg_takeMVarzh
{
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
-loop2:
- if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
- tso = StgTSO__link(tso);
- goto loop2;
- }
-
ASSERT(StgTSO_why_blocked(tso) == BlockedOnMVar::I16);
ASSERT(StgTSO_block_info(tso) == mvar);
// actually perform the putMVar for the thread that we just woke up
- PerformPut(tso,StgMVar_value(mvar));
+ W_ stack;
+ stack = StgTSO_stackobj(tso);
+ PerformPut(stack, StgMVar_value(mvar));
// indicate that the MVar operation has now completed.
StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
// no need to mark the TSO dirty, we have only written END_TSO_QUEUE.
- foreign "C" tryWakeupThread_(MyCapability() "ptr", tso) [];
+ foreign "C" tryWakeupThread(MyCapability() "ptr", tso) [];
unlockClosure(mvar, stg_MVAR_DIRTY_info);
RET_P(val);
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
-loop2:
- if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
- tso = StgTSO__link(tso);
- goto loop2;
- }
-
ASSERT(StgTSO_why_blocked(tso) == BlockedOnMVar::I16);
ASSERT(StgTSO_block_info(tso) == mvar);
// actually perform the putMVar for the thread that we just woke up
- PerformPut(tso,StgMVar_value(mvar));
+ W_ stack;
+ stack = StgTSO_stackobj(tso);
+ PerformPut(stack, StgMVar_value(mvar));
// indicate that the MVar operation has now completed.
StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
// no need to mark the TSO dirty, we have only written END_TSO_QUEUE.
- foreign "C" tryWakeupThread_(MyCapability() "ptr", tso) [];
+ foreign "C" tryWakeupThread(MyCapability() "ptr", tso) [];
unlockClosure(mvar, stg_MVAR_DIRTY_info);
- RET_P(val);
+ RET_NP(1,val);
}
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
-loop2:
- if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
- tso = StgTSO__link(tso);
- goto loop2;
- }
-
ASSERT(StgTSO_why_blocked(tso) == BlockedOnMVar::I16);
ASSERT(StgTSO_block_info(tso) == mvar);
// actually perform the takeMVar
- PerformTake(tso, val);
+ W_ stack;
+ stack = StgTSO_stackobj(tso);
+ PerformTake(stack, val);
// indicate that the MVar operation has now completed.
StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
-
- if (TO_W_(StgTSO_dirty(tso)) == 0) {
- foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
+
+ if (TO_W_(StgStack_dirty(stack)) == 0) {
+ foreign "C" dirty_STACK(MyCapability() "ptr", stack "ptr") [];
}
- foreign "C" tryWakeupThread_(MyCapability() "ptr", tso) [];
+ foreign "C" tryWakeupThread(MyCapability() "ptr", tso) [];
unlockClosure(mvar, stg_MVAR_DIRTY_info);
jump %ENTRY_CODE(Sp(0));
/* No further takes, the MVar is now full. */
StgMVar_value(mvar) = val;
unlockClosure(mvar, stg_MVAR_DIRTY_info);
- jump %ENTRY_CODE(Sp(0));
+ RET_N(1);
}
if (StgHeader_info(q) == stg_IND_info ||
StgHeader_info(q) == stg_MSG_NULL_info) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
-loop2:
- if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
- tso = StgTSO__link(tso);
- goto loop2;
- }
-
ASSERT(StgTSO_why_blocked(tso) == BlockedOnMVar::I16);
ASSERT(StgTSO_block_info(tso) == mvar);
// actually perform the takeMVar
- PerformTake(tso, val);
+ W_ stack;
+ stack = StgTSO_stackobj(tso);
+ PerformTake(stack, val);
// indicate that the MVar operation has now completed.
StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
- if (TO_W_(StgTSO_dirty(tso)) == 0) {
- foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
+ if (TO_W_(StgStack_dirty(stack)) == 0) {
+ foreign "C" dirty_STACK(MyCapability() "ptr", stack "ptr") [];
}
- foreign "C" tryWakeupThread_(MyCapability() "ptr", tso) [];
+ foreign "C" tryWakeupThread(MyCapability() "ptr", tso) [];
unlockClosure(mvar, stg_MVAR_DIRTY_info);
- jump %ENTRY_CODE(Sp(0));
+ RET_N(1);
}
W_ spark;
#ifndef THREADED_RTS
- RET_NP(0,ghczmprim_GHCziBool_False_closure);
+ RET_NP(0,ghczmprim_GHCziTypes_False_closure);
#else
(spark) = foreign "C" findSpark(MyCapability());
if (spark != 0) {
RET_NP(1,spark);
} else {
- RET_NP(0,ghczmprim_GHCziBool_False_closure);
+ RET_NP(0,ghczmprim_GHCziTypes_False_closure);
}
#endif
}
// We should go through the macro HASKELLEVENT_USER_MSG_ENABLED from
// RtsProbes.h, but that header file includes unistd.h, which doesn't
// work in Cmm
+#if !defined(solaris2_TARGET_OS)
(enabled) = foreign "C" __dtrace_isenabled$HaskellEvent$user__msg$v1() [];
+#else
+ // Solaris' DTrace can't handle the
+ // __dtrace_isenabled$HaskellEvent$user__msg$v1
+ // call above. That call merely tests whether the user__msg probe is
+ // enabled, and exists purely as a performance optimization. Since
+ // preparing the probe is cheap, we skip the test on Solaris and
+ // always take the enabled path. Note that this does not mean the
+ // probe will fire at runtime: it still has to be consumed by your
+ // dtrace script, just like any other probe.
+ enabled = 1;
+#endif
if (enabled != 0) {
foreign "C" dtraceUserMsgWrapper(MyCapability() "ptr", msg "ptr") [];
}