/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2004
 *
 * Out-of-line primitive operations
 *
 * This file contains the implementations of all the primitive
 * operations ("primops") which are not expanded inline. See
 * ghc/compiler/prelude/primops.txt.pp for a list of all the primops;
 * this file contains code for most of those with the attribute
 *
 * Entry convention: the entry convention for a primop is that all the
 * args are in Stg registers (R1, R2, etc.). This is to make writing
 * the primops easier. (see compiler/codeGen/CgCallConv.hs).
 *
 * Return convention: results from a primop are generally returned
 * using the ordinary unboxed tuple return convention. The C-- parser
 * implements the RET_xxxx() macros to perform unboxed-tuple returns
 * based on the prevailing return convention.
 *
 * This file is written in a subset of C--, extended with various
 * features specific to GHC. It is compiled by GHC directly. For the
 * syntax of .cmm files, see the parser in ghc/compiler/cmm/CmmParse.y.
 *
 * ---------------------------------------------------------------------------*/
#ifndef mingw32_HOST_OS

import __gmpz_tdiv_qr;
import __gmpz_fdiv_qr;
import __gmpz_divexact;

import pthread_mutex_lock;
import pthread_mutex_unlock;

import base_GHCziIOBase_NestedAtomically_closure;
import EnterCriticalSection;
import LeaveCriticalSection;
/*-----------------------------------------------------------------------------

   Basically just new*Array - the others are all inline macros.

   The size arg is always passed in R1, and the result returned in R1.

   The slow entry point is for returning from a heap check; the saved
   size argument must be re-loaded from the stack.
   -------------------------------------------------------------------------- */
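/* A Haskell-level sketch of the primop implemented by newByteArrayzh_fast
 * below (illustrative only; assumes GHC.Exts):
 *
 *   {-# LANGUAGE MagicHash, UnboxedTuples #-}
 *   import GHC.Exts
 *
 *   newBA :: Int# -> State# s -> (# State# s, MutableByteArray# s #)
 *   newBA n s = newByteArray# n s   -- n is the requested size in bytes
 */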
/* for objects that are *less* than the size of a word, make sure we
 * round up to the nearest word for the size of the array.
 */

W_ words, payload_words, n, p;
MAYBE_GC(NO_PTRS,newByteArrayzh_fast);
payload_words = ROUNDUP_BYTES_TO_WDS(n);
words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
("ptr" p) = foreign "C" allocateLocal(MyCapability() "ptr",words) [];
TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
StgArrWords_words(p) = payload_words;

newPinnedByteArrayzh_fast

W_ words, payload_words, n, p;

MAYBE_GC(NO_PTRS,newPinnedByteArrayzh_fast);

payload_words = ROUNDUP_BYTES_TO_WDS(n);

// We want an 8-byte aligned array. allocatePinned() gives us
// 8-byte aligned memory by default, but we want to align the
// *goods* inside the ArrWords object, so we have to check the
// size of the ArrWords header and adjust our size accordingly.
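// For example (a hypothetical figure, for illustration only): if
// SIZEOF_StgArrWords were 12 bytes, then 12 & 7 == 4, so we would
// allocate one extra word here and slide the object forward one word
// below, leaving the payload on an 8-byte boundary.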
words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
if ((SIZEOF_StgArrWords & 7) != 0) {

("ptr" p) = foreign "C" allocatePinned(words) [];
TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);

// Again, if the ArrWords header isn't a multiple of 8 bytes, we
// have to push the object forward one word so that the goods
// fall on an 8-byte boundary.
if ((SIZEOF_StgArrWords & 7) != 0) {

SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
StgArrWords_words(p) = payload_words;
W_ words, n, init, arr, p;
/* Args: R1 = words, R2 = initialisation value */

MAYBE_GC(R2_PTR,newArrayzh_fast);

words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + n;
("ptr" arr) = foreign "C" allocateLocal(MyCapability() "ptr",words) [R2];
TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(n), 0);

SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, W_[CCCS]);
StgMutArrPtrs_ptrs(arr) = n;

// Initialise all elements of the array with the value in R2

p = arr + SIZEOF_StgMutArrPtrs;

if (p < arr + WDS(words)) {
unsafeThawArrayzh_fast

// SUBTLETY TO DO WITH THE OLD GEN MUTABLE LIST
//
// A MUT_ARR_PTRS lives on the mutable list, but a MUT_ARR_PTRS_FROZEN
// normally doesn't. However, when we freeze a MUT_ARR_PTRS, we leave
// it on the mutable list for the GC to remove (removing something from
// the mutable list is not easy, because the mut_list is only singly-linked).
//
// So that we can tell whether a MUT_ARR_PTRS_FROZEN is on the mutable list,
// when we freeze it we set the info ptr to be MUT_ARR_PTRS_FROZEN0
// to indicate that it is still on the mutable list.
//
// So, when we thaw a MUT_ARR_PTRS_FROZEN, we must cope with two cases:
// either it is on a mut_list, or it isn't. We adopt the convention that
// the closure type is MUT_ARR_PTRS_FROZEN0 if it is on the mutable list,
// and MUT_ARR_PTRS_FROZEN otherwise. In fact it wouldn't matter if
// we put it on the mutable list more than once, but it would get scavenged
// multiple times during GC, which would be unnecessarily slow.
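//
// A user-level sketch of the round trip that exercises this logic
// (illustrative; assumes GHC.Exts, with arr :: MutableArray# s a):
//
//   case unsafeFreezeArray# arr s0 of   -- leaves a FROZEN0 on the mut_list
//     (# s1, frozen #) -> unsafeThawArray# frozen s1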
if (StgHeader_info(R1) != stg_MUT_ARR_PTRS_FROZEN0_info) {
    SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
    recordMutable(R1, R1);
    // must be done after SET_INFO, because it ASSERTs closure_MUTABLE()

    SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);

/* -----------------------------------------------------------------------------
   -------------------------------------------------------------------------- */

/* Args: R1 = initialisation value */

ALLOC_PRIM( SIZEOF_StgMutVar, R1_PTR, newMutVarzh_fast);

mv = Hp - SIZEOF_StgMutVar + WDS(1);
SET_HDR(mv,stg_MUT_VAR_DIRTY_info,W_[CCCS]);
StgMutVar_var(mv) = R1;
atomicModifyMutVarzh_fast

/* Args: R1 :: MutVar#, R2 :: a -> (a,b) */

/* If x is the current contents of the MutVar#, then
   We want to make the new contents point to

   and the return value is

   obviously we can share (f x).

   z = [stg_ap_2 f x]  (max (HS + 2) MIN_UPD_SIZE)
   y = [stg_sel_0 z]   (max (HS + 1) MIN_UPD_SIZE)
   r = [stg_sel_1 z]   (max (HS + 1) MIN_UPD_SIZE)
*/
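/* Ignoring atomicity and the locking below, the heap structure above
 * corresponds to this lazy Haskell definition (a sketch of the semantics,
 * not of the implementation; assumes Data.IORef):
 *
 *   atomicModify :: IORef a -> (a -> (a, b)) -> IO b
 *   atomicModify ref f = do
 *     x <- readIORef ref     -- the current contents
 *     let z = f x            -- shared thunk:   [stg_ap_2 f x]
 *         y = fst z          -- new contents:   [stg_sel_0 z]
 *         r = snd z          -- returned value: [stg_sel_1 z]
 *     writeIORef ref y
 *     return r
 */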
#define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
#define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),WDS(MIN_UPD_SIZE-1))

#define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(1))
#define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),0)

#define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
#define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),WDS(MIN_UPD_SIZE-2))

#define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(2))
#define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),0)

#define SIZE (THUNK_2_SIZE + THUNK_1_SIZE + THUNK_1_SIZE)

HP_CHK_GEN_TICKY(SIZE, R1_PTR & R2_PTR, atomicModifyMutVarzh_fast);

#if defined(THREADED_RTS)
ACQUIRE_LOCK(atomic_modify_mutvar_mutex "ptr") [R1,R2];

x = StgMutVar_var(R1);

TICK_ALLOC_THUNK_2();
CCCS_ALLOC(THUNK_2_SIZE);
z = Hp - THUNK_2_SIZE + WDS(1);
SET_HDR(z, stg_ap_2_upd_info, W_[CCCS]);
LDV_RECORD_CREATE(z);
StgThunk_payload(z,0) = R2;
StgThunk_payload(z,1) = x;

TICK_ALLOC_THUNK_1();
CCCS_ALLOC(THUNK_1_SIZE);
y = z - THUNK_1_SIZE;
SET_HDR(y, stg_sel_0_upd_info, W_[CCCS]);
LDV_RECORD_CREATE(y);
StgThunk_payload(y,0) = z;

StgMutVar_var(R1) = y;
foreign "C" dirty_MUT_VAR(BaseReg "ptr", R1 "ptr") [R1];

TICK_ALLOC_THUNK_1();
CCCS_ALLOC(THUNK_1_SIZE);
r = y - THUNK_1_SIZE;
SET_HDR(r, stg_sel_1_upd_info, W_[CCCS]);
LDV_RECORD_CREATE(r);
StgThunk_payload(r,0) = z;

#if defined(THREADED_RTS)
RELEASE_LOCK(atomic_modify_mutvar_mutex "ptr") [];
/* -----------------------------------------------------------------------------
   Weak Pointer Primitives
   -------------------------------------------------------------------------- */

STRING(stg_weak_msg,"New weak pointer at %p\n")

/* R3 = finalizer (or NULL) */

R3 = stg_NO_FINALIZER_closure;

ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR & R3_PTR, mkWeakzh_fast );

w = Hp - SIZEOF_StgWeak + WDS(1);
SET_HDR(w, stg_WEAK_info, W_[CCCS]);

StgWeak_value(w)     = R2;
StgWeak_finalizer(w) = R3;

StgWeak_link(w) = W_[weak_ptr_list];
W_[weak_ptr_list] = w;

IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);

if (GET_INFO(w) == stg_DEAD_WEAK_info) {
    RET_NP(0,stg_NO_FINALIZER_closure);

// A weak pointer is inherently used, so we do not need to call
// LDV_recordDead_FILL_SLOP_DYNAMIC():
//     LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)w);
// or, LDV_recordDead():
//     LDV_recordDead((StgClosure *)w, sizeofW(StgWeak) - sizeofW(StgProfHeader));
// Furthermore, when PROFILING is turned on, dead weak pointers are exactly as
// large as weak pointers, so there is no need to fill the slop, either.
// See stg_DEAD_WEAK_info in StgMiscClosures.hc.

// Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
SET_INFO(w,stg_DEAD_WEAK_info);
LDV_RECORD_CREATE(w);

f = StgWeak_finalizer(w);
StgDeadWeak_link(w) = StgWeak_link(w);

/* return the finalizer */
if (f == stg_NO_FINALIZER_closure) {
    RET_NP(0,stg_NO_FINALIZER_closure);

if (GET_INFO(w) == stg_WEAK_info) {

val = StgWeak_value(w);
/* -----------------------------------------------------------------------------
   Arbitrary-precision Integer operations.

   There are some assumptions in this code that mp_limb_t == W_. This is
   the case for all the platforms that GHC supports, currently.
   -------------------------------------------------------------------------- */
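/* Each operation below returns an unboxed pair (# s :: Int#, d :: ByteArray# #):
 * abs(s) is the number of limbs, the sign of s is the sign of the Integer,
 * and d holds the limbs.  A sketch of the Haskell-side wrapper, in the style
 * of the old GMP-based Integer representation (the J# constructor and the
 * boxed wrapper name here are assumptions for illustration):
 *
 *   plusInteger :: Integer -> Integer -> Integer
 *   plusInteger (J# s1 d1) (J# s2 d2) =
 *       case plusInteger# s1 d1 s2 d2 of (# s, d #) -> J# s d
 */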
/* arguments: R1 = Int# */

W_ val, s, p; /* to avoid aliasing */

ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, int2Integerzh_fast );

p = Hp - SIZEOF_StgArrWords;
SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
StgArrWords_words(p) = 1;

/* mpz_set_si is inlined here, makes things simpler */

/* returns (# size :: Int#,
             data :: ByteArray# #)
*/

/* arguments: R1 = Word# */

W_ val, s, p; /* to avoid aliasing */

ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, word2Integerzh_fast);

p = Hp - SIZEOF_StgArrWords;
SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
StgArrWords_words(p) = 1;

/* returns (# size :: Int#,
             data :: ByteArray# #)
*/

/*
 * 'long long' primops for converting to/from Integers.
 */

#ifdef SUPPORT_LONG_LONGS

int64ToIntegerzh_fast

/* arguments: L1 = Int64# */

W_ hi, lo, s, neg, words_needed, p;

hi = TO_W_(val >> 32);

if ( hi == 0 || (hi == 0xFFFFFFFF && lo != 0) ) {
    // minimum is one word

ALLOC_PRIM( SIZEOF_StgArrWords + WDS(words_needed),
            NO_PTRS, int64ToIntegerzh_fast );

p = Hp - SIZEOF_StgArrWords - WDS(words_needed) + WDS(1);
SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
StgArrWords_words(p) = words_needed;

if ( words_needed == 2 ) {

} else /* val==0 */ {

/* returns (# size :: Int#,
             data :: ByteArray# #)
*/

word64ToIntegerzh_fast

/* arguments: L1 = Word64# */

W_ hi, lo, s, words_needed, p;

hi = TO_W_(val >> 32);

ALLOC_PRIM( SIZEOF_StgArrWords + WDS(words_needed),
            NO_PTRS, word64ToIntegerzh_fast );

p = Hp - SIZEOF_StgArrWords - WDS(words_needed) + WDS(1);
SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
StgArrWords_words(p) = words_needed;

} else /* val==0 */ {

/* returns (# size :: Int#,
             data :: ByteArray# #)
*/

#endif /* SUPPORT_LONG_LONGS */
/* ToDo: this is shockingly inefficient */

bits8 [SIZEOF_MP_INT];

bits8 [SIZEOF_MP_INT];

bits8 [SIZEOF_MP_INT];

bits8 [SIZEOF_MP_INT];

#define FETCH_MP_TEMP(X) \
X = BaseReg + (OFFSET_StgRegTable_r ## X);

#define FETCH_MP_TEMP(X) /* Nothing */
#define GMP_TAKE2_RET1(name,mp_fun)                                     \
  FETCH_MP_TEMP(mp_tmp1);                                               \
  FETCH_MP_TEMP(mp_tmp2);                                               \
  FETCH_MP_TEMP(mp_result1)                                             \
  FETCH_MP_TEMP(mp_result2);                                            \
  /* call doYouWantToGC() */                                            \
  MAYBE_GC(R2_PTR & R4_PTR, name);                                      \
  MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1));          \
  MP_INT__mp_size(mp_tmp1)  = (s1);                                     \
  MP_INT__mp_d(mp_tmp1)     = BYTE_ARR_CTS(d1);                         \
  MP_INT__mp_alloc(mp_tmp2) = W_TO_INT(StgArrWords_words(d2));          \
  MP_INT__mp_size(mp_tmp2)  = (s2);                                     \
  MP_INT__mp_d(mp_tmp2)     = BYTE_ARR_CTS(d2);                         \
  foreign "C" __gmpz_init(mp_result1 "ptr") [];                         \
  /* Perform the operation */                                           \
  foreign "C" mp_fun(mp_result1 "ptr",mp_tmp1 "ptr",mp_tmp2 "ptr") [];  \
  RET_NP(TO_W_(MP_INT__mp_size(mp_result1)),                            \
         MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords);                \

#define GMP_TAKE1_RET1(name,mp_fun)                                     \
  FETCH_MP_TEMP(mp_tmp1);                                               \
  FETCH_MP_TEMP(mp_result1)                                             \
  /* call doYouWantToGC() */                                            \
  MAYBE_GC(R2_PTR, name);                                               \
  MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1));          \
  MP_INT__mp_size(mp_tmp1)  = (s1);                                     \
  MP_INT__mp_d(mp_tmp1)     = BYTE_ARR_CTS(d1);                         \
  foreign "C" __gmpz_init(mp_result1 "ptr") [];                         \
  /* Perform the operation */                                           \
  foreign "C" mp_fun(mp_result1 "ptr",mp_tmp1 "ptr") [];                \
  RET_NP(TO_W_(MP_INT__mp_size(mp_result1)),                            \
         MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords);                \

#define GMP_TAKE2_RET2(name,mp_fun)                                     \
  FETCH_MP_TEMP(mp_tmp1);                                               \
  FETCH_MP_TEMP(mp_tmp2);                                               \
  FETCH_MP_TEMP(mp_result1)                                             \
  FETCH_MP_TEMP(mp_result2)                                             \
  /* call doYouWantToGC() */                                            \
  MAYBE_GC(R2_PTR & R4_PTR, name);                                      \
  MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1));          \
  MP_INT__mp_size(mp_tmp1)  = (s1);                                     \
  MP_INT__mp_d(mp_tmp1)     = BYTE_ARR_CTS(d1);                         \
  MP_INT__mp_alloc(mp_tmp2) = W_TO_INT(StgArrWords_words(d2));          \
  MP_INT__mp_size(mp_tmp2)  = (s2);                                     \
  MP_INT__mp_d(mp_tmp2)     = BYTE_ARR_CTS(d2);                         \
  foreign "C" __gmpz_init(mp_result1 "ptr") [];                         \
  foreign "C" __gmpz_init(mp_result2 "ptr") [];                         \
  /* Perform the operation */                                           \
  foreign "C" mp_fun(mp_result1 "ptr",mp_result2 "ptr",mp_tmp1 "ptr",mp_tmp2 "ptr") []; \
  RET_NPNP(TO_W_(MP_INT__mp_size(mp_result1)),                          \
           MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords,               \
           TO_W_(MP_INT__mp_size(mp_result2)),                          \
           MP_INT__mp_d(mp_result2) - SIZEOF_StgArrWords);              \

GMP_TAKE2_RET1(plusIntegerzh_fast,     __gmpz_add)
GMP_TAKE2_RET1(minusIntegerzh_fast,    __gmpz_sub)
GMP_TAKE2_RET1(timesIntegerzh_fast,    __gmpz_mul)
GMP_TAKE2_RET1(gcdIntegerzh_fast,      __gmpz_gcd)
GMP_TAKE2_RET1(quotIntegerzh_fast,     __gmpz_tdiv_q)
GMP_TAKE2_RET1(remIntegerzh_fast,      __gmpz_tdiv_r)
GMP_TAKE2_RET1(divExactIntegerzh_fast, __gmpz_divexact)
GMP_TAKE2_RET1(andIntegerzh_fast,      __gmpz_and)
GMP_TAKE2_RET1(orIntegerzh_fast,       __gmpz_ior)
GMP_TAKE2_RET1(xorIntegerzh_fast,      __gmpz_xor)
GMP_TAKE1_RET1(complementIntegerzh_fast, __gmpz_com)

GMP_TAKE2_RET2(quotRemIntegerzh_fast, __gmpz_tdiv_qr)
GMP_TAKE2_RET2(divModIntegerzh_fast,  __gmpz_fdiv_qr)
mp_tmp_w: W_; // NB. mp_tmp_w is really an mp_limb_t here
/* R1 = the first Int#; R2 = the second Int# */

FETCH_MP_TEMP(mp_tmp_w);

(r) = foreign "C" __gmpn_gcd_1(mp_tmp_w "ptr", 1, R2) [];

/* Result parked in R1, return via info-pointer at TOS */
jump %ENTRY_CODE(Sp(0));

/* R1 = s1; R2 = d1; R3 = the int */

(s1) = foreign "C" __gmpn_gcd_1( BYTE_ARR_CTS(R2) "ptr", R1, R3) [];

/* Result parked in R1, return via info-pointer at TOS */
jump %ENTRY_CODE(Sp(0));

/* R1 = s1; R2 = d1; R3 = the int */
W_ usize, vsize, v_digit, u_digit;

// paraphrased from __gmpz_cmp_si() in the GMP sources
if (%gt(v_digit,0)) {

if (%lt(v_digit,0)) {

if (usize != vsize) {

    jump %ENTRY_CODE(Sp(0));

jump %ENTRY_CODE(Sp(0));

u_digit = W_[BYTE_ARR_CTS(R2)];

if (u_digit == v_digit) {

    jump %ENTRY_CODE(Sp(0));

if (%gtu(u_digit,v_digit)) { // NB. unsigned: these are mp_limb_t's

jump %ENTRY_CODE(Sp(0));

/* R1 = s1; R2 = d1; R3 = s2; R4 = d2 */
W_ usize, vsize, size, up, vp;

// paraphrased from __gmpz_cmp() in the GMP sources

if (usize != vsize) {

    jump %ENTRY_CODE(Sp(0));

jump %ENTRY_CODE(Sp(0));

if (%lt(usize,0)) { // NB. not <, which is unsigned

up = BYTE_ARR_CTS(R2);
vp = BYTE_ARR_CTS(R4);

(cmp) = foreign "C" __gmpn_cmp(up "ptr", vp "ptr", size) [];

if (cmp == 0 :: CInt) {

    jump %ENTRY_CODE(Sp(0));

if (%lt(cmp,0 :: CInt) == %lt(usize,0)) {

/* Result parked in R1, return via info-pointer at TOS */
jump %ENTRY_CODE(Sp(0));

r = W_[R2 + SIZEOF_StgArrWords];

/* Result parked in R1, return via info-pointer at TOS */

jump %ENTRY_CODE(Sp(0));

r = W_[R2 + SIZEOF_StgArrWords];

/* Result parked in R1, return via info-pointer at TOS */

jump %ENTRY_CODE(Sp(0));
FETCH_MP_TEMP(mp_tmp1);
FETCH_MP_TEMP(mp_tmp_w);

/* arguments: F1 = Float# */

ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, decodeFloatzh_fast );

/* Be prepared to tell Lennart-coded __decodeFloat
   where mantissa._mp_d can be put (it does not care about the rest) */
p = Hp - SIZEOF_StgArrWords;
SET_HDR(p,stg_ARR_WORDS_info,W_[CCCS]);
StgArrWords_words(p) = 1;
MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(p);

/* Perform the operation */
foreign "C" __decodeFloat(mp_tmp1 "ptr",mp_tmp_w "ptr" ,arg) [];

/* returns: (Int# (expn), Int#, ByteArray#) */
RET_NNP(W_[mp_tmp_w], TO_W_(MP_INT__mp_size(mp_tmp1)), p);

decodeFloatzuIntzh_fast

FETCH_MP_TEMP(mp_tmp1);
FETCH_MP_TEMP(mp_tmp_w);

/* arguments: F1 = Float# */

/* Perform the operation */
foreign "C" __decodeFloat_Int(mp_tmp1 "ptr", mp_tmp_w "ptr", arg) [];

/* returns: (Int# (mantissa), Int# (exponent)) */
RET_NN(W_[mp_tmp1], W_[mp_tmp_w]);
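/* A Haskell-level sketch of the pair returned above (the boxed wrapper
 * below is an assumption for illustration, in the style of GHC.Float):
 *
 *   decodeFloatInt :: Float -> (Int, Int)
 *   decodeFloatInt (F# f) =
 *       case decodeFloat_Int# f of
 *         (# m, e #) -> (I# m, I# e)   -- mantissa m, exponent e
 */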
#define DOUBLE_MANTISSA_SIZE SIZEOF_DOUBLE
#define ARR_SIZE (SIZEOF_StgArrWords + DOUBLE_MANTISSA_SIZE)

FETCH_MP_TEMP(mp_tmp1);
FETCH_MP_TEMP(mp_tmp_w);

/* arguments: D1 = Double# */

ALLOC_PRIM( ARR_SIZE, NO_PTRS, decodeDoublezh_fast );

/* Be prepared to tell Lennart-coded __decodeDouble
   where mantissa.d can be put (it does not care about the rest) */
p = Hp - ARR_SIZE + WDS(1);
SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
StgArrWords_words(p) = BYTES_TO_WDS(DOUBLE_MANTISSA_SIZE);
MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(p);

/* Perform the operation */
foreign "C" __decodeDouble(mp_tmp1 "ptr", mp_tmp_w "ptr",arg) [];

/* returns: (Int# (expn), Int#, ByteArray#) */
RET_NNP(W_[mp_tmp_w], TO_W_(MP_INT__mp_size(mp_tmp1)), p);

decodeDoublezu2Intzh_fast

FETCH_MP_TEMP(mp_tmp1);
FETCH_MP_TEMP(mp_tmp2);
FETCH_MP_TEMP(mp_result1);
FETCH_MP_TEMP(mp_result2);

/* arguments: D1 = Double# */

/* Perform the operation */
foreign "C" __decodeDouble_2Int(mp_tmp1 "ptr", mp_tmp2 "ptr",
                                mp_result1 "ptr", mp_result2 "ptr",

/* returns:
   (Int# (mant sign), Word# (mant high), Word# (mant low), Int# (expn)) */
RET_NNNN(W_[mp_tmp1], W_[mp_tmp2], W_[mp_result1], W_[mp_result2]);
/* -----------------------------------------------------------------------------
 * Concurrency primitives
 * -------------------------------------------------------------------------- */
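/* A sketch of the user-level entry point for forkzh_fast below (simplified
 * from GHC.Conc; the IO and ThreadId wrappers are assumed, and the real
 * forkIO also installs an exception handler):
 *
 *   forkIO :: IO () -> IO ThreadId
 *   forkIO action = IO $ \ s ->
 *       case fork# action s of (# s1, tid #) -> (# s1, ThreadId tid #)
 */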
/* args: R1 = closure to spark */

MAYBE_GC(R1_PTR, forkzh_fast);

("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
                                RtsFlags_GcFlags_initialStkSize(RtsFlags),

/* start blocked if the current thread is blocked */
StgTSO_flags(threadid) =
    StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
                              (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));

foreign "C" scheduleThread(MyCapability() "ptr", threadid "ptr") [];

// switch at the earliest opportunity
CInt[context_switch] = 1 :: CInt;

/* args: R1 = cpu, R2 = closure to spark */

MAYBE_GC(R2_PTR, forkOnzh_fast);

("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
                                RtsFlags_GcFlags_initialStkSize(RtsFlags),

/* start blocked if the current thread is blocked */
StgTSO_flags(threadid) =
    StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
                              (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));

foreign "C" scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr") [];

// switch at the earliest opportunity
CInt[context_switch] = 1 :: CInt;

jump stg_yield_noregs;

foreign "C" labelThread(R1 "ptr", R2 "ptr") [];

jump %ENTRY_CODE(Sp(0));

isCurrentThreadBoundzh_fast

(r) = foreign "C" isThreadBound(CurrentTSO) [];
/* -----------------------------------------------------------------------------
 * -------------------------------------------------------------------------- */

#define IF_NOT_REG_R1(x)

#define IF_NOT_REG_R1(x) x

// Catch retry frame ------------------------------------------------------------

INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
#if defined(PROFILING)
               W_ unused1, W_ unused2,
               W_ unused3, "ptr" W_ unused4, "ptr" W_ unused5)

W_ r, frame, trec, outer;
IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )

trec = StgTSO_trec(CurrentTSO);
("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
(r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];

/* Succeeded (either first branch or second branch) */
StgTSO_trec(CurrentTSO) = outer;
Sp = Sp + SIZEOF_StgCatchRetryFrame;
IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
jump %ENTRY_CODE(Sp(SP_OFF));

/* Did not commit: re-execute */

("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
StgTSO_trec(CurrentTSO) = new_trec;
if (StgCatchRetryFrame_running_alt_code(frame) != 0::I32) {
    R1 = StgCatchRetryFrame_alt_code(frame);

    R1 = StgCatchRetryFrame_first_code(frame);

// Atomically frame ------------------------------------------------------------

INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
#if defined(PROFILING)
               W_ unused1, W_ unused2,
               "ptr" W_ unused3, "ptr" W_ unused4)

W_ frame, trec, valid, next_invariant, q, outer;
IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )

trec = StgTSO_trec(CurrentTSO);
("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];

if (outer == NO_TREC) {
    /* First time back at the atomically frame -- pick up invariants */
    ("ptr" q) = foreign "C" stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr") [];
    StgAtomicallyFrame_next_invariant_to_check(frame) = q;

    /* Second/subsequent time back at the atomically frame -- abort the
     * tx that's checking the invariant and move on to the next one */
    StgTSO_trec(CurrentTSO) = outer;
    q = StgAtomicallyFrame_next_invariant_to_check(frame);
    StgInvariantCheckQueue_my_execution(q) = trec;
    foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
    /* Don't free trec -- it's linked from q and will be stashed in the
     * invariant if we eventually commit. */
    q = StgInvariantCheckQueue_next_queue_entry(q);
    StgAtomicallyFrame_next_invariant_to_check(frame) = q;

q = StgAtomicallyFrame_next_invariant_to_check(frame);

if (q != END_INVARIANT_CHECK_QUEUE) {
    /* We can't commit yet: another invariant to check */
    ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [];
    StgTSO_trec(CurrentTSO) = trec;

    next_invariant = StgInvariantCheckQueue_invariant(q);
    R1 = StgAtomicInvariant_code(next_invariant);

/* We've got no more invariants to check, try to commit */
(valid) = foreign "C" stmCommitTransaction(MyCapability() "ptr", trec "ptr") [];

/* Transaction was valid: commit succeeded */
StgTSO_trec(CurrentTSO) = NO_TREC;
Sp = Sp + SIZEOF_StgAtomicallyFrame;
IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
jump %ENTRY_CODE(Sp(SP_OFF));

/* Transaction was not valid: try again */
("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
StgTSO_trec(CurrentTSO) = trec;
StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
R1 = StgAtomicallyFrame_code(frame);

INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
#if defined(PROFILING)
               W_ unused1, W_ unused2,
               "ptr" W_ unused3, "ptr" W_ unused4)

W_ frame, trec, valid;
IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )

/* The TSO is currently waiting: should we stop waiting? */
(valid) = foreign "C" stmReWait(MyCapability() "ptr", CurrentTSO "ptr") [];

/* Previous attempt is still valid: no point trying again yet */
IF_NOT_REG_R1(Sp_adj(-2);
              Sp(1) = stg_NO_FINALIZER_closure;
              Sp(0) = stg_ut_1_0_unreg_info;)
jump stg_block_noregs;

/* Previous attempt is no longer valid: try again */
("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
StgTSO_trec(CurrentTSO) = trec;
StgHeader_info(frame) = stg_atomically_frame_info;
R1 = StgAtomicallyFrame_code(frame);

// STM catch frame --------------------------------------------------------------

/* Catch frames are very similar to update frames, but when entering
 * one we just pop the frame off the stack and perform the correct
 * kind of return to the activation record underneath us on the stack.
 */
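/* A user-level sketch of what this frame provides (illustrative;
 * catchSTM as in GHC.Conc, throw from Control.Exception):
 *
 *   catchSTM (do { writeTVar tv 1; throw e }) handler
 *
 * runs handler with the nested transaction's effects (the write to tv)
 * discarded, while on normal return the frame below commits the nested
 * transaction into its enclosing one.
 */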
INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
#if defined(PROFILING)
               W_ unused1, W_ unused2,
               "ptr" W_ unused3, "ptr" W_ unused4)

IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
W_ r, frame, trec, outer;

trec = StgTSO_trec(CurrentTSO);
("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
(r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];

/* Commit succeeded */
StgTSO_trec(CurrentTSO) = outer;
Sp = Sp + SIZEOF_StgCatchSTMFrame;
IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)

("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
StgTSO_trec(CurrentTSO) = new_trec;
R1 = StgCatchSTMFrame_code(frame);

// Primop definition ------------------------------------------------------------
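/* The primops below implement the user-level STM combinators (a sketch,
 * using Control.Concurrent.STM names):
 *
 *   atomically :: STM a -> IO a             -- atomicallyzh_fast
 *   retry      :: STM a                     -- retryzh_fast
 *   orElse     :: STM a -> STM a -> STM a   -- catchRetryzh_fast
 *
 * For example, a transfer that blocks until sufficient funds are present:
 *
 *   transfer :: TVar Int -> TVar Int -> Int -> STM ()
 *   transfer from to n = do
 *     bal <- readTVar from
 *     if bal < n
 *        then retry
 *        else do writeTVar from (bal - n)
 *                toBal <- readTVar to
 *                writeTVar to (toBal + n)
 */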
// stmStartTransaction may allocate
MAYBE_GC (R1_PTR, atomicallyzh_fast);

/* Args: R1 = m :: STM a */
STK_CHK_GEN(SIZEOF_StgAtomicallyFrame + WDS(1), R1_PTR, atomicallyzh_fast);

old_trec = StgTSO_trec(CurrentTSO);

/* Nested transactions are not allowed; raise an exception */
if (old_trec != NO_TREC) {
    R1 = base_GHCziIOBase_NestedAtomically_closure;

/* Set up the atomically frame */
Sp = Sp - SIZEOF_StgAtomicallyFrame;

SET_HDR(frame,stg_atomically_frame_info, W_[CCCS]);
StgAtomicallyFrame_code(frame) = R1;
StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;

/* Start the memory transaction */
1254 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", old_trec "ptr") [R1];
1255 StgTSO_trec(CurrentTSO) = new_trec;
1257 /* Apply R1 to the realworld token */
1266 /* Args: R1 :: STM a */
1267 /* Args: R2 :: Exception -> STM a */
1268 STK_CHK_GEN(SIZEOF_StgCatchSTMFrame + WDS(1), R1_PTR & R2_PTR, catchSTMzh_fast);
1270 /* Set up the catch frame */
1271 Sp = Sp - SIZEOF_StgCatchSTMFrame;
1274 SET_HDR(frame, stg_catch_stm_frame_info, W_[CCCS]);
1275 StgCatchSTMFrame_handler(frame) = R2;
1276 StgCatchSTMFrame_code(frame) = R1;
1278 /* Start a nested transaction to run the body of the try block in */
1281 cur_trec = StgTSO_trec(CurrentTSO);
1282 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", cur_trec "ptr");
1283 StgTSO_trec(CurrentTSO) = new_trec;
1285 /* Apply R1 to the realworld token */
1296 // stmStartTransaction may allocate
1297 MAYBE_GC (R1_PTR & R2_PTR, catchRetryzh_fast);
1299 /* Args: R1 :: STM a */
1300 /* Args: R2 :: STM a */
1301 STK_CHK_GEN(SIZEOF_StgCatchRetryFrame + WDS(1), R1_PTR & R2_PTR, catchRetryzh_fast);
1303 /* Start a nested transaction within which to run the first code */
1304 trec = StgTSO_trec(CurrentTSO);
1305 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [R1,R2];
1306 StgTSO_trec(CurrentTSO) = new_trec;
1308 /* Set up the catch-retry frame */
1309 Sp = Sp - SIZEOF_StgCatchRetryFrame;
1312 SET_HDR(frame, stg_catch_retry_frame_info, W_[CCCS]);
1313 StgCatchRetryFrame_running_alt_code(frame) = 0 :: CInt; // false;
1314 StgCatchRetryFrame_first_code(frame) = R1;
1315 StgCatchRetryFrame_alt_code(frame) = R2;
1317 /* Apply R1 to the realworld token */
MAYBE_GC (NO_PTRS, retryzh_fast); // STM operations may allocate

// Find the enclosing ATOMICALLY_FRAME or CATCH_RETRY_FRAME

StgTSO_sp(CurrentTSO) = Sp;
(frame_type) = foreign "C" findRetryFrameHelper(CurrentTSO "ptr") [];
Sp = StgTSO_sp(CurrentTSO);

trec = StgTSO_trec(CurrentTSO);
("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];

if (frame_type == CATCH_RETRY_FRAME) {
    // The retry reaches a CATCH_RETRY_FRAME before the atomic frame
    ASSERT(outer != NO_TREC);
    // Abort the transaction attempting the current branch
    foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
    foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
    if (StgCatchRetryFrame_running_alt_code(frame) == 0::I32) {
        // Retry in the first branch: try the alternative
        ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
        StgTSO_trec(CurrentTSO) = trec;
        StgCatchRetryFrame_running_alt_code(frame) = 1 :: CInt; // true;
        R1 = StgCatchRetryFrame_alt_code(frame);

        // Retry in the alternative code: propagate the retry
        StgTSO_trec(CurrentTSO) = outer;
        Sp = Sp + SIZEOF_StgCatchRetryFrame;
        goto retry_pop_stack;

// We've reached the ATOMICALLY_FRAME: attempt to wait
ASSERT(frame_type == ATOMICALLY_FRAME);
if (outer != NO_TREC) {
    // We called retry while checking invariants, so abort the current
    // invariant check (merging its TVar accesses into the parent's read
    // set so we'll wait on them)
    foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
    foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];

    StgTSO_trec(CurrentTSO) = trec;
    ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];

ASSERT(outer == NO_TREC);

(r) = foreign "C" stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr") [];

// Transaction was valid: stmWait put us on the TVars' queues, we now block
StgHeader_info(frame) = stg_atomically_waiting_frame_info;

// Fix up the stack in the unregisterised case: the return convention is different.
IF_NOT_REG_R1(Sp_adj(-2);
              Sp(1) = stg_NO_FINALIZER_closure;
              Sp(0) = stg_ut_1_0_unreg_info;)
R3 = trec; // passing to stmWaitUnblock()
jump stg_block_stmwait;

// Transaction was not valid: retry immediately
("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
StgTSO_trec(CurrentTSO) = trec;
R1 = StgAtomicallyFrame_code(frame);

/* Args: R1 = invariant closure */
MAYBE_GC (R1_PTR, checkzh_fast);

trec = StgTSO_trec(CurrentTSO);

foreign "C" stmAddInvariantToCheck(MyCapability() "ptr",

jump %ENTRY_CODE(Sp(0));

/* Args: R1 = initialisation value */

MAYBE_GC (R1_PTR, newTVarzh_fast);

("ptr" tv) = foreign "C" stmNewTVar(MyCapability() "ptr", new_value "ptr") [];

/* Args: R1 = TVar closure */

MAYBE_GC (R1_PTR, readTVarzh_fast); // Call to stmReadTVar may allocate
trec = StgTSO_trec(CurrentTSO);

("ptr" result) = foreign "C" stmReadTVar(MyCapability() "ptr", trec "ptr", tvar "ptr") [];

/* Args: R1 = TVar closure */
/* R2 = New value */

MAYBE_GC (R1_PTR & R2_PTR, writeTVarzh_fast); // Call to stmWriteTVar may allocate
trec = StgTSO_trec(CurrentTSO);

foreign "C" stmWriteTVar(MyCapability() "ptr", trec "ptr", tvar "ptr", new_value "ptr") [];

jump %ENTRY_CODE(Sp(0));
/* -----------------------------------------------------------------------------

   take & putMVar work as follows. Firstly, an important invariant:

     If the MVar is full, then the blocking queue contains only
     threads blocked on putMVar, and if the MVar is empty then the
     blocking queue contains only threads blocked on takeMVar.

     MVar empty : then add ourselves to the blocking queue
     MVar full  : remove the value from the MVar, and
                  blocking queue empty     : return
                  blocking queue non-empty : perform the first blocked putMVar
                                             from the queue, and wake up the
                                             thread (MVar is now full again)

   putMVar is just the dual of the above algorithm.

   How do we "perform a putMVar"? Well, we have to fiddle around with
   the stack of the thread waiting to do the putMVar. See
   stg_block_putmvar and stg_block_takemvar in HeapStackCheck.c for
   the stack layout, and the PerformPut and PerformTake macros below.

   It is important that a blocked take or put is woken up with the
   take/put already performed, because otherwise there would be a
   small window of vulnerability where the thread could receive an
   exception and never perform its take or put, and we'd end up with a

   -------------------------------------------------------------------------- */
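/* A user-level sketch of the behaviour specified above (assumes
 * Control.Concurrent):
 *
 *   main :: IO ()
 *   main = do
 *     m <- newEmptyMVar
 *     forkIO (putMVar m (42 :: Int))   -- would block if m were full
 *     v <- takeMVar m                  -- blocks until the value arrives
 *     print v
 */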
/* args: R1 = MVar closure */

if (StgMVar_value(R1) == stg_END_TSO_QUEUE_closure) {

ALLOC_PRIM ( SIZEOF_StgMVar, NO_PTRS, newMVarzh_fast );

mvar = Hp - SIZEOF_StgMVar + WDS(1);
SET_HDR(mvar,stg_MVAR_DIRTY_info,W_[CCCS]);
// MVARs start dirty: generation 0 has no mutable list
StgMVar_head(mvar)  = stg_END_TSO_QUEUE_closure;
StgMVar_tail(mvar)  = stg_END_TSO_QUEUE_closure;
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;

/* If R1 isn't available, pass it on the stack */

#define PerformTake(tso, value)                             \
    W_[StgTSO_sp(tso) + WDS(1)] = value;                    \
    W_[StgTSO_sp(tso) + WDS(0)] = stg_gc_unpt_r1_info;

#define PerformTake(tso, value)                             \
    W_[StgTSO_sp(tso) + WDS(1)] = value;                    \
    W_[StgTSO_sp(tso) + WDS(0)] = stg_ut_1_0_unreg_info;
#define PerformPut(tso,lval)                                \
    StgTSO_sp(tso) = StgTSO_sp(tso) + WDS(3);               \
    lval = W_[StgTSO_sp(tso) - WDS(1)];

W_ mvar, val, info, tso;

/* args: R1 = MVar closure */

#if defined(THREADED_RTS)
("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];

info = GET_INFO(mvar);

if (info == stg_MVAR_CLEAN_info) {
    foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");

/* If the MVar is empty, put ourselves on its blocking queue,
 * and wait until we're woken up.
 */
if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
    if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
        StgMVar_head(mvar) = CurrentTSO;

        StgTSO_link(StgMVar_tail(mvar)) = CurrentTSO;

    StgTSO_link(CurrentTSO)        = stg_END_TSO_QUEUE_closure;
    StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
    StgTSO_block_info(CurrentTSO)  = mvar;
    StgMVar_tail(mvar) = CurrentTSO;

    jump stg_block_takemvar;

/* we got the value... */
val = StgMVar_value(mvar);

if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure)

    /* There are putMVar(s) waiting...
     * wake up the first thread on the queue
     */
    ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);

    /* actually perform the putMVar for the thread that we just woke up */
    tso = StgMVar_head(mvar);
    PerformPut(tso,StgMVar_value(mvar));

#if defined(GRAN) || defined(PAR)
    /* ToDo: check 2nd arg (mvar) is right */
    ("ptr" tso) = foreign "C" unblockOne(StgMVar_head(mvar),mvar) [];
    StgMVar_head(mvar) = tso;

    ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr",
                                         StgMVar_head(mvar) "ptr") [];
    StgMVar_head(mvar) = tso;

    if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
        StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;

#if defined(THREADED_RTS)
    unlockClosure(mvar, stg_MVAR_DIRTY_info);

    SET_INFO(mvar,stg_MVAR_DIRTY_info);

/* No further putMVars, MVar is now empty */
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;

#if defined(THREADED_RTS)
unlockClosure(mvar, stg_MVAR_DIRTY_info);

SET_INFO(mvar,stg_MVAR_DIRTY_info);
W_ mvar, val, info, tso;

/* args: R1 = MVar closure */

#if defined(THREADED_RTS)
("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];

info = GET_INFO(mvar);

if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
    unlockClosure(mvar, info);

    /* HACK: we need a pointer to pass back,
     * so we abuse NO_FINALIZER_closure
     */
    RET_NP(0, stg_NO_FINALIZER_closure);

if (info == stg_MVAR_CLEAN_info) {
    foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");

/* we got the value... */
val = StgMVar_value(mvar);

if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {

    /* There are putMVar(s) waiting...
     * wake up the first thread on the queue
     */
    ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);

    /* actually perform the putMVar for the thread that we just woke up */
    tso = StgMVar_head(mvar);
    PerformPut(tso,StgMVar_value(mvar));

#if defined(GRAN) || defined(PAR)
    /* ToDo: check 2nd arg (mvar) is right */
    ("ptr" tso) = foreign "C" unblockOne(StgMVar_head(mvar) "ptr", mvar "ptr") [];
    StgMVar_head(mvar) = tso;

    ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr",
                                         StgMVar_head(mvar) "ptr") [];
    StgMVar_head(mvar) = tso;

    if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
        StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;

#if defined(THREADED_RTS)
    unlockClosure(mvar, stg_MVAR_DIRTY_info);

    SET_INFO(mvar,stg_MVAR_DIRTY_info);

/* No further putMVars, MVar is now empty */
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
#if defined(THREADED_RTS)
unlockClosure(mvar, stg_MVAR_DIRTY_info);

SET_INFO(mvar,stg_MVAR_DIRTY_info);
/* args: R1 = MVar, R2 = value */

#if defined(THREADED_RTS)
("ptr" info) = foreign "C" lockClosure(mvar "ptr") [R2];

info = GET_INFO(mvar);

if (info == stg_MVAR_CLEAN_info) {
    foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");

if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
    if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
        StgMVar_head(mvar) = CurrentTSO;

        StgTSO_link(StgMVar_tail(mvar)) = CurrentTSO;

    StgTSO_link(CurrentTSO)        = stg_END_TSO_QUEUE_closure;
    StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
    StgTSO_block_info(CurrentTSO)  = mvar;
    StgMVar_tail(mvar) = CurrentTSO;

    jump stg_block_putmvar;

if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {

    /* There are takeMVar(s) waiting: wake up the first one
     */
    ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);

    /* actually perform the takeMVar */
    tso = StgMVar_head(mvar);
    PerformTake(tso, R2);

#if defined(GRAN) || defined(PAR)
    /* ToDo: check 2nd arg (mvar) is right */
    ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
    StgMVar_head(mvar) = tso;

    ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr") [];
    StgMVar_head(mvar) = tso;

    if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
        StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;

#if defined(THREADED_RTS)
    unlockClosure(mvar, stg_MVAR_DIRTY_info);

    SET_INFO(mvar,stg_MVAR_DIRTY_info);

    jump %ENTRY_CODE(Sp(0));

/* No further takes, the MVar is now full. */
StgMVar_value(mvar) = R2;

#if defined(THREADED_RTS)
unlockClosure(mvar, stg_MVAR_DIRTY_info);

SET_INFO(mvar,stg_MVAR_DIRTY_info);

jump %ENTRY_CODE(Sp(0));

/* ToDo: yield afterward for better communication performance? */
/* args: R1 = MVar, R2 = value */

#if defined(THREADED_RTS)
("ptr" info) = foreign "C" lockClosure(mvar "ptr") [R2];

info = GET_INFO(mvar);

if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
    unlockClosure(mvar, info);

if (info == stg_MVAR_CLEAN_info) {
    foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");

if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {

    /* There are takeMVar(s) waiting: wake up the first one
     */
    ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);

    /* actually perform the takeMVar */
    tso = StgMVar_head(mvar);
    PerformTake(tso, R2);

#if defined(GRAN) || defined(PAR)
    /* ToDo: check 2nd arg (mvar) is right */
    ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
    StgMVar_head(mvar) = tso;

    ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr") [];
    StgMVar_head(mvar) = tso;

    if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
        StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;

#if defined(THREADED_RTS)
    unlockClosure(mvar, stg_MVAR_DIRTY_info);

    SET_INFO(mvar,stg_MVAR_DIRTY_info);

/* No further takes, the MVar is now full. */
StgMVar_value(mvar) = R2;

#if defined(THREADED_RTS)
unlockClosure(mvar, stg_MVAR_DIRTY_info);

SET_INFO(mvar,stg_MVAR_DIRTY_info);

/* ToDo: yield afterward for better communication performance? */
/* -----------------------------------------------------------------------------
   Stable pointer primitives
   ------------------------------------------------------------------------- */
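/* A sketch of the user-level API these primops support
 * (System.Mem.StableName; eqByName is a hypothetical helper):
 *
 *   eqByName :: a -> a -> IO Bool
 *   eqByName x y = do
 *     sx <- makeStableName x
 *     sy <- makeStableName y
 *     return (sx == sy)   -- equal only if made from the same heap object
 */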
makeStableNamezh_fast

ALLOC_PRIM( SIZEOF_StgStableName, R1_PTR, makeStableNamezh_fast );

(index) = foreign "C" lookupStableName(R1 "ptr") [];

/* Is there already a StableName for this heap object?
 * stable_ptr_table is a pointer to an array of snEntry structs.
 */
if ( snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) == NULL ) {
    sn_obj = Hp - SIZEOF_StgStableName + WDS(1);
    SET_HDR(sn_obj, stg_STABLE_NAME_info, W_[CCCS]);
    StgStableName_sn(sn_obj) = index;
    snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) = sn_obj;

    sn_obj = snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry);

makeStablePtrzh_fast

MAYBE_GC(R1_PTR, makeStablePtrzh_fast);
("ptr" sp) = foreign "C" getStablePtr(R1 "ptr") [];

deRefStablePtrzh_fast

/* Args: R1 = the stable ptr */

r = snEntry_addr(W_[stable_ptr_table] + sp*SIZEOF_snEntry);
/* -----------------------------------------------------------------------------
   Bytecode object primitives
   ------------------------------------------------------------------------- */

W_ bco, bitmap_arr, bytes, words;

words = BYTES_TO_WDS(SIZEOF_StgBCO) + StgArrWords_words(bitmap_arr);

ALLOC_PRIM( bytes, R1_PTR&R2_PTR&R3_PTR&R5_PTR, newBCOzh_fast );

bco = Hp - bytes + WDS(1);
SET_HDR(bco, stg_BCO_info, W_[CCCS]);

StgBCO_instrs(bco)   = R1;
StgBCO_literals(bco) = R2;
StgBCO_ptrs(bco)     = R3;
StgBCO_arity(bco)    = HALF_W_(R4);
StgBCO_size(bco)     = HALF_W_(words);

// Copy the arity/bitmap info into the BCO

if (i < StgArrWords_words(bitmap_arr)) {
    StgBCO_bitmap(bco,i) = StgArrWords_payload(bitmap_arr,i);

// R1 = the BCO# for the AP

// This function is *only* used to wrap zero-arity BCOs in an
// updatable wrapper (see ByteCodeLink.lhs). An AP thunk is always
// saturated and always points directly to a FUN or BCO.
ASSERT(%INFO_TYPE(%GET_STD_INFO(R1)) == HALF_W_(BCO) &&
       StgBCO_arity(R1) == HALF_W_(0));

HP_CHK_GEN_TICKY(SIZEOF_StgAP, R1_PTR, mkApUpd0zh_fast);
TICK_ALLOC_UP_THK(0, 0);
CCCS_ALLOC(SIZEOF_StgAP);

ap = Hp - SIZEOF_StgAP + WDS(1);
SET_HDR(ap, stg_AP_info, W_[CCCS]);

StgAP_n_args(ap) = HALF_W_(0);
unpackClosurezh_fast

/* args: R1 = closure to analyze */
// TODO: Consider the absence of ptrs or nonptrs as a special case?

W_ info, ptrs, nptrs, p, ptrs_arr, nptrs_arr;
info = %GET_STD_INFO(UNTAG(R1));

// Some closures have non-standard layout, so we omit those here.

type = TO_W_(%INFO_TYPE(info));
switch [0 .. N_CLOSURE_TYPES] type {
    case THUNK_SELECTOR : {

    case THUNK, THUNK_1_0, THUNK_0_1, THUNK_2_0, THUNK_1_1,
         THUNK_0_2, THUNK_STATIC, AP, PAP, AP_STACK, BCO : {

ptrs  = TO_W_(%INFO_PTRS(info));
nptrs = TO_W_(%INFO_NPTRS(info));

W_ ptrs_arr_sz, nptrs_arr_sz;
nptrs_arr_sz = SIZEOF_StgArrWords   + WDS(nptrs);
ptrs_arr_sz  = SIZEOF_StgMutArrPtrs + WDS(ptrs);

ALLOC_PRIM (ptrs_arr_sz + nptrs_arr_sz, R1_PTR, unpackClosurezh_fast);

ptrs_arr  = Hp - nptrs_arr_sz - ptrs_arr_sz + WDS(1);
nptrs_arr = Hp - nptrs_arr_sz + WDS(1);

SET_HDR(ptrs_arr, stg_MUT_ARR_PTRS_FROZEN_info, W_[CCCS]);
StgMutArrPtrs_ptrs(ptrs_arr) = ptrs;

W_[ptrs_arr + SIZEOF_StgMutArrPtrs + WDS(p)] = StgClosure_payload(clos,p);

SET_HDR(nptrs_arr, stg_ARR_WORDS_info, W_[CCCS]);
StgArrWords_words(nptrs_arr) = nptrs;

W_[BYTE_ARR_CTS(nptrs_arr) + WDS(p)] = StgClosure_payload(clos, p+ptrs);

RET_NPP(info, ptrs_arr, nptrs_arr);
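/* A Haskell-level sketch of the result shape, matching RET_NPP above
 * (the primop type written here is an assumption, in the style of the
 * era's GHC.Exts):
 *
 *   unpackClosure# :: a -> (# Addr#, Array# b, ByteArray# #)
 *   --                       info ptr, pointer fields, non-pointer words
 */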
/* -----------------------------------------------------------------------------
   Thread I/O blocking primitives
   -------------------------------------------------------------------------- */

/* Add a thread to the end of the blocked queue. (C-- version of the C
 * macro in Schedule.h).
 */
#define APPEND_TO_BLOCKED_QUEUE(tso)                    \
    ASSERT(StgTSO_link(tso) == END_TSO_QUEUE);          \
    if (W_[blocked_queue_hd] == END_TSO_QUEUE) {        \
        W_[blocked_queue_hd] = tso;                     \
        StgTSO_link(W_[blocked_queue_tl]) = tso;        \
    W_[blocked_queue_tl] = tso;
foreign "C" barf("waitRead# on threaded RTS") never returns;

ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
StgTSO_block_info(CurrentTSO) = R1;
// No locking - we're not going to use this interface in the
// threaded RTS anyway.
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
jump stg_block_noregs;

foreign "C" barf("waitWrite# on threaded RTS") never returns;

ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
StgTSO_block_info(CurrentTSO) = R1;
// No locking - we're not going to use this interface in the
// threaded RTS anyway.
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
jump stg_block_noregs;
STRING(stg_delayzh_malloc_str, "delayzh_fast")

#ifdef mingw32_HOST_OS

foreign "C" barf("delay# on threaded RTS") never returns;

/* args: R1 (microsecond delay amount) */
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnDelay::I16;

#ifdef mingw32_HOST_OS

/* could probably allocate this on the heap instead */
("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                          stg_delayzh_malloc_str);
(reqID) = foreign "C" addDelayRequest(R1);
StgAsyncIOResult_reqID(ares)   = reqID;
StgAsyncIOResult_len(ares)     = 0;
StgAsyncIOResult_errCode(ares) = 0;
StgTSO_block_info(CurrentTSO)  = ares;
/* Having all async-blocked threads reside on the blocked_queue
 * simplifies matters, so change the status to OnDoProc and put the
 * delayed thread on the blocked_queue.
 */
StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
jump stg_block_async_void;

(time) = foreign "C" getourtimeofday() [R1];
divisor = TO_W_(RtsFlags_MiscFlags_tickInterval(RtsFlags));

divisor = divisor * 1000;
target = ((R1 + divisor - 1) / divisor) /* divide rounding up */
         + time + 1; /* Add 1 as getourtimeofday rounds down */
StgTSO_block_info(CurrentTSO) = target;

/* Insert the new thread in the sleeping queue. */

t = W_[sleeping_queue];

if (t != END_TSO_QUEUE && StgTSO_block_info(t) < target) {

StgTSO_link(CurrentTSO) = t;

    W_[sleeping_queue] = CurrentTSO;

    StgTSO_link(prev) = CurrentTSO;

jump stg_block_noregs;

#endif /* !THREADED_RTS */
#ifdef mingw32_HOST_OS
STRING(stg_asyncReadzh_malloc_str, "asyncReadzh_fast")

foreign "C" barf("asyncRead# on threaded RTS") never returns;

/* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;

/* could probably allocate this on the heap instead */
("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                          stg_asyncReadzh_malloc_str)
(reqID) = foreign "C" addIORequest(R1, 0/*FALSE*/,R2,R3,R4 "ptr") [];
StgAsyncIOResult_reqID(ares)   = reqID;
StgAsyncIOResult_len(ares)     = 0;
StgAsyncIOResult_errCode(ares) = 0;
StgTSO_block_info(CurrentTSO)  = ares;
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
jump stg_block_async;

STRING(stg_asyncWritezh_malloc_str, "asyncWritezh_fast")

foreign "C" barf("asyncWrite# on threaded RTS") never returns;

/* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;

("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                          stg_asyncWritezh_malloc_str)
(reqID) = foreign "C" addIORequest(R1, 1/*TRUE*/,R2,R3,R4 "ptr") [];

StgAsyncIOResult_reqID(ares)   = reqID;
StgAsyncIOResult_len(ares)     = 0;
StgAsyncIOResult_errCode(ares) = 0;
StgTSO_block_info(CurrentTSO)  = ares;
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
jump stg_block_async;

STRING(stg_asyncDoProczh_malloc_str, "asyncDoProczh_fast")

foreign "C" barf("asyncDoProc# on threaded RTS") never returns;

/* args: R1 = proc, R2 = param */
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;

/* could probably allocate this on the heap instead */
("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                          stg_asyncDoProczh_malloc_str)
(reqID) = foreign "C" addDoProcRequest(R1 "ptr",R2 "ptr") [];
StgAsyncIOResult_reqID(ares)   = reqID;
StgAsyncIOResult_len(ares)     = 0;
StgAsyncIOResult_errCode(ares) = 0;
StgTSO_block_info(CurrentTSO)  = ares;
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
jump stg_block_async;
// noDuplicate# tries to ensure that none of the thunks under
// evaluation by the current thread are also under evaluation by
// another thread. It relies on *both* threads doing noDuplicate#;
// the second one will get blocked if they are duplicating some work.
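//
// A sketch of where this primop is used (roughly GHC.IOBase style; the
// definition below is illustrative, not verbatim):
//
//   unsafePerformIO (IO m) =
//     unsafeDupablePerformIO (IO (\ s -> case noDuplicate# s of s1 -> m s1))
//
// so two threads entering the same unsafePerformIO thunk cannot both run m.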
SAVE_THREAD_STATE();
ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
foreign "C" threadPaused (MyCapability() "ptr", CurrentTSO "ptr") [];

if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
    jump stg_threadFinished;

LOAD_THREAD_STATE();
ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
jump %ENTRY_CODE(Sp(0));

getApStackValzh_fast

W_ ap_stack, offset, val, ok;

/* args: R1 = AP_STACK, R2 = offset */

if (%INFO_PTR(ap_stack) == stg_AP_STACK_info) {
    val = StgAP_STACK_payload(ap_stack,offset);