/* -----------------------------------------------------------------------------
 * (c) The GHC Team, 1998-2004
 *
 * Out-of-line primitive operations
 *
 * This file contains the implementations of all the primitive
 * operations ("primops") which are not expanded inline.  See
 * ghc/compiler/prelude/primops.txt.pp for a list of all the primops;
 * this file contains code for most of those with the attribute
 * out_of_line = True.
 *
 * Entry convention: the entry convention for a primop is that all the
 * args are in Stg registers (R1, R2, etc.).  This is to make writing
 * the primops easier.  (see compiler/codeGen/CgCallConv.hs).
 *
 * Return convention: results from a primop are generally returned
 * using the ordinary unboxed tuple return convention.  The C-- parser
 * implements the RET_xxxx() macros to perform unboxed-tuple returns
 * based on the prevailing return convention.
 *
 * This file is written in a subset of C--, extended with various
 * features specific to GHC.  It is compiled by GHC directly.  For the
 * syntax of .cmm files, see the parser in ghc/compiler/cmm/CmmParse.y.
 *
 * ---------------------------------------------------------------------------*/
import __gmpz_tdiv_qr;
import __gmpz_fdiv_qr;
import __gmpz_divexact;

#ifndef mingw32_HOST_OS
import pthread_mutex_lock;
import pthread_mutex_unlock;
#endif

import base_ControlziExceptionziBase_nestedAtomically_closure;
import EnterCriticalSection;
import LeaveCriticalSection;
/*-----------------------------------------------------------------------------
   Array Primitives

   Basically just new*Array - the others are all inline macros.

   The size arg is always passed in R1, and the result returned in R1.

   The slow entry point is for returning from a heap check; the saved
   size argument must be re-loaded from the stack.
   -------------------------------------------------------------------------- */
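/* How these primops are reached from user code (an illustrative sketch; the
 * wrapper function and its name below are ours, not part of the RTS.  In this
 * era the IO constructor lives in GHC.IOBase or GHC.IO depending on the base
 * version):
 *
 *   {-# LANGUAGE MagicHash, UnboxedTuples #-}
 *   import GHC.Exts
 *   import GHC.IOBase (IO(..))
 *
 *   -- allocate a 64-byte mutable byte array; this calls newByteArrayzh_fast
 *   newBA :: IO ()
 *   newBA = IO (\s -> case newByteArray# 64# s of
 *                       (# s', _mba #) -> (# s', () #))
 */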
/* for objects that are *less* than the size of a word, make sure we
 * round up to the nearest word for the size of the array.
 */
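/* E.g. with 8-byte words, ROUNDUP_BYTES_TO_WDS(1) and ROUNDUP_BYTES_TO_WDS(8)
 * are both 1 word, while ROUNDUP_BYTES_TO_WDS(9) is 2. */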
newByteArrayzh_fast
{
    W_ words, payload_words, n, p;
    MAYBE_GC(NO_PTRS,newByteArrayzh_fast);
    n = R1;
    payload_words = ROUNDUP_BYTES_TO_WDS(n);
    words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
    ("ptr" p) = foreign "C" allocateLocal(MyCapability() "ptr",words) [];
    TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
    SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
    StgArrWords_words(p) = payload_words;
    RET_P(p);
}
newPinnedByteArrayzh_fast
{
    W_ words, payload_words, n, p;

    MAYBE_GC(NO_PTRS,newPinnedByteArrayzh_fast);
    n = R1;
    payload_words = ROUNDUP_BYTES_TO_WDS(n);

    // We want an 8-byte aligned array.  allocatePinned() gives us
    // 8-byte aligned memory by default, but we want to align the
    // *goods* inside the ArrWords object, so we have to check the
    // size of the ArrWords header and adjust our size accordingly.
    words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
    if ((SIZEOF_StgArrWords & 7) != 0) {
        words = words + 1;
    }

    ("ptr" p) = foreign "C" allocatePinned(words) [];
    TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);

    // Again, if the ArrWords header isn't a multiple of 8 bytes, we
    // have to push the object forward one word so that the goods
    // fall on an 8-byte boundary.
    if ((SIZEOF_StgArrWords & 7) != 0) {
        p = p + WDS(1);
    }

    SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
    StgArrWords_words(p) = payload_words;
    RET_P(p);
}
newArrayzh_fast
{
    W_ words, n, init, arr, p;
    /* Args: R1 = words, R2 = initialisation value */

    n = R1;
    MAYBE_GC(R2_PTR,newArrayzh_fast);

    words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + n;
    ("ptr" arr) = foreign "C" allocateLocal(MyCapability() "ptr",words) [R2];
    TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(n), 0);

    SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, W_[CCCS]);
    StgMutArrPtrs_ptrs(arr) = n;

    // Initialise all elements of the array with the value in R2
    init = R2;
    p = arr + SIZEOF_StgMutArrPtrs;
  for:
    if (p < arr + WDS(words)) {
        W_[p] = init;
        p = p + WDS(1);
        goto for;
    }

    RET_P(arr);
}
unsafeThawArrayzh_fast
{
  // SUBTLETY TO DO WITH THE OLD GEN MUTABLE LIST
  //
  // A MUT_ARR_PTRS lives on the mutable list, but a MUT_ARR_PTRS_FROZEN
  // normally doesn't.  However, when we freeze a MUT_ARR_PTRS, we leave
  // it on the mutable list for the GC to remove (removing something from
  // the mutable list is not easy, because the mut_list is only singly-linked).
  //
  // So that we can tell whether a MUT_ARR_PTRS_FROZEN is on the mutable list,
  // when we freeze it we set the info ptr to be MUT_ARR_PTRS_FROZEN0
  // to indicate that it is still on the mutable list.
  //
  // So, when we thaw a MUT_ARR_PTRS_FROZEN, we must cope with two cases:
  // either it is on a mut_list, or it isn't.  We adopt the convention that
  // the closure type is MUT_ARR_PTRS_FROZEN0 if it is on the mutable list,
  // and MUT_ARR_PTRS_FROZEN otherwise.  In fact it wouldn't matter if
  // we put it on the mutable list more than once, but it would get scavenged
  // multiple times during GC, which would be unnecessarily slow.
  //
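  // At the Haskell level this is reached via the no-copy thaw (a sketch;
  // in this era unsafeThaw is exported from Data.Array.MArray, it moved to
  // Data.Array.Unsafe later):
  //
  //   import Data.Array        (Array, listArray)
  //   import Data.Array.IO     (IOArray)
  //   import Data.Array.MArray (unsafeThaw)
  //
  //   thawIt :: Array Int Int -> IO (IOArray Int Int)
  //   thawIt = unsafeThaw     -- no copy; ends up in unsafeThawArrayzh_fast
  //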
  if (StgHeader_info(R1) != stg_MUT_ARR_PTRS_FROZEN0_info) {
        SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
        recordMutable(R1, R1);
        // must be done after SET_INFO, because it ASSERTs closure_MUTABLE()
        RET_P(R1);
  } else {
        SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
        RET_P(R1);
  }
}
/* -----------------------------------------------------------------------------
   MutVar primitives
   -------------------------------------------------------------------------- */

newMutVarzh_fast
{
    W_ mv;
    /* Args: R1 = initialisation value */

    ALLOC_PRIM( SIZEOF_StgMutVar, R1_PTR, newMutVarzh_fast);

    mv = Hp - SIZEOF_StgMutVar + WDS(1);
    SET_HDR(mv,stg_MUT_VAR_DIRTY_info,W_[CCCS]);
    StgMutVar_var(mv) = R1;

    RET_P(mv);
}
atomicModifyMutVarzh_fast
{
    W_ mv, f, z, x, y, r, h;
    /* Args: R1 :: MutVar#,  R2 :: a -> (a,b) */

    /* If x is the current contents of the MutVar#, then
       we want to make the new contents point to

         (sel_0 (f x))

       and the return value is

         (sel_1 (f x))

       obviously we can share (f x).

         z = [stg_ap_2 f x]  (max (HS + 2) MIN_UPD_SIZE)
         y = [stg_sel_0 z]   (max (HS + 1) MIN_UPD_SIZE)
         r = [stg_sel_1 z]   (max (HS + 1) MIN_UPD_SIZE)
    */
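    /* This is the primop behind Data.IORef.atomicModifyIORef (a sketch; the
     * thunk names above are real, the Haskell is only illustrative):
     *
     *   bump :: IORef Int -> IO Int
     *   bump ref = atomicModifyIORef ref (\x -> (x + 1, x))  -- returns old x
     *
     * The shared thunk z = f x plays the role of the pair; sel_0/sel_1 are
     * its lazily-demanded fst and snd.
     */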
#if MIN_UPD_SIZE > 1
#define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
#define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),WDS(MIN_UPD_SIZE-1))
#else
#define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(1))
#define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),0)
#endif

#if MIN_UPD_SIZE > 2
#define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
#define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),WDS(MIN_UPD_SIZE-2))
#else
#define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(2))
#define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),0)
#endif
#define SIZE (THUNK_2_SIZE + THUNK_1_SIZE + THUNK_1_SIZE)

   HP_CHK_GEN_TICKY(SIZE, R1_PTR & R2_PTR, atomicModifyMutVarzh_fast);

   mv = R1;
   f  = R2;

   TICK_ALLOC_THUNK_2();
   CCCS_ALLOC(THUNK_2_SIZE);
   z = Hp - THUNK_2_SIZE + WDS(1);
   SET_HDR(z, stg_ap_2_upd_info, W_[CCCS]);
   LDV_RECORD_CREATE(z);
   StgThunk_payload(z,0) = f;

   TICK_ALLOC_THUNK_1();
   CCCS_ALLOC(THUNK_1_SIZE);
   y = z - THUNK_1_SIZE;
   SET_HDR(y, stg_sel_0_upd_info, W_[CCCS]);
   LDV_RECORD_CREATE(y);
   StgThunk_payload(y,0) = z;

   TICK_ALLOC_THUNK_1();
   CCCS_ALLOC(THUNK_1_SIZE);
   r = y - THUNK_1_SIZE;
   SET_HDR(r, stg_sel_1_upd_info, W_[CCCS]);
   LDV_RECORD_CREATE(r);
   StgThunk_payload(r,0) = z;

 retry:
   x = StgMutVar_var(mv);
   StgThunk_payload(z,1) = x;
#ifdef THREADED_RTS
   (h) = foreign "C" cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var, x, y) [];
   if (h != x) { goto retry; }
#else
   StgMutVar_var(mv) = y;
#endif

   if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
     foreign "C" dirty_MUT_VAR(BaseReg "ptr", mv "ptr") [];
   }

   RET_P(r);
}
/* -----------------------------------------------------------------------------
   Weak Pointer Primitives
   -------------------------------------------------------------------------- */

STRING(stg_weak_msg,"New weak pointer at %p\n")

mkWeakzh_fast
{
  /* R1 = key
     R2 = value
     R3 = finalizer (or NULL)
  */
  W_ w;

  if (R3 == NULL) {
    R3 = stg_NO_FINALIZER_closure;
  }

  ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR & R3_PTR, mkWeakzh_fast );

  w = Hp - SIZEOF_StgWeak + WDS(1);
  SET_HDR(w, stg_WEAK_info, W_[CCCS]);

  StgWeak_key(w)       = R1;
  StgWeak_value(w)     = R2;
  StgWeak_finalizer(w) = R3;

  StgWeak_link(w) = W_[weak_ptr_list];
  W_[weak_ptr_list] = w;

  IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);

  RET_P(w);
}
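/* User-level view of these primops (an illustrative sketch via
 * System.Mem.Weak):
 *
 *   import System.Mem.Weak (mkWeakPtr, deRefWeak, finalize)
 *
 *   demo key = do
 *     w <- mkWeakPtr key (Just (putStrLn "finalized"))  -- mkWeakzh_fast
 *     m <- deRefWeak w       -- Just key while key is alive; deRefWeakzh_fast
 *     finalize w             -- runs the finalizer early; finalizzeWeakzh_fast
 */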
finalizzeWeakzh_fast
{
  /* R1 = weak ptr */
  W_ w, f;
  w = R1;

  // already dead?
  if (GET_INFO(w) == stg_DEAD_WEAK_info) {
      RET_NP(0,stg_NO_FINALIZER_closure);
  }

  // A weak pointer is inherently used, so we do not need to call
  // LDV_recordDead_FILL_SLOP_DYNAMIC():
  //    LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)w);
  // or, LDV_recordDead():
  //    LDV_recordDead((StgClosure *)w, sizeofW(StgWeak) - sizeofW(StgProfHeader));
  // Furthermore, when PROFILING is turned on, dead weak pointers are exactly as
  // large as weak pointers, so there is no need to fill the slop, either.
  // See stg_DEAD_WEAK_info in StgMiscClosures.hc.

  // ToDo: maybe use SET_HDR() and remove LDV_recordCreate()?

  SET_INFO(w,stg_DEAD_WEAK_info);
  LDV_RECORD_CREATE(w);

  f = StgWeak_finalizer(w);
  StgDeadWeak_link(w) = StgWeak_link(w);

  /* return the finalizer */
  if (f == stg_NO_FINALIZER_closure) {
      RET_NP(0,stg_NO_FINALIZER_closure);
  } else {
      RET_NP(1,f);
  }
}
deRefWeakzh_fast
{
  /* R1 = weak ptr */
  W_ w, code, val;

  w = R1;
  if (GET_INFO(w) == stg_WEAK_info) {
    code = 1;
    val = StgWeak_value(w);
  } else {
    code = 0;
    val = w;
  }
  RET_NP(code,val);
}
/* -----------------------------------------------------------------------------
   Arbitrary-precision Integer operations.

   There are some assumptions in this code that mp_limb_t == W_.  This is
   the case for all the platforms that GHC supports, currently.
   -------------------------------------------------------------------------- */
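/* These primops back integer-gmp's Integer type, which in this era is
 * roughly (a sketch of the library side, not part of this file):
 *
 *   data Integer = S# Int#             -- small, fits in a machine word
 *                | J# Int# ByteArray#  -- size/sign word plus GMP limbs
 *
 * The (# size :: Int#, data :: ByteArray# #) pairs returned below are
 * exactly the J# fields: abs(size) is the limb count, and its sign is the
 * sign of the number.
 */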
int2Integerzh_fast
{
   /* arguments: R1 = Int# */

   W_ val, s, p;        /* to avoid aliasing */

   val = R1;
   ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, int2Integerzh_fast );

   p = Hp - SIZEOF_StgArrWords;
   SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
   StgArrWords_words(p) = 1;

   /* mpz_set_si is inlined here, makes things simpler */
   if (%lt(val,0)) {
        s  = -1;
        Hp(0) = -val;
   } else {
     if (%gt(val,0)) {
        s = 1;
        Hp(0) = val;
     } else {
        s = 0;
     }
   }

   /* returns (# size :: Int#,
                 data :: ByteArray# #)
   */
   RET_NP(s,p);
}
word2Integerzh_fast
{
   /* arguments: R1 = Word# */

   W_ val, s, p;        /* to avoid aliasing */

   val = R1;

   ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, word2Integerzh_fast);

   p = Hp - SIZEOF_StgArrWords;
   SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
   StgArrWords_words(p) = 1;

   if (val != 0) {
        s = 1;
        Hp(0) = val;
   } else {
        s = 0;
   }

   /* returns (# size :: Int#,
                 data :: ByteArray# #)
   */
   RET_NP(s,p);
}
/*
 * 'long long' primops for converting to/from Integers.
 */

#ifdef SUPPORT_LONG_LONGS

int64ToIntegerzh_fast
{
   /* arguments: L1 = Int64# */

   L_ val;
   W_ hi, lo, s, neg, words_needed, p;

   val = L1;
   neg = 0;

   hi = TO_W_(val >> 32);
   lo = TO_W_(val);

   if ( hi == 0 || (hi == 0xFFFFFFFF && lo != 0) )  {
       // minimum is one word
       words_needed = 1;
   } else {
       words_needed = 2;
   }

   ALLOC_PRIM( SIZEOF_StgArrWords + WDS(words_needed),
               NO_PTRS, int64ToIntegerzh_fast );

   p = Hp - SIZEOF_StgArrWords - WDS(words_needed) + WDS(1);
   SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
   StgArrWords_words(p) = words_needed;

   /* two's-complement negation across the two limbs */
   if ( %lt(hi,0) ) {
       neg = 1;
       lo = -lo;
       if (lo == 0) {
           hi = -hi;
       } else {
           hi = -(hi + 1);
       }
   }

   if ( words_needed == 2 ) {
       s = 2;
       Hp(-1) = lo;
       Hp(0)  = hi;
   } else {
       if ( lo != 0 ) {
           s = 1;
           Hp(0) = lo;
       } else /* val==0 */ {
           s = 0;
       }
   }
   if ( neg != 0 ) {
       s = -s;
   }

   /* returns (# size :: Int#,
                 data :: ByteArray# #)
   */
   RET_NP(s,p);
}
word64ToIntegerzh_fast
{
   /* arguments: L1 = Word64# */

   L_ val;
   W_ hi, lo, s, words_needed, p;

   val = L1;

   hi = TO_W_(val >> 32);
   lo = TO_W_(val);

   if ( hi != 0 ) {
       words_needed = 2;
   } else {
       words_needed = 1;
   }

   ALLOC_PRIM( SIZEOF_StgArrWords + WDS(words_needed),
               NO_PTRS, word64ToIntegerzh_fast );

   p = Hp - SIZEOF_StgArrWords - WDS(words_needed) + WDS(1);
   SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
   StgArrWords_words(p) = words_needed;

   if ( hi != 0 ) {
       s = 2;
       Hp(-1) = lo;
       Hp(0)  = hi;
   } else {
       if ( lo != 0 ) {
           s = 1;
           Hp(0) = lo;
       } else /* val==0 */ {
           s = 0;
       }
   }

   /* returns (# size :: Int#,
                 data :: ByteArray# #)
   */
   RET_NP(s,p);
}

#endif /* SUPPORT_LONG_LONGS */
/* ToDo: this is shockingly inefficient */

#ifndef THREADED_RTS
section "bss" {
  mp_tmp1:
    bits8 [SIZEOF_MP_INT];
}

section "bss" {
  mp_tmp2:
    bits8 [SIZEOF_MP_INT];
}

section "bss" {
  mp_result1:
    bits8 [SIZEOF_MP_INT];
}

section "bss" {
  mp_result2:
    bits8 [SIZEOF_MP_INT];
}
#endif

#ifdef THREADED_RTS
#define FETCH_MP_TEMP(X) \
W_ X; \
X = BaseReg + (OFFSET_StgRegTable_r ## X);
#else
#define FETCH_MP_TEMP(X) /* Nothing */
#endif
#define GMP_TAKE2_RET1(name,mp_fun)                                     \
name                                                                    \
{                                                                       \
  CInt s1, s2;                                                          \
  W_ d1, d2;                                                            \
  FETCH_MP_TEMP(mp_tmp1);                                               \
  FETCH_MP_TEMP(mp_tmp2);                                               \
  FETCH_MP_TEMP(mp_result1);                                            \
  FETCH_MP_TEMP(mp_result2);                                            \
                                                                        \
  /* call doYouWantToGC() */                                            \
  MAYBE_GC(R2_PTR & R4_PTR, name);                                      \
                                                                        \
  s1 = W_TO_INT(R1);                                                    \
  d1 = R2;                                                              \
  s2 = W_TO_INT(R3);                                                    \
  d2 = R4;                                                              \
                                                                        \
  MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1));          \
  MP_INT__mp_size(mp_tmp1)  = (s1);                                     \
  MP_INT__mp_d(mp_tmp1)     = BYTE_ARR_CTS(d1);                         \
  MP_INT__mp_alloc(mp_tmp2) = W_TO_INT(StgArrWords_words(d2));          \
  MP_INT__mp_size(mp_tmp2)  = (s2);                                     \
  MP_INT__mp_d(mp_tmp2)     = BYTE_ARR_CTS(d2);                         \
                                                                        \
  foreign "C" __gmpz_init(mp_result1 "ptr") [];                         \
                                                                        \
  /* Perform the operation */                                           \
  foreign "C" mp_fun(mp_result1 "ptr",mp_tmp1 "ptr",mp_tmp2 "ptr") [];  \
                                                                        \
  RET_NP(TO_W_(MP_INT__mp_size(mp_result1)),                            \
         MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords);                \
}
#define GMP_TAKE1_RET1(name,mp_fun)                                     \
name                                                                    \
{                                                                       \
  CInt s1;                                                              \
  W_ d1;                                                                \
  FETCH_MP_TEMP(mp_tmp1);                                               \
  FETCH_MP_TEMP(mp_result1);                                            \
                                                                        \
  /* call doYouWantToGC() */                                            \
  MAYBE_GC(R2_PTR, name);                                               \
                                                                        \
  s1 = W_TO_INT(R1);                                                    \
  d1 = R2;                                                              \
                                                                        \
  MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1));          \
  MP_INT__mp_size(mp_tmp1)  = (s1);                                     \
  MP_INT__mp_d(mp_tmp1)     = BYTE_ARR_CTS(d1);                         \
                                                                        \
  foreign "C" __gmpz_init(mp_result1 "ptr") [];                         \
                                                                        \
  /* Perform the operation */                                           \
  foreign "C" mp_fun(mp_result1 "ptr",mp_tmp1 "ptr") [];                \
                                                                        \
  RET_NP(TO_W_(MP_INT__mp_size(mp_result1)),                            \
         MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords);                \
}
#define GMP_TAKE2_RET2(name,mp_fun)                                     \
name                                                                    \
{                                                                       \
  CInt s1, s2;                                                          \
  W_ d1, d2;                                                            \
  FETCH_MP_TEMP(mp_tmp1);                                               \
  FETCH_MP_TEMP(mp_tmp2);                                               \
  FETCH_MP_TEMP(mp_result1);                                            \
  FETCH_MP_TEMP(mp_result2);                                            \
                                                                        \
  /* call doYouWantToGC() */                                            \
  MAYBE_GC(R2_PTR & R4_PTR, name);                                      \
                                                                        \
  s1 = W_TO_INT(R1);                                                    \
  d1 = R2;                                                              \
  s2 = W_TO_INT(R3);                                                    \
  d2 = R4;                                                              \
                                                                        \
  MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1));          \
  MP_INT__mp_size(mp_tmp1)  = (s1);                                     \
  MP_INT__mp_d(mp_tmp1)     = BYTE_ARR_CTS(d1);                         \
  MP_INT__mp_alloc(mp_tmp2) = W_TO_INT(StgArrWords_words(d2));          \
  MP_INT__mp_size(mp_tmp2)  = (s2);                                     \
  MP_INT__mp_d(mp_tmp2)     = BYTE_ARR_CTS(d2);                         \
                                                                        \
  foreign "C" __gmpz_init(mp_result1 "ptr") [];                         \
  foreign "C" __gmpz_init(mp_result2 "ptr") [];                         \
                                                                        \
  /* Perform the operation */                                           \
  foreign "C" mp_fun(mp_result1 "ptr",mp_result2 "ptr",mp_tmp1 "ptr",mp_tmp2 "ptr") []; \
                                                                        \
  RET_NPNP(TO_W_(MP_INT__mp_size(mp_result1)),                          \
           MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords,               \
           TO_W_(MP_INT__mp_size(mp_result2)),                          \
           MP_INT__mp_d(mp_result2) - SIZEOF_StgArrWords);              \
}
GMP_TAKE2_RET1(plusIntegerzh_fast,     __gmpz_add)
GMP_TAKE2_RET1(minusIntegerzh_fast,    __gmpz_sub)
GMP_TAKE2_RET1(timesIntegerzh_fast,    __gmpz_mul)
GMP_TAKE2_RET1(gcdIntegerzh_fast,      __gmpz_gcd)
GMP_TAKE2_RET1(quotIntegerzh_fast,     __gmpz_tdiv_q)
GMP_TAKE2_RET1(remIntegerzh_fast,      __gmpz_tdiv_r)
GMP_TAKE2_RET1(divExactIntegerzh_fast, __gmpz_divexact)
GMP_TAKE2_RET1(andIntegerzh_fast,      __gmpz_and)
GMP_TAKE2_RET1(orIntegerzh_fast,       __gmpz_ior)
GMP_TAKE2_RET1(xorIntegerzh_fast,      __gmpz_xor)
GMP_TAKE1_RET1(complementIntegerzh_fast, __gmpz_com)

GMP_TAKE2_RET2(quotRemIntegerzh_fast, __gmpz_tdiv_qr)
GMP_TAKE2_RET2(divModIntegerzh_fast,  __gmpz_fdiv_qr)
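/* The tdiv (truncate toward zero) vs fdiv (floor) distinction above is
 * exactly what separates Haskell's quotRem from divMod, e.g.:
 *
 *   (-7) `quotRem` 2 == (-3, -1)   -- __gmpz_tdiv_qr
 *   (-7) `divMod`  2 == (-4,  1)   -- __gmpz_fdiv_qr
 */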
#ifndef THREADED_RTS
section "bss" {
  mp_tmp_w:  W_; // NB. mp_tmp_w is really an mp_limb_t here
}
#endif

gcdIntzh_fast
{
    /* R1 = the first Int#; R2 = the second Int# */
    W_ r;
    FETCH_MP_TEMP(mp_tmp_w);

    W_[mp_tmp_w] = R1;
    (r) = foreign "C" __gmpn_gcd_1(mp_tmp_w "ptr", 1, R2) [];

    R1 = r;
    /* Result parked in R1, return via info-pointer at TOS */
    jump %ENTRY_CODE(Sp(0));
}

gcdIntegerIntzh_fast
{
    /* R1 = s1; R2 = d1; R3 = the int */
    W_ s1;
    (s1) = foreign "C" __gmpn_gcd_1( BYTE_ARR_CTS(R2) "ptr", R1, R3) [];
    R1 = s1;

    /* Result parked in R1, return via info-pointer at TOS */
    jump %ENTRY_CODE(Sp(0));
}
cmpIntegerIntzh_fast
{
    /* R1 = s1; R2 = d1; R3 = the int */
    W_ usize, vsize, v_digit, u_digit;

    usize = R1;
    vsize = 0;
    v_digit = R3;

    // paraphrased from __gmpz_cmp_si() in the GMP sources
    if (%gt(v_digit,0)) {
        vsize = 1;
    } else {
        if (%lt(v_digit,0)) {
            vsize = -1;
            v_digit = -v_digit;
        }
    }

    if (usize != vsize) {
        R1 = usize - vsize;
        jump %ENTRY_CODE(Sp(0));
    }

    if (usize == 0) {
        R1 = 0;
        jump %ENTRY_CODE(Sp(0));
    }

    u_digit = W_[BYTE_ARR_CTS(R2)];

    if (u_digit == v_digit) {
        R1 = 0;
        jump %ENTRY_CODE(Sp(0));
    }

    if (%gtu(u_digit,v_digit)) { // NB. unsigned: these are mp_limb_t's
        R1 = usize;
    } else {
        R1 = -usize;
    }

    jump %ENTRY_CODE(Sp(0));
}
cmpIntegerzh_fast
{
    /* R1 = s1; R2 = d1; R3 = s2; R4 = d2 */
    W_ usize, vsize, size, up, vp;
    CInt cmp;

    // paraphrased from __gmpz_cmp() in the GMP sources
    usize = R1;
    vsize = R3;

    if (usize != vsize) {
        R1 = usize - vsize;
        jump %ENTRY_CODE(Sp(0));
    }

    if (usize == 0) {
        R1 = 0;
        jump %ENTRY_CODE(Sp(0));
    }

    if (%lt(usize,0)) { // NB. not <, which is unsigned
        size = -usize;
    } else {
        size = usize;
    }

    up = BYTE_ARR_CTS(R2);
    vp = BYTE_ARR_CTS(R4);

    (cmp) = foreign "C" __gmpn_cmp(up "ptr", vp "ptr", size) [];

    if (cmp == 0 :: CInt) {
        R1 = 0;
        jump %ENTRY_CODE(Sp(0));
    }

    if (%lt(cmp,0 :: CInt) == %lt(usize,0)) {
        /* positive */
        R1 = size;
    } else {
        /* negative */
        R1 = -size;
    }
    /* Result parked in R1, return via info-pointer at TOS */
    jump %ENTRY_CODE(Sp(0));
}
integer2Intzh_fast
{
    /* R1 = s; R2 = d */
    W_ r, s;

    s = R1;
    if (s == 0) {
        r = 0;
    } else {
        r = W_[R2 + SIZEOF_StgArrWords];
        if (s < 0) {
            r = -r;
        }
    }
    /* Result parked in R1, return via info-pointer at TOS */
    R1 = r;
    jump %ENTRY_CODE(Sp(0));
}

integer2Wordzh_fast
{
    /* R1 = s; R2 = d */
    W_ r, s;

    s = R1;
    if (s == 0) {
        r = 0;
    } else {
        r = W_[R2 + SIZEOF_StgArrWords];
        if (s < 0) {
            r = -r;
        }
    }
    /* Result parked in R1, return via info-pointer at TOS */
    R1 = r;
    jump %ENTRY_CODE(Sp(0));
}
decodeFloatzh_fast
{
    W_ p;
    F_ arg;
    FETCH_MP_TEMP(mp_tmp1);
    FETCH_MP_TEMP(mp_tmp_w);

    /* arguments: F1 = Float# */
    arg = F1;

    ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, decodeFloatzh_fast );

    /* Be prepared to tell Lennart-coded __decodeFloat
       where mantissa._mp_d can be put (it does not care about the rest) */
    p = Hp - SIZEOF_StgArrWords;
    SET_HDR(p,stg_ARR_WORDS_info,W_[CCCS]);
    StgArrWords_words(p) = 1;
    MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(p);

    /* Perform the operation */
    foreign "C" __decodeFloat(mp_tmp1 "ptr",mp_tmp_w "ptr" ,arg) [];

    /* returns: (Int# (expn), Int#, ByteArray#) */
    RET_NNP(W_[mp_tmp_w], TO_W_(MP_INT__mp_size(mp_tmp1)), p);
}
decodeFloatzuIntzh_fast
{
    F_ arg;
    FETCH_MP_TEMP(mp_tmp1);
    FETCH_MP_TEMP(mp_tmp_w);

    /* arguments: F1 = Float# */
    arg = F1;

    /* Perform the operation */
    foreign "C" __decodeFloat_Int(mp_tmp1 "ptr", mp_tmp_w "ptr", arg) [];

    /* returns: (Int# (mantissa), Int# (exponent)) */
    RET_NN(W_[mp_tmp1], W_[mp_tmp_w]);
}
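/* At the Haskell level these surface as decodeFloat, e.g.
 *
 *   decodeFloat (0.5 :: Float) == (8388608, -24)   -- 0.5 = 2^23 * 2^-24
 *
 * i.e. a full 24-bit mantissa together with a binary exponent.
 */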
#define DOUBLE_MANTISSA_SIZE SIZEOF_DOUBLE
#define ARR_SIZE (SIZEOF_StgArrWords + DOUBLE_MANTISSA_SIZE)

decodeDoublezh_fast
{
    D_ arg;
    W_ p;
    FETCH_MP_TEMP(mp_tmp1);
    FETCH_MP_TEMP(mp_tmp_w);

    /* arguments: D1 = Double# */
    arg = D1;

    ALLOC_PRIM( ARR_SIZE, NO_PTRS, decodeDoublezh_fast );

    /* Be prepared to tell Lennart-coded __decodeDouble
       where mantissa.d can be put (it does not care about the rest) */
    p = Hp - ARR_SIZE + WDS(1);
    SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
    StgArrWords_words(p) = BYTES_TO_WDS(DOUBLE_MANTISSA_SIZE);
    MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(p);

    /* Perform the operation */
    foreign "C" __decodeDouble(mp_tmp1 "ptr", mp_tmp_w "ptr",arg) [];

    /* returns: (Int# (expn), Int#, ByteArray#) */
    RET_NNP(W_[mp_tmp_w], TO_W_(MP_INT__mp_size(mp_tmp1)), p);
}
decodeDoublezu2Intzh_fast
{
    D_ arg;
    FETCH_MP_TEMP(mp_tmp1);
    FETCH_MP_TEMP(mp_tmp2);
    FETCH_MP_TEMP(mp_result1);
    FETCH_MP_TEMP(mp_result2);

    /* arguments: D1 = Double# */
    arg = D1;

    /* Perform the operation */
    foreign "C" __decodeDouble_2Int(mp_tmp1 "ptr", mp_tmp2 "ptr",
                                    mp_result1 "ptr", mp_result2 "ptr",
                                    arg) [];

    /* returns:
       (Int# (mant sign), Word# (mant high), Word# (mant low), Int# (expn)) */
    RET_NNNN(W_[mp_tmp1], W_[mp_tmp2], W_[mp_result1], W_[mp_result2]);
}
/* -----------------------------------------------------------------------------
 * Concurrency primitives
 * -------------------------------------------------------------------------- */
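/* These back Control.Concurrent's forkIO (and forkOnIO in this era); an
 * illustrative sketch:
 *
 *   main = do
 *     done <- newEmptyMVar
 *     _ <- forkIO (putStrLn "child" >> putMVar done ())  -- forkzh_fast
 *     takeMVar done
 *
 * forkzh_fast creates the child TSO, copies the parent's blocked/interruptible
 * flags (as the code below does), and asks the scheduler to run it soon.
 */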
forkzh_fast
{
  /* args: R1 = closure to spark */

  MAYBE_GC(R1_PTR, forkzh_fast);

  W_ closure;
  W_ threadid;
  closure = R1;

  ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
                                RtsFlags_GcFlags_initialStkSize(RtsFlags),
                                closure "ptr") [];

  /* start blocked if the current thread is blocked */
  StgTSO_flags(threadid) =
     StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
                               (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));

  foreign "C" scheduleThread(MyCapability() "ptr", threadid "ptr") [];

  // switch at the earliest opportunity
  Capability_context_switch(MyCapability()) = 1 :: CInt;

  RET_P(threadid);
}
forkOnzh_fast
{
  /* args: R1 = cpu, R2 = closure to spark */

  MAYBE_GC(R2_PTR, forkOnzh_fast);

  W_ cpu;
  W_ closure;
  W_ threadid;
  cpu = R1;
  closure = R2;

  ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
                                RtsFlags_GcFlags_initialStkSize(RtsFlags),
                                closure "ptr") [];

  /* start blocked if the current thread is blocked */
  StgTSO_flags(threadid) =
     StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
                               (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));

  foreign "C" scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr") [];

  // switch at the earliest opportunity
  Capability_context_switch(MyCapability()) = 1 :: CInt;

  RET_P(threadid);
}
yieldzh_fast
{
  jump stg_yield_noregs;
}

labelThreadzh_fast
{
  /* args: R1 = ThreadId#, R2 = Addr# */
  foreign "C" labelThread(R1 "ptr", R2 "ptr") [];
  jump %ENTRY_CODE(Sp(0));
}
isCurrentThreadBoundzh_fast
{
  /* no args */
  W_ r;
  (r) = foreign "C" isThreadBound(CurrentTSO) [];
  RET_N(r);
}
threadStatuszh_fast
{
    /* args: R1 :: ThreadId# */
    W_ tso;
    W_ why_blocked;
    W_ what_next;
    W_ ret;

    tso = R1;
  loop:
    if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
        tso = StgTSO__link(tso);
        goto loop;
    }

    what_next   = TO_W_(StgTSO_what_next(tso));
    why_blocked = TO_W_(StgTSO_why_blocked(tso));
    // Note: these two reads are not atomic, so they might end up
    // being inconsistent.  It doesn't matter, since we
    // only return one or the other.  If we wanted to return the
    // contents of block_info too, then we'd have to do some synchronisation.

    if (what_next == ThreadComplete) {
        ret = 16;  // NB. magic, matches up with GHC.Conc.threadStatus
    } else {
        if (what_next == ThreadKilled) {
            ret = 17;
        } else {
            ret = why_blocked;
        }
    }
    RET_N(ret);
}
/* -----------------------------------------------------------------------------
 * STM primitives
 * -------------------------------------------------------------------------- */

#define SP_OFF 0

// Catch retry frame ------------------------------------------------------------

INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
#if defined(PROFILING)
  W_ unused1, W_ unused2,
#endif
  W_ unused3, "ptr" W_ unused4, "ptr" W_ unused5)
{
   W_ r, frame, trec, outer;

   frame = Sp;
   trec = StgTSO_trec(CurrentTSO);
   ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
   (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
   if (r != 0) {
      /* Succeeded (either first branch or second branch) */
      StgTSO_trec(CurrentTSO) = outer;
      Sp = Sp + SIZEOF_StgCatchRetryFrame;
      jump %ENTRY_CODE(Sp(SP_OFF));
   } else {
      /* Did not commit: re-execute */
      W_ new_trec;
      ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
      StgTSO_trec(CurrentTSO) = new_trec;
      if (StgCatchRetryFrame_running_alt_code(frame) != 0::I32) {
          R1 = StgCatchRetryFrame_alt_code(frame);
      } else {
          R1 = StgCatchRetryFrame_first_code(frame);
      }
      jump stg_ap_v_fast;
   }
}
// Atomically frame ------------------------------------------------------------

INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
#if defined(PROFILING)
  W_ unused1, W_ unused2,
#endif
  "ptr" W_ unused3, "ptr" W_ unused4)
{
  W_ frame, trec, valid, next_invariant, q, outer;

  frame = Sp;
  trec = StgTSO_trec(CurrentTSO);
  ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];

  if (outer == NO_TREC) {
    /* First time back at the atomically frame -- pick up invariants */
    ("ptr" q) = foreign "C" stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr") [];
    StgAtomicallyFrame_next_invariant_to_check(frame) = q;

  } else {
    /* Second/subsequent time back at the atomically frame -- abort the
     * tx that's checking the invariant and move on to the next one */
    StgTSO_trec(CurrentTSO) = outer;
    q = StgAtomicallyFrame_next_invariant_to_check(frame);
    StgInvariantCheckQueue_my_execution(q) = trec;
    foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
    /* Don't free trec -- it's linked from q and will be stashed in the
     * invariant if we eventually commit. */
    q = StgInvariantCheckQueue_next_queue_entry(q);
    StgAtomicallyFrame_next_invariant_to_check(frame) = q;
  }

  q = StgAtomicallyFrame_next_invariant_to_check(frame);

  if (q != END_INVARIANT_CHECK_QUEUE) {
    /* We can't commit yet: another invariant to check */
    ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [];
    StgTSO_trec(CurrentTSO) = trec;

    next_invariant = StgInvariantCheckQueue_invariant(q);
    R1 = StgAtomicInvariant_code(next_invariant);
    jump stg_ap_v_fast;

  } else {

    /* We've got no more invariants to check, try to commit */
    (valid) = foreign "C" stmCommitTransaction(MyCapability() "ptr", trec "ptr") [];
    if (valid != 0) {
      /* Transaction was valid: commit succeeded */
      StgTSO_trec(CurrentTSO) = NO_TREC;
      Sp = Sp + SIZEOF_StgAtomicallyFrame;
      jump %ENTRY_CODE(Sp(SP_OFF));
    } else {
      /* Transaction was not valid: try again */
      ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
      StgTSO_trec(CurrentTSO) = trec;
      StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
      R1 = StgAtomicallyFrame_code(frame);
      jump stg_ap_v_fast;
    }
  }
}
INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
#if defined(PROFILING)
  W_ unused1, W_ unused2,
#endif
  "ptr" W_ unused3, "ptr" W_ unused4)
{
  W_ frame, trec, valid;

  frame = Sp;

  /* The TSO is currently waiting: should we stop waiting? */
  (valid) = foreign "C" stmReWait(MyCapability() "ptr", CurrentTSO "ptr") [];
  if (valid != 0) {
    /* Previous attempt is still valid: no point trying again yet */
    jump stg_block_noregs;
  } else {
    /* Previous attempt is no longer valid: try again */
    ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
    StgTSO_trec(CurrentTSO) = trec;
    StgHeader_info(frame) = stg_atomically_frame_info;
    R1 = StgAtomicallyFrame_code(frame);
    jump stg_ap_v_fast;
  }
}
// STM catch frame --------------------------------------------------------------

/* Catch frames are very similar to update frames, but when entering
 * one we just pop the frame off the stack and perform the correct
 * kind of return to the activation record underneath us on the stack.
 */

INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
#if defined(PROFILING)
  W_ unused1, W_ unused2,
#endif
  "ptr" W_ unused3, "ptr" W_ unused4)
{
   W_ r, frame, trec, outer;

   frame = Sp;
   trec = StgTSO_trec(CurrentTSO);
   ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
   (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
   if (r != 0) {
      /* Commit succeeded */
      StgTSO_trec(CurrentTSO) = outer;
      Sp = Sp + SIZEOF_StgCatchSTMFrame;
      jump %ENTRY_CODE(Sp(SP_OFF));
   } else {
      /* Commit failed: re-execute the body */
      W_ new_trec;
      ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
      StgTSO_trec(CurrentTSO) = new_trec;
      R1 = StgCatchSTMFrame_code(frame);
      jump stg_ap_v_fast;
   }
}
// Primop definition ------------------------------------------------------------
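/* How the primops below surface at the Haskell level (an illustrative sketch,
 * via GHC.Conc / the stm package):
 *
 *   atomically  -- atomicallyzh_fast
 *   retry       -- retryzh_fast
 *   orElse      -- catchRetryzh_fast
 *   catchSTM    -- catchSTMzh_fast
 *
 *   transfer :: TVar Int -> TVar Int -> Int -> IO ()
 *   transfer from to n = atomically $ do
 *     b <- readTVar from
 *     if b < n then retry                       -- blocks until 'from' changes
 *              else do writeTVar from (b - n)
 *                      t <- readTVar to
 *                      writeTVar to (t + n)
 */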
atomicallyzh_fast
{
  W_ frame;
  W_ old_trec;
  W_ new_trec;

  // stmStartTransaction may allocate
  MAYBE_GC (R1_PTR, atomicallyzh_fast);

  /* Args: R1 = m :: STM a */
  STK_CHK_GEN(SIZEOF_StgAtomicallyFrame + WDS(1), R1_PTR, atomicallyzh_fast);

  old_trec = StgTSO_trec(CurrentTSO);

  /* Nested transactions are not allowed; raise an exception */
  if (old_trec != NO_TREC) {
     R1 = base_ControlziExceptionziBase_nestedAtomically_closure;
     jump raisezh_fast;
  }

  /* Set up the atomically frame */
  Sp = Sp - SIZEOF_StgAtomicallyFrame;
  frame = Sp;

  SET_HDR(frame,stg_atomically_frame_info, W_[CCCS]);
  StgAtomicallyFrame_code(frame) = R1;
  StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;

  /* Start the memory transaction */
  ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", old_trec "ptr") [R1];
  StgTSO_trec(CurrentTSO) = new_trec;

  /* Apply R1 to the realworld token */
  jump stg_ap_v_fast;
}
catchSTMzh_fast
{
  W_ frame;

  /* Args: R1 :: STM a */
  /* Args: R2 :: Exception -> STM a */
  STK_CHK_GEN(SIZEOF_StgCatchSTMFrame + WDS(1), R1_PTR & R2_PTR, catchSTMzh_fast);

  /* Set up the catch frame */
  Sp = Sp - SIZEOF_StgCatchSTMFrame;
  frame = Sp;

  SET_HDR(frame, stg_catch_stm_frame_info, W_[CCCS]);
  StgCatchSTMFrame_handler(frame) = R2;
  StgCatchSTMFrame_code(frame) = R1;

  /* Start a nested transaction to run the body of the try block in */
  W_ cur_trec;
  W_ new_trec;
  cur_trec = StgTSO_trec(CurrentTSO);
  ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", cur_trec "ptr");
  StgTSO_trec(CurrentTSO) = new_trec;

  /* Apply R1 to the realworld token */
  jump stg_ap_v_fast;
}
catchRetryzh_fast
{
  W_ frame;
  W_ new_trec;
  W_ trec;

  // stmStartTransaction may allocate
  MAYBE_GC (R1_PTR & R2_PTR, catchRetryzh_fast);

  /* Args: R1 :: STM a */
  /* Args: R2 :: STM a */
  STK_CHK_GEN(SIZEOF_StgCatchRetryFrame + WDS(1), R1_PTR & R2_PTR, catchRetryzh_fast);

  /* Start a nested transaction within which to run the first code */
  trec = StgTSO_trec(CurrentTSO);
  ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [R1,R2];
  StgTSO_trec(CurrentTSO) = new_trec;

  /* Set up the catch-retry frame */
  Sp = Sp - SIZEOF_StgCatchRetryFrame;
  frame = Sp;

  SET_HDR(frame, stg_catch_retry_frame_info, W_[CCCS]);
  StgCatchRetryFrame_running_alt_code(frame) = 0 :: CInt; // false
  StgCatchRetryFrame_first_code(frame) = R1;
  StgCatchRetryFrame_alt_code(frame) = R2;

  /* Apply R1 to the realworld token */
  jump stg_ap_v_fast;
}
retryzh_fast
{
  W_ frame_type;
  W_ frame;
  W_ trec;
  W_ outer;
  W_ r;

  MAYBE_GC (NO_PTRS, retryzh_fast); // STM operations may allocate

  // Find the enclosing ATOMICALLY_FRAME or CATCH_RETRY_FRAME
retry_pop_stack:
  StgTSO_sp(CurrentTSO) = Sp;
  (frame_type) = foreign "C" findRetryFrameHelper(CurrentTSO "ptr") [];
  Sp = StgTSO_sp(CurrentTSO);
  frame = Sp;
  trec = StgTSO_trec(CurrentTSO);
  ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];

  if (frame_type == CATCH_RETRY_FRAME) {
    // The retry reaches a CATCH_RETRY_FRAME before the atomic frame
    ASSERT(outer != NO_TREC);
    // Abort the transaction attempting the current branch
    foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
    foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
    if (StgCatchRetryFrame_running_alt_code(frame) == 0::I32) {
      // Retry in the first branch: try the alternative
      ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
      StgTSO_trec(CurrentTSO) = trec;
      StgCatchRetryFrame_running_alt_code(frame) = 1 :: CInt; // true
      R1 = StgCatchRetryFrame_alt_code(frame);
      jump stg_ap_v_fast;
    } else {
      // Retry in the alternative code: propagate the retry
      StgTSO_trec(CurrentTSO) = outer;
      Sp = Sp + SIZEOF_StgCatchRetryFrame;
      goto retry_pop_stack;
    }
  }

  // We've reached the ATOMICALLY_FRAME: attempt to wait
  ASSERT(frame_type == ATOMICALLY_FRAME);
  if (outer != NO_TREC) {
    // We called retry while checking invariants, so abort the current
    // invariant check (merging its TVar accesses into the parent's read
    // set so we'll wait on them)
    foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
    foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
    trec = outer;
    StgTSO_trec(CurrentTSO) = trec;
    ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
  }
  ASSERT(outer == NO_TREC);

  (r) = foreign "C" stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr") [];
  if (r != 0) {
    // Transaction was valid: stmWait put us on the TVars' queues, we now block
    StgHeader_info(frame) = stg_atomically_waiting_frame_info;
    // Fix up the stack in the unregisterised case: the return convention is different.
    R3 = trec; // passing to stmWaitUnblock()
    jump stg_block_stmwait;
  } else {
    // Transaction was not valid: retry immediately
    ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
    StgTSO_trec(CurrentTSO) = trec;
    R1 = StgAtomicallyFrame_code(frame);
    jump stg_ap_v_fast;
  }
}
checkzh_fast
{
  W_ trec, closure;

  /* Args: R1 = invariant closure */
  MAYBE_GC (R1_PTR, checkzh_fast);

  trec = StgTSO_trec(CurrentTSO);
  closure = R1;
  foreign "C" stmAddInvariantToCheck(MyCapability() "ptr",
                                     trec "ptr",
                                     closure "ptr") [];

  jump %ENTRY_CODE(Sp(0));
}
newTVarzh_fast
{
  W_ tv;
  W_ new_value;

  /* Args: R1 = initialisation value */

  MAYBE_GC (R1_PTR, newTVarzh_fast);
  new_value = R1;
  ("ptr" tv) = foreign "C" stmNewTVar(MyCapability() "ptr", new_value "ptr") [];
  RET_P(tv);
}
readTVarzh_fast
{
  W_ trec;
  W_ tvar;
  W_ result;

  /* Args: R1 = TVar closure */

  MAYBE_GC (R1_PTR, readTVarzh_fast); // Call to stmReadTVar may allocate
  trec = StgTSO_trec(CurrentTSO);
  tvar = R1;
  ("ptr" result) = foreign "C" stmReadTVar(MyCapability() "ptr", trec "ptr", tvar "ptr") [];

  RET_P(result);
}

readTVarIOzh_fast
{
    W_ result;

again:
    result = StgTVar_current_value(R1);
    if (%INFO_PTR(result) == stg_TREC_HEADER_info) {
        goto again;
    }
    RET_P(result);
}
writeTVarzh_fast
{
  W_ trec;
  W_ tvar;
  W_ new_value;

  /* Args: R1 = TVar closure */
  /*       R2 = New value    */

  MAYBE_GC (R1_PTR & R2_PTR, writeTVarzh_fast); // Call to stmWriteTVar may allocate
  trec = StgTSO_trec(CurrentTSO);
  tvar = R1;
  new_value = R2;
  foreign "C" stmWriteTVar(MyCapability() "ptr", trec "ptr", tvar "ptr", new_value "ptr") [];

  jump %ENTRY_CODE(Sp(0));
}
/* -----------------------------------------------------------------------------
 * MVar primitives
 *
 * take & putMVar work as follows.  Firstly, an important invariant:
 *
 * If the MVar is full, then the blocking queue contains only
 * threads blocked on putMVar, and if the MVar is empty then the
 * blocking queue contains only threads blocked on takeMVar.
 *
 * takeMVar:
 *    MVar empty : then add ourselves to the blocking queue
 *    MVar full  : remove the value from the MVar, and
 *                 blocking queue empty     : return
 *                 blocking queue non-empty : perform the first blocked putMVar
 *                                            from the queue, and wake up the
 *                                            thread (MVar is now full again)
 *
 * putMVar is just the dual of the above algorithm.
 *
 * How do we "perform a putMVar"?  Well, we have to fiddle around with
 * the stack of the thread waiting to do the putMVar.  See
 * stg_block_putmvar and stg_block_takemvar in HeapStackCheck.c for
 * the stack layout, and the PerformPut and PerformTake macros below.
 *
 * It is important that a blocked take or put is woken up with the
 * take/put already performed, because otherwise there would be a
 * small window of vulnerability where the thread could receive an
 * exception and never perform its take or put, and we'd end up with a
 * deadlock.
 *
 * -------------------------------------------------------------------------- */
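/* The user-level protocol this implements (an illustrative sketch):
 *
 *   main = do
 *     box <- newEmptyMVar
 *     _   <- forkIO (putMVar box (42 :: Int))   -- putMVarzh_fast
 *     v   <- takeMVar box                       -- takeMVarzh_fast; blocks if empty
 *     print v
 *
 * A takeMVar on an empty MVar enqueues the caller exactly as described
 * above; the matching putMVar is then performed on the sleeping thread's
 * stack before it is woken.
 */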
isEmptyMVarzh_fast
{
    /* args: R1 = MVar closure */

    if (StgMVar_value(R1) == stg_END_TSO_QUEUE_closure) {
        RET_N(1);
    } else {
        RET_N(0);
    }
}

newMVarzh_fast
{
    /* args: none */
    W_ mvar;

    ALLOC_PRIM ( SIZEOF_StgMVar, NO_PTRS, newMVarzh_fast );

    mvar = Hp - SIZEOF_StgMVar + WDS(1);
    SET_HDR(mvar,stg_MVAR_DIRTY_info,W_[CCCS]);
        // MVARs start dirty: generation 0 has no mutable list
    StgMVar_head(mvar)  = stg_END_TSO_QUEUE_closure;
    StgMVar_tail(mvar)  = stg_END_TSO_QUEUE_closure;
    StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
    RET_P(mvar);
}
#define PerformTake(tso, value)                         \
    W_[StgTSO_sp(tso) + WDS(1)] = value;                \
    W_[StgTSO_sp(tso) + WDS(0)] = stg_gc_unpt_r1_info;

#define PerformPut(tso,lval)                            \
    StgTSO_sp(tso) = StgTSO_sp(tso) + WDS(3);           \
    lval = W_[StgTSO_sp(tso) - WDS(1)];
takeMVarzh_fast
{
    W_ mvar, val, info, tso;

    /* args: R1 = MVar closure */
    mvar = R1;

#if defined(THREADED_RTS)
    ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
#else
    info = GET_INFO(mvar);
#endif

    if (info == stg_MVAR_CLEAN_info) {
        foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr") [];
    }

    /* If the MVar is empty, put ourselves on its blocking queue,
     * and wait until we're woken up.
     */
    if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
        if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
            StgMVar_head(mvar) = CurrentTSO;
        } else {
            foreign "C" setTSOLink(MyCapability() "ptr",
                                   StgMVar_tail(mvar) "ptr",
                                   CurrentTSO) [];
        }
        StgTSO__link(CurrentTSO)       = stg_END_TSO_QUEUE_closure;
        StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
        StgTSO_block_info(CurrentTSO)  = mvar;
        StgMVar_tail(mvar) = CurrentTSO;

        jump stg_block_takemvar;
    }

    /* we got the value... */
    val = StgMVar_value(mvar);

    if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure)
    {
        /* There are putMVar(s) waiting...
         * wake up the first thread on the queue
         */
        ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);

        /* actually perform the putMVar for the thread that we just woke up */
        tso = StgMVar_head(mvar);
        PerformPut(tso,StgMVar_value(mvar));

        if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
            foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
        }

        ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
                                              StgMVar_head(mvar) "ptr", 1) [];
        StgMVar_head(mvar) = tso;

        if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
            StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
        }

#if defined(THREADED_RTS)
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
        SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
        RET_P(val);
    }
    else
    {
        /* No further putMVars, MVar is now empty */
        StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;

#if defined(THREADED_RTS)
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
        SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
        RET_P(val);
    }
}
tryTakeMVarzh_fast
{
    W_ mvar, val, info, tso;

    /* args: R1 = MVar closure */
    mvar = R1;

#if defined(THREADED_RTS)
    ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
#else
    info = GET_INFO(mvar);
#endif

    if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
        unlockClosure(mvar, info);
#endif
        /* HACK: we need a pointer to pass back,
         * so we abuse NO_FINALIZER_closure
         */
        RET_NP(0, stg_NO_FINALIZER_closure);
    }

    if (info == stg_MVAR_CLEAN_info) {
        foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
    }

    /* we got the value... */
    val = StgMVar_value(mvar);

    if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {

        /* There are putMVar(s) waiting...
         * wake up the first thread on the queue
         */
        ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);

        /* actually perform the putMVar for the thread that we just woke up */
        tso = StgMVar_head(mvar);
        PerformPut(tso,StgMVar_value(mvar));
        if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
            foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
        }

        ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
                                              StgMVar_head(mvar) "ptr", 1) [];
        StgMVar_head(mvar) = tso;

        if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
            StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
        }
#if defined(THREADED_RTS)
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
        SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
        RET_NP(1, val);
    }
    else
    {
        /* No further putMVars, MVar is now empty */
        StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
#if defined(THREADED_RTS)
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
        SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
        RET_NP(1, val);
    }
}
putMVarzh_fast
{
    W_ mvar, val, info, tso;

    /* args: R1 = MVar, R2 = value */
    mvar = R1;
    val  = R2;

#if defined(THREADED_RTS)
    ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
#else
    info = GET_INFO(mvar);
#endif

    if (info == stg_MVAR_CLEAN_info) {
        foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
    }

    if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
        if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
            StgMVar_head(mvar) = CurrentTSO;
        } else {
            foreign "C" setTSOLink(MyCapability() "ptr",
                                   StgMVar_tail(mvar) "ptr",
                                   CurrentTSO) [];
        }
        StgTSO__link(CurrentTSO)       = stg_END_TSO_QUEUE_closure;
        StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
        StgTSO_block_info(CurrentTSO)  = mvar;
        StgMVar_tail(mvar) = CurrentTSO;

        jump stg_block_putmvar;
    }

    if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {

        /* There are takeMVar(s) waiting: wake up the first one
         */
        ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);

        /* actually perform the takeMVar */
        tso = StgMVar_head(mvar);
        PerformTake(tso, val);
        if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
            foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
        }

        ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
                                              StgMVar_head(mvar) "ptr", 1) [];
        StgMVar_head(mvar) = tso;

        if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
            StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
        }

#if defined(THREADED_RTS)
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
        SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
        jump %ENTRY_CODE(Sp(0));
    }
    else
    {
        /* No further takes, the MVar is now full. */
        StgMVar_value(mvar) = val;

#if defined(THREADED_RTS)
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
        SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
        jump %ENTRY_CODE(Sp(0));
    }

    /* ToDo: yield afterward for better communication performance? */
}
tryPutMVarzh_fast
{
    W_ mvar, info, tso;

    /* args: R1 = MVar, R2 = value */
    mvar = R1;

#if defined(THREADED_RTS)
    ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [R2];
#else
    info = GET_INFO(mvar);
#endif

    if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
        unlockClosure(mvar, info);
#endif
        RET_N(0);
    }

    if (info == stg_MVAR_CLEAN_info) {
        foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
    }

    if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {

        /* There are takeMVar(s) waiting: wake up the first one
         */
        ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);

        /* actually perform the takeMVar */
        tso = StgMVar_head(mvar);
        PerformTake(tso, R2);
        if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
            foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
        }

        ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
                                              StgMVar_head(mvar) "ptr", 1) [];
        StgMVar_head(mvar) = tso;

        if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
            StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
        }

#if defined(THREADED_RTS)
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
        SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
        RET_N(1);
    }
    else
    {
        /* No further takes, the MVar is now full. */
        StgMVar_value(mvar) = R2;

#if defined(THREADED_RTS)
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
        SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
        RET_N(1);
    }

    /* ToDo: yield afterward for better communication performance? */
}
/* -----------------------------------------------------------------------------
   Stable pointer primitives
   -------------------------------------------------------------------------  */
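/* User-level view of this section (an illustrative sketch):
 *
 *   import System.Mem.StableName (makeStableName)
 *   import Foreign.StablePtr     (newStablePtr, deRefStablePtr, freeStablePtr)
 *
 *   same :: a -> a -> IO Bool            -- cheap identity-ish comparison
 *   same x y = do
 *     sx <- makeStableName x             -- makeStableNamezh_fast
 *     sy <- makeStableName y
 *     return (sx == sy)
 *
 * newStablePtr/deRefStablePtr go through makeStablePtrzh_fast and
 * deRefStablePtrzh_fast below, via entries in stable_ptr_table.
 */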
makeStableNamezh_fast
{
    W_ index, sn_obj;

    ALLOC_PRIM( SIZEOF_StgStableName, R1_PTR, makeStableNamezh_fast );

    (index) = foreign "C" lookupStableName(R1 "ptr") [];

    /* Is there already a StableName for this heap object?
     *  stable_ptr_table is a pointer to an array of snEntry structs.
     */
    if ( snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) == NULL ) {
        sn_obj = Hp - SIZEOF_StgStableName + WDS(1);
        SET_HDR(sn_obj, stg_STABLE_NAME_info, W_[CCCS]);
        StgStableName_sn(sn_obj) = index;
        snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) = sn_obj;
    } else {
        sn_obj = snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry);
    }

    RET_P(sn_obj);
}
makeStablePtrzh_fast
{
    /* Args: R1 = a */
    W_ sp;
    MAYBE_GC(R1_PTR, makeStablePtrzh_fast);
    ("ptr" sp) = foreign "C" getStablePtr(R1 "ptr") [];
    RET_N(sp);
}

deRefStablePtrzh_fast
{
    /* Args: R1 = the stable ptr */
    W_ r, sp;
    sp = R1;
    r = snEntry_addr(W_[stable_ptr_table] + sp*SIZEOF_snEntry);
    RET_P(r);
}
/* -----------------------------------------------------------------------------
   Bytecode object primitives
   -------------------------------------------------------------------------  */
newBCOzh_fast
{
    /* R1 = instrs; R2 = literals; R3 = ptrs; R4 = arity; R5 = bitmap array */
    W_ bco, bitmap_arr, bytes, words;

    bitmap_arr = R5;

    words = BYTES_TO_WDS(SIZEOF_StgBCO) + StgArrWords_words(bitmap_arr);
    bytes = WDS(words);

    ALLOC_PRIM( bytes, R1_PTR&R2_PTR&R3_PTR&R5_PTR, newBCOzh_fast );

    bco = Hp - bytes + WDS(1);
    SET_HDR(bco, stg_BCO_info, W_[CCCS]);

    StgBCO_instrs(bco)   = R1;
    StgBCO_literals(bco) = R2;
    StgBCO_ptrs(bco)     = R3;
    StgBCO_arity(bco)    = HALF_W_(R4);
    StgBCO_size(bco)     = HALF_W_(words);

    // Copy the arity/bitmap info into the BCO
    W_ i;
    i = 0;
for:
    if (i < StgArrWords_words(bitmap_arr)) {
        StgBCO_bitmap(bco,i) = StgArrWords_payload(bitmap_arr,i);
        i = i + 1;
        goto for;
    }

    RET_P(bco);
}
mkApUpd0zh_fast
{
    // R1 = the BCO# for the AP
    W_ ap;

    // This function is *only* used to wrap zero-arity BCOs in an
    // updatable wrapper (see ByteCodeLink.lhs).  An AP thunk is always
    // saturated and always points directly to a FUN or BCO.
    ASSERT(%INFO_TYPE(%GET_STD_INFO(R1)) == HALF_W_(BCO) &&
           StgBCO_arity(R1) == HALF_W_(0));

    HP_CHK_GEN_TICKY(SIZEOF_StgAP, R1_PTR, mkApUpd0zh_fast);
    TICK_ALLOC_UP_THK(0, 0);
    CCCS_ALLOC(SIZEOF_StgAP);

    ap = Hp - SIZEOF_StgAP + WDS(1);
    SET_HDR(ap, stg_AP_info, W_[CCCS]);

    StgAP_n_args(ap) = HALF_W_(0);
    StgAP_fun(ap) = R1;

    RET_P(ap);
}
unpackClosurezh_fast
{
    /* args: R1 = closure to analyze */
    // TODO: Consider the absence of ptrs or nonptrs as a special case ?

    W_ info, ptrs, nptrs, p, ptrs_arr, nptrs_arr;
    info = %GET_STD_INFO(UNTAG(R1));

    // Some closures have non-standard layout, so we omit those here.
    W_ type;
    type = TO_W_(%INFO_TYPE(info));
    switch [0 .. N_CLOSURE_TYPES] type {
    case THUNK_SELECTOR : {
        ptrs = 1;
        nptrs = 0;
        goto out;
    }
    case THUNK, THUNK_1_0, THUNK_0_1, THUNK_2_0, THUNK_1_1,
         THUNK_0_2, THUNK_STATIC, AP, PAP, AP_STACK, BCO : {
        ptrs = 0;
        nptrs = 0;
        goto out;
    }
    default: {
        ptrs  = TO_W_(%INFO_PTRS(info));
        nptrs = TO_W_(%INFO_NPTRS(info));
        goto out;
    }}
out:

    W_ ptrs_arr_sz, nptrs_arr_sz;
    nptrs_arr_sz = SIZEOF_StgArrWords   + WDS(nptrs);
    ptrs_arr_sz  = SIZEOF_StgMutArrPtrs + WDS(ptrs);

    ALLOC_PRIM (ptrs_arr_sz + nptrs_arr_sz, R1_PTR, unpackClosurezh_fast);

    W_ clos;
    clos = UNTAG(R1);

    ptrs_arr  = Hp - nptrs_arr_sz - ptrs_arr_sz + WDS(1);
    nptrs_arr = Hp - nptrs_arr_sz + WDS(1);

    SET_HDR(ptrs_arr, stg_MUT_ARR_PTRS_FROZEN_info, W_[CCCS]);
    StgMutArrPtrs_ptrs(ptrs_arr) = ptrs;
    p = 0;
for:
    if (p < ptrs) {
        W_[ptrs_arr + SIZEOF_StgMutArrPtrs + WDS(p)] = StgClosure_payload(clos,p);
        p = p + 1;
        goto for;
    }

    SET_HDR(nptrs_arr, stg_ARR_WORDS_info, W_[CCCS]);
    StgArrWords_words(nptrs_arr) = nptrs;
    p = 0;
for2:
    if (p < nptrs) {
        W_[BYTE_ARR_CTS(nptrs_arr) + WDS(p)] = StgClosure_payload(clos, p+ptrs);
        p = p + 1;
        goto for2;
    }
    RET_NPP(info, ptrs_arr, nptrs_arr);
}
/* -----------------------------------------------------------------------------
   Thread I/O blocking primitives
   -------------------------------------------------------------------------- */

/* Add a thread to the end of the blocked queue. (C-- version of the C
 * macro in Schedule.h).
 */
#define APPEND_TO_BLOCKED_QUEUE(tso)                    \
    ASSERT(StgTSO__link(tso) == END_TSO_QUEUE);         \
    if (W_[blocked_queue_hd] == END_TSO_QUEUE) {        \
      W_[blocked_queue_hd] = tso;                       \
    } else {                                            \
      foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl] "ptr", tso) []; \
    }                                                   \
    W_[blocked_queue_tl] = tso;
waitReadzh_fast
{
    /* args: R1 */
#ifdef THREADED_RTS
    foreign "C" barf("waitRead# on threaded RTS") never returns;
#else

    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
    StgTSO_block_info(CurrentTSO) = R1;
    // No locking - we're not going to use this interface in the
    // threaded RTS anyway.
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_noregs;
#endif
}

waitWritezh_fast
{
    /* args: R1 */
#ifdef THREADED_RTS
    foreign "C" barf("waitWrite# on threaded RTS") never returns;
#else

    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
    StgTSO_block_info(CurrentTSO) = R1;
    // No locking - we're not going to use this interface in the
    // threaded RTS anyway.
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_noregs;
#endif
}
STRING(stg_delayzh_malloc_str, "delayzh_fast")
delayzh_fast
{
#ifdef mingw32_HOST_OS
    W_ ares;
    CInt reqID;
#else
    W_ t, prev, target;
#endif

#ifdef THREADED_RTS
    foreign "C" barf("delay# on threaded RTS") never returns;
#else

    /* args: R1 (microsecond delay amount) */
    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnDelay::I16;

#ifdef mingw32_HOST_OS

    /* could probably allocate this on the heap instead */
    ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                              stg_delayzh_malloc_str);
    (reqID) = foreign "C" addDelayRequest(R1);
    StgAsyncIOResult_reqID(ares)   = reqID;
    StgAsyncIOResult_len(ares)     = 0;
    StgAsyncIOResult_errCode(ares) = 0;
    StgTSO_block_info(CurrentTSO)  = ares;

    /* Having all async-blocked threads reside on the blocked_queue
     * simplifies matters, so change the status to OnDoProc and put the
     * delayed thread on the blocked_queue.
     */
    StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_async_void;

#else

    W_ time;
    W_ divisor;
    (time) = foreign "C" getourtimeofday() [R1];
    divisor = TO_W_(RtsFlags_MiscFlags_tickInterval(RtsFlags));
    divisor = divisor * 1000;
    target = ((R1 + divisor - 1) / divisor) /* divide rounding up */
           + time + 1; /* Add 1 as getourtimeofday rounds down */
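    /* Worked example (assuming, for illustration, a 20 ms tick, i.e.
     * divisor = 20000 after the scaling above): threadDelay 50000 gives
     * (50000 + 19999) / 20000 = 3 ticks, so target = time + 4 in tick
     * units thanks to the rounding adjustments. */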
    StgTSO_block_info(CurrentTSO) = target;

    /* Insert the new thread in the sleeping queue. */
    prev = NULL;
    t = W_[sleeping_queue];
while:
    if (t != END_TSO_QUEUE && StgTSO_block_info(t) < target) {
        prev = t;
        t = StgTSO__link(t);
        goto while;
    }

    StgTSO__link(CurrentTSO) = t;
    if (prev == NULL) {
        W_[sleeping_queue] = CurrentTSO;
    } else {
        foreign "C" setTSOLink(MyCapability() "ptr", prev "ptr", CurrentTSO) [];
    }

    jump stg_block_noregs;
#endif
#endif /* !THREADED_RTS */
}
#ifdef mingw32_HOST_OS
STRING(stg_asyncReadzh_malloc_str, "asyncReadzh_fast")
asyncReadzh_fast
{
    W_ ares;
    CInt reqID;

#ifdef THREADED_RTS
    foreign "C" barf("asyncRead# on threaded RTS") never returns;
#else

    /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;

    /* could probably allocate this on the heap instead */
    ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                              stg_asyncReadzh_malloc_str)
                        [];
    (reqID) = foreign "C" addIORequest(R1, 0/*FALSE*/,R2,R3,R4 "ptr") [];
    StgAsyncIOResult_reqID(ares)   = reqID;
    StgAsyncIOResult_len(ares)     = 0;
    StgAsyncIOResult_errCode(ares) = 0;
    StgTSO_block_info(CurrentTSO)  = ares;
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_async;
#endif
}
STRING(stg_asyncWritezh_malloc_str, "asyncWritezh_fast")
asyncWritezh_fast
{
    W_ ares;
    CInt reqID;

#ifdef THREADED_RTS
    foreign "C" barf("asyncWrite# on threaded RTS") never returns;
#else

    /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;

    ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                              stg_asyncWritezh_malloc_str)
                        [];
    (reqID) = foreign "C" addIORequest(R1, 1/*TRUE*/,R2,R3,R4 "ptr") [];

    StgAsyncIOResult_reqID(ares)   = reqID;
    StgAsyncIOResult_len(ares)     = 0;
    StgAsyncIOResult_errCode(ares) = 0;
    StgTSO_block_info(CurrentTSO)  = ares;
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_async;
#endif
}
STRING(stg_asyncDoProczh_malloc_str, "asyncDoProczh_fast")
asyncDoProczh_fast
{
    W_ ares;
    CInt reqID;

#ifdef THREADED_RTS
    foreign "C" barf("asyncDoProc# on threaded RTS") never returns;
#else

    /* args: R1 = proc, R2 = param */
    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;

    /* could probably allocate this on the heap instead */
    ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                              stg_asyncDoProczh_malloc_str)
                        [];
    (reqID) = foreign "C" addDoProcRequest(R1 "ptr",R2 "ptr") [];
    StgAsyncIOResult_reqID(ares)   = reqID;
    StgAsyncIOResult_len(ares)     = 0;
    StgAsyncIOResult_errCode(ares) = 0;
    StgTSO_block_info(CurrentTSO)  = ares;
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_async;
#endif
}
#endif /* mingw32_HOST_OS */
// noDuplicate# tries to ensure that none of the thunks under
// evaluation by the current thread are also under evaluation by
// another thread.  It relies on *both* threads doing noDuplicate#;
// the second one will get blocked if they are duplicating some work.
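// At the library level this is roughly what makes unsafePerformIO safe
// to share between threads (a sketch):
//
//   unsafePerformIO m = unsafeDupablePerformIO (noDuplicate >> m)
//
// so a thunk built with unsafePerformIO claims its work before running m,
// while unsafeDupablePerformIO skips the claim and may run m twice.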
noDuplicatezh_fast
{
    SAVE_THREAD_STATE();
    ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
    foreign "C" threadPaused (MyCapability() "ptr", CurrentTSO "ptr") [];

    if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
        jump stg_threadFinished;
    } else {
        LOAD_THREAD_STATE();
        ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
        jump %ENTRY_CODE(Sp(0));
    }
}
getApStackValzh_fast
{
   W_ ap_stack, offset, val, ok;

   /* args: R1 = AP_STACK, R2 = offset */
   ap_stack = R1;
   offset   = R2;

   if (%INFO_PTR(ap_stack) == stg_AP_STACK_info) {
        ok = 1;
        val = StgAP_STACK_payload(ap_stack,offset);
   } else {
        ok = 0;
        val = R1;
   }
   RET_NP(ok,val);
}
getSparkzh_fast
{
   W_ spark;

#ifndef THREADED_RTS
   RET_NP(0,ghczmprim_GHCziBool_False_closure);
#else
   (spark) = foreign "C" tryStealSpark(MyCapability());
   if (spark != 0) {
      RET_NP(1,spark);
   } else {
      (spark) = foreign "C" stealWork (MyCapability());
      if (spark != 0) {
         RET_NP(1,spark);
      } else {
         RET_NP(0,ghczmprim_GHCziBool_False_closure);
      }
   }
#endif
}