1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
5 * Out-of-line primitive operations
7 * This file contains the implementations of all the primitive
8 * operations ("primops") which are not expanded inline. See
9 * ghc/compiler/prelude/primops.txt.pp for a list of all the primops;
10 * this file contains code for most of those with the attribute out_of_line = True.
13 * Entry convention: the entry convention for a primop is that all the
14 * args are in Stg registers (R1, R2, etc.). This is to make writing
15 * the primops easier. (see compiler/codeGen/CgCallConv.hs).
17 * Return convention: results from a primop are generally returned
18 * using the ordinary unboxed tuple return convention. The C-- parser
19 * implements the RET_xxxx() macros to perform unboxed-tuple returns
20 * based on the prevailing return convention.
22 * This file is written in a subset of C--, extended with various
23 * features specific to GHC. It is compiled by GHC directly. For the
24 * syntax of .cmm files, see the parser in ghc/compiler/cmm/CmmParse.y.
26 * ---------------------------------------------------------------------------*/
31 #ifndef mingw32_HOST_OS
41 import __gmpz_tdiv_qr;
42 import __gmpz_fdiv_qr;
43 import __gmpz_divexact;
49 import pthread_mutex_lock;
50 import pthread_mutex_unlock;
52 import base_ControlziExceptionziBase_nestedAtomically_closure;
53 import EnterCriticalSection;
54 import LeaveCriticalSection;
55 import ghczmprim_GHCziBool_False_closure;
57 /*-----------------------------------------------------------------------------
60 Basically just new*Array - the others are all inline macros.
62 The size arg is always passed in R1, and the result returned in R1.
64 * The slow entry point is for returning from a heap check: the saved
65 * size argument must be re-loaded from the stack.
66 -------------------------------------------------------------------------- */
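/* Illustrative Haskell-level route to these primops (a sketch, not part of this
 * file). newByteArray# takes a size in *bytes*; the MBA wrapper below is a
 * hypothetical stand-in for the wrappers found in libraries such as primitive.
 *
 *   {-# LANGUAGE MagicHash, UnboxedTuples #-}
 *   module ByteArraySketch where
 *
 *   import GHC.Exts
 *   import GHC.IO (IO(..))
 *
 *   -- Boxed wrapper so the unlifted array can be returned through IO.
 *   data MBA = MBA (MutableByteArray# RealWorld)
 *
 *   -- Allocate n bytes; the RTS rounds the payload up to whole words.
 *   newByteArr :: Int -> IO MBA
 *   newByteArr (I# n) =
 *     IO (\s -> case newByteArray# n s of
 *                 (# s', mba #) -> (# s', MBA mba #))
 */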
68 /* for objects that are *less* than the size of a word, make sure we
69 * round up to the nearest word for the size of the array.
74 W_ words, payload_words, n, p;
75 MAYBE_GC(NO_PTRS,newByteArrayzh_fast);
77 payload_words = ROUNDUP_BYTES_TO_WDS(n);
78 words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
79 ("ptr" p) = foreign "C" allocateLocal(MyCapability() "ptr",words) [];
80 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
81 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
82 StgArrWords_words(p) = payload_words;
86 newPinnedByteArrayzh_fast
88 W_ words, payload_words, n, p;
90 MAYBE_GC(NO_PTRS,newPinnedByteArrayzh_fast);
92 payload_words = ROUNDUP_BYTES_TO_WDS(n);
94 // We want an 8-byte aligned array. allocatePinned() gives us
95 // 8-byte aligned memory by default, but we want to align the
96 // *goods* inside the ArrWords object, so we have to check the
97 // size of the ArrWords header and adjust our size accordingly.
98 words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
99 if ((SIZEOF_StgArrWords & 7) != 0) {
103 ("ptr" p) = foreign "C" allocatePinned(words) [];
104 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
106 // Again, if the ArrWords header isn't a multiple of 8 bytes, we
107 // have to push the object forward one word so that the goods
108 // fall on an 8-byte boundary.
109 if ((SIZEOF_StgArrWords & 7) != 0) {
113 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
114 StgArrWords_words(p) = payload_words;
120 W_ words, n, init, arr, p;
121 /* Args: R1 = words, R2 = initialisation value */
124 MAYBE_GC(R2_PTR,newArrayzh_fast);
126 words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + n;
127 ("ptr" arr) = foreign "C" allocateLocal(MyCapability() "ptr",words) [R2];
128 TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(n), 0);
130 SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, W_[CCCS]);
131 StgMutArrPtrs_ptrs(arr) = n;
133 // Initialise all elements of the array with the value in R2
135 p = arr + SIZEOF_StgMutArrPtrs;
137 if (p < arr + WDS(words)) {
146 unsafeThawArrayzh_fast
148 // SUBTLETY TO DO WITH THE OLD GEN MUTABLE LIST
150 // A MUT_ARR_PTRS lives on the mutable list, but a MUT_ARR_PTRS_FROZEN
151 // normally doesn't. However, when we freeze a MUT_ARR_PTRS, we leave
152 // it on the mutable list for the GC to remove (removing something from
153 // the mutable list is not easy, because the mut_list is only singly-linked).
155 // So that we can tell whether a MUT_ARR_PTRS_FROZEN is on the mutable list,
156 // when we freeze it we set the info ptr to be MUT_ARR_PTRS_FROZEN0
157 // to indicate that it is still on the mutable list.
159 // So, when we thaw a MUT_ARR_PTRS_FROZEN, we must cope with two cases:
160 // either it is on a mut_list, or it isn't. We adopt the convention that
161 // the closure type is MUT_ARR_PTRS_FROZEN0 if it is on the mutable list,
162 // and MUT_ARR_PTRS_FROZEN otherwise. In fact it wouldn't matter if
163 // we put it on the mutable list more than once, but it would get scavenged
164 // multiple times during GC, which would be unnecessarily slow.
166 if (StgHeader_info(R1) != stg_MUT_ARR_PTRS_FROZEN0_info) {
167 SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
168 recordMutable(R1, R1);
169 // must be done after SET_INFO, because it ASSERTs closure_MUTABLE()
172 SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
177 /* -----------------------------------------------------------------------------
178 MutVar primitives
179 -------------------------------------------------------------------------- */
184 /* Args: R1 = initialisation value */
186 ALLOC_PRIM( SIZEOF_StgMutVar, R1_PTR, newMutVarzh_fast);
188 mv = Hp - SIZEOF_StgMutVar + WDS(1);
189 SET_HDR(mv,stg_MUT_VAR_DIRTY_info,W_[CCCS]);
190 StgMutVar_var(mv) = R1;
195 atomicModifyMutVarzh_fast
197 W_ mv, f, z, x, y, r, h;
198 /* Args: R1 :: MutVar#, R2 :: a -> (a,b) */
200 /* If x is the current contents of the MutVar#, then
201 we want to make the new contents point to (sel_0 (f x)),
205 and the return value is (sel_1 (f x));
209 obviously we can share (f x).
211 z = [stg_ap_2 f x] (max (HS + 2) MIN_UPD_SIZE)
212 y = [stg_sel_0 z] (max (HS + 1) MIN_UPD_SIZE)
213 r = [stg_sel_1 z] (max (HS + 1) MIN_UPD_SIZE)
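/* A non-atomic Haskell-level sketch of the structure described above
 * (modifySketch is a hypothetical name; the real primop additionally makes
 * the read/compare-and-swap step atomic, see the retry loop below):
 *
 *   import Data.IORef
 *
 *   modifySketch :: IORef a -> (a -> (a, b)) -> IO b
 *   modifySketch ref f = do
 *     x <- readIORef ref
 *     let z = f x        -- shared thunk:     z = [stg_ap_2 f x]
 *         y = fst z      -- new contents:     y = [stg_sel_0 z]
 *         r = snd z      -- returned value:   r = [stg_sel_1 z]
 *     writeIORef ref y
 *     return r
 */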
217 #define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
218 #define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),WDS(MIN_UPD_SIZE-1))
220 #define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(1))
221 #define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),0)
225 #define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
226 #define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),WDS(MIN_UPD_SIZE-2))
228 #define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(2))
229 #define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),0)
232 #define SIZE (THUNK_2_SIZE + THUNK_1_SIZE + THUNK_1_SIZE)
234 HP_CHK_GEN_TICKY(SIZE, R1_PTR & R2_PTR, atomicModifyMutVarzh_fast);
239 TICK_ALLOC_THUNK_2();
240 CCCS_ALLOC(THUNK_2_SIZE);
241 z = Hp - THUNK_2_SIZE + WDS(1);
242 SET_HDR(z, stg_ap_2_upd_info, W_[CCCS]);
243 LDV_RECORD_CREATE(z);
244 StgThunk_payload(z,0) = f;
246 TICK_ALLOC_THUNK_1();
247 CCCS_ALLOC(THUNK_1_SIZE);
248 y = z - THUNK_1_SIZE;
249 SET_HDR(y, stg_sel_0_upd_info, W_[CCCS]);
250 LDV_RECORD_CREATE(y);
251 StgThunk_payload(y,0) = z;
253 TICK_ALLOC_THUNK_1();
254 CCCS_ALLOC(THUNK_1_SIZE);
255 r = y - THUNK_1_SIZE;
256 SET_HDR(r, stg_sel_1_upd_info, W_[CCCS]);
257 LDV_RECORD_CREATE(r);
258 StgThunk_payload(r,0) = z;
261 x = StgMutVar_var(mv);
262 StgThunk_payload(z,1) = x;
264 (h) = foreign "C" cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var, x, y) [];
265 if (h != x) { goto retry; }
267 StgMutVar_var(mv) = y;
270 if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
271 foreign "C" dirty_MUT_VAR(BaseReg "ptr", mv "ptr") [];
277 /* -----------------------------------------------------------------------------
278 Weak Pointer Primitives
279 -------------------------------------------------------------------------- */
281 STRING(stg_weak_msg,"New weak pointer at %p\n")
287 R3 = finalizer (or NULL)
292 R3 = stg_NO_FINALIZER_closure;
295 ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR & R3_PTR, mkWeakzh_fast );
297 w = Hp - SIZEOF_StgWeak + WDS(1);
298 SET_HDR(w, stg_WEAK_info, W_[CCCS]);
300 // We don't care about cfinalizer here.
301 // Should StgWeak_cfinalizer(w) be stg_NO_FINALIZER_closure or something else?
305 StgWeak_value(w) = R2;
306 StgWeak_finalizer(w) = R3;
307 StgWeak_cfinalizer(w) = stg_NO_FINALIZER_closure;
309 StgWeak_link(w) = W_[weak_ptr_list];
310 W_[weak_ptr_list] = w;
312 IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);
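/* Illustrative Haskell-level use of this primop via System.Mem.Weak (a
 * sketch; attachFinalizer is a hypothetical helper, not part of any library):
 *
 *   import System.Mem.Weak (Weak, mkWeak)
 *
 *   attachFinalizer :: key -> value -> IO (Weak value)
 *   attachFinalizer k v =
 *     mkWeak k v (Just (putStrLn "key is no longer reachable"))
 */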
317 mkWeakForeignEnvzh_fast
323 R5 = has environment (0 or 1)
326 W_ w, payload_words, words, p;
328 W_ key, val, fptr, ptr, flag, eptr;
337 ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR & R3_PTR, mkWeakForeignEnvzh_fast );
339 w = Hp - SIZEOF_StgWeak + WDS(1);
340 SET_HDR(w, stg_WEAK_info, W_[CCCS]);
343 words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
344 ("ptr" p) = foreign "C" allocateLocal(MyCapability() "ptr", words) [];
346 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
347 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
349 StgArrWords_words(p) = payload_words;
350 StgArrWords_payload(p,0) = fptr;
351 StgArrWords_payload(p,1) = ptr;
352 StgArrWords_payload(p,2) = eptr;
353 StgArrWords_payload(p,3) = flag;
355 // We don't care about the value here.
356 // Should StgWeak_value(w) be stg_NO_FINALIZER_closure or something else?
358 StgWeak_key(w) = key;
359 StgWeak_value(w) = val;
360 StgWeak_finalizer(w) = stg_NO_FINALIZER_closure;
361 StgWeak_cfinalizer(w) = p;
363 StgWeak_link(w) = W_[weak_ptr_list];
364 W_[weak_ptr_list] = w;
366 IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);
380 if (GET_INFO(w) == stg_DEAD_WEAK_info) {
381 RET_NP(0,stg_NO_FINALIZER_closure);
387 // A weak pointer is inherently used, so we do not need to call
388 // LDV_recordDead_FILL_SLOP_DYNAMIC():
389 // LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)w);
390 // or, LDV_recordDead():
391 // LDV_recordDead((StgClosure *)w, sizeofW(StgWeak) - sizeofW(StgProfHeader));
392 // Furthermore, when PROFILING is turned on, dead weak pointers are exactly as
393 // large as weak pointers, so there is no need to fill the slop, either.
394 // See stg_DEAD_WEAK_info in StgMiscClosures.hc.
398 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
400 SET_INFO(w,stg_DEAD_WEAK_info);
401 LDV_RECORD_CREATE(w);
403 f = StgWeak_finalizer(w);
404 arr = StgWeak_cfinalizer(w);
406 StgDeadWeak_link(w) = StgWeak_link(w);
408 if (arr != stg_NO_FINALIZER_closure) {
409 foreign "C" runCFinalizer(StgArrWords_payload(arr,0),
410 StgArrWords_payload(arr,1),
411 StgArrWords_payload(arr,2),
412 StgArrWords_payload(arr,3)) [];
415 /* return the finalizer */
416 if (f == stg_NO_FINALIZER_closure) {
417 RET_NP(0,stg_NO_FINALIZER_closure);
429 if (GET_INFO(w) == stg_WEAK_info) {
431 val = StgWeak_value(w);
439 /* -----------------------------------------------------------------------------
440 Arbitrary-precision Integer operations.
442 There are some assumptions in this code that mp_limb_t == W_. This is
443 currently the case for all the platforms that GHC supports.
444 -------------------------------------------------------------------------- */
448 /* arguments: R1 = Int# */
450 W_ val, s, p; /* to avoid aliasing */
453 ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, int2Integerzh_fast );
455 p = Hp - SIZEOF_StgArrWords;
456 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
457 StgArrWords_words(p) = 1;
459 /* mpz_set_si is inlined here, makes things simpler */
472 /* returns (# size :: Int#,
481 /* arguments: R1 = Word# */
483 W_ val, s, p; /* to avoid aliasing */
487 ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, word2Integerzh_fast);
489 p = Hp - SIZEOF_StgArrWords;
490 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
491 StgArrWords_words(p) = 1;
500 /* returns (# size :: Int#,
501 data :: ByteArray# #)
508 * 'long long' primops for converting to/from Integers.
511 #ifdef SUPPORT_LONG_LONGS
513 int64ToIntegerzh_fast
515 /* arguments: L1 = Int64# */
518 W_ hi, lo, s, neg, words_needed, p;
523 hi = TO_W_(val >> 32);
526 if ( hi == 0 || (hi == 0xFFFFFFFF && lo != 0) ) {
527 // minimum is one word
533 ALLOC_PRIM( SIZEOF_StgArrWords + WDS(words_needed),
534 NO_PTRS, int64ToIntegerzh_fast );
536 p = Hp - SIZEOF_StgArrWords - WDS(words_needed) + WDS(1);
537 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
538 StgArrWords_words(p) = words_needed;
550 if ( words_needed == 2 ) {
558 } else /* val==0 */ {
566 /* returns (# size :: Int#,
567 data :: ByteArray# #)
571 word64ToIntegerzh_fast
573 /* arguments: L1 = Word64# */
576 W_ hi, lo, s, words_needed, p;
579 hi = TO_W_(val >> 32);
588 ALLOC_PRIM( SIZEOF_StgArrWords + WDS(words_needed),
589 NO_PTRS, word64ToIntegerzh_fast );
591 p = Hp - SIZEOF_StgArrWords - WDS(words_needed) + WDS(1);
592 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
593 StgArrWords_words(p) = words_needed;
603 } else /* val==0 */ {
608 /* returns (# size :: Int#,
609 data :: ByteArray# #)
616 #endif /* SUPPORT_LONG_LONGS */
618 /* ToDo: this is shockingly inefficient */
623 bits8 [SIZEOF_MP_INT];
628 bits8 [SIZEOF_MP_INT];
633 bits8 [SIZEOF_MP_INT];
638 bits8 [SIZEOF_MP_INT];
643 #define FETCH_MP_TEMP(X) \
645 X = BaseReg + (OFFSET_StgRegTable_r ## X);
647 #define FETCH_MP_TEMP(X) /* Nothing */
650 #define GMP_TAKE2_RET1(name,mp_fun) \
655 FETCH_MP_TEMP(mp_tmp1); \
656 FETCH_MP_TEMP(mp_tmp2); \
657 FETCH_MP_TEMP(mp_result1) \
658 FETCH_MP_TEMP(mp_result2); \
660 /* call doYouWantToGC() */ \
661 MAYBE_GC(R2_PTR & R4_PTR, name); \
668 MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1)); \
669 MP_INT__mp_size(mp_tmp1) = (s1); \
670 MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(d1); \
671 MP_INT__mp_alloc(mp_tmp2) = W_TO_INT(StgArrWords_words(d2)); \
672 MP_INT__mp_size(mp_tmp2) = (s2); \
673 MP_INT__mp_d(mp_tmp2) = BYTE_ARR_CTS(d2); \
675 foreign "C" __gmpz_init(mp_result1 "ptr") []; \
677 /* Perform the operation */ \
678 foreign "C" mp_fun(mp_result1 "ptr",mp_tmp1 "ptr",mp_tmp2 "ptr") []; \
680 RET_NP(TO_W_(MP_INT__mp_size(mp_result1)), \
681 MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords); \
684 #define GMP_TAKE1_RET1(name,mp_fun) \
689 FETCH_MP_TEMP(mp_tmp1); \
690 FETCH_MP_TEMP(mp_result1) \
692 /* call doYouWantToGC() */ \
693 MAYBE_GC(R2_PTR, name); \
698 MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1)); \
699 MP_INT__mp_size(mp_tmp1) = (s1); \
700 MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(d1); \
702 foreign "C" __gmpz_init(mp_result1 "ptr") []; \
704 /* Perform the operation */ \
705 foreign "C" mp_fun(mp_result1 "ptr",mp_tmp1 "ptr") []; \
707 RET_NP(TO_W_(MP_INT__mp_size(mp_result1)), \
708 MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords); \
711 #define GMP_TAKE2_RET2(name,mp_fun) \
716 FETCH_MP_TEMP(mp_tmp1); \
717 FETCH_MP_TEMP(mp_tmp2); \
718 FETCH_MP_TEMP(mp_result1) \
719 FETCH_MP_TEMP(mp_result2) \
721 /* call doYouWantToGC() */ \
722 MAYBE_GC(R2_PTR & R4_PTR, name); \
729 MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1)); \
730 MP_INT__mp_size(mp_tmp1) = (s1); \
731 MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(d1); \
732 MP_INT__mp_alloc(mp_tmp2) = W_TO_INT(StgArrWords_words(d2)); \
733 MP_INT__mp_size(mp_tmp2) = (s2); \
734 MP_INT__mp_d(mp_tmp2) = BYTE_ARR_CTS(d2); \
736 foreign "C" __gmpz_init(mp_result1 "ptr") []; \
737 foreign "C" __gmpz_init(mp_result2 "ptr") []; \
739 /* Perform the operation */ \
740 foreign "C" mp_fun(mp_result1 "ptr",mp_result2 "ptr",mp_tmp1 "ptr",mp_tmp2 "ptr") []; \
742 RET_NPNP(TO_W_(MP_INT__mp_size(mp_result1)), \
743 MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords, \
744 TO_W_(MP_INT__mp_size(mp_result2)), \
745 MP_INT__mp_d(mp_result2) - SIZEOF_StgArrWords); \
748 GMP_TAKE2_RET1(plusIntegerzh_fast, __gmpz_add)
749 GMP_TAKE2_RET1(minusIntegerzh_fast, __gmpz_sub)
750 GMP_TAKE2_RET1(timesIntegerzh_fast, __gmpz_mul)
751 GMP_TAKE2_RET1(gcdIntegerzh_fast, __gmpz_gcd)
752 GMP_TAKE2_RET1(quotIntegerzh_fast, __gmpz_tdiv_q)
753 GMP_TAKE2_RET1(remIntegerzh_fast, __gmpz_tdiv_r)
754 GMP_TAKE2_RET1(divExactIntegerzh_fast, __gmpz_divexact)
755 GMP_TAKE2_RET1(andIntegerzh_fast, __gmpz_and)
756 GMP_TAKE2_RET1(orIntegerzh_fast, __gmpz_ior)
757 GMP_TAKE2_RET1(xorIntegerzh_fast, __gmpz_xor)
758 GMP_TAKE1_RET1(complementIntegerzh_fast, __gmpz_com)
760 GMP_TAKE2_RET2(quotRemIntegerzh_fast, __gmpz_tdiv_qr)
761 GMP_TAKE2_RET2(divModIntegerzh_fast, __gmpz_fdiv_qr)
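/* The two RET2 instantiations above correspond to the two Haskell division
 * conventions (an illustrative example of the difference between GMP's tdiv,
 * which truncates toward zero, and fdiv, which rounds toward negative
 * infinity):
 *
 *   (-7) `quotRem` 2  ==  (-3, -1)   -- quotRemInteger#  / __gmpz_tdiv_qr
 *   (-7) `divMod`  2  ==  (-4,  1)   -- divModInteger#   / __gmpz_fdiv_qr
 */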
765 mp_tmp_w: W_; // NB. mp_tmp_w is really an mp_limb_t here
771 /* R1 = the first Int#; R2 = the second Int# */
773 FETCH_MP_TEMP(mp_tmp_w);
776 (r) = foreign "C" __gmpn_gcd_1(mp_tmp_w "ptr", 1, R2) [];
779 /* Result parked in R1, return via info-pointer at TOS */
780 jump %ENTRY_CODE(Sp(0));
786 /* R1 = s1; R2 = d1; R3 = the int */
788 (s1) = foreign "C" __gmpn_gcd_1( BYTE_ARR_CTS(R2) "ptr", R1, R3) [];
791 /* Result parked in R1, return via info-pointer at TOS */
792 jump %ENTRY_CODE(Sp(0));
798 /* R1 = s1; R2 = d1; R3 = the int */
799 W_ usize, vsize, v_digit, u_digit;
805 // paraphrased from __gmpz_cmp_si() in the GMP sources
806 if (%gt(v_digit,0)) {
809 if (%lt(v_digit,0)) {
815 if (usize != vsize) {
817 jump %ENTRY_CODE(Sp(0));
822 jump %ENTRY_CODE(Sp(0));
825 u_digit = W_[BYTE_ARR_CTS(R2)];
827 if (u_digit == v_digit) {
829 jump %ENTRY_CODE(Sp(0));
832 if (%gtu(u_digit,v_digit)) { // NB. unsigned: these are mp_limb_t's
838 jump %ENTRY_CODE(Sp(0));
843 /* R1 = s1; R2 = d1; R3 = s2; R4 = d2 */
844 W_ usize, vsize, size, up, vp;
847 // paraphrased from __gmpz_cmp() in the GMP sources
851 if (usize != vsize) {
853 jump %ENTRY_CODE(Sp(0));
858 jump %ENTRY_CODE(Sp(0));
861 if (%lt(usize,0)) { // NB. not <, which is unsigned
867 up = BYTE_ARR_CTS(R2);
868 vp = BYTE_ARR_CTS(R4);
870 (cmp) = foreign "C" __gmpn_cmp(up "ptr", vp "ptr", size) [];
872 if (cmp == 0 :: CInt) {
874 jump %ENTRY_CODE(Sp(0));
877 if (%lt(cmp,0 :: CInt) == %lt(usize,0)) {
882 /* Result parked in R1, return via info-pointer at TOS */
883 jump %ENTRY_CODE(Sp(0));
895 r = W_[R2 + SIZEOF_StgArrWords];
900 /* Result parked in R1, return via info-pointer at TOS */
902 jump %ENTRY_CODE(Sp(0));
914 r = W_[R2 + SIZEOF_StgArrWords];
919 /* Result parked in R1, return via info-pointer at TOS */
921 jump %ENTRY_CODE(Sp(0));
928 FETCH_MP_TEMP(mp_tmp1);
929 FETCH_MP_TEMP(mp_tmp_w);
931 /* arguments: F1 = Float# */
934 ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, decodeFloatzh_fast );
936 /* Be prepared to tell Lennart-coded __decodeFloat
937 where mantissa._mp_d can be put (it does not care about the rest) */
938 p = Hp - SIZEOF_StgArrWords;
939 SET_HDR(p,stg_ARR_WORDS_info,W_[CCCS]);
940 StgArrWords_words(p) = 1;
941 MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(p);
943 /* Perform the operation */
944 foreign "C" __decodeFloat(mp_tmp1 "ptr",mp_tmp_w "ptr" ,arg) [];
946 /* returns: (Int# (expn), Int#, ByteArray#) */
947 RET_NNP(W_[mp_tmp_w], TO_W_(MP_INT__mp_size(mp_tmp1)), p);
950 decodeFloatzuIntzh_fast
954 FETCH_MP_TEMP(mp_tmp1);
955 FETCH_MP_TEMP(mp_tmp_w);
957 /* arguments: F1 = Float# */
960 /* Perform the operation */
961 foreign "C" __decodeFloat_Int(mp_tmp1 "ptr", mp_tmp_w "ptr", arg) [];
963 /* returns: (Int# (mantissa), Int# (exponent)) */
964 RET_NN(W_[mp_tmp1], W_[mp_tmp_w]);
967 #define DOUBLE_MANTISSA_SIZE SIZEOF_DOUBLE
968 #define ARR_SIZE (SIZEOF_StgArrWords + DOUBLE_MANTISSA_SIZE)
974 FETCH_MP_TEMP(mp_tmp1);
975 FETCH_MP_TEMP(mp_tmp_w);
977 /* arguments: D1 = Double# */
980 ALLOC_PRIM( ARR_SIZE, NO_PTRS, decodeDoublezh_fast );
982 /* Be prepared to tell Lennart-coded __decodeDouble
983 where mantissa.d can be put (it does not care about the rest) */
984 p = Hp - ARR_SIZE + WDS(1);
985 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
986 StgArrWords_words(p) = BYTES_TO_WDS(DOUBLE_MANTISSA_SIZE);
987 MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(p);
989 /* Perform the operation */
990 foreign "C" __decodeDouble(mp_tmp1 "ptr", mp_tmp_w "ptr",arg) [];
992 /* returns: (Int# (expn), Int#, ByteArray#) */
993 RET_NNP(W_[mp_tmp_w], TO_W_(MP_INT__mp_size(mp_tmp1)), p);
996 decodeDoublezu2Intzh_fast
1000 FETCH_MP_TEMP(mp_tmp1);
1001 FETCH_MP_TEMP(mp_tmp2);
1002 FETCH_MP_TEMP(mp_result1);
1003 FETCH_MP_TEMP(mp_result2);
1005 /* arguments: D1 = Double# */
1008 /* Perform the operation */
1009 foreign "C" __decodeDouble_2Int(mp_tmp1 "ptr", mp_tmp2 "ptr",
1010 mp_result1 "ptr", mp_result2 "ptr",
1014 (Int# (mant sign), Word# (mant high), Word# (mant low), Int# (expn)) */
1015 RET_NNNN(W_[mp_tmp1], W_[mp_tmp2], W_[mp_result1], W_[mp_result2]);
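/* At the Haskell level these decode primops sit behind the standard RealFloat
 * interface; a sketch of the round-trip property for finite, non-NaN values:
 *
 *   reconstruct :: Double -> Double
 *   reconstruct d = let (m, e) = decodeFloat d   -- Integer mantissa, Int exponent
 *                   in encodeFloat m e           -- equals d for finite, non-NaN d
 */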
1018 /* -----------------------------------------------------------------------------
1019 * Concurrency primitives
1020 * -------------------------------------------------------------------------- */
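/* These primops surface in the base library as forkIO (fork#) and, in newer
 * base versions, forkOn (forkOn#). An illustrative use (a sketch, not part of
 * this file):
 *
 *   import Control.Concurrent
 *
 *   main :: IO ()
 *   main = do
 *     tid <- forkIO (putStrLn "child thread")   -- built on fork#
 *     print tid
 *     threadDelay 100000                        -- let the child run
 */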
1024 /* args: R1 = closure to spark */
1026 MAYBE_GC(R1_PTR, forkzh_fast);
1032 ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
1033 RtsFlags_GcFlags_initialStkSize(RtsFlags),
1036 /* start with async exceptions blocked if the current thread has them blocked */
1037 StgTSO_flags(threadid) =
1038 StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
1039 (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));
1041 foreign "C" scheduleThread(MyCapability() "ptr", threadid "ptr") [];
1043 // switch at the earliest opportunity
1044 Capability_context_switch(MyCapability()) = 1 :: CInt;
1051 /* args: R1 = cpu, R2 = closure to spark */
1053 MAYBE_GC(R2_PTR, forkOnzh_fast);
1061 ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
1062 RtsFlags_GcFlags_initialStkSize(RtsFlags),
1065 /* start with async exceptions blocked if the current thread has them blocked */
1066 StgTSO_flags(threadid) =
1067 StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
1068 (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));
1070 foreign "C" scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr") [];
1072 // switch at the earliest opportunity
1073 Capability_context_switch(MyCapability()) = 1 :: CInt;
1080 jump stg_yield_noregs;
1095 foreign "C" labelThread(R1 "ptr", R2 "ptr") [];
1097 jump %ENTRY_CODE(Sp(0));
1100 isCurrentThreadBoundzh_fast
1104 (r) = foreign "C" isThreadBound(CurrentTSO) [];
1110 /* args: R1 :: ThreadId# */
1118 if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
1119 tso = StgTSO__link(tso);
1123 what_next = TO_W_(StgTSO_what_next(tso));
1124 why_blocked = TO_W_(StgTSO_why_blocked(tso));
1125 // Note: these two reads are not atomic, so they might end up
1126 // being inconsistent. It doesn't matter, since we
1127 // only return one or the other. If we wanted to return the
1128 // contents of block_info too, then we'd have to do some synchronisation.
1130 if (what_next == ThreadComplete) {
1131 ret = 16; // NB. magic, matches up with GHC.Conc.threadStatus
1133 if (what_next == ThreadKilled) {
1142 /* -----------------------------------------------------------------------------
1144 * -------------------------------------------------------------------------- */
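/* The frames below implement GHC's STM. An illustrative Haskell-level use of
 * atomically/retry via Control.Concurrent.STM (a sketch; transfer is a
 * hypothetical helper):
 *
 *   import Control.Concurrent.STM
 *
 *   -- Block (retry) until 'from' holds at least n, then move n across.
 *   transfer :: TVar Int -> TVar Int -> Int -> IO ()
 *   transfer from to n = atomically $ do
 *     b <- readTVar from
 *     if b < n
 *       then retry                        -- handled by retryzh_fast below
 *       else do
 *         writeTVar from (b - n)
 *         t <- readTVar to
 *         writeTVar to (t + n)
 */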
1148 // Catch retry frame ------------------------------------------------------------
1150 INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
1151 #if defined(PROFILING)
1152 W_ unused1, W_ unused2,
1154 W_ unused3, P_ unused4, P_ unused5)
1156 W_ r, frame, trec, outer;
1159 trec = StgTSO_trec(CurrentTSO);
1160 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
1161 (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
1163 /* Succeeded (either first branch or second branch) */
1164 StgTSO_trec(CurrentTSO) = outer;
1165 Sp = Sp + SIZEOF_StgCatchRetryFrame;
1166 jump %ENTRY_CODE(Sp(SP_OFF));
1168 /* Did not commit: re-execute */
1170 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
1171 StgTSO_trec(CurrentTSO) = new_trec;
1172 if (StgCatchRetryFrame_running_alt_code(frame) != 0::I32) {
1173 R1 = StgCatchRetryFrame_alt_code(frame);
1175 R1 = StgCatchRetryFrame_first_code(frame);
1182 // Atomically frame ------------------------------------------------------------
1184 INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
1185 #if defined(PROFILING)
1186 W_ unused1, W_ unused2,
1188 P_ unused3, P_ unused4)
1190 W_ frame, trec, valid, next_invariant, q, outer;
1193 trec = StgTSO_trec(CurrentTSO);
1194 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
1196 if (outer == NO_TREC) {
1197 /* First time back at the atomically frame -- pick up invariants */
1198 ("ptr" q) = foreign "C" stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr") [];
1199 StgAtomicallyFrame_next_invariant_to_check(frame) = q;
1202 /* Second/subsequent time back at the atomically frame -- abort the
1203 * tx that's checking the invariant and move on to the next one */
1204 StgTSO_trec(CurrentTSO) = outer;
1205 q = StgAtomicallyFrame_next_invariant_to_check(frame);
1206 StgInvariantCheckQueue_my_execution(q) = trec;
1207 foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
1208 /* Don't free trec -- it's linked from q and will be stashed in the
1209 * invariant if we eventually commit. */
1210 q = StgInvariantCheckQueue_next_queue_entry(q);
1211 StgAtomicallyFrame_next_invariant_to_check(frame) = q;
1215 q = StgAtomicallyFrame_next_invariant_to_check(frame);
1217 if (q != END_INVARIANT_CHECK_QUEUE) {
1218 /* We can't commit yet: another invariant to check */
1219 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [];
1220 StgTSO_trec(CurrentTSO) = trec;
1222 next_invariant = StgInvariantCheckQueue_invariant(q);
1223 R1 = StgAtomicInvariant_code(next_invariant);
1228 /* We've got no more invariants to check, try to commit */
1229 (valid) = foreign "C" stmCommitTransaction(MyCapability() "ptr", trec "ptr") [];
1231 /* Transaction was valid: commit succeeded */
1232 StgTSO_trec(CurrentTSO) = NO_TREC;
1233 Sp = Sp + SIZEOF_StgAtomicallyFrame;
1234 jump %ENTRY_CODE(Sp(SP_OFF));
1236 /* Transaction was not valid: try again */
1237 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
1238 StgTSO_trec(CurrentTSO) = trec;
1239 StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
1240 R1 = StgAtomicallyFrame_code(frame);
1246 INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
1247 #if defined(PROFILING)
1248 W_ unused1, W_ unused2,
1250 P_ unused3, P_ unused4)
1252 W_ frame, trec, valid;
1256 /* The TSO is currently waiting: should we stop waiting? */
1257 (valid) = foreign "C" stmReWait(MyCapability() "ptr", CurrentTSO "ptr") [];
1259 /* Previous attempt is still valid: no point trying again yet */
1260 jump stg_block_noregs;
1262 /* Previous attempt is no longer valid: try again */
1263 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
1264 StgTSO_trec(CurrentTSO) = trec;
1265 StgHeader_info(frame) = stg_atomically_frame_info;
1266 R1 = StgAtomicallyFrame_code(frame);
1271 // STM catch frame --------------------------------------------------------------
1275 /* Catch frames are very similar to update frames, but when entering
1276 * one we just pop the frame off the stack and perform the correct
1277 * kind of return to the activation record underneath us on the stack.
1280 INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
1281 #if defined(PROFILING)
1282 W_ unused1, W_ unused2,
1284 P_ unused3, P_ unused4)
1286 W_ r, frame, trec, outer;
1288 trec = StgTSO_trec(CurrentTSO);
1289 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
1290 (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
1292 /* Commit succeeded */
1293 StgTSO_trec(CurrentTSO) = outer;
1294 Sp = Sp + SIZEOF_StgCatchSTMFrame;
1299 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
1300 StgTSO_trec(CurrentTSO) = new_trec;
1301 R1 = StgCatchSTMFrame_code(frame);
1307 // Primop definition ------------------------------------------------------------
1315 // stmStartTransaction may allocate
1316 MAYBE_GC (R1_PTR, atomicallyzh_fast);
1318 /* Args: R1 = m :: STM a */
1319 STK_CHK_GEN(SIZEOF_StgAtomicallyFrame + WDS(1), R1_PTR, atomicallyzh_fast);
1321 old_trec = StgTSO_trec(CurrentTSO);
1323 /* Nested transactions are not allowed; raise an exception */
1324 if (old_trec != NO_TREC) {
1325 R1 = base_ControlziExceptionziBase_nestedAtomically_closure;
1329 /* Set up the atomically frame */
1330 Sp = Sp - SIZEOF_StgAtomicallyFrame;
1333 SET_HDR(frame,stg_atomically_frame_info, W_[CCCS]);
1334 StgAtomicallyFrame_code(frame) = R1;
1335 StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
1337 /* Start the memory transaction */
1338 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", old_trec "ptr") [R1];
1339 StgTSO_trec(CurrentTSO) = new_trec;
1341 /* Apply R1 to the realworld token */
1350 /* Args: R1 :: STM a */
1351 /* Args: R2 :: Exception -> STM a */
1352 STK_CHK_GEN(SIZEOF_StgCatchSTMFrame + WDS(1), R1_PTR & R2_PTR, catchSTMzh_fast);
1354 /* Set up the catch frame */
1355 Sp = Sp - SIZEOF_StgCatchSTMFrame;
1358 SET_HDR(frame, stg_catch_stm_frame_info, W_[CCCS]);
1359 StgCatchSTMFrame_handler(frame) = R2;
1360 StgCatchSTMFrame_code(frame) = R1;
1362 /* Start a nested transaction to run the body of the try block in */
1365 cur_trec = StgTSO_trec(CurrentTSO);
1366 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", cur_trec "ptr");
1367 StgTSO_trec(CurrentTSO) = new_trec;
1369 /* Apply R1 to the realworld token */
1380 // stmStartTransaction may allocate
1381 MAYBE_GC (R1_PTR & R2_PTR, catchRetryzh_fast);
1383 /* Args: R1 :: STM a */
1384 /* Args: R2 :: STM a */
1385 STK_CHK_GEN(SIZEOF_StgCatchRetryFrame + WDS(1), R1_PTR & R2_PTR, catchRetryzh_fast);
1387 /* Start a nested transaction within which to run the first code */
1388 trec = StgTSO_trec(CurrentTSO);
1389 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [R1,R2];
1390 StgTSO_trec(CurrentTSO) = new_trec;
1392 /* Set up the catch-retry frame */
1393 Sp = Sp - SIZEOF_StgCatchRetryFrame;
1396 SET_HDR(frame, stg_catch_retry_frame_info, W_[CCCS]);
1397 StgCatchRetryFrame_running_alt_code(frame) = 0 :: CInt; // false;
1398 StgCatchRetryFrame_first_code(frame) = R1;
1399 StgCatchRetryFrame_alt_code(frame) = R2;
1401 /* Apply R1 to the realworld token */
1414 MAYBE_GC (NO_PTRS, retryzh_fast); // STM operations may allocate
1416 // Find the enclosing ATOMICALLY_FRAME or CATCH_RETRY_FRAME
1418 StgTSO_sp(CurrentTSO) = Sp;
1419 (frame_type) = foreign "C" findRetryFrameHelper(CurrentTSO "ptr") [];
1420 Sp = StgTSO_sp(CurrentTSO);
1422 trec = StgTSO_trec(CurrentTSO);
1423 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
1425 if (frame_type == CATCH_RETRY_FRAME) {
1426 // The retry reaches a CATCH_RETRY_FRAME before the atomic frame
1427 ASSERT(outer != NO_TREC);
1428 // Abort the transaction attempting the current branch
1429 foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
1430 foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
1431 if (StgCatchRetryFrame_running_alt_code(frame) == 0::I32) {
1432 // Retry in the first branch: try the alternative
1433 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
1434 StgTSO_trec(CurrentTSO) = trec;
1435 StgCatchRetryFrame_running_alt_code(frame) = 1 :: CInt; // true;
1436 R1 = StgCatchRetryFrame_alt_code(frame);
1439 // Retry in the alternative code: propagate the retry
1440 StgTSO_trec(CurrentTSO) = outer;
1441 Sp = Sp + SIZEOF_StgCatchRetryFrame;
1442 goto retry_pop_stack;
1446 // We've reached the ATOMICALLY_FRAME: attempt to wait
1447 ASSERT(frame_type == ATOMICALLY_FRAME);
1448 if (outer != NO_TREC) {
1449 // We called retry while checking invariants, so abort the current
1450 // invariant check (merging its TVar accesses into the parent's read
1451 // set so we'll wait on them)
1452 foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
1453 foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
1455 StgTSO_trec(CurrentTSO) = trec;
1456 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
1458 ASSERT(outer == NO_TREC);
1460 (r) = foreign "C" stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr") [];
1462 // Transaction was valid: stmWait put us on the TVars' queues, we now block
1463 StgHeader_info(frame) = stg_atomically_waiting_frame_info;
1465 // Fix up the stack in the unregisterised case: the return convention is different.
1466 R3 = trec; // passing to stmWaitUnblock()
1467 jump stg_block_stmwait;
1469 // Transaction was not valid: retry immediately
1470 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
1471 StgTSO_trec(CurrentTSO) = trec;
1472 R1 = StgAtomicallyFrame_code(frame);
1483 /* Args: R1 = invariant closure */
1484 MAYBE_GC (R1_PTR, checkzh_fast);
1486 trec = StgTSO_trec(CurrentTSO);
1488 foreign "C" stmAddInvariantToCheck(MyCapability() "ptr",
1492 jump %ENTRY_CODE(Sp(0));
1501 /* Args: R1 = initialisation value */
1503 MAYBE_GC (R1_PTR, newTVarzh_fast);
1505 ("ptr" tv) = foreign "C" stmNewTVar(MyCapability() "ptr", new_value "ptr") [];
1516 /* Args: R1 = TVar closure */
1518 MAYBE_GC (R1_PTR, readTVarzh_fast); // Call to stmReadTVar may allocate
1519 trec = StgTSO_trec(CurrentTSO);
1521 ("ptr" result) = foreign "C" stmReadTVar(MyCapability() "ptr", trec "ptr", tvar "ptr") [];
1531 result = StgTVar_current_value(R1);
1532 if (%INFO_PTR(result) == stg_TREC_HEADER_info) {
1544 /* Args: R1 = TVar closure */
1545 /* R2 = New value */
1547 MAYBE_GC (R1_PTR & R2_PTR, writeTVarzh_fast); // Call to stmWriteTVar may allocate
1548 trec = StgTSO_trec(CurrentTSO);
1551 foreign "C" stmWriteTVar(MyCapability() "ptr", trec "ptr", tvar "ptr", new_value "ptr") [];
1553 jump %ENTRY_CODE(Sp(0));
1557 /* -----------------------------------------------------------------------------
1560 * take & putMVar work as follows. Firstly, an important invariant:
1562 * If the MVar is full, then the blocking queue contains only
1563 * threads blocked on putMVar, and if the MVar is empty then the
1564 * blocking queue contains only threads blocked on takeMVar.
1566 * takeMVar:
1567 * MVar empty : then add ourselves to the blocking queue
1568 * MVar full : remove the value from the MVar, and
1569 * blocking queue empty : return
1570 * blocking queue non-empty : perform the first blocked putMVar
1571 * from the queue, and wake up the
1572 * thread (MVar is now full again)
1574 * putMVar is just the dual of the above algorithm.
1576 * How do we "perform a putMVar"? Well, we have to fiddle around with
1577 * the stack of the thread waiting to do the putMVar. See
1578 * stg_block_putmvar and stg_block_takemvar in HeapStackCheck.c for
1579 * the stack layout, and the PerformPut and PerformTake macros below.
1581 * It is important that a blocked take or put is woken up with the
1582 * take/put already performed, because otherwise there would be a
1583 * small window of vulnerability where the thread could receive an
1584 * exception and never perform its take or put, and we'd end up with a
1585 * deadlock.
1587 * -------------------------------------------------------------------------- */
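/* An illustrative Haskell-level view of the behaviour described above, using
 * Control.Concurrent.MVar (a sketch, not part of this file):
 *
 *   import Control.Concurrent
 *   import Control.Concurrent.MVar
 *
 *   main :: IO ()
 *   main = do
 *     m <- newEmptyMVar                      -- newMVar# : starts empty
 *     _ <- forkIO (putMVar m (42 :: Int))    -- putMVar# : fills it, waking a taker
 *     v <- takeMVar m                        -- takeMVar# : blocks while empty
 *     print v
 */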
1591 /* args: R1 = MVar closure */
1593 if (StgMVar_value(R1) == stg_END_TSO_QUEUE_closure) {
1605 ALLOC_PRIM ( SIZEOF_StgMVar, NO_PTRS, newMVarzh_fast );
1607 mvar = Hp - SIZEOF_StgMVar + WDS(1);
1608 SET_HDR(mvar,stg_MVAR_DIRTY_info,W_[CCCS]);
1609 // MVARs start dirty: generation 0 has no mutable list
1610 StgMVar_head(mvar) = stg_END_TSO_QUEUE_closure;
1611 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1612 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1617 #define PerformTake(tso, value) \
1618 W_[StgTSO_sp(tso) + WDS(1)] = value; \
1619 W_[StgTSO_sp(tso) + WDS(0)] = stg_gc_unpt_r1_info;
1621 #define PerformPut(tso,lval) \
1622 StgTSO_sp(tso) = StgTSO_sp(tso) + WDS(3); \
1623 lval = W_[StgTSO_sp(tso) - WDS(1)];
1627 W_ mvar, val, info, tso;
1629 /* args: R1 = MVar closure */
1632 #if defined(THREADED_RTS)
1633 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
1635 info = GET_INFO(mvar);
1638 if (info == stg_MVAR_CLEAN_info) {
1639 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr") [];
1642 /* If the MVar is empty, put ourselves on its blocking queue,
1643 * and wait until we're woken up.
1645 if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
1646 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1647 StgMVar_head(mvar) = CurrentTSO;
1649 foreign "C" setTSOLink(MyCapability() "ptr",
1650 StgMVar_tail(mvar) "ptr",
1653 StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
1654 StgTSO_block_info(CurrentTSO) = mvar;
1655 // write barrier for throwTo(), which looks at block_info
1656 // if why_blocked==BlockedOnMVar.
1657 prim %write_barrier() [];
1658 StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
1659 StgMVar_tail(mvar) = CurrentTSO;
1662 jump stg_block_takemvar;
1665 /* we got the value... */
1666 val = StgMVar_value(mvar);
1668 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure)
1670 /* There are putMVar(s) waiting...
1671 * wake up the first thread on the queue
1673 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1675 /* actually perform the putMVar for the thread that we just woke up */
1676 tso = StgMVar_head(mvar);
1677 PerformPut(tso,StgMVar_value(mvar));
1679 if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
1680 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1683 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1684 StgMVar_head(mvar) "ptr", 1) [];
1685 StgMVar_head(mvar) = tso;
1687 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1688 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1691 #if defined(THREADED_RTS)
1692 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1694 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1700 /* No further putMVars, MVar is now empty */
1701 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1703 #if defined(THREADED_RTS)
1704 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1706 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1716 W_ mvar, val, info, tso;
1718 /* args: R1 = MVar closure */
1722 #if defined(THREADED_RTS)
1723 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
1725 info = GET_INFO(mvar);
1728 if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
1729 #if defined(THREADED_RTS)
1730 unlockClosure(mvar, info);
1732 /* HACK: we need a pointer to pass back,
1733 * so we abuse NO_FINALIZER_closure
1735 RET_NP(0, stg_NO_FINALIZER_closure);
1738 if (info == stg_MVAR_CLEAN_info) {
1739 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
1742 /* we got the value... */
1743 val = StgMVar_value(mvar);
1745 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
1747 /* There are putMVar(s) waiting...
1748 * wake up the first thread on the queue
1750 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1752 /* actually perform the putMVar for the thread that we just woke up */
1753 tso = StgMVar_head(mvar);
1754 PerformPut(tso,StgMVar_value(mvar));
1755 if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
1756 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1759 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1760 StgMVar_head(mvar) "ptr", 1) [];
1761 StgMVar_head(mvar) = tso;
1763 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1764 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1766 #if defined(THREADED_RTS)
1767 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1769 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1774 /* No further putMVars, MVar is now empty */
1775 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1776 #if defined(THREADED_RTS)
1777 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1779 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1789 W_ mvar, val, info, tso;
1791 /* args: R1 = MVar, R2 = value */
1795 #if defined(THREADED_RTS)
1796 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
1798 info = GET_INFO(mvar);
1801 if (info == stg_MVAR_CLEAN_info) {
1802 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
1805 if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
1806 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1807 StgMVar_head(mvar) = CurrentTSO;
1809 foreign "C" setTSOLink(MyCapability() "ptr",
1810 StgMVar_tail(mvar) "ptr",
1813 StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
1814 StgTSO_block_info(CurrentTSO) = mvar;
1815 // write barrier for throwTo(), which looks at block_info
1816 // if why_blocked==BlockedOnMVar.
1817 prim %write_barrier() [];
1818 StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
1819 StgMVar_tail(mvar) = CurrentTSO;
1823 jump stg_block_putmvar;
1826 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
1828 /* There are takeMVar(s) waiting: wake up the first one
1830 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1832 /* actually perform the takeMVar */
1833 tso = StgMVar_head(mvar);
1834 PerformTake(tso, val);
1835 if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
1836 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1839 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1840 StgMVar_head(mvar) "ptr", 1) [];
1841 StgMVar_head(mvar) = tso;
1843 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1844 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1847 #if defined(THREADED_RTS)
1848 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1850 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1852 jump %ENTRY_CODE(Sp(0));
1856 /* No further takes, the MVar is now full. */
1857 StgMVar_value(mvar) = val;
1859 #if defined(THREADED_RTS)
1860 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1862 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1864 jump %ENTRY_CODE(Sp(0));
1867 /* ToDo: yield afterward for better communication performance? */
1875 /* args: R1 = MVar, R2 = value */
1878 #if defined(THREADED_RTS)
1879 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [R2];
1881 info = GET_INFO(mvar);
1884 if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
1885 #if defined(THREADED_RTS)
1886 unlockClosure(mvar, info);
1891 if (info == stg_MVAR_CLEAN_info) {
1892 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
1895 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
1897 /* There are takeMVar(s) waiting: wake up the first one
1899 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1901 /* actually perform the takeMVar */
1902 tso = StgMVar_head(mvar);
1903 PerformTake(tso, R2);
1904 if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
1905 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1908 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1909 StgMVar_head(mvar) "ptr", 1) [];
1910 StgMVar_head(mvar) = tso;
1912 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1913 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1916 #if defined(THREADED_RTS)
1917 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1919 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1924 /* No further takes, the MVar is now full. */
1925 StgMVar_value(mvar) = R2;
1927 #if defined(THREADED_RTS)
1928 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1930 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1935 /* ToDo: yield afterward for better communication performance? */
1939 /* -----------------------------------------------------------------------------
1940 Stable pointer primitives
1941 ------------------------------------------------------------------------- */
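/* Illustrative Haskell-level use via System.Mem.StableName (a sketch;
 * sameObject is a hypothetical helper):
 *
 *   import System.Mem.StableName
 *
 *   sameObject :: a -> a -> IO Bool
 *   sameObject x y = do
 *     sx <- makeStableName x
 *     sy <- makeStableName y
 *     return (sx == sy)   -- equal stable names imply the same heap object
 */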
1943 makeStableNamezh_fast
1947 ALLOC_PRIM( SIZEOF_StgStableName, R1_PTR, makeStableNamezh_fast );
1949 (index) = foreign "C" lookupStableName(R1 "ptr") [];
1951 /* Is there already a StableName for this heap object?
1952 * stable_ptr_table is a pointer to an array of snEntry structs.
1954 if ( snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) == NULL ) {
1955 sn_obj = Hp - SIZEOF_StgStableName + WDS(1);
1956 SET_HDR(sn_obj, stg_STABLE_NAME_info, W_[CCCS]);
1957 StgStableName_sn(sn_obj) = index;
1958 snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) = sn_obj;
1960 sn_obj = snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry);
1967 makeStablePtrzh_fast
1971 MAYBE_GC(R1_PTR, makeStablePtrzh_fast);
1972 ("ptr" sp) = foreign "C" getStablePtr(R1 "ptr") [];
1976 deRefStablePtrzh_fast
1978 /* Args: R1 = the stable ptr */
1981 r = snEntry_addr(W_[stable_ptr_table] + sp*SIZEOF_snEntry);
1985 /* -----------------------------------------------------------------------------
1986 Bytecode object primitives
1987 ------------------------------------------------------------------------- */
1997 W_ bco, bitmap_arr, bytes, words;
2001 words = BYTES_TO_WDS(SIZEOF_StgBCO) + StgArrWords_words(bitmap_arr);
2004 ALLOC_PRIM( bytes, R1_PTR&R2_PTR&R3_PTR&R5_PTR, newBCOzh_fast );
2006 bco = Hp - bytes + WDS(1);
2007 SET_HDR(bco, stg_BCO_info, W_[CCCS]);
2009 StgBCO_instrs(bco) = R1;
2010 StgBCO_literals(bco) = R2;
2011 StgBCO_ptrs(bco) = R3;
2012 StgBCO_arity(bco) = HALF_W_(R4);
2013 StgBCO_size(bco) = HALF_W_(words);
2015 // Copy the arity/bitmap info into the BCO
2019 if (i < StgArrWords_words(bitmap_arr)) {
2020 StgBCO_bitmap(bco,i) = StgArrWords_payload(bitmap_arr,i);
2031 // R1 = the BCO# for the AP
2035 // This function is *only* used to wrap zero-arity BCOs in an
2036 // updatable wrapper (see ByteCodeLink.lhs). An AP thunk is always
2037 // saturated and always points directly to a FUN or BCO.
2038 ASSERT(%INFO_TYPE(%GET_STD_INFO(R1)) == HALF_W_(BCO) &&
2039 StgBCO_arity(R1) == HALF_W_(0));
2041 HP_CHK_GEN_TICKY(SIZEOF_StgAP, R1_PTR, mkApUpd0zh_fast);
2042 TICK_ALLOC_UP_THK(0, 0);
2043 CCCS_ALLOC(SIZEOF_StgAP);
2045 ap = Hp - SIZEOF_StgAP + WDS(1);
2046 SET_HDR(ap, stg_AP_info, W_[CCCS]);
2048 StgAP_n_args(ap) = HALF_W_(0);
2054 unpackClosurezh_fast
2056 /* args: R1 = closure to analyze */
2057 // TODO: Consider the absence of ptrs or nonptrs as a special case ?
2059 W_ info, ptrs, nptrs, p, ptrs_arr, nptrs_arr;
2060 info = %GET_STD_INFO(UNTAG(R1));
2062 // Some closures have non-standard layout, so we omit those here.
2064 type = TO_W_(%INFO_TYPE(info));
2065 switch [0 .. N_CLOSURE_TYPES] type {
2066 case THUNK_SELECTOR : {
2071 case THUNK, THUNK_1_0, THUNK_0_1, THUNK_2_0, THUNK_1_1,
2072 THUNK_0_2, THUNK_STATIC, AP, PAP, AP_STACK, BCO : {
2078 ptrs = TO_W_(%INFO_PTRS(info));
2079 nptrs = TO_W_(%INFO_NPTRS(info));
2084 W_ ptrs_arr_sz, nptrs_arr_sz;
2085 nptrs_arr_sz = SIZEOF_StgArrWords + WDS(nptrs);
2086 ptrs_arr_sz = SIZEOF_StgMutArrPtrs + WDS(ptrs);
2088 ALLOC_PRIM (ptrs_arr_sz + nptrs_arr_sz, R1_PTR, unpackClosurezh_fast);
2093 ptrs_arr = Hp - nptrs_arr_sz - ptrs_arr_sz + WDS(1);
2094 nptrs_arr = Hp - nptrs_arr_sz + WDS(1);
2096 SET_HDR(ptrs_arr, stg_MUT_ARR_PTRS_FROZEN_info, W_[CCCS]);
2097 StgMutArrPtrs_ptrs(ptrs_arr) = ptrs;
2101 W_[ptrs_arr + SIZEOF_StgMutArrPtrs + WDS(p)] = StgClosure_payload(clos,p);
2106 SET_HDR(nptrs_arr, stg_ARR_WORDS_info, W_[CCCS]);
2107 StgArrWords_words(nptrs_arr) = nptrs;
2111 W_[BYTE_ARR_CTS(nptrs_arr) + WDS(p)] = StgClosure_payload(clos, p+ptrs);
2115 RET_NPP(info, ptrs_arr, nptrs_arr);
2118 /* -----------------------------------------------------------------------------
2119 Thread I/O blocking primitives
2120 -------------------------------------------------------------------------- */
2122 /* Add a thread to the end of the blocked queue. (C-- version of the C
2123 * macro in Schedule.h).
2125 #define APPEND_TO_BLOCKED_QUEUE(tso) \
2126 ASSERT(StgTSO__link(tso) == END_TSO_QUEUE); \
2127 if (W_[blocked_queue_hd] == END_TSO_QUEUE) { \
2128 W_[blocked_queue_hd] = tso; \
2130 foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl] "ptr", tso) []; \
2132 W_[blocked_queue_tl] = tso;
2138 foreign "C" barf("waitRead# on threaded RTS") never returns;
2141 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2142 StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
2143 StgTSO_block_info(CurrentTSO) = R1;
2144 // No locking - we're not going to use this interface in the
2145 // threaded RTS anyway.
2146 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2147 jump stg_block_noregs;
2155 foreign "C" barf("waitWrite# on threaded RTS") never returns;
2158 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2159 StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
2160 StgTSO_block_info(CurrentTSO) = R1;
2161 // No locking - we're not going to use this interface in the
2162 // threaded RTS anyway.
2163 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2164 jump stg_block_noregs;
2169 STRING(stg_delayzh_malloc_str, "delayzh_fast")
2172 #ifdef mingw32_HOST_OS
2180 foreign "C" barf("delay# on threaded RTS") never returns;
2183 /* args: R1 (microsecond delay amount) */
2184 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2185 StgTSO_why_blocked(CurrentTSO) = BlockedOnDelay::I16;
2187 #ifdef mingw32_HOST_OS
2189 /* could probably allocate this on the heap instead */
2190 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
2191 stg_delayzh_malloc_str);
2192 (reqID) = foreign "C" addDelayRequest(R1);
2193 StgAsyncIOResult_reqID(ares) = reqID;
2194 StgAsyncIOResult_len(ares) = 0;
2195 StgAsyncIOResult_errCode(ares) = 0;
2196 StgTSO_block_info(CurrentTSO) = ares;
2198 /* Having all async-blocked threads reside on the blocked_queue
2199 * simplifies matters, so change the status to OnDoProc and put the
2200 * delayed thread on the blocked_queue.
2202 StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
2203 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2204 jump stg_block_async_void;
2210 (time) = foreign "C" getourtimeofday() [R1];
2211 divisor = TO_W_(RtsFlags_MiscFlags_tickInterval(RtsFlags));
2215 divisor = divisor * 1000;
2216 target = ((R1 + divisor - 1) / divisor) /* divide rounding up */
2217 + time + 1; /* Add 1 as getourtimeofday rounds down */
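/* (R1 + divisor - 1) / divisor is ceiling division: for illustration, with a
   20 ms tick the divisor is 20000 us, so a request of R1 = 50000 us gives
   (50000 + 19999) / 20000 = 3 ticks, and target = time + 3 + 1. */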
2218 StgTSO_block_info(CurrentTSO) = target;
2220 /* Insert the new thread in the sleeping queue. */
2222 t = W_[sleeping_queue];
2224 if (t != END_TSO_QUEUE && StgTSO_block_info(t) < target) {
2226 t = StgTSO__link(t);
2230 StgTSO__link(CurrentTSO) = t;
2232 W_[sleeping_queue] = CurrentTSO;
2234 foreign "C" setTSOLink(MyCapability() "ptr", prev "ptr", CurrentTSO) [];
2236 jump stg_block_noregs;
2238 #endif /* !THREADED_RTS */
2242 #ifdef mingw32_HOST_OS
2243 STRING(stg_asyncReadzh_malloc_str, "asyncReadzh_fast")
2250 foreign "C" barf("asyncRead# on threaded RTS") never returns;
2253 /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
2254 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2255 StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
2257 /* could probably allocate this on the heap instead */
2258 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
2259 stg_asyncReadzh_malloc_str)
2261 (reqID) = foreign "C" addIORequest(R1, 0/*FALSE*/,R2,R3,R4 "ptr") [];
2262 StgAsyncIOResult_reqID(ares) = reqID;
2263 StgAsyncIOResult_len(ares) = 0;
2264 StgAsyncIOResult_errCode(ares) = 0;
2265 StgTSO_block_info(CurrentTSO) = ares;
2266 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2267 jump stg_block_async;
2271 STRING(stg_asyncWritezh_malloc_str, "asyncWritezh_fast")
2278 foreign "C" barf("asyncWrite# on threaded RTS") never returns;
2281 /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
2282 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2283 StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
2285 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
2286 stg_asyncWritezh_malloc_str)
2288 (reqID) = foreign "C" addIORequest(R1, 1/*TRUE*/,R2,R3,R4 "ptr") [];
2290 StgAsyncIOResult_reqID(ares) = reqID;
2291 StgAsyncIOResult_len(ares) = 0;
2292 StgAsyncIOResult_errCode(ares) = 0;
2293 StgTSO_block_info(CurrentTSO) = ares;
2294 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2295 jump stg_block_async;
2299 STRING(stg_asyncDoProczh_malloc_str, "asyncDoProczh_fast")
2306 foreign "C" barf("asyncDoProc# on threaded RTS") never returns;
2309 /* args: R1 = proc, R2 = param */
2310 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2311 StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
2313 /* could probably allocate this on the heap instead */
2314 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
2315 stg_asyncDoProczh_malloc_str)
2317 (reqID) = foreign "C" addDoProcRequest(R1 "ptr",R2 "ptr") [];
2318 StgAsyncIOResult_reqID(ares) = reqID;
2319 StgAsyncIOResult_len(ares) = 0;
2320 StgAsyncIOResult_errCode(ares) = 0;
2321 StgTSO_block_info(CurrentTSO) = ares;
2322 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2323 jump stg_block_async;
2328 // noDuplicate# tries to ensure that none of the thunks under
2329 // evaluation by the current thread are also under evaluation by
2330 // another thread. It relies on *both* threads doing noDuplicate#;
2331 // the second one will get blocked if they are duplicating some work.
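// At the Haskell level this is what distinguishes unsafePerformIO from
// unsafeDupablePerformIO: unsafePerformIO is essentially
// unsafeDupablePerformIO with a noDuplicate call prepended. A sketch
// (oncePerThunk is a hypothetical name):
//
//   import GHC.IO (noDuplicate)
//
//   oncePerThunk :: IO a -> IO a
//   oncePerThunk act = noDuplicate >> act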
2334 SAVE_THREAD_STATE();
2335 ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
2336 foreign "C" threadPaused (MyCapability() "ptr", CurrentTSO "ptr") [];
2338 if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
2339 jump stg_threadFinished;
2341 LOAD_THREAD_STATE();
2342 ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
2343 jump %ENTRY_CODE(Sp(0));
2347 getApStackValzh_fast
2349 W_ ap_stack, offset, val, ok;
2351 /* args: R1 = AP_STACK, R2 = offset */
2355 if (%INFO_PTR(ap_stack) == stg_AP_STACK_info) {
2357 val = StgAP_STACK_payload(ap_stack,offset);
2365 /* -----------------------------------------------------------------------------
2367 -------------------------------------------------------------------------- */
2369 // Write the cost center stack of the first argument on stderr; return
2370 // the second. Possibly only makes sense for already evaluated
2377 ccs = StgHeader_ccs(UNTAG(R1));
2378 foreign "C" fprintCCS_stderr(ccs "ptr") [R2];
2389 #ifndef THREADED_RTS
2390 RET_NP(0,ghczmprim_GHCziBool_False_closure);
2392 (spark) = foreign "C" findSpark(MyCapability());
2396 RET_NP(0,ghczmprim_GHCziBool_False_closure);