1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
5 * Out-of-line primitive operations
7 * This file contains the implementations of all the primitive
8 * operations ("primops") which are not expanded inline. See
9 * ghc/compiler/prelude/primops.txt.pp for a list of all the primops;
10 * this file contains code for most of those with the attribute
13 * Entry convention: the entry convention for a primop is that all the
14 * args are in Stg registers (R1, R2, etc.). This is to make writing
15 * the primops easier. (see compiler/codeGen/CgCallConv.hs).
17 * Return convention: results from a primop are generally returned
18 * using the ordinary unboxed tuple return convention. The C-- parser
19 * implements the RET_xxxx() macros to perform unboxed-tuple returns
20 * based on the prevailing return convention.
22 * This file is written in a subset of C--, extended with various
23 * features specific to GHC. It is compiled by GHC directly. For the
24 * syntax of .cmm files, see the parser in ghc/compiler/cmm/CmmParse.y.
26 * ---------------------------------------------------------------------------*/
31 #ifndef mingw32_HOST_OS
41 import __gmpz_tdiv_qr;
42 import __gmpz_fdiv_qr;
43 import __gmpz_divexact;
49 import pthread_mutex_lock;
50 import pthread_mutex_unlock;
52 import base_ControlziExceptionziBase_nestedAtomically_closure;
53 import EnterCriticalSection;
54 import LeaveCriticalSection;
55 import ghczmprim_GHCziBool_False_closure;
57 /*-----------------------------------------------------------------------------
60 Basically just new*Array - the others are all inline macros.
62 The size arg is always passed in R1, and the result returned in R1.
64    The slow entry point is for returning from a heap check; the saved
65    size argument must be re-loaded from the stack.
66 -------------------------------------------------------------------------- */
68 /* for objects that are *less* than the size of a word, make sure we
69 * round up to the nearest word for the size of the array.
74 W_ words, payload_words, n, p;
75 MAYBE_GC(NO_PTRS,newByteArrayzh_fast);
77 payload_words = ROUNDUP_BYTES_TO_WDS(n);
78 words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
79 ("ptr" p) = foreign "C" allocateLocal(MyCapability() "ptr",words) [];
80 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
81 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
82 StgArrWords_words(p) = payload_words;
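/* A minimal Haskell-level sketch of the byte-to-word rounding that
 * ROUNDUP_BYTES_TO_WDS performs above (illustrative only, not RTS code;
 * the 8-byte word size is an assumption for a 64-bit platform):
 *
 *     roundupBytesToWords :: Int -> Int
 *     roundupBytesToWords n = (n + wordSize - 1) `div` wordSize
 *       where wordSize = 8   -- assumed word size in bytes
 *
 *     -- e.g. roundupBytesToWords 3 == 1, roundupBytesToWords 9 == 2
 */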
87 #define BA_MASK (BA_ALIGN-1)
89 newPinnedByteArrayzh_fast
91 W_ words, payload_words, n, p;
93 MAYBE_GC(NO_PTRS,newPinnedByteArrayzh_fast);
95 payload_words = ROUNDUP_BYTES_TO_WDS(n);
97 words = payload_words + ((SIZEOF_StgArrWords + BA_MASK) & ~BA_MASK);
99 ("ptr" p) = foreign "C" allocatePinned(words) [];
100 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
102 	// This bumps p forwards so that the payload falls on a BA_ALIGN-byte boundary.
103 p = p + ((-p - SIZEOF_StgArrWords) & BA_MASK);
105 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
106 StgArrWords_words(p) = payload_words;
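/* A small Haskell sketch of the alignment bump used above: p is advanced so
 * that the payload (p plus the ArrWords header) lands on an aligned address.
 * Illustrative only, not RTS code; assumes the alignment is a power of two.
 *
 *     import Data.Bits ((.&.))
 *
 *     alignPayload :: Int -> Int -> Int -> Int
 *     alignPayload align hdr p = p + ((negate p - hdr) .&. (align - 1))
 *
 *     -- e.g. alignPayload 16 8 100 == 104, and (104 + 8) `mod` 16 == 0
 */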
110 newAlignedPinnedByteArrayzh_fast
112 W_ words, payload_words, n, p, mask;
114 MAYBE_GC(NO_PTRS,newAlignedPinnedByteArrayzh_fast);
123 payload_words = ROUNDUP_BYTES_TO_WDS(n);
125 // We want an <align>-byte aligned array. allocatePinned() gives us
126 // 8-byte aligned memory by default, but we want to align the
127 // *goods* inside the ArrWords object, so we have to check the
128 // size of the ArrWords header and adjust our size accordingly.
129 words = payload_words + ((SIZEOF_StgArrWords + mask) & ~mask);
131 ("ptr" p) = foreign "C" allocatePinned(words) [];
132 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
134 // This bumps p forwards so that the payload falls on an R2-byte boundary.
135 p = p + ((-p - SIZEOF_StgArrWords) & mask);
137 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
138 StgArrWords_words(p) = payload_words;
144 W_ words, n, init, arr, p;
145 /* Args: R1 = words, R2 = initialisation value */
148 MAYBE_GC(R2_PTR,newArrayzh_fast);
150 words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + n;
151 ("ptr" arr) = foreign "C" allocateLocal(MyCapability() "ptr",words) [R2];
152 TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(n), 0);
154 SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, W_[CCCS]);
155 StgMutArrPtrs_ptrs(arr) = n;
157     // Initialise all elements of the array with the value in R2
159 p = arr + SIZEOF_StgMutArrPtrs;
161 if (p < arr + WDS(words)) {
170 unsafeThawArrayzh_fast
172 // SUBTLETY TO DO WITH THE OLD GEN MUTABLE LIST
174 // A MUT_ARR_PTRS lives on the mutable list, but a MUT_ARR_PTRS_FROZEN
175 // normally doesn't. However, when we freeze a MUT_ARR_PTRS, we leave
176 // it on the mutable list for the GC to remove (removing something from
177 // the mutable list is not easy, because the mut_list is only singly-linked).
179 // So that we can tell whether a MUT_ARR_PTRS_FROZEN is on the mutable list,
180 // when we freeze it we set the info ptr to be MUT_ARR_PTRS_FROZEN0
181 // to indicate that it is still on the mutable list.
183 // So, when we thaw a MUT_ARR_PTRS_FROZEN, we must cope with two cases:
184 // either it is on a mut_list, or it isn't. We adopt the convention that
185 // the closure type is MUT_ARR_PTRS_FROZEN0 if it is on the mutable list,
186 // and MUT_ARR_PTRS_FROZEN otherwise. In fact it wouldn't matter if
187 // we put it on the mutable list more than once, but it would get scavenged
188 // multiple times during GC, which would be unnecessarily slow.
190 if (StgHeader_info(R1) != stg_MUT_ARR_PTRS_FROZEN0_info) {
191 SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
192 recordMutable(R1, R1);
193 // must be done after SET_INFO, because it ASSERTs closure_MUTABLE()
196 SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
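/* A Haskell-level sketch of the freeze/thaw pairing these closure types
 * support (illustrative; uses the array package rather than raw primops):
 * build the array mutably, then unsafeFreeze it in place, which is the
 * operation that leaves a frozen MUT_ARR_PTRS on the mutable list as
 * described above.
 *
 *     import Data.Array (Array)
 *     import Data.Array.IO (IOArray, newArray, writeArray)
 *     import Data.Array.Unsafe (unsafeFreeze)
 *
 *     buildFrozen :: IO (Array Int Int)
 *     buildFrozen = do
 *       arr <- newArray (0, 9) 0 :: IO (IOArray Int Int)
 *       writeArray arr 3 42
 *       unsafeFreeze arr   -- no copy; the closure is flipped to a frozen variant
 */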
201 /* -----------------------------------------------------------------------------
203 -------------------------------------------------------------------------- */
208 /* Args: R1 = initialisation value */
210 ALLOC_PRIM( SIZEOF_StgMutVar, R1_PTR, newMutVarzh_fast);
212 mv = Hp - SIZEOF_StgMutVar + WDS(1);
213 SET_HDR(mv,stg_MUT_VAR_DIRTY_info,W_[CCCS]);
214 StgMutVar_var(mv) = R1;
219 atomicModifyMutVarzh_fast
221 W_ mv, f, z, x, y, r, h;
222 /* Args: R1 :: MutVar#, R2 :: a -> (a,b) */
224    /* If x is the current contents of the MutVar#, then
225       We want to make the new contents point to (sel_0 (f x))
229       and the return value is (sel_1 (f x));
233       obviously we can share (f x).
235 z = [stg_ap_2 f x] (max (HS + 2) MIN_UPD_SIZE)
236 y = [stg_sel_0 z] (max (HS + 1) MIN_UPD_SIZE)
237 r = [stg_sel_1 z] (max (HS + 1) MIN_UPD_SIZE)
241 #define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
242 #define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),WDS(MIN_UPD_SIZE-1))
244 #define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(1))
245 #define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),0)
249 #define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
250 #define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),WDS(MIN_UPD_SIZE-2))
252 #define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(2))
253 #define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),0)
256 #define SIZE (THUNK_2_SIZE + THUNK_1_SIZE + THUNK_1_SIZE)
258 HP_CHK_GEN_TICKY(SIZE, R1_PTR & R2_PTR, atomicModifyMutVarzh_fast);
263 TICK_ALLOC_THUNK_2();
264 CCCS_ALLOC(THUNK_2_SIZE);
265 z = Hp - THUNK_2_SIZE + WDS(1);
266 SET_HDR(z, stg_ap_2_upd_info, W_[CCCS]);
267 LDV_RECORD_CREATE(z);
268 StgThunk_payload(z,0) = f;
270 TICK_ALLOC_THUNK_1();
271 CCCS_ALLOC(THUNK_1_SIZE);
272 y = z - THUNK_1_SIZE;
273 SET_HDR(y, stg_sel_0_upd_info, W_[CCCS]);
274 LDV_RECORD_CREATE(y);
275 StgThunk_payload(y,0) = z;
277 TICK_ALLOC_THUNK_1();
278 CCCS_ALLOC(THUNK_1_SIZE);
279 r = y - THUNK_1_SIZE;
280 SET_HDR(r, stg_sel_1_upd_info, W_[CCCS]);
281 LDV_RECORD_CREATE(r);
282 StgThunk_payload(r,0) = z;
285 x = StgMutVar_var(mv);
286 StgThunk_payload(z,1) = x;
288 (h) = foreign "C" cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var, x, y) [];
289 if (h != x) { goto retry; }
291 StgMutVar_var(mv) = y;
294 if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
295 foreign "C" dirty_MUT_VAR(BaseReg "ptr", mv "ptr") [];
301 /* -----------------------------------------------------------------------------
302 Weak Pointer Primitives
303 -------------------------------------------------------------------------- */
305 STRING(stg_weak_msg,"New weak pointer at %p\n")
311 R3 = finalizer (or NULL)
316 R3 = stg_NO_FINALIZER_closure;
319 ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR & R3_PTR, mkWeakzh_fast );
321 w = Hp - SIZEOF_StgWeak + WDS(1);
322 SET_HDR(w, stg_WEAK_info, W_[CCCS]);
324 // We don't care about cfinalizer here.
325 // Should StgWeak_cfinalizer(w) be stg_NO_FINALIZER_closure or
329 StgWeak_value(w) = R2;
330 StgWeak_finalizer(w) = R3;
331 StgWeak_cfinalizer(w) = stg_NO_FINALIZER_closure;
333 StgWeak_link(w) = W_[weak_ptr_list];
334 W_[weak_ptr_list] = w;
336 IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);
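/* A Haskell-level sketch of how these weak objects are used from the library
 * side (illustrative): the value stays reachable only while the key is
 * reachable, and the finalizer runs once the key is found to be dead.
 *
 *     import System.Mem.Weak (mkWeak, deRefWeak)
 *
 *     example :: IO ()
 *     example = do
 *       let key = "some key" :: String
 *       w  <- mkWeak key (42 :: Int) (Just (putStrLn "key died"))
 *       mv <- deRefWeak w   -- Just 42 while key is alive, Nothing afterwards
 *       print mv
 */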
341 mkWeakForeignEnvzh_fast
347 R5 = has environment (0 or 1)
350 W_ w, payload_words, words, p;
352 W_ key, val, fptr, ptr, flag, eptr;
361 ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR & R3_PTR, mkWeakForeignEnvzh_fast );
363 w = Hp - SIZEOF_StgWeak + WDS(1);
364 SET_HDR(w, stg_WEAK_info, W_[CCCS]);
367 words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
368 ("ptr" p) = foreign "C" allocateLocal(MyCapability() "ptr", words) [];
370 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
371 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
373 StgArrWords_words(p) = payload_words;
374 StgArrWords_payload(p,0) = fptr;
375 StgArrWords_payload(p,1) = ptr;
376 StgArrWords_payload(p,2) = eptr;
377 StgArrWords_payload(p,3) = flag;
379 // We don't care about the value here.
380 // Should StgWeak_value(w) be stg_NO_FINALIZER_closure or something else?
382 StgWeak_key(w) = key;
383 StgWeak_value(w) = val;
384 StgWeak_finalizer(w) = stg_NO_FINALIZER_closure;
385 StgWeak_cfinalizer(w) = p;
387 StgWeak_link(w) = W_[weak_ptr_list];
388 W_[weak_ptr_list] = w;
390 IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);
404 if (GET_INFO(w) == stg_DEAD_WEAK_info) {
405 RET_NP(0,stg_NO_FINALIZER_closure);
411 // A weak pointer is inherently used, so we do not need to call
412 // LDV_recordDead_FILL_SLOP_DYNAMIC():
413 // LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)w);
414 // or, LDV_recordDead():
415 // LDV_recordDead((StgClosure *)w, sizeofW(StgWeak) - sizeofW(StgProfHeader));
416 // Furthermore, when PROFILING is turned on, dead weak pointers are exactly as
417 // large as weak pointers, so there is no need to fill the slop, either.
418 // See stg_DEAD_WEAK_info in StgMiscClosures.hc.
422 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
424 SET_INFO(w,stg_DEAD_WEAK_info);
425 LDV_RECORD_CREATE(w);
427 f = StgWeak_finalizer(w);
428 arr = StgWeak_cfinalizer(w);
430 StgDeadWeak_link(w) = StgWeak_link(w);
432 if (arr != stg_NO_FINALIZER_closure) {
433 foreign "C" runCFinalizer(StgArrWords_payload(arr,0),
434 StgArrWords_payload(arr,1),
435 StgArrWords_payload(arr,2),
436 StgArrWords_payload(arr,3)) [];
439 /* return the finalizer */
440 if (f == stg_NO_FINALIZER_closure) {
441 RET_NP(0,stg_NO_FINALIZER_closure);
453 if (GET_INFO(w) == stg_WEAK_info) {
455 val = StgWeak_value(w);
463 /* -----------------------------------------------------------------------------
464 Arbitrary-precision Integer operations.
466   This code assumes that mp_limb_t == W_, which is currently the case on
467   all platforms that GHC supports.
468 -------------------------------------------------------------------------- */
472 /* arguments: R1 = Int# */
474 W_ val, s, p; /* to avoid aliasing */
477 ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, int2Integerzh_fast );
479 p = Hp - SIZEOF_StgArrWords;
480 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
481 StgArrWords_words(p) = 1;
483 /* mpz_set_si is inlined here, makes things simpler */
496 /* returns (# size :: Int#,
505 /* arguments: R1 = Word# */
507 W_ val, s, p; /* to avoid aliasing */
511 ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, word2Integerzh_fast);
513 p = Hp - SIZEOF_StgArrWords;
514 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
515 StgArrWords_words(p) = 1;
524 /* returns (# size :: Int#,
525 data :: ByteArray# #)
532 * 'long long' primops for converting to/from Integers.
535 #ifdef SUPPORT_LONG_LONGS
537 int64ToIntegerzh_fast
539 /* arguments: L1 = Int64# */
542 W_ hi, lo, s, neg, words_needed, p;
547 hi = TO_W_(val >> 32);
550 if ( hi == 0 || (hi == 0xFFFFFFFF && lo != 0) ) {
551 // minimum is one word
557 ALLOC_PRIM( SIZEOF_StgArrWords + WDS(words_needed),
558 NO_PTRS, int64ToIntegerzh_fast );
560 p = Hp - SIZEOF_StgArrWords - WDS(words_needed) + WDS(1);
561 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
562 StgArrWords_words(p) = words_needed;
574 if ( words_needed == 2 ) {
582 } else /* val==0 */ {
590 /* returns (# size :: Int#,
591 data :: ByteArray# #)
595 word64ToIntegerzh_fast
597 /* arguments: L1 = Word64# */
600 W_ hi, lo, s, words_needed, p;
603 hi = TO_W_(val >> 32);
612 ALLOC_PRIM( SIZEOF_StgArrWords + WDS(words_needed),
613 NO_PTRS, word64ToIntegerzh_fast );
615 p = Hp - SIZEOF_StgArrWords - WDS(words_needed) + WDS(1);
616 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
617 StgArrWords_words(p) = words_needed;
627 } else /* val==0 */ {
632 /* returns (# size :: Int#,
633 data :: ByteArray# #)
640 #endif /* SUPPORT_LONG_LONGS */
642 /* ToDo: this is shockingly inefficient */
647 bits8 [SIZEOF_MP_INT];
652 bits8 [SIZEOF_MP_INT];
657 bits8 [SIZEOF_MP_INT];
662 bits8 [SIZEOF_MP_INT];
667 #define FETCH_MP_TEMP(X) \
669 X = BaseReg + (OFFSET_StgRegTable_r ## X);
671 #define FETCH_MP_TEMP(X) /* Nothing */
674 #define GMP_TAKE2_RET1(name,mp_fun) \
679 FETCH_MP_TEMP(mp_tmp1); \
680 FETCH_MP_TEMP(mp_tmp2); \
681 FETCH_MP_TEMP(mp_result1) \
682 FETCH_MP_TEMP(mp_result2); \
684 /* call doYouWantToGC() */ \
685 MAYBE_GC(R2_PTR & R4_PTR, name); \
692 MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1)); \
693 MP_INT__mp_size(mp_tmp1) = (s1); \
694 MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(d1); \
695 MP_INT__mp_alloc(mp_tmp2) = W_TO_INT(StgArrWords_words(d2)); \
696 MP_INT__mp_size(mp_tmp2) = (s2); \
697 MP_INT__mp_d(mp_tmp2) = BYTE_ARR_CTS(d2); \
699 foreign "C" __gmpz_init(mp_result1 "ptr") []; \
701 /* Perform the operation */ \
702 foreign "C" mp_fun(mp_result1 "ptr",mp_tmp1 "ptr",mp_tmp2 "ptr") []; \
704 RET_NP(TO_W_(MP_INT__mp_size(mp_result1)), \
705 MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords); \
708 #define GMP_TAKE1_RET1(name,mp_fun) \
713 FETCH_MP_TEMP(mp_tmp1); \
714 FETCH_MP_TEMP(mp_result1) \
716 /* call doYouWantToGC() */ \
717 MAYBE_GC(R2_PTR, name); \
722 MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1)); \
723 MP_INT__mp_size(mp_tmp1) = (s1); \
724 MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(d1); \
726 foreign "C" __gmpz_init(mp_result1 "ptr") []; \
728 /* Perform the operation */ \
729 foreign "C" mp_fun(mp_result1 "ptr",mp_tmp1 "ptr") []; \
731 RET_NP(TO_W_(MP_INT__mp_size(mp_result1)), \
732 MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords); \
735 #define GMP_TAKE2_RET2(name,mp_fun) \
740 FETCH_MP_TEMP(mp_tmp1); \
741 FETCH_MP_TEMP(mp_tmp2); \
742 FETCH_MP_TEMP(mp_result1) \
743 FETCH_MP_TEMP(mp_result2) \
745 /* call doYouWantToGC() */ \
746 MAYBE_GC(R2_PTR & R4_PTR, name); \
753 MP_INT__mp_alloc(mp_tmp1) = W_TO_INT(StgArrWords_words(d1)); \
754 MP_INT__mp_size(mp_tmp1) = (s1); \
755 MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(d1); \
756 MP_INT__mp_alloc(mp_tmp2) = W_TO_INT(StgArrWords_words(d2)); \
757 MP_INT__mp_size(mp_tmp2) = (s2); \
758 MP_INT__mp_d(mp_tmp2) = BYTE_ARR_CTS(d2); \
760 foreign "C" __gmpz_init(mp_result1 "ptr") []; \
761 foreign "C" __gmpz_init(mp_result2 "ptr") []; \
763 /* Perform the operation */ \
764 foreign "C" mp_fun(mp_result1 "ptr",mp_result2 "ptr",mp_tmp1 "ptr",mp_tmp2 "ptr") []; \
766 RET_NPNP(TO_W_(MP_INT__mp_size(mp_result1)), \
767 MP_INT__mp_d(mp_result1) - SIZEOF_StgArrWords, \
768 TO_W_(MP_INT__mp_size(mp_result2)), \
769 MP_INT__mp_d(mp_result2) - SIZEOF_StgArrWords); \
772 GMP_TAKE2_RET1(plusIntegerzh_fast, __gmpz_add)
773 GMP_TAKE2_RET1(minusIntegerzh_fast, __gmpz_sub)
774 GMP_TAKE2_RET1(timesIntegerzh_fast, __gmpz_mul)
775 GMP_TAKE2_RET1(gcdIntegerzh_fast, __gmpz_gcd)
776 GMP_TAKE2_RET1(quotIntegerzh_fast, __gmpz_tdiv_q)
777 GMP_TAKE2_RET1(remIntegerzh_fast, __gmpz_tdiv_r)
778 GMP_TAKE2_RET1(divExactIntegerzh_fast, __gmpz_divexact)
779 GMP_TAKE2_RET1(andIntegerzh_fast, __gmpz_and)
780 GMP_TAKE2_RET1(orIntegerzh_fast, __gmpz_ior)
781 GMP_TAKE2_RET1(xorIntegerzh_fast, __gmpz_xor)
782 GMP_TAKE1_RET1(complementIntegerzh_fast, __gmpz_com)
784 GMP_TAKE2_RET2(quotRemIntegerzh_fast, __gmpz_tdiv_qr)
785 GMP_TAKE2_RET2(divModIntegerzh_fast, __gmpz_fdiv_qr)
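/* The two RET2 instantiations above back Haskell's quotRem (GMP's truncating
 * tdiv) and divMod (GMP's flooring fdiv). A small illustrative sketch of the
 * observable difference for negative operands:
 *
 *     quotRemEx, divModEx :: (Integer, Integer)
 *     quotRemEx = quotRem (-7) 2   -- (-3, -1): rounds toward zero
 *     divModEx  = divMod  (-7) 2   -- (-4,  1): rounds toward negative infinity
 */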
789   mp_tmp_w:  W_; // NB. mp_tmp_w is really an mp_limb_t here
795 /* R1 = the first Int#; R2 = the second Int# */
797 FETCH_MP_TEMP(mp_tmp_w);
800 (r) = foreign "C" __gmpn_gcd_1(mp_tmp_w "ptr", 1, R2) [];
803 /* Result parked in R1, return via info-pointer at TOS */
804 jump %ENTRY_CODE(Sp(0));
810 /* R1 = s1; R2 = d1; R3 = the int */
812 (s1) = foreign "C" __gmpn_gcd_1( BYTE_ARR_CTS(R2) "ptr", R1, R3) [];
815 /* Result parked in R1, return via info-pointer at TOS */
816 jump %ENTRY_CODE(Sp(0));
822 /* R1 = s1; R2 = d1; R3 = the int */
823 W_ usize, vsize, v_digit, u_digit;
829 // paraphrased from __gmpz_cmp_si() in the GMP sources
830 if (%gt(v_digit,0)) {
833 if (%lt(v_digit,0)) {
839 if (usize != vsize) {
841 jump %ENTRY_CODE(Sp(0));
846 jump %ENTRY_CODE(Sp(0));
849 u_digit = W_[BYTE_ARR_CTS(R2)];
851 if (u_digit == v_digit) {
853 jump %ENTRY_CODE(Sp(0));
856 if (%gtu(u_digit,v_digit)) { // NB. unsigned: these are mp_limb_t's
862 jump %ENTRY_CODE(Sp(0));
867 /* R1 = s1; R2 = d1; R3 = s2; R4 = d2 */
868 W_ usize, vsize, size, up, vp;
871 // paraphrased from __gmpz_cmp() in the GMP sources
875 if (usize != vsize) {
877 jump %ENTRY_CODE(Sp(0));
882 jump %ENTRY_CODE(Sp(0));
885 if (%lt(usize,0)) { // NB. not <, which is unsigned
891 up = BYTE_ARR_CTS(R2);
892 vp = BYTE_ARR_CTS(R4);
894 (cmp) = foreign "C" __gmpn_cmp(up "ptr", vp "ptr", size) [];
896 if (cmp == 0 :: CInt) {
898 jump %ENTRY_CODE(Sp(0));
901 if (%lt(cmp,0 :: CInt) == %lt(usize,0)) {
906 /* Result parked in R1, return via info-pointer at TOS */
907 jump %ENTRY_CODE(Sp(0));
919 r = W_[R2 + SIZEOF_StgArrWords];
924 /* Result parked in R1, return via info-pointer at TOS */
926 jump %ENTRY_CODE(Sp(0));
938 r = W_[R2 + SIZEOF_StgArrWords];
943 /* Result parked in R1, return via info-pointer at TOS */
945 jump %ENTRY_CODE(Sp(0));
952 FETCH_MP_TEMP(mp_tmp1);
953 FETCH_MP_TEMP(mp_tmp_w);
955 /* arguments: F1 = Float# */
958 ALLOC_PRIM( SIZEOF_StgArrWords + WDS(1), NO_PTRS, decodeFloatzh_fast );
960 /* Be prepared to tell Lennart-coded __decodeFloat
961 where mantissa._mp_d can be put (it does not care about the rest) */
962 p = Hp - SIZEOF_StgArrWords;
963 SET_HDR(p,stg_ARR_WORDS_info,W_[CCCS]);
964 StgArrWords_words(p) = 1;
965 MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(p);
967 /* Perform the operation */
968 foreign "C" __decodeFloat(mp_tmp1 "ptr",mp_tmp_w "ptr" ,arg) [];
970 /* returns: (Int# (expn), Int#, ByteArray#) */
971 RET_NNP(W_[mp_tmp_w], TO_W_(MP_INT__mp_size(mp_tmp1)), p);
974 decodeFloatzuIntzh_fast
978 FETCH_MP_TEMP(mp_tmp1);
979 FETCH_MP_TEMP(mp_tmp_w);
981 /* arguments: F1 = Float# */
984 /* Perform the operation */
985 foreign "C" __decodeFloat_Int(mp_tmp1 "ptr", mp_tmp_w "ptr", arg) [];
987 /* returns: (Int# (mantissa), Int# (exponent)) */
988 RET_NN(W_[mp_tmp1], W_[mp_tmp_w]);
991 #define DOUBLE_MANTISSA_SIZE SIZEOF_DOUBLE
992 #define ARR_SIZE (SIZEOF_StgArrWords + DOUBLE_MANTISSA_SIZE)
998 FETCH_MP_TEMP(mp_tmp1);
999 FETCH_MP_TEMP(mp_tmp_w);
1001 /* arguments: D1 = Double# */
1004 ALLOC_PRIM( ARR_SIZE, NO_PTRS, decodeDoublezh_fast );
1006 /* Be prepared to tell Lennart-coded __decodeDouble
1007 where mantissa.d can be put (it does not care about the rest) */
1008 p = Hp - ARR_SIZE + WDS(1);
1009 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
1010 StgArrWords_words(p) = BYTES_TO_WDS(DOUBLE_MANTISSA_SIZE);
1011 MP_INT__mp_d(mp_tmp1) = BYTE_ARR_CTS(p);
1013 /* Perform the operation */
1014 foreign "C" __decodeDouble(mp_tmp1 "ptr", mp_tmp_w "ptr",arg) [];
1016 /* returns: (Int# (expn), Int#, ByteArray#) */
1017 RET_NNP(W_[mp_tmp_w], TO_W_(MP_INT__mp_size(mp_tmp1)), p);
1020 decodeDoublezu2Intzh_fast
1024 FETCH_MP_TEMP(mp_tmp1);
1025 FETCH_MP_TEMP(mp_tmp2);
1026 FETCH_MP_TEMP(mp_result1);
1027 FETCH_MP_TEMP(mp_result2);
1029 /* arguments: D1 = Double# */
1032 /* Perform the operation */
1033 foreign "C" __decodeDouble_2Int(mp_tmp1 "ptr", mp_tmp2 "ptr",
1034 mp_result1 "ptr", mp_result2 "ptr",
1038 (Int# (mant sign), Word# (mant high), Word# (mant low), Int# (expn)) */
1039 RET_NNNN(W_[mp_tmp1], W_[mp_tmp2], W_[mp_result1], W_[mp_result2]);
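/* A Haskell-level view of what these decode primops feed (illustrative): the
 * RealFloat method decodeFloat returns a mantissa and exponent with
 * value == mantissa * 2^exponent, the mantissa being normalised to the full
 * 53 bits for a normal Double.
 *
 *     decoded :: (Integer, Int)
 *     decoded = decodeFloat (0.75 :: Double)
 *       -- == (6755399441055744, -53); 6755399441055744 * 2^(-53) == 0.75
 */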
1042 /* -----------------------------------------------------------------------------
1043 * Concurrency primitives
1044 * -------------------------------------------------------------------------- */
1048 /* args: R1 = closure to spark */
1050 MAYBE_GC(R1_PTR, forkzh_fast);
1056 ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
1057 RtsFlags_GcFlags_initialStkSize(RtsFlags),
1060 /* start blocked if the current thread is blocked */
1061 StgTSO_flags(threadid) =
1062 StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
1063 (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));
1065 foreign "C" scheduleThread(MyCapability() "ptr", threadid "ptr") [];
1067 // switch at the earliest opportunity
1068 Capability_context_switch(MyCapability()) = 1 :: CInt;
1075 /* args: R1 = cpu, R2 = closure to spark */
1077 MAYBE_GC(R2_PTR, forkOnzh_fast);
1085 ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
1086 RtsFlags_GcFlags_initialStkSize(RtsFlags),
1089 /* start blocked if the current thread is blocked */
1090 StgTSO_flags(threadid) =
1091 StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
1092 (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));
1094 foreign "C" scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr") [];
1096 // switch at the earliest opportunity
1097 Capability_context_switch(MyCapability()) = 1 :: CInt;
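/* A Haskell-level sketch of these two entry points as seen from the
 * libraries (illustrative; the names assume a recent base, where the pair is
 * Control.Concurrent.forkIO/forkOn -- older GHCs expose forkOnIO instead):
 *
 *     import Control.Concurrent (forkIO, forkOn, ThreadId)
 *
 *     spawnBoth :: IO (ThreadId, ThreadId)
 *     spawnBoth = do
 *       t1 <- forkIO   (putStrLn "runs on any capability")
 *       t2 <- forkOn 0 (putStrLn "pinned to capability 0")
 *       return (t1, t2)
 */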
1104 jump stg_yield_noregs;
1119 foreign "C" labelThread(R1 "ptr", R2 "ptr") [];
1121 jump %ENTRY_CODE(Sp(0));
1124 isCurrentThreadBoundzh_fast
1128 (r) = foreign "C" isThreadBound(CurrentTSO) [];
1134 /* args: R1 :: ThreadId# */
1142 if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
1143 tso = StgTSO__link(tso);
1147 what_next = TO_W_(StgTSO_what_next(tso));
1148 why_blocked = TO_W_(StgTSO_why_blocked(tso));
1149 // Note: these two reads are not atomic, so they might end up
1150 // being inconsistent. It doesn't matter, since we
1151 // only return one or the other. If we wanted to return the
1152 // contents of block_info too, then we'd have to do some synchronisation.
1154 if (what_next == ThreadComplete) {
1155 ret = 16; // NB. magic, matches up with GHC.Conc.threadStatus
1157 if (what_next == ThreadKilled) {
1166 /* -----------------------------------------------------------------------------
1168 * -------------------------------------------------------------------------- */
1172 // Catch retry frame ------------------------------------------------------------
1174 INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
1175 #if defined(PROFILING)
1176 W_ unused1, W_ unused2,
1178 W_ unused3, P_ unused4, P_ unused5)
1180 W_ r, frame, trec, outer;
1183 trec = StgTSO_trec(CurrentTSO);
1184 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
1185 (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
1187 /* Succeeded (either first branch or second branch) */
1188 StgTSO_trec(CurrentTSO) = outer;
1189 Sp = Sp + SIZEOF_StgCatchRetryFrame;
1190 jump %ENTRY_CODE(Sp(SP_OFF));
1192 /* Did not commit: re-execute */
1194 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
1195 StgTSO_trec(CurrentTSO) = new_trec;
1196 if (StgCatchRetryFrame_running_alt_code(frame) != 0::I32) {
1197 R1 = StgCatchRetryFrame_alt_code(frame);
1199 R1 = StgCatchRetryFrame_first_code(frame);
1206 // Atomically frame ------------------------------------------------------------
1208 INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
1209 #if defined(PROFILING)
1210 W_ unused1, W_ unused2,
1212 P_ unused3, P_ unused4)
1214 W_ frame, trec, valid, next_invariant, q, outer;
1217 trec = StgTSO_trec(CurrentTSO);
1218 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
1220 if (outer == NO_TREC) {
1221 /* First time back at the atomically frame -- pick up invariants */
1222 ("ptr" q) = foreign "C" stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr") [];
1223 StgAtomicallyFrame_next_invariant_to_check(frame) = q;
1226 /* Second/subsequent time back at the atomically frame -- abort the
1227 * tx that's checking the invariant and move on to the next one */
1228 StgTSO_trec(CurrentTSO) = outer;
1229 q = StgAtomicallyFrame_next_invariant_to_check(frame);
1230 StgInvariantCheckQueue_my_execution(q) = trec;
1231 foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
1232 /* Don't free trec -- it's linked from q and will be stashed in the
1233 * invariant if we eventually commit. */
1234 q = StgInvariantCheckQueue_next_queue_entry(q);
1235 StgAtomicallyFrame_next_invariant_to_check(frame) = q;
1239 q = StgAtomicallyFrame_next_invariant_to_check(frame);
1241 if (q != END_INVARIANT_CHECK_QUEUE) {
1242 /* We can't commit yet: another invariant to check */
1243 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [];
1244 StgTSO_trec(CurrentTSO) = trec;
1246 next_invariant = StgInvariantCheckQueue_invariant(q);
1247 R1 = StgAtomicInvariant_code(next_invariant);
1252 /* We've got no more invariants to check, try to commit */
1253 (valid) = foreign "C" stmCommitTransaction(MyCapability() "ptr", trec "ptr") [];
1255 /* Transaction was valid: commit succeeded */
1256 StgTSO_trec(CurrentTSO) = NO_TREC;
1257 Sp = Sp + SIZEOF_StgAtomicallyFrame;
1258 jump %ENTRY_CODE(Sp(SP_OFF));
1260 /* Transaction was not valid: try again */
1261 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
1262 StgTSO_trec(CurrentTSO) = trec;
1263 StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
1264 R1 = StgAtomicallyFrame_code(frame);
1270 INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
1271 #if defined(PROFILING)
1272 W_ unused1, W_ unused2,
1274 P_ unused3, P_ unused4)
1276 W_ frame, trec, valid;
1280 /* The TSO is currently waiting: should we stop waiting? */
1281 (valid) = foreign "C" stmReWait(MyCapability() "ptr", CurrentTSO "ptr") [];
1283 /* Previous attempt is still valid: no point trying again yet */
1284 jump stg_block_noregs;
1286 /* Previous attempt is no longer valid: try again */
1287 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
1288 StgTSO_trec(CurrentTSO) = trec;
1289 StgHeader_info(frame) = stg_atomically_frame_info;
1290 R1 = StgAtomicallyFrame_code(frame);
1295 // STM catch frame --------------------------------------------------------------
1299 /* Catch frames are very similar to update frames, but when entering
1300 * one we just pop the frame off the stack and perform the correct
1301 * kind of return to the activation record underneath us on the stack.
1304 INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
1305 #if defined(PROFILING)
1306 W_ unused1, W_ unused2,
1308 P_ unused3, P_ unused4)
1310 W_ r, frame, trec, outer;
1312 trec = StgTSO_trec(CurrentTSO);
1313 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
1314 (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
1316 /* Commit succeeded */
1317 StgTSO_trec(CurrentTSO) = outer;
1318 Sp = Sp + SIZEOF_StgCatchSTMFrame;
1323 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
1324 StgTSO_trec(CurrentTSO) = new_trec;
1325 R1 = StgCatchSTMFrame_code(frame);
1331 // Primop definition ------------------------------------------------------------
1339 // stmStartTransaction may allocate
1340 MAYBE_GC (R1_PTR, atomicallyzh_fast);
1342 /* Args: R1 = m :: STM a */
1343 STK_CHK_GEN(SIZEOF_StgAtomicallyFrame + WDS(1), R1_PTR, atomicallyzh_fast);
1345 old_trec = StgTSO_trec(CurrentTSO);
1347 /* Nested transactions are not allowed; raise an exception */
1348 if (old_trec != NO_TREC) {
1349 R1 = base_ControlziExceptionziBase_nestedAtomically_closure;
1353 /* Set up the atomically frame */
1354 Sp = Sp - SIZEOF_StgAtomicallyFrame;
1357 SET_HDR(frame,stg_atomically_frame_info, W_[CCCS]);
1358 StgAtomicallyFrame_code(frame) = R1;
1359 StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
1361   /* Start the memory transaction */
1362 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", old_trec "ptr") [R1];
1363 StgTSO_trec(CurrentTSO) = new_trec;
1365 /* Apply R1 to the realworld token */
1374 /* Args: R1 :: STM a */
1375 /* Args: R2 :: Exception -> STM a */
1376 STK_CHK_GEN(SIZEOF_StgCatchSTMFrame + WDS(1), R1_PTR & R2_PTR, catchSTMzh_fast);
1378 /* Set up the catch frame */
1379 Sp = Sp - SIZEOF_StgCatchSTMFrame;
1382 SET_HDR(frame, stg_catch_stm_frame_info, W_[CCCS]);
1383 StgCatchSTMFrame_handler(frame) = R2;
1384 StgCatchSTMFrame_code(frame) = R1;
1386 /* Start a nested transaction to run the body of the try block in */
1389 cur_trec = StgTSO_trec(CurrentTSO);
1390 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", cur_trec "ptr");
1391 StgTSO_trec(CurrentTSO) = new_trec;
1393 /* Apply R1 to the realworld token */
1404 // stmStartTransaction may allocate
1405 MAYBE_GC (R1_PTR & R2_PTR, catchRetryzh_fast);
1407 /* Args: R1 :: STM a */
1408 /* Args: R2 :: STM a */
1409 STK_CHK_GEN(SIZEOF_StgCatchRetryFrame + WDS(1), R1_PTR & R2_PTR, catchRetryzh_fast);
1411 /* Start a nested transaction within which to run the first code */
1412 trec = StgTSO_trec(CurrentTSO);
1413 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [R1,R2];
1414 StgTSO_trec(CurrentTSO) = new_trec;
1416 /* Set up the catch-retry frame */
1417 Sp = Sp - SIZEOF_StgCatchRetryFrame;
1420 SET_HDR(frame, stg_catch_retry_frame_info, W_[CCCS]);
1421 StgCatchRetryFrame_running_alt_code(frame) = 0 :: CInt; // false;
1422 StgCatchRetryFrame_first_code(frame) = R1;
1423 StgCatchRetryFrame_alt_code(frame) = R2;
1425 /* Apply R1 to the realworld token */
1438 MAYBE_GC (NO_PTRS, retryzh_fast); // STM operations may allocate
1440 // Find the enclosing ATOMICALLY_FRAME or CATCH_RETRY_FRAME
1442 StgTSO_sp(CurrentTSO) = Sp;
1443 (frame_type) = foreign "C" findRetryFrameHelper(CurrentTSO "ptr") [];
1444 Sp = StgTSO_sp(CurrentTSO);
1446 trec = StgTSO_trec(CurrentTSO);
1447 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
1449 if (frame_type == CATCH_RETRY_FRAME) {
1450 // The retry reaches a CATCH_RETRY_FRAME before the atomic frame
1451 ASSERT(outer != NO_TREC);
1452 // Abort the transaction attempting the current branch
1453 foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
1454 foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
1455     if (StgCatchRetryFrame_running_alt_code(frame) == 0::I32) {
1456 // Retry in the first branch: try the alternative
1457 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
1458 StgTSO_trec(CurrentTSO) = trec;
1459 StgCatchRetryFrame_running_alt_code(frame) = 1 :: CInt; // true;
1460 R1 = StgCatchRetryFrame_alt_code(frame);
1463 // Retry in the alternative code: propagate the retry
1464 StgTSO_trec(CurrentTSO) = outer;
1465 Sp = Sp + SIZEOF_StgCatchRetryFrame;
1466 goto retry_pop_stack;
1470 // We've reached the ATOMICALLY_FRAME: attempt to wait
1471 ASSERT(frame_type == ATOMICALLY_FRAME);
1472 if (outer != NO_TREC) {
1473 // We called retry while checking invariants, so abort the current
1474     // invariant check (merging its TVar accesses into the parent's read
1475 // set so we'll wait on them)
1476 foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
1477 foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
1479 StgTSO_trec(CurrentTSO) = trec;
1480 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
1482 ASSERT(outer == NO_TREC);
1484 (r) = foreign "C" stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr") [];
1486 // Transaction was valid: stmWait put us on the TVars' queues, we now block
1487 StgHeader_info(frame) = stg_atomically_waiting_frame_info;
1489 // Fix up the stack in the unregisterised case: the return convention is different.
1490 R3 = trec; // passing to stmWaitUnblock()
1491 jump stg_block_stmwait;
1493 // Transaction was not valid: retry immediately
1494 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
1495 StgTSO_trec(CurrentTSO) = trec;
1496 R1 = StgAtomicallyFrame_code(frame);
1507 /* Args: R1 = invariant closure */
1508 MAYBE_GC (R1_PTR, checkzh_fast);
1510 trec = StgTSO_trec(CurrentTSO);
1512 foreign "C" stmAddInvariantToCheck(MyCapability() "ptr",
1516 jump %ENTRY_CODE(Sp(0));
1525 /* Args: R1 = initialisation value */
1527 MAYBE_GC (R1_PTR, newTVarzh_fast);
1529 ("ptr" tv) = foreign "C" stmNewTVar(MyCapability() "ptr", new_value "ptr") [];
1540 /* Args: R1 = TVar closure */
1542 MAYBE_GC (R1_PTR, readTVarzh_fast); // Call to stmReadTVar may allocate
1543 trec = StgTSO_trec(CurrentTSO);
1545 ("ptr" result) = foreign "C" stmReadTVar(MyCapability() "ptr", trec "ptr", tvar "ptr") [];
1555 result = StgTVar_current_value(R1);
1556 if (%INFO_PTR(result) == stg_TREC_HEADER_info) {
1568 /* Args: R1 = TVar closure */
1569 /* R2 = New value */
1571 MAYBE_GC (R1_PTR & R2_PTR, writeTVarzh_fast); // Call to stmWriteTVar may allocate
1572 trec = StgTSO_trec(CurrentTSO);
1575 foreign "C" stmWriteTVar(MyCapability() "ptr", trec "ptr", tvar "ptr", new_value "ptr") [];
1577 jump %ENTRY_CODE(Sp(0));
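/* A Haskell-level sketch of how these TVar primops combine with the frames
 * above (illustrative): atomically runs a transaction, retry blocks until a
 * TVar read by the transaction changes, and orElse is what pushes a
 * catch-retry frame.
 *
 *     import Control.Concurrent.STM
 *
 *     takeSlot :: TVar Int -> STM Int
 *     takeSlot tv = do
 *       n <- readTVar tv
 *       if n <= 0 then retry                       -- wait for a free slot
 *                 else do writeTVar tv (n - 1); return n
 *
 *     takeEither :: TVar Int -> TVar Int -> IO Int
 *     takeEither a b = atomically (takeSlot a `orElse` takeSlot b)
 */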
1581 /* -----------------------------------------------------------------------------
1584 * take & putMVar work as follows. Firstly, an important invariant:
1586 * If the MVar is full, then the blocking queue contains only
1587 * threads blocked on putMVar, and if the MVar is empty then the
1588 * blocking queue contains only threads blocked on takeMVar.
1591 * MVar empty : then add ourselves to the blocking queue
1592 * MVar full : remove the value from the MVar, and
1593 * blocking queue empty : return
1594 * blocking queue non-empty : perform the first blocked putMVar
1595 * from the queue, and wake up the
1596 * thread (MVar is now full again)
1598 * putMVar is just the dual of the above algorithm.
1600 * How do we "perform a putMVar"? Well, we have to fiddle around with
1601 * the stack of the thread waiting to do the putMVar. See
1602 * stg_block_putmvar and stg_block_takemvar in HeapStackCheck.c for
1603 * the stack layout, and the PerformPut and PerformTake macros below.
1605 * It is important that a blocked take or put is woken up with the
1606 * take/put already performed, because otherwise there would be a
1607 * small window of vulnerability where the thread could receive an
1608 * exception and never perform its take or put, and we'd end up with a
1611 * -------------------------------------------------------------------------- */
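/* The library-level counterpart of the algorithm described above
 * (illustrative): takeMVar blocks while the MVar is empty, putMVar blocks
 * while it is full, and a blocked taker is handed the value directly.
 *
 *     import Control.Concurrent (forkIO, threadDelay)
 *     import Control.Concurrent.MVar (newEmptyMVar, takeMVar, putMVar)
 *
 *     handOff :: IO Int
 *     handOff = do
 *       box <- newEmptyMVar
 *       _ <- forkIO (threadDelay 1000 >> putMVar box 42)
 *       takeMVar box   -- blocks until the forked thread performs its put
 */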
1615 /* args: R1 = MVar closure */
1617 if (StgMVar_value(R1) == stg_END_TSO_QUEUE_closure) {
1629 ALLOC_PRIM ( SIZEOF_StgMVar, NO_PTRS, newMVarzh_fast );
1631 mvar = Hp - SIZEOF_StgMVar + WDS(1);
1632 SET_HDR(mvar,stg_MVAR_DIRTY_info,W_[CCCS]);
1633 // MVARs start dirty: generation 0 has no mutable list
1634 StgMVar_head(mvar) = stg_END_TSO_QUEUE_closure;
1635 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1636 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1641 #define PerformTake(tso, value) \
1642 W_[StgTSO_sp(tso) + WDS(1)] = value; \
1643 W_[StgTSO_sp(tso) + WDS(0)] = stg_gc_unpt_r1_info;
1645 #define PerformPut(tso,lval) \
1646 StgTSO_sp(tso) = StgTSO_sp(tso) + WDS(3); \
1647 lval = W_[StgTSO_sp(tso) - WDS(1)];
1651 W_ mvar, val, info, tso;
1653 /* args: R1 = MVar closure */
1656 #if defined(THREADED_RTS)
1657 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
1659 info = GET_INFO(mvar);
1662 if (info == stg_MVAR_CLEAN_info) {
1663 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr") [];
1666 /* If the MVar is empty, put ourselves on its blocking queue,
1667 * and wait until we're woken up.
1669 if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
1670 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1671 StgMVar_head(mvar) = CurrentTSO;
1673 foreign "C" setTSOLink(MyCapability() "ptr",
1674 StgMVar_tail(mvar) "ptr",
1677 StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
1678 StgTSO_block_info(CurrentTSO) = mvar;
1679 // write barrier for throwTo(), which looks at block_info
1680 // if why_blocked==BlockedOnMVar.
1681 prim %write_barrier() [];
1682 StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
1683 StgMVar_tail(mvar) = CurrentTSO;
1686 jump stg_block_takemvar;
1689 /* we got the value... */
1690 val = StgMVar_value(mvar);
1692 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure)
1694 /* There are putMVar(s) waiting...
1695 * wake up the first thread on the queue
1697 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1699 /* actually perform the putMVar for the thread that we just woke up */
1700 tso = StgMVar_head(mvar);
1701 PerformPut(tso,StgMVar_value(mvar));
1703 if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
1704 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1707 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1708 StgMVar_head(mvar) "ptr", 1) [];
1709 StgMVar_head(mvar) = tso;
1711 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1712 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1715 #if defined(THREADED_RTS)
1716 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1718 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1724 /* No further putMVars, MVar is now empty */
1725 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1727 #if defined(THREADED_RTS)
1728 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1730 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1740 W_ mvar, val, info, tso;
1742 /* args: R1 = MVar closure */
1746 #if defined(THREADED_RTS)
1747 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
1749 info = GET_INFO(mvar);
1752 if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
1753 #if defined(THREADED_RTS)
1754 unlockClosure(mvar, info);
1756 /* HACK: we need a pointer to pass back,
1757 * so we abuse NO_FINALIZER_closure
1759 RET_NP(0, stg_NO_FINALIZER_closure);
1762 if (info == stg_MVAR_CLEAN_info) {
1763 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
1766 /* we got the value... */
1767 val = StgMVar_value(mvar);
1769 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
1771 /* There are putMVar(s) waiting...
1772 * wake up the first thread on the queue
1774 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1776 /* actually perform the putMVar for the thread that we just woke up */
1777 tso = StgMVar_head(mvar);
1778 PerformPut(tso,StgMVar_value(mvar));
1779 if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
1780 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1783 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1784 StgMVar_head(mvar) "ptr", 1) [];
1785 StgMVar_head(mvar) = tso;
1787 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1788 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1790 #if defined(THREADED_RTS)
1791 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1793 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1798 /* No further putMVars, MVar is now empty */
1799 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1800 #if defined(THREADED_RTS)
1801 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1803 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1813 W_ mvar, val, info, tso;
1815 /* args: R1 = MVar, R2 = value */
1819 #if defined(THREADED_RTS)
1820 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
1822 info = GET_INFO(mvar);
1825 if (info == stg_MVAR_CLEAN_info) {
1826 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
1829 if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
1830 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1831 StgMVar_head(mvar) = CurrentTSO;
1833 foreign "C" setTSOLink(MyCapability() "ptr",
1834 StgMVar_tail(mvar) "ptr",
1837 StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
1838 StgTSO_block_info(CurrentTSO) = mvar;
1839 // write barrier for throwTo(), which looks at block_info
1840 // if why_blocked==BlockedOnMVar.
1841 prim %write_barrier() [];
1842 StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
1843 StgMVar_tail(mvar) = CurrentTSO;
1847 jump stg_block_putmvar;
1850 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
1852 /* There are takeMVar(s) waiting: wake up the first one
1854 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1856 /* actually perform the takeMVar */
1857 tso = StgMVar_head(mvar);
1858 PerformTake(tso, val);
1859 if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
1860 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1863 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1864 StgMVar_head(mvar) "ptr", 1) [];
1865 StgMVar_head(mvar) = tso;
1867 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1868 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1871 #if defined(THREADED_RTS)
1872 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1874 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1876 jump %ENTRY_CODE(Sp(0));
1880 /* No further takes, the MVar is now full. */
1881 StgMVar_value(mvar) = val;
1883 #if defined(THREADED_RTS)
1884 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1886 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1888 jump %ENTRY_CODE(Sp(0));
1891 /* ToDo: yield afterward for better communication performance? */
1899 /* args: R1 = MVar, R2 = value */
1902 #if defined(THREADED_RTS)
1903 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [R2];
1905 info = GET_INFO(mvar);
1908 if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
1909 #if defined(THREADED_RTS)
1910 unlockClosure(mvar, info);
1915 if (info == stg_MVAR_CLEAN_info) {
1916 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
1919 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
1921 /* There are takeMVar(s) waiting: wake up the first one
1923 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1925 /* actually perform the takeMVar */
1926 tso = StgMVar_head(mvar);
1927 PerformTake(tso, R2);
1928 if (TO_W_(StgTSO_flags(tso)) & TSO_DIRTY == 0) {
1929 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1932 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1933 StgMVar_head(mvar) "ptr", 1) [];
1934 StgMVar_head(mvar) = tso;
1936 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1937 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1940 #if defined(THREADED_RTS)
1941 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1943 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1948 /* No further takes, the MVar is now full. */
1949 StgMVar_value(mvar) = R2;
1951 #if defined(THREADED_RTS)
1952 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1954 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1959 /* ToDo: yield afterward for better communication performance? */
1963 /* -----------------------------------------------------------------------------
1964 Stable pointer primitives
1965 ------------------------------------------------------------------------- */
1967 makeStableNamezh_fast
1971 ALLOC_PRIM( SIZEOF_StgStableName, R1_PTR, makeStableNamezh_fast );
1973 (index) = foreign "C" lookupStableName(R1 "ptr") [];
1975 /* Is there already a StableName for this heap object?
1976 * stable_ptr_table is a pointer to an array of snEntry structs.
1978 if ( snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) == NULL ) {
1979 sn_obj = Hp - SIZEOF_StgStableName + WDS(1);
1980 SET_HDR(sn_obj, stg_STABLE_NAME_info, W_[CCCS]);
1981 StgStableName_sn(sn_obj) = index;
1982 snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) = sn_obj;
1984 sn_obj = snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry);
1991 makeStablePtrzh_fast
1995 MAYBE_GC(R1_PTR, makeStablePtrzh_fast);
1996 ("ptr" sp) = foreign "C" getStablePtr(R1 "ptr") [];
2000 deRefStablePtrzh_fast
2002 /* Args: R1 = the stable ptr */
2005 r = snEntry_addr(W_[stable_ptr_table] + sp*SIZEOF_snEntry);
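/* A Haskell-level sketch of the stable-pointer/stable-name interface these
 * primops implement (illustrative): a StablePtr survives GC and can be handed
 * to foreign code, while a StableName gives a hashable identity for a value.
 *
 *     import Foreign.StablePtr (newStablePtr, deRefStablePtr, freeStablePtr)
 *     import System.Mem.StableName (makeStableName, hashStableName)
 *
 *     roundTrip :: IO ()
 *     roundTrip = do
 *       sp <- newStablePtr "hello"
 *       s  <- deRefStablePtr sp    -- "hello", however much GC ran in between
 *       freeStablePtr sp
 *       sn <- makeStableName s
 *       print (hashStableName sn)
 */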
2009 /* -----------------------------------------------------------------------------
2010 Bytecode object primitives
2011 ------------------------------------------------------------------------- */
2021 W_ bco, bitmap_arr, bytes, words;
2025 words = BYTES_TO_WDS(SIZEOF_StgBCO) + StgArrWords_words(bitmap_arr);
2028 ALLOC_PRIM( bytes, R1_PTR&R2_PTR&R3_PTR&R5_PTR, newBCOzh_fast );
2030 bco = Hp - bytes + WDS(1);
2031 SET_HDR(bco, stg_BCO_info, W_[CCCS]);
2033 StgBCO_instrs(bco) = R1;
2034 StgBCO_literals(bco) = R2;
2035 StgBCO_ptrs(bco) = R3;
2036 StgBCO_arity(bco) = HALF_W_(R4);
2037 StgBCO_size(bco) = HALF_W_(words);
2039 // Copy the arity/bitmap info into the BCO
2043 if (i < StgArrWords_words(bitmap_arr)) {
2044 StgBCO_bitmap(bco,i) = StgArrWords_payload(bitmap_arr,i);
2055 // R1 = the BCO# for the AP
2059 // This function is *only* used to wrap zero-arity BCOs in an
2060 // updatable wrapper (see ByteCodeLink.lhs). An AP thunk is always
2061 // saturated and always points directly to a FUN or BCO.
2062 ASSERT(%INFO_TYPE(%GET_STD_INFO(R1)) == HALF_W_(BCO) &&
2063 StgBCO_arity(R1) == HALF_W_(0));
2065 HP_CHK_GEN_TICKY(SIZEOF_StgAP, R1_PTR, mkApUpd0zh_fast);
2066 TICK_ALLOC_UP_THK(0, 0);
2067 CCCS_ALLOC(SIZEOF_StgAP);
2069 ap = Hp - SIZEOF_StgAP + WDS(1);
2070 SET_HDR(ap, stg_AP_info, W_[CCCS]);
2072 StgAP_n_args(ap) = HALF_W_(0);
2078 unpackClosurezh_fast
2080 /* args: R1 = closure to analyze */
2081 // TODO: Consider the absence of ptrs or nonptrs as a special case ?
2083 W_ info, ptrs, nptrs, p, ptrs_arr, nptrs_arr;
2084 info = %GET_STD_INFO(UNTAG(R1));
2086 // Some closures have non-standard layout, so we omit those here.
2088 type = TO_W_(%INFO_TYPE(info));
2089 switch [0 .. N_CLOSURE_TYPES] type {
2090 case THUNK_SELECTOR : {
2095 case THUNK, THUNK_1_0, THUNK_0_1, THUNK_2_0, THUNK_1_1,
2096 THUNK_0_2, THUNK_STATIC, AP, PAP, AP_STACK, BCO : {
2102 ptrs = TO_W_(%INFO_PTRS(info));
2103 nptrs = TO_W_(%INFO_NPTRS(info));
2108 W_ ptrs_arr_sz, nptrs_arr_sz;
2109 nptrs_arr_sz = SIZEOF_StgArrWords + WDS(nptrs);
2110 ptrs_arr_sz = SIZEOF_StgMutArrPtrs + WDS(ptrs);
2112 ALLOC_PRIM (ptrs_arr_sz + nptrs_arr_sz, R1_PTR, unpackClosurezh_fast);
2117 ptrs_arr = Hp - nptrs_arr_sz - ptrs_arr_sz + WDS(1);
2118 nptrs_arr = Hp - nptrs_arr_sz + WDS(1);
2120 SET_HDR(ptrs_arr, stg_MUT_ARR_PTRS_FROZEN_info, W_[CCCS]);
2121 StgMutArrPtrs_ptrs(ptrs_arr) = ptrs;
2125 W_[ptrs_arr + SIZEOF_StgMutArrPtrs + WDS(p)] = StgClosure_payload(clos,p);
2130 SET_HDR(nptrs_arr, stg_ARR_WORDS_info, W_[CCCS]);
2131 StgArrWords_words(nptrs_arr) = nptrs;
2135 W_[BYTE_ARR_CTS(nptrs_arr) + WDS(p)] = StgClosure_payload(clos, p+ptrs);
2139 RET_NPP(info, ptrs_arr, nptrs_arr);
2142 /* -----------------------------------------------------------------------------
2143 Thread I/O blocking primitives
2144 -------------------------------------------------------------------------- */
2146 /* Add a thread to the end of the blocked queue. (C-- version of the C
2147 * macro in Schedule.h).
2149 #define APPEND_TO_BLOCKED_QUEUE(tso) \
2150 ASSERT(StgTSO__link(tso) == END_TSO_QUEUE); \
2151 if (W_[blocked_queue_hd] == END_TSO_QUEUE) { \
2152 W_[blocked_queue_hd] = tso; \
2154 foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl] "ptr", tso) []; \
2156 W_[blocked_queue_tl] = tso;
2162 foreign "C" barf("waitRead# on threaded RTS") never returns;
2165 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2166 StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
2167 StgTSO_block_info(CurrentTSO) = R1;
2168 // No locking - we're not going to use this interface in the
2169 // threaded RTS anyway.
2170 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2171 jump stg_block_noregs;
2179 foreign "C" barf("waitWrite# on threaded RTS") never returns;
2182 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2183 StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
2184 StgTSO_block_info(CurrentTSO) = R1;
2185 // No locking - we're not going to use this interface in the
2186 // threaded RTS anyway.
2187 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2188 jump stg_block_noregs;
2193 STRING(stg_delayzh_malloc_str, "delayzh_fast")
2196 #ifdef mingw32_HOST_OS
2204 foreign "C" barf("delay# on threaded RTS") never returns;
2207 /* args: R1 (microsecond delay amount) */
2208 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2209 StgTSO_why_blocked(CurrentTSO) = BlockedOnDelay::I16;
2211 #ifdef mingw32_HOST_OS
2213 /* could probably allocate this on the heap instead */
2214 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
2215 stg_delayzh_malloc_str);
2216 (reqID) = foreign "C" addDelayRequest(R1);
2217 StgAsyncIOResult_reqID(ares) = reqID;
2218 StgAsyncIOResult_len(ares) = 0;
2219 StgAsyncIOResult_errCode(ares) = 0;
2220 StgTSO_block_info(CurrentTSO) = ares;
2222 /* Having all async-blocked threads reside on the blocked_queue
2223  * simplifies matters, so change the status to OnDoProc and put the
2224 * delayed thread on the blocked_queue.
2226 StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
2227 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2228 jump stg_block_async_void;
2234 (time) = foreign "C" getourtimeofday() [R1];
2235 divisor = TO_W_(RtsFlags_MiscFlags_tickInterval(RtsFlags));
2239 divisor = divisor * 1000;
2240 target = ((R1 + divisor - 1) / divisor) /* divide rounding up */
2241 + time + 1; /* Add 1 as getourtimeofday rounds down */
2242 StgTSO_block_info(CurrentTSO) = target;
2244 /* Insert the new thread in the sleeping queue. */
2246 t = W_[sleeping_queue];
2248 if (t != END_TSO_QUEUE && StgTSO_block_info(t) < target) {
2250 t = StgTSO__link(t);
2254 StgTSO__link(CurrentTSO) = t;
2256 W_[sleeping_queue] = CurrentTSO;
2258 foreign "C" setTSOLink(MyCapability() "ptr", prev "ptr", CurrentTSO) [];
2260 jump stg_block_noregs;
2262 #endif /* !THREADED_RTS */
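/* The library-level entry point for the code above (illustrative): on the
 * non-threaded RTS, threadDelay goes through delayzh_fast and the sleeping
 * queue; on the threaded RTS it is implemented outside this file.
 *
 *     import Control.Concurrent (threadDelay)
 *
 *     pause :: IO ()
 *     pause = threadDelay 500000   -- argument is in microseconds: ~0.5s
 */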
2266 #ifdef mingw32_HOST_OS
2267 STRING(stg_asyncReadzh_malloc_str, "asyncReadzh_fast")
2274 foreign "C" barf("asyncRead# on threaded RTS") never returns;
2277 /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
2278 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2279 StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
2281 /* could probably allocate this on the heap instead */
2282 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
2283 stg_asyncReadzh_malloc_str)
2285 (reqID) = foreign "C" addIORequest(R1, 0/*FALSE*/,R2,R3,R4 "ptr") [];
2286 StgAsyncIOResult_reqID(ares) = reqID;
2287 StgAsyncIOResult_len(ares) = 0;
2288 StgAsyncIOResult_errCode(ares) = 0;
2289 StgTSO_block_info(CurrentTSO) = ares;
2290 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2291 jump stg_block_async;
2295 STRING(stg_asyncWritezh_malloc_str, "asyncWritezh_fast")
2302 foreign "C" barf("asyncWrite# on threaded RTS") never returns;
2305 /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
2306 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2307 StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
2309 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
2310 stg_asyncWritezh_malloc_str)
2312 (reqID) = foreign "C" addIORequest(R1, 1/*TRUE*/,R2,R3,R4 "ptr") [];
2314 StgAsyncIOResult_reqID(ares) = reqID;
2315 StgAsyncIOResult_len(ares) = 0;
2316 StgAsyncIOResult_errCode(ares) = 0;
2317 StgTSO_block_info(CurrentTSO) = ares;
2318 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2319 jump stg_block_async;
2323 STRING(stg_asyncDoProczh_malloc_str, "asyncDoProczh_fast")
2330 foreign "C" barf("asyncDoProc# on threaded RTS") never returns;
2333 /* args: R1 = proc, R2 = param */
2334 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
2335 StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
2337 /* could probably allocate this on the heap instead */
2338 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
2339 stg_asyncDoProczh_malloc_str)
2341 (reqID) = foreign "C" addDoProcRequest(R1 "ptr",R2 "ptr") [];
2342 StgAsyncIOResult_reqID(ares) = reqID;
2343 StgAsyncIOResult_len(ares) = 0;
2344 StgAsyncIOResult_errCode(ares) = 0;
2345 StgTSO_block_info(CurrentTSO) = ares;
2346 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
2347 jump stg_block_async;
2352 // noDuplicate# tries to ensure that none of the thunks under
2353 // evaluation by the current thread are also under evaluation by
2354 // another thread. It relies on *both* threads doing noDuplicate#;
2355 // the second one will get blocked if they are duplicating some work.
2358 SAVE_THREAD_STATE();
2359 ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
2360 foreign "C" threadPaused (MyCapability() "ptr", CurrentTSO "ptr") [];
2362 if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
2363 jump stg_threadFinished;
2365 LOAD_THREAD_STATE();
2366 ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
2367 jump %ENTRY_CODE(Sp(0));
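/* A Haskell-level note on where noDuplicate# matters (illustrative;
 * unsafeDupablePerformIO lives in System.IO.Unsafe in recent base, GHC.IO in
 * older ones): unsafePerformIO calls noDuplicate# so the wrapped action is
 * not run twice by two threads evaluating the same thunk, whereas
 * unsafeDupablePerformIO skips that protection for speed.
 *
 *     import System.IO.Unsafe (unsafePerformIO, unsafeDupablePerformIO)
 *
 *     onceish, maybeTwice :: Int
 *     onceish    = unsafePerformIO        (putStrLn "effect" >> return 1)
 *     maybeTwice = unsafeDupablePerformIO (putStrLn "effect" >> return 1)
 */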
2371 getApStackValzh_fast
2373 W_ ap_stack, offset, val, ok;
2375 /* args: R1 = AP_STACK, R2 = offset */
2379 if (%INFO_PTR(ap_stack) == stg_AP_STACK_info) {
2381 val = StgAP_STACK_payload(ap_stack,offset);
2389 /* -----------------------------------------------------------------------------
2391 -------------------------------------------------------------------------- */
2393 // Write the cost center stack of the first argument on stderr; return
2394 // the second. Possibly only makes sense for already evaluated
2401 ccs = StgHeader_ccs(UNTAG(R1));
2402 foreign "C" fprintCCS_stderr(ccs "ptr") [R2];
2413 #ifndef THREADED_RTS
2414 RET_NP(0,ghczmprim_GHCziBool_False_closure);
2416 (spark) = foreign "C" findSpark(MyCapability());
2420 RET_NP(0,ghczmprim_GHCziBool_False_closure);