/* -----------------------------------------------------------------------------
 * (c) The GHC Team, 1998-2004
 *
 * Out-of-line primitive operations
 *
 * This file contains the implementations of all the primitive
 * operations ("primops") which are not expanded inline. See
 * ghc/compiler/prelude/primops.txt.pp for a list of all the primops;
 * this file contains code for most of those with the attribute
 * out_of_line = True.
 *
 * Entry convention: the entry convention for a primop is that all the
 * args are in Stg registers (R1, R2, etc.). This is to make writing
 * the primops easier. (see compiler/codeGen/CgCallConv.hs).
 *
 * Return convention: results from a primop are generally returned
 * using the ordinary unboxed tuple return convention. The C-- parser
 * implements the RET_xxxx() macros to perform unboxed-tuple returns
 * based on the prevailing return convention.
 *
 * This file is written in a subset of C--, extended with various
 * features specific to GHC. It is compiled by GHC directly. For the
 * syntax of .cmm files, see the parser in ghc/compiler/cmm/CmmParse.y.
 *
 * ---------------------------------------------------------------------------*/
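
/* For illustration only: a hypothetical primop following the entry and
 * return conventions above would look something like this (a sketch,
 * not a real primop; the name stg_examplezh is made up):
 *
 *     stg_examplezh
 *     {
 *         W_ x, y;
 *         x = R1;            // first argument, per the entry convention
 *         y = R2;            // second argument
 *         RET_NN(x + y, x);  // unboxed 2-tuple of non-pointer results
 *     }
 */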

import pthread_mutex_lock;
import pthread_mutex_unlock;

import base_ControlziExceptionziBase_nestedAtomically_closure;
import EnterCriticalSection;
import LeaveCriticalSection;
import ghczmprim_GHCziBool_False_closure;
#if !defined(mingw32_HOST_OS)
import sm_mutex;
#endif

/*-----------------------------------------------------------------------------
  Array Primitives

  Basically just new*Array - the others are all inline macros.

  The size arg is always passed in R1, and the result is returned in R1.

  The slow entry point is for returning from a heap check; the saved
  size argument must be re-loaded from the stack.
  -------------------------------------------------------------------------- */

/* for objects that are *less* than the size of a word, make sure we
 * round up to the nearest word for the size of the array.
 */
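
/* Worked example (assuming a 64-bit machine, i.e. an 8-byte word):
 * ROUNDUP_BYTES_TO_WDS(5) == 1, so a 5-byte request gets one payload
 * word and words = BYTES_TO_WDS(SIZEOF_StgArrWords) + 1. Note that the
 * bytes field set below still records the exact request (5), not the
 * rounded-up size. */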

stg_newByteArrayzh
{
    W_ words, payload_words, n, p;
    MAYBE_GC(NO_PTRS,stg_newByteArrayzh);
    n = R1;
    payload_words = ROUNDUP_BYTES_TO_WDS(n);
    words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
    ("ptr" p) = foreign "C" allocate(MyCapability() "ptr",words) [];
    TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
    SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
    StgArrWords_bytes(p) = n;
    RET_P(p);
}

#define BA_ALIGN 16
#define BA_MASK  (BA_ALIGN-1)

stg_newPinnedByteArrayzh
{
    W_ words, n, bytes, payload_words, p;

    MAYBE_GC(NO_PTRS,stg_newPinnedByteArrayzh);
    n = R1;
    bytes = n;
    /* payload_words is what we will tell the profiler we had to allocate */
    payload_words = ROUNDUP_BYTES_TO_WDS(bytes);
    /* When we actually allocate memory, we need to allow space for the
       header: */
    bytes = bytes + SIZEOF_StgArrWords;
    /* And we want to align to BA_ALIGN bytes, so we need to allow space
       to shift up to BA_ALIGN - 1 bytes: */
    bytes = bytes + BA_ALIGN - 1;
    /* Now we convert to a number of words: */
    words = ROUNDUP_BYTES_TO_WDS(bytes);

    ("ptr" p) = foreign "C" allocatePinned(MyCapability() "ptr", words) [];
    TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);

    /* Now we need to move p forward so that the payload is aligned
       to BA_ALIGN bytes: */
    p = p + ((-p - SIZEOF_StgArrWords) & BA_MASK);
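
    /* Worked example of the alignment bump (illustrative values only:
     * assume a 64-bit machine where SIZEOF_StgArrWords is 16 bytes and
     * allocatePinned returned p = 0x1008). We need the *payload*, at
     * p + SIZEOF_StgArrWords, to be 16-byte aligned:
     * (-0x1008 - 16) & 15 == 8, so p moves to 0x1010 and the payload
     * starts at the aligned address 0x1020. The BA_ALIGN - 1 slack
     * bytes reserved above guarantee this shift stays inside the
     * allocation. */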

    SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
    StgArrWords_bytes(p) = n;
    RET_P(p);
}

stg_newAlignedPinnedByteArrayzh
{
    W_ words, n, bytes, payload_words, p, alignment;

    MAYBE_GC(NO_PTRS,stg_newAlignedPinnedByteArrayzh);
    n = R1;
    bytes = n;
    alignment = R2;

    /* payload_words is what we will tell the profiler we had to allocate */
    payload_words = ROUNDUP_BYTES_TO_WDS(bytes);

    /* When we actually allocate memory, we need to allow space for the
       header: */
    bytes = bytes + SIZEOF_StgArrWords;
    /* And we want to align to <alignment> bytes, so we need to allow space
       to shift up to <alignment - 1> bytes: */
    bytes = bytes + alignment - 1;
    /* Now we convert to a number of words: */
    words = ROUNDUP_BYTES_TO_WDS(bytes);

    ("ptr" p) = foreign "C" allocatePinned(MyCapability() "ptr", words) [];
    TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);

    /* Now we need to move p forward so that the payload is aligned
       to <alignment> bytes. Note that we are assuming that
       <alignment> is a power of 2, which is technically not guaranteed */
    p = p + ((-p - SIZEOF_StgArrWords) & (alignment - 1));
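
    /* This is the same mask trick as in stg_newPinnedByteArrayzh: for a
     * power-of-2 alignment a, x & (a - 1) == x mod a, so the addend is
     * exactly the distance from (p + SIZEOF_StgArrWords) up to the next
     * multiple of a (and 0 if it is already aligned). For a
     * non-power-of-2 alignment the mask would compute garbage, hence
     * the caveat above. */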

    SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
    StgArrWords_bytes(p) = n;
    RET_P(p);
}

stg_newArrayzh
{
    W_ words, n, init, arr, p, size;
    /* Args: R1 = words, R2 = initialisation value */

    n = R1;
    MAYBE_GC(R2_PTR,stg_newArrayzh);

    // the mark area contains one byte for each 2^MUT_ARR_PTRS_CARD_BITS words
    // in the array, making sure we round up, and then rounding up to a whole
    // number of words.
    size = n + mutArrPtrsCardWords(n);
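    // Worked example (with the illustrative assumption that
    // MUT_ARR_PTRS_CARD_BITS is 7, i.e. one card byte per 128 words, on
    // a 64-bit machine): for n = 300 elements we need ceil(300/128) = 3
    // card bytes, which round up to 1 whole word, so size = 300 + 1 =
    // 301 payload words plus the StgMutArrPtrs header.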
    words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + size;
    ("ptr" arr) = foreign "C" allocate(MyCapability() "ptr",words) [R2];
    TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(n), 0);

    SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, W_[CCCS]);
    StgMutArrPtrs_ptrs(arr) = n;
    StgMutArrPtrs_size(arr) = size;

    // Initialise all elements of the array with the value in R2
    init = R2;
    p = arr + SIZEOF_StgMutArrPtrs;
  for:
    if (p < arr + SIZEOF_StgMutArrPtrs + WDS(n)) {
        W_[p] = init; p = p + WDS(1); goto for;
    }
    // Initialise the mark bits with 0
  for2:
    if (p < arr + SIZEOF_StgMutArrPtrs + WDS(size)) {
        W_[p] = 0; p = p + WDS(1); goto for2;
    }

    RET_P(arr);
}

stg_unsafeThawArrayzh
{
    // SUBTLETY TO DO WITH THE OLD GEN MUTABLE LIST
    //
    // A MUT_ARR_PTRS lives on the mutable list, but a MUT_ARR_PTRS_FROZEN
    // normally doesn't. However, when we freeze a MUT_ARR_PTRS, we leave
    // it on the mutable list for the GC to remove (removing something from
    // the mutable list is not easy).
    //
    // So that we can tell whether a MUT_ARR_PTRS_FROZEN is on the mutable list,
    // when we freeze it we set the info ptr to be MUT_ARR_PTRS_FROZEN0
    // to indicate that it is still on the mutable list.
    //
    // So, when we thaw a MUT_ARR_PTRS_FROZEN, we must cope with two cases:
    // either it is on a mut_list, or it isn't. We adopt the convention that
    // the closure type is MUT_ARR_PTRS_FROZEN0 if it is on the mutable list,
    // and MUT_ARR_PTRS_FROZEN otherwise. In fact it wouldn't matter if
    // we put it on the mutable list more than once, but it would get scavenged
    // multiple times during GC, which would be unnecessarily slow.
    //
    if (StgHeader_info(R1) != stg_MUT_ARR_PTRS_FROZEN0_info) {
        SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
        recordMutable(R1, R1);
        // must be done after SET_INFO, because it ASSERTs closure_MUTABLE()
        RET_P(R1);
    } else {
        SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
        RET_P(R1);
    }
}

/* -----------------------------------------------------------------------------
   MutVar primitives
   -------------------------------------------------------------------------- */

stg_newMutVarzh
{
    W_ mv;
    /* Args: R1 = initialisation value */
    ALLOC_PRIM( SIZEOF_StgMutVar, R1_PTR, stg_newMutVarzh);

    mv = Hp - SIZEOF_StgMutVar + WDS(1);
    SET_HDR(mv,stg_MUT_VAR_DIRTY_info,W_[CCCS]);
    StgMutVar_var(mv) = R1;
    RET_P(mv);
}

stg_atomicModifyMutVarzh
{
    W_ mv, f, z, x, y, r, h;
    /* Args: R1 :: MutVar#, R2 :: a -> (a,b) */

    /* If x is the current contents of the MutVar#, then
       we want to make the new contents point to

             (sel_0)(f x)

       and the return value is

             (sel_1)(f x)

       obviously we can share (f x).

          z = [stg_ap_2 f x]  (max (HS + 2) MIN_UPD_SIZE)
          y = [stg_sel_0 z]   (max (HS + 1) MIN_UPD_SIZE)
          r = [stg_sel_1 z]   (max (HS + 1) MIN_UPD_SIZE)
    */
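
    /* Concretely (a descriptive sketch of the heap after the CAS below
     * succeeds, where x was the old contents): the MutVar points to the
     * selector thunk y = sel_0 (f x), the caller gets back
     * r = sel_1 (f x), and both select from the shared application
     * thunk z = f x, so f runs at most once no matter how many readers
     * force y and r. HS above is the thunk-header size in words, and
     * MIN_UPD_SIZE is the minimum size of an updatable closure. */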

#if MIN_UPD_SIZE > 1
#define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
#define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),WDS(MIN_UPD_SIZE-1))
#else
#define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(1))
#define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),0)
#endif

#if MIN_UPD_SIZE > 2
#define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
#define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),WDS(MIN_UPD_SIZE-2))
#else
#define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(2))
#define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),0)
#endif

#define SIZE (THUNK_2_SIZE + THUNK_1_SIZE + THUNK_1_SIZE)

    HP_CHK_GEN_TICKY(SIZE, R1_PTR & R2_PTR, stg_atomicModifyMutVarzh);

    mv = R1;
    f  = R2;

    TICK_ALLOC_THUNK_2();
    CCCS_ALLOC(THUNK_2_SIZE);
    z = Hp - THUNK_2_SIZE + WDS(1);
    SET_HDR(z, stg_ap_2_upd_info, W_[CCCS]);
    LDV_RECORD_CREATE(z);
    StgThunk_payload(z,0) = f;

    TICK_ALLOC_THUNK_1();
    CCCS_ALLOC(THUNK_1_SIZE);
    y = z - THUNK_1_SIZE;
    SET_HDR(y, stg_sel_0_upd_info, W_[CCCS]);
    LDV_RECORD_CREATE(y);
    StgThunk_payload(y,0) = z;

    TICK_ALLOC_THUNK_1();
    CCCS_ALLOC(THUNK_1_SIZE);
    r = y - THUNK_1_SIZE;
    SET_HDR(r, stg_sel_1_upd_info, W_[CCCS]);
    LDV_RECORD_CREATE(r);
    StgThunk_payload(r,0) = z;

  retry:
    x = StgMutVar_var(mv);
    StgThunk_payload(z,1) = x;
#ifdef THREADED_RTS
    (h) = foreign "C" cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var, x, y) [];
    if (h != x) { goto retry; }
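    // The usual lock-free read-modify-CAS loop: h is the value the
    // MutVar actually contained when cas() ran. If another thread wrote
    // to the MutVar between our read of x above and the cas(), h != x,
    // and we simply go round again with the fresh contents (z's payload
    // is overwritten on each iteration, so nothing is rebuilt).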
#else
    StgMutVar_var(mv) = y;
#endif

    if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
        foreign "C" dirty_MUT_VAR(BaseReg "ptr", mv "ptr") [];
    }

    RET_P(r);
}

/* -----------------------------------------------------------------------------
   Weak Pointer Primitives
   -------------------------------------------------------------------------- */

STRING(stg_weak_msg,"New weak pointer at %p\n")

stg_mkWeakzh
{
    /* R1 = key
       R2 = value
       R3 = finalizer (or NULL)
    */
    W_ w;

    if (R3 == NULL) {
        R3 = stg_NO_FINALIZER_closure;
    }

    ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR & R3_PTR, stg_mkWeakzh );

    w = Hp - SIZEOF_StgWeak + WDS(1);
    SET_HDR(w, stg_WEAK_info, W_[CCCS]);

    // We don't care about cfinalizer here.
    // Should StgWeak_cfinalizer(w) be stg_NO_FINALIZER_closure or
    // something else?

    StgWeak_key(w)        = R1;
    StgWeak_value(w)      = R2;
    StgWeak_finalizer(w)  = R3;
    StgWeak_cfinalizer(w) = stg_NO_FINALIZER_closure;

    ACQUIRE_LOCK(sm_mutex);
    StgWeak_link(w) = W_[weak_ptr_list];
    W_[weak_ptr_list] = w;
    RELEASE_LOCK(sm_mutex);

    IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);

    RET_P(w);
}

stg_mkWeakForeignEnvzh
{
    /* R1 = key
       R2 = value
       R3 = finalizer
       R4 = pointer
       R5 = has environment (0 or 1)
       R6 = environment
    */
    W_ w, payload_words, words, p;

    W_ key, val, fptr, ptr, flag, eptr;

    key  = R1;
    val  = R2;
    fptr = R3;
    ptr  = R4;
    flag = R5;
    eptr = R6;

    ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR, stg_mkWeakForeignEnvzh );

    w = Hp - SIZEOF_StgWeak + WDS(1);
    SET_HDR(w, stg_WEAK_info, W_[CCCS]);

    payload_words = 4;
    words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
    ("ptr" p) = foreign "C" allocate(MyCapability() "ptr", words) [];

    TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
    SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);

    StgArrWords_bytes(p)     = WDS(payload_words);
    StgArrWords_payload(p,0) = fptr;
    StgArrWords_payload(p,1) = ptr;
    StgArrWords_payload(p,2) = eptr;
    StgArrWords_payload(p,3) = flag;

    // We don't care about the value here.
    // Should StgWeak_value(w) be stg_NO_FINALIZER_closure or something else?

    StgWeak_key(w)        = key;
    StgWeak_value(w)      = val;
    StgWeak_finalizer(w)  = stg_NO_FINALIZER_closure;
    StgWeak_cfinalizer(w) = p;

    ACQUIRE_LOCK(sm_mutex);
    StgWeak_link(w)   = W_[weak_ptr_list];
    W_[weak_ptr_list] = w;
    RELEASE_LOCK(sm_mutex);

    IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);

    RET_P(w);
}

stg_finalizzeWeakzh
{
    /* R1 = weak ptr */
    W_ w, f, arr;
    w = R1;
    if (GET_INFO(w) == stg_DEAD_WEAK_info) {
        RET_NP(0,stg_NO_FINALIZER_closure);
    }

    // A weak pointer is inherently used, so we do not need to call
    // LDV_recordDead_FILL_SLOP_DYNAMIC():
    //    LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)w);
    // or, LDV_recordDead():
    //    LDV_recordDead((StgClosure *)w, sizeofW(StgWeak) - sizeofW(StgProfHeader));
    // Furthermore, when PROFILING is turned on, dead weak pointers are exactly as
    // large as weak pointers, so there is no need to fill the slop, either.
    // See stg_DEAD_WEAK_info in StgMiscClosures.hc.

    // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
    SET_INFO(w,stg_DEAD_WEAK_info);
    LDV_RECORD_CREATE(w);

    f   = StgWeak_finalizer(w);
    arr = StgWeak_cfinalizer(w);

    StgDeadWeak_link(w) = StgWeak_link(w);

    if (arr != stg_NO_FINALIZER_closure) {
        foreign "C" runCFinalizer(StgArrWords_payload(arr,0),
                                  StgArrWords_payload(arr,1),
                                  StgArrWords_payload(arr,2),
                                  StgArrWords_payload(arr,3)) [];
    }

    /* return the finalizer */
    if (f == stg_NO_FINALIZER_closure) {
        RET_NP(0,stg_NO_FINALIZER_closure);
    } else {
        RET_NP(1,f);
    }
}

stg_deRefWeakzh
{
    /* R1 = weak ptr */
    W_ w, code, val;

    w = R1;
    if (GET_INFO(w) == stg_WEAK_info) {
        code = 1;
        val  = StgWeak_value(w);
    } else {
        code = 0;
        val  = w;
    }
    RET_NP(code,val);
}

/* -----------------------------------------------------------------------------
   Floating point operations.
   -------------------------------------------------------------------------- */

stg_decodeFloatzuIntzh
{
    W_ p;
    F_ arg;
    W_ mp_tmp1;
    W_ mp_tmp_w;

    STK_CHK_GEN( WDS(2), NO_PTRS, stg_decodeFloatzuIntzh );

    mp_tmp1  = Sp - WDS(1);
    mp_tmp_w = Sp - WDS(2);
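
    // Descriptive note: the two scratch slots live just beyond the
    // current stack top (the stack grows downwards, so Sp - WDS(1) and
    // Sp - WDS(2) are free words); the STK_CHK_GEN above reserved
    // exactly this space. __decodeFloat_Int writes the mantissa and
    // exponent through these pointers, and we read them back for the
    // unboxed return below.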

    /* arguments: F1 = Float# */
    arg = F1;

    /* Perform the operation */
    foreign "C" __decodeFloat_Int(mp_tmp1 "ptr", mp_tmp_w "ptr", arg) [];

    /* returns: (Int# (mantissa), Int# (exponent)) */
    RET_NN(W_[mp_tmp1], W_[mp_tmp_w]);
}

stg_decodeDoublezu2Intzh
{
    D_ arg;
    W_ p;
    W_ mp_tmp1;
    W_ mp_tmp2;
    W_ mp_result1;
    W_ mp_result2;

    STK_CHK_GEN( WDS(4), NO_PTRS, stg_decodeDoublezu2Intzh );

    mp_tmp1    = Sp - WDS(1);
    mp_tmp2    = Sp - WDS(2);
    mp_result1 = Sp - WDS(3);
    mp_result2 = Sp - WDS(4);

    /* arguments: D1 = Double# */
    arg = D1;

    /* Perform the operation */
    foreign "C" __decodeDouble_2Int(mp_tmp1 "ptr", mp_tmp2 "ptr",
                                    mp_result1 "ptr", mp_result2 "ptr",
                                    arg) [];

    /* returns:
       (Int# (mant sign), Word# (mant high), Word# (mant low), Int# (expn)) */
    RET_NNNN(W_[mp_tmp1], W_[mp_tmp2], W_[mp_result1], W_[mp_result2]);
}

/* -----------------------------------------------------------------------------
 * Concurrency primitives
 * -------------------------------------------------------------------------- */

stg_forkzh
{
    /* args: R1 = closure to spark */

    MAYBE_GC(R1_PTR, stg_forkzh);

    W_ closure;
    W_ threadid;
    closure = R1;

    ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
                                RtsFlags_GcFlags_initialStkSize(RtsFlags),
                                closure "ptr") [];

    /* start blocked if the current thread is blocked */
    StgTSO_flags(threadid) = %lobits16(
        TO_W_(StgTSO_flags(threadid)) |
        TO_W_(StgTSO_flags(CurrentTSO)) & (TSO_BLOCKEX | TSO_INTERRUPTIBLE));
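
    // Descriptive note: the child inherits only the parent's
    // asynchronous-exception masking state, i.e. the TSO_BLOCKEX and
    // TSO_INTERRUPTIBLE bits; the word-sized bit twiddling is truncated
    // back to the 16-bit flags field by %lobits16().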

    foreign "C" scheduleThread(MyCapability() "ptr", threadid "ptr") [];

    // context switch soon, but not immediately: we don't want every
    // forkIO to force a context-switch.
    Capability_context_switch(MyCapability()) = 1 :: CInt;

    RET_P(threadid);
}

stg_forkOnzh
{
    /* args: R1 = cpu, R2 = closure to spark */

    MAYBE_GC(R2_PTR, stg_forkOnzh);

    W_ cpu;
    W_ closure;
    W_ threadid;
    cpu = R1;
    closure = R2;

    ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
                                RtsFlags_GcFlags_initialStkSize(RtsFlags),
                                closure "ptr") [];

    /* start blocked if the current thread is blocked */
    StgTSO_flags(threadid) = %lobits16(
        TO_W_(StgTSO_flags(threadid)) |
        TO_W_(StgTSO_flags(CurrentTSO)) & (TSO_BLOCKEX | TSO_INTERRUPTIBLE));

    foreign "C" scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr") [];

    // context switch soon, but not immediately: we don't want every
    // forkIO to force a context-switch.
    Capability_context_switch(MyCapability()) = 1 :: CInt;

    RET_P(threadid);
}

stg_yieldzh
{
    jump stg_yield_noregs;
}

stg_labelThreadzh
{
    /* args: R1 = ThreadId#, R2 = Addr# */
    foreign "C" labelThread(R1 "ptr", R2 "ptr") [];
    jump %ENTRY_CODE(Sp(0));
}

stg_isCurrentThreadBoundzh
{
    W_ r;
    (r) = foreign "C" isThreadBound(CurrentTSO) [];
    RET_N(r);
}

stg_threadStatuszh
{
    /* args: R1 :: ThreadId# */
    W_ tso, what_next, why_blocked, ret;

    tso = R1;
  loop:
    if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
        tso = StgTSO__link(tso);
        goto loop;
    }

    what_next   = TO_W_(StgTSO_what_next(tso));
    why_blocked = TO_W_(StgTSO_why_blocked(tso));
    // Note: these two reads are not atomic, so they might end up
    // being inconsistent. It doesn't matter, since we
    // only return one or the other. If we wanted to return the
    // contents of block_info too, then we'd have to do some synchronisation.

    if (what_next == ThreadComplete) {
        ret = 16;  // NB. magic, matches up with GHC.Conc.threadStatus
    } else {
        if (what_next == ThreadKilled) {
            ret = 17;
        } else {
            ret = why_blocked;
        }
    }
    RET_N(ret);
}

/* -----------------------------------------------------------------------------
 * STM primitives
 * -------------------------------------------------------------------------- */

// Catch retry frame ------------------------------------------------------------

INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
#if defined(PROFILING)
    W_ unused1, W_ unused2,
#endif
    W_ unused3, P_ unused4, P_ unused5)
{
    W_ r, frame, trec, outer;

    frame = Sp;
    trec = StgTSO_trec(CurrentTSO);
    outer = StgTRecHeader_enclosing_trec(trec);
    (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
    if (r != 0) {
        /* Succeeded (either first branch or second branch) */
        StgTSO_trec(CurrentTSO) = outer;
        Sp = Sp + SIZEOF_StgCatchRetryFrame;
        jump %ENTRY_CODE(Sp(SP_OFF));
    } else {
        /* Did not commit: re-execute */
        W_ new_trec;
        ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
        StgTSO_trec(CurrentTSO) = new_trec;
        if (StgCatchRetryFrame_running_alt_code(frame) != 0::I32) {
            R1 = StgCatchRetryFrame_alt_code(frame);
        } else {
            R1 = StgCatchRetryFrame_first_code(frame);
        }
        jump stg_ap_v_fast;
    }
}

// Atomically frame ------------------------------------------------------------

INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
#if defined(PROFILING)
    W_ unused1, W_ unused2,
#endif
    P_ code, P_ next_invariant_to_check, P_ result)
{
    W_ frame, trec, valid, next_invariant, q, outer;

    frame  = Sp;
    trec   = StgTSO_trec(CurrentTSO);
    result = R1;
    outer  = StgTRecHeader_enclosing_trec(trec);

    if (outer == NO_TREC) {
        /* First time back at the atomically frame -- pick up invariants */
        ("ptr" q) = foreign "C" stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr") [];
        StgAtomicallyFrame_next_invariant_to_check(frame) = q;
        StgAtomicallyFrame_result(frame) = result;
    } else {
        /* Second/subsequent time back at the atomically frame -- abort the
         * tx that's checking the invariant and move on to the next one */
        StgTSO_trec(CurrentTSO) = outer;
        q = StgAtomicallyFrame_next_invariant_to_check(frame);
        StgInvariantCheckQueue_my_execution(q) = trec;
        foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
        /* Don't free trec -- it's linked from q and will be stashed in the
         * invariant if we eventually commit. */
        q = StgInvariantCheckQueue_next_queue_entry(q);
        StgAtomicallyFrame_next_invariant_to_check(frame) = q;
        trec = outer;
    }

    q = StgAtomicallyFrame_next_invariant_to_check(frame);

    if (q != END_INVARIANT_CHECK_QUEUE) {
        /* We can't commit yet: another invariant to check */
        ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [];
        StgTSO_trec(CurrentTSO) = trec;

        next_invariant = StgInvariantCheckQueue_invariant(q);
        R1 = StgAtomicInvariant_code(next_invariant);
        jump stg_ap_v_fast;
    } else {
        /* We've got no more invariants to check, try to commit */
        (valid) = foreign "C" stmCommitTransaction(MyCapability() "ptr", trec "ptr") [];
        if (valid != 0) {
            /* Transaction was valid: commit succeeded */
            StgTSO_trec(CurrentTSO) = NO_TREC;
            R1 = StgAtomicallyFrame_result(frame);
            Sp = Sp + SIZEOF_StgAtomicallyFrame;
            jump %ENTRY_CODE(Sp(SP_OFF));
        } else {
            /* Transaction was not valid: try again */
            ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
            StgTSO_trec(CurrentTSO) = trec;
            StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
            R1 = StgAtomicallyFrame_code(frame);
            jump stg_ap_v_fast;
        }
    }
}

INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
#if defined(PROFILING)
    W_ unused1, W_ unused2,
#endif
    P_ code, P_ next_invariant_to_check, P_ result)
{
    W_ frame, trec, valid;

    frame = Sp;

    /* The TSO is currently waiting: should we stop waiting? */
    (valid) = foreign "C" stmReWait(MyCapability() "ptr", CurrentTSO "ptr") [];
    if (valid != 0) {
        /* Previous attempt is still valid: no point trying again yet */
        jump stg_block_noregs;
    } else {
        /* Previous attempt is no longer valid: try again */
        ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
        StgTSO_trec(CurrentTSO) = trec;
        StgHeader_info(frame) = stg_atomically_frame_info;
        R1 = StgAtomicallyFrame_code(frame);
        jump stg_ap_v_fast;
    }
}

// STM catch frame --------------------------------------------------------------

/* Catch frames are very similar to update frames, but when entering
 * one we just pop the frame off the stack and perform the correct
 * kind of return to the activation record underneath us on the stack.
 */

INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
#if defined(PROFILING)
    W_ unused1, W_ unused2,
#endif
    P_ unused3, P_ unused4)
{
    W_ r, frame, trec, outer;

    frame = Sp;
    trec = StgTSO_trec(CurrentTSO);
    outer = StgTRecHeader_enclosing_trec(trec);
    (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
    if (r != 0) {
        /* Commit succeeded */
        StgTSO_trec(CurrentTSO) = outer;
        Sp = Sp + SIZEOF_StgCatchSTMFrame;
        jump %ENTRY_CODE(Sp(SP_OFF));
    } else {
        /* Did not commit: re-execute */
        W_ new_trec;
        ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
        StgTSO_trec(CurrentTSO) = new_trec;
        R1 = StgCatchSTMFrame_code(frame);
        jump stg_ap_v_fast;
    }
}

// Primop definition ------------------------------------------------------------

stg_atomicallyzh
{
    W_ frame;
    W_ old_trec;
    W_ new_trec;

    // stmStartTransaction may allocate
    MAYBE_GC (R1_PTR, stg_atomicallyzh);

    /* Args: R1 = m :: STM a */
    STK_CHK_GEN(SIZEOF_StgAtomicallyFrame + WDS(1), R1_PTR, stg_atomicallyzh);

    old_trec = StgTSO_trec(CurrentTSO);

    /* Nested transactions are not allowed; raise an exception */
    if (old_trec != NO_TREC) {
        R1 = base_ControlziExceptionziBase_nestedAtomically_closure;
        jump stg_raisezh;
    }

    /* Set up the atomically frame */
    Sp = Sp - SIZEOF_StgAtomicallyFrame;
    frame = Sp;

    SET_HDR(frame,stg_atomically_frame_info, W_[CCCS]);
    StgAtomicallyFrame_code(frame) = R1;
    StgAtomicallyFrame_result(frame) = NO_TREC;
    StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;

    /* Start the memory transaction */
    ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", old_trec "ptr") [R1];
    StgTSO_trec(CurrentTSO) = new_trec;

    /* Apply R1 to the realworld token */
    jump stg_ap_v_fast;
}

stg_catchSTMzh
{
    W_ frame;
    W_ cur_trec;
    W_ new_trec;
    /* Args: R1 :: STM a */
    /* Args: R2 :: Exception -> STM a */
    STK_CHK_GEN(SIZEOF_StgCatchSTMFrame + WDS(1), R1_PTR & R2_PTR, stg_catchSTMzh);

    /* Set up the catch frame */
    Sp = Sp - SIZEOF_StgCatchSTMFrame;
    frame = Sp;

    SET_HDR(frame, stg_catch_stm_frame_info, W_[CCCS]);
    StgCatchSTMFrame_handler(frame) = R2;
    StgCatchSTMFrame_code(frame) = R1;

    /* Start a nested transaction to run the body of the try block in */
    cur_trec = StgTSO_trec(CurrentTSO);
    ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", cur_trec "ptr");
    StgTSO_trec(CurrentTSO) = new_trec;

    /* Apply R1 to the realworld token */
    jump stg_ap_v_fast;
}

stg_catchRetryzh
{
    W_ frame;
    W_ new_trec;
    W_ trec;
    // stmStartTransaction may allocate
    MAYBE_GC (R1_PTR & R2_PTR, stg_catchRetryzh);

    /* Args: R1 :: STM a */
    /* Args: R2 :: STM a */
    STK_CHK_GEN(SIZEOF_StgCatchRetryFrame + WDS(1), R1_PTR & R2_PTR, stg_catchRetryzh);

    /* Start a nested transaction within which to run the first code */
    trec = StgTSO_trec(CurrentTSO);
    ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [R1,R2];
    StgTSO_trec(CurrentTSO) = new_trec;

    /* Set up the catch-retry frame */
    Sp = Sp - SIZEOF_StgCatchRetryFrame;
    frame = Sp;

    SET_HDR(frame, stg_catch_retry_frame_info, W_[CCCS]);
    StgCatchRetryFrame_running_alt_code(frame) = 0 :: CInt; // false
    StgCatchRetryFrame_first_code(frame) = R1;
    StgCatchRetryFrame_alt_code(frame) = R2;

    /* Apply R1 to the realworld token */
    jump stg_ap_v_fast;
}

stg_retryzh
{
    W_ frame_type;
    W_ frame;
    W_ trec;
    W_ outer;
    W_ r;

    MAYBE_GC (NO_PTRS, stg_retryzh); // STM operations may allocate

    // Find the enclosing ATOMICALLY_FRAME or CATCH_RETRY_FRAME
retry_pop_stack:
    StgTSO_sp(CurrentTSO) = Sp;
    (frame_type) = foreign "C" findRetryFrameHelper(CurrentTSO "ptr") [];
    Sp = StgTSO_sp(CurrentTSO);
    frame = Sp;
    trec = StgTSO_trec(CurrentTSO);
    outer = StgTRecHeader_enclosing_trec(trec);

    if (frame_type == CATCH_RETRY_FRAME) {
        // The retry reaches a CATCH_RETRY_FRAME before the atomic frame
        ASSERT(outer != NO_TREC);
        // Abort the transaction attempting the current branch
        foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
        foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
        if (StgCatchRetryFrame_running_alt_code(frame) == 0::I32) {
            // Retry in the first branch: try the alternative
            ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
            StgTSO_trec(CurrentTSO) = trec;
            StgCatchRetryFrame_running_alt_code(frame) = 1 :: CInt; // true
            R1 = StgCatchRetryFrame_alt_code(frame);
            jump stg_ap_v_fast;
        } else {
            // Retry in the alternative code: propagate the retry
            StgTSO_trec(CurrentTSO) = outer;
            Sp = Sp + SIZEOF_StgCatchRetryFrame;
            goto retry_pop_stack;
        }
    }

    // We've reached the ATOMICALLY_FRAME: attempt to wait
    ASSERT(frame_type == ATOMICALLY_FRAME);
    if (outer != NO_TREC) {
        // We called retry while checking invariants, so abort the current
        // invariant check (merging its TVar accesses into the parent's read
        // set so we'll wait on them)
        foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
        foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
        trec = outer;
        StgTSO_trec(CurrentTSO) = trec;
        outer = StgTRecHeader_enclosing_trec(trec);
    }
    ASSERT(outer == NO_TREC);

    (r) = foreign "C" stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr") [];
    if (r != 0) {
        // Transaction was valid: stmWait put us on the TVars' queues, we now block
        StgHeader_info(frame) = stg_atomically_waiting_frame_info;
        Sp = frame;
        // Fix up the stack in the unregisterised case: the return convention is different.
        R3 = trec; // passing to stmWaitUnblock()
        jump stg_block_stmwait;
    } else {
        // Transaction was not valid: retry immediately
        ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
        StgTSO_trec(CurrentTSO) = trec;
        Sp = frame;
        R1 = StgAtomicallyFrame_code(frame);
        jump stg_ap_v_fast;
    }
}

stg_checkzh
{
    W_ trec, closure;

    /* Args: R1 = invariant closure */
    MAYBE_GC (R1_PTR, stg_checkzh);

    trec = StgTSO_trec(CurrentTSO);
    closure = R1;
    foreign "C" stmAddInvariantToCheck(MyCapability() "ptr",
                                       trec "ptr",
                                       closure "ptr") [];

    jump %ENTRY_CODE(Sp(0));
}

stg_newTVarzh
{
    W_ tv;
    W_ new_value;

    /* Args: R1 = initialisation value */
    MAYBE_GC (R1_PTR, stg_newTVarzh);
    new_value = R1;
    ("ptr" tv) = foreign "C" stmNewTVar(MyCapability() "ptr", new_value "ptr") [];
    RET_P(tv);
}

stg_readTVarzh
{
    W_ trec;
    W_ tvar;
    W_ result;

    /* Args: R1 = TVar closure */
    MAYBE_GC (R1_PTR, stg_readTVarzh); // Call to stmReadTVar may allocate
    trec = StgTSO_trec(CurrentTSO);
    tvar = R1;
    ("ptr" result) = foreign "C" stmReadTVar(MyCapability() "ptr", trec "ptr", tvar "ptr") [];
    RET_P(result);
}

stg_readTVarIOzh
{
    W_ result;

again:
    result = StgTVar_current_value(R1);
    if (%INFO_PTR(result) == stg_TREC_HEADER_info) {
        goto again;
    }
    RET_P(result);
}

stg_writeTVarzh
{
    W_ trec;
    W_ tvar;
    W_ new_value;

    /* Args: R1 = TVar closure */
    /*       R2 = New value    */
    MAYBE_GC (R1_PTR & R2_PTR, stg_writeTVarzh); // Call to stmWriteTVar may allocate
    trec = StgTSO_trec(CurrentTSO);
    tvar = R1;
    new_value = R2;
    foreign "C" stmWriteTVar(MyCapability() "ptr", trec "ptr", tvar "ptr", new_value "ptr") [];
    jump %ENTRY_CODE(Sp(0));
}

/* -----------------------------------------------------------------------------
 * MVar primitives
 *
 * take & putMVar work as follows. Firstly, an important invariant:
 *
 *    If the MVar is full, then the blocking queue contains only
 *    threads blocked on putMVar, and if the MVar is empty then the
 *    blocking queue contains only threads blocked on takeMVar.
 *
 * takeMVar:
 *    MVar empty : then add ourselves to the blocking queue
 *    MVar full  : remove the value from the MVar, and
 *                 blocking queue empty     : return
 *                 blocking queue non-empty : perform the first blocked putMVar
 *                                            from the queue, and wake up the
 *                                            thread (MVar is now full again)
 *
 * putMVar is just the dual of the above algorithm.
 *
 * How do we "perform a putMVar"? Well, we have to fiddle around with
 * the stack of the thread waiting to do the putMVar. See
 * stg_block_putmvar and stg_block_takemvar in HeapStackCheck.c for
 * the stack layout, and the PerformPut and PerformTake macros below.
 *
 * It is important that a blocked take or put is woken up with the
 * take/put already performed, because otherwise there would be a
 * small window of vulnerability where the thread could receive an
 * exception and never perform its take or put, and we'd end up with a
 * deadlock.
 *
 * -------------------------------------------------------------------------- */
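
/* A concrete walk-through of the invariant above (illustrative only):
 * thread T1 does takeMVar on an empty MVar and is queued, so the queue
 * holds only blocked takers. Thread T2 then does putMVar: it finds T1
 * queued, performs T1's take directly (PerformTake below writes the
 * value onto T1's stack) and wakes T1, so the MVar never transiently
 * appears full to a third thread. */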

stg_isEmptyMVarzh
{
    /* args: R1 = MVar closure */

    if (StgMVar_value(R1) == stg_END_TSO_QUEUE_closure) {
        RET_N(1);
    } else {
        RET_N(0);
    }
}

stg_newMVarzh
{
    /* args: none */
    W_ mvar;

    ALLOC_PRIM ( SIZEOF_StgMVar, NO_PTRS, stg_newMVarzh );

    mvar = Hp - SIZEOF_StgMVar + WDS(1);
    SET_HDR(mvar,stg_MVAR_DIRTY_info,W_[CCCS]);
    // MVARs start dirty: generation 0 has no mutable list
    StgMVar_head(mvar)  = stg_END_TSO_QUEUE_closure;
    StgMVar_tail(mvar)  = stg_END_TSO_QUEUE_closure;
    StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
    RET_P(mvar);
}

#define PerformTake(tso, value)                         \
    W_[StgTSO_sp(tso) + WDS(1)] = value;                \
    W_[StgTSO_sp(tso) + WDS(0)] = stg_gc_unpt_r1_info;

#define PerformPut(tso,lval)                            \
    StgTSO_sp(tso) = StgTSO_sp(tso) + WDS(3);           \
    lval = W_[StgTSO_sp(tso) - WDS(1)];
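
/* Descriptive note on the two macros: a thread blocked in takeMVar sits
 * behind a stg_block_takemvar frame; PerformTake overwrites the top of
 * its stack with a stg_gc_unpt_r1_info frame plus the taken value, so
 * when the thread resumes it simply loads the value into R1 and
 * returns. A thread blocked in putMVar sits behind a 3-word
 * stg_block_putmvar frame; PerformPut pops that frame from the blocked
 * thread's stack and reads out the value it was trying to put (see
 * HeapStackCheck.c, referenced above, for the exact layouts). */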

stg_takeMVarzh
{
    W_ mvar, val, info, tso, q;

    /* args: R1 = MVar closure */
    mvar = R1;

#if defined(THREADED_RTS)
    ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
#else
    info = GET_INFO(mvar);
#endif

    if (info == stg_MVAR_CLEAN_info) {
        foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr") [];
    }

    /* If the MVar is empty, put ourselves on its blocking queue,
     * and wait until we're woken up.
     */
    if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {

        // Note [mvar-heap-check] We want to do the heap check in the
        // branch here, to avoid the conditional in the common case.
        // However, we've already locked the MVar above, so we better
        // be careful to unlock it again if the heap check fails.
        // Unfortunately we don't have an easy way to inject any code
        // into the heap check generated by the code generator, so we
        // have to do it in stg_gc_gen (see HeapStackCheck.cmm).
        HP_CHK_GEN_TICKY(SIZEOF_StgMVarTSOQueue, R1_PTR, stg_takeMVarzh);

        q = Hp - SIZEOF_StgMVarTSOQueue + WDS(1);

        SET_HDR(q, stg_MVAR_TSO_QUEUE_info, CCS_SYSTEM);
        StgMVarTSOQueue_link(q) = END_TSO_QUEUE;
        StgMVarTSOQueue_tso(q)  = CurrentTSO;

        if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
            StgMVar_head(mvar) = q;
        } else {
            StgMVarTSOQueue_link(StgMVar_tail(mvar)) = q;
            foreign "C" recordClosureMutated(MyCapability() "ptr",
                                             StgMVar_tail(mvar)) [];
        }
        StgTSO__link(CurrentTSO)       = q;
        StgTSO_block_info(CurrentTSO)  = mvar;
        StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
        StgMVar_tail(mvar) = q;

        jump stg_block_takemvar;
    }

    /* we got the value... */
    val = StgMVar_value(mvar);

    q = StgMVar_head(mvar);
loop:
    if (q == stg_END_TSO_QUEUE_closure) {
        /* No further putMVars, MVar is now empty */
        StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
        RET_P(val);
    }
    if (StgHeader_info(q) == stg_IND_info ||
        StgHeader_info(q) == stg_MSG_NULL_info) {
        q = StgInd_indirectee(q);
        goto loop;
    }

    // There are putMVar(s) waiting... wake up the first thread on the queue

    tso = StgMVarTSOQueue_tso(q);
    StgMVar_head(mvar) = StgMVarTSOQueue_link(q);
    if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
        StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
    }

loop2:
    if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
        tso = StgTSO__link(tso);
        goto loop2;
    }

    ASSERT(StgTSO_why_blocked(tso) == BlockedOnMVar::I16);
    ASSERT(StgTSO_block_info(tso) == mvar);

    // actually perform the putMVar for the thread that we just woke up
    PerformPut(tso,StgMVar_value(mvar));

    // indicate that the MVar operation has now completed.
    StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;

    // no need to mark the TSO dirty, we have only written END_TSO_QUEUE.

    foreign "C" tryWakeupThread_(MyCapability() "ptr", tso) [];

    unlockClosure(mvar, stg_MVAR_DIRTY_info);
    RET_P(val);
}

stg_tryTakeMVarzh
{
    W_ mvar, val, info, tso, q;

    /* args: R1 = MVar closure */
    mvar = R1;

#if defined(THREADED_RTS)
    ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
#else
    info = GET_INFO(mvar);
#endif

    /* If the MVar is empty, return immediately: tryTakeMVar# never
     * blocks, it just reports failure.
     */
    if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
        unlockClosure(mvar, info);
#endif
        /* HACK: we need a pointer to pass back,
         * so we abuse NO_FINALIZER_closure
         */
        RET_NP(0, stg_NO_FINALIZER_closure);
    }

    if (info == stg_MVAR_CLEAN_info) {
        foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr") [];
    }

    /* we got the value... */
    val = StgMVar_value(mvar);

    q = StgMVar_head(mvar);
loop:
    if (q == stg_END_TSO_QUEUE_closure) {
        /* No further putMVars, MVar is now empty */
        StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
        RET_NP(1, val);
    }
    if (StgHeader_info(q) == stg_IND_info ||
        StgHeader_info(q) == stg_MSG_NULL_info) {
        q = StgInd_indirectee(q);
        goto loop;
    }

    // There are putMVar(s) waiting... wake up the first thread on the queue

    tso = StgMVarTSOQueue_tso(q);
    StgMVar_head(mvar) = StgMVarTSOQueue_link(q);
    if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
        StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
    }

loop2:
    if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
        tso = StgTSO__link(tso);
        goto loop2;
    }

    ASSERT(StgTSO_why_blocked(tso) == BlockedOnMVar::I16);
    ASSERT(StgTSO_block_info(tso) == mvar);

    // actually perform the putMVar for the thread that we just woke up
    PerformPut(tso,StgMVar_value(mvar));

    // indicate that the MVar operation has now completed.
    StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;

    // no need to mark the TSO dirty, we have only written END_TSO_QUEUE.

    foreign "C" tryWakeupThread_(MyCapability() "ptr", tso) [];

    unlockClosure(mvar, stg_MVAR_DIRTY_info);
    RET_NP(1, val);
}

stg_putMVarzh
{
    W_ mvar, val, info, tso, q;

    /* args: R1 = MVar, R2 = value */
    mvar = R1;
    val  = R2;

#if defined(THREADED_RTS)
    ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
#else
    info = GET_INFO(mvar);
#endif

    if (info == stg_MVAR_CLEAN_info) {
        foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
    }

    if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {

        // see Note [mvar-heap-check] above
        HP_CHK_GEN_TICKY(SIZEOF_StgMVarTSOQueue, R1_PTR & R2_PTR, stg_putMVarzh);

        q = Hp - SIZEOF_StgMVarTSOQueue + WDS(1);

        SET_HDR(q, stg_MVAR_TSO_QUEUE_info, CCS_SYSTEM);
        StgMVarTSOQueue_link(q) = END_TSO_QUEUE;
        StgMVarTSOQueue_tso(q)  = CurrentTSO;

        if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
            StgMVar_head(mvar) = q;
        } else {
            StgMVarTSOQueue_link(StgMVar_tail(mvar)) = q;
            foreign "C" recordClosureMutated(MyCapability() "ptr",
                                             StgMVar_tail(mvar)) [];
        }
        StgTSO__link(CurrentTSO)       = q;
        StgTSO_block_info(CurrentTSO)  = mvar;
        StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
        StgMVar_tail(mvar) = q;

        jump stg_block_putmvar;
    }

    q = StgMVar_head(mvar);
loop:
    if (q == stg_END_TSO_QUEUE_closure) {
        /* No further takes, the MVar is now full. */
        StgMVar_value(mvar) = val;
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
        jump %ENTRY_CODE(Sp(0));
    }
    if (StgHeader_info(q) == stg_IND_info ||
        StgHeader_info(q) == stg_MSG_NULL_info) {
        q = StgInd_indirectee(q);
        goto loop;
    }

    // There are takeMVar(s) waiting: wake up the first one

    tso = StgMVarTSOQueue_tso(q);
    StgMVar_head(mvar) = StgMVarTSOQueue_link(q);
    if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
        StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
    }

loop2:
    if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
        tso = StgTSO__link(tso);
        goto loop2;
    }

    ASSERT(StgTSO_why_blocked(tso) == BlockedOnMVar::I16);
    ASSERT(StgTSO_block_info(tso) == mvar);

    // actually perform the takeMVar
    PerformTake(tso, val);

    // indicate that the MVar operation has now completed.
    StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;

    if (TO_W_(StgTSO_dirty(tso)) == 0) {
        foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
    }

    foreign "C" tryWakeupThread_(MyCapability() "ptr", tso) [];

    unlockClosure(mvar, stg_MVAR_DIRTY_info);
    jump %ENTRY_CODE(Sp(0));
}

stg_tryPutMVarzh
{
    W_ mvar, val, info, tso, q;

    /* args: R1 = MVar, R2 = value */
    mvar = R1;
    val  = R2;

#if defined(THREADED_RTS)
    ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
#else
    info = GET_INFO(mvar);
#endif

    if (info == stg_MVAR_CLEAN_info) {
        foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
    }

    if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
        unlockClosure(mvar, info);
#endif
        RET_N(0);
    }

    q = StgMVar_head(mvar);
loop:
    if (q == stg_END_TSO_QUEUE_closure) {
        /* No further takes, the MVar is now full. */
        StgMVar_value(mvar) = val;
        unlockClosure(mvar, stg_MVAR_DIRTY_info);
        jump %ENTRY_CODE(Sp(0));
    }
    if (StgHeader_info(q) == stg_IND_info ||
        StgHeader_info(q) == stg_MSG_NULL_info) {
        q = StgInd_indirectee(q);
        goto loop;
    }

    // There are takeMVar(s) waiting: wake up the first one

    tso = StgMVarTSOQueue_tso(q);
    StgMVar_head(mvar) = StgMVarTSOQueue_link(q);
    if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
        StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
    }

loop2:
    if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
        tso = StgTSO__link(tso);
        goto loop2;
    }

    ASSERT(StgTSO_why_blocked(tso) == BlockedOnMVar::I16);
    ASSERT(StgTSO_block_info(tso) == mvar);

    // actually perform the takeMVar
    PerformTake(tso, val);

    // indicate that the MVar operation has now completed.
    StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;

    if (TO_W_(StgTSO_dirty(tso)) == 0) {
        foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
    }

    foreign "C" tryWakeupThread_(MyCapability() "ptr", tso) [];

    unlockClosure(mvar, stg_MVAR_DIRTY_info);
    jump %ENTRY_CODE(Sp(0));
}

/* -----------------------------------------------------------------------------
   Stable pointer primitives
   ------------------------------------------------------------------------- */

stg_makeStableNamezh
{
    W_ index, sn_obj;

    ALLOC_PRIM( SIZEOF_StgStableName, R1_PTR, stg_makeStableNamezh );

    (index) = foreign "C" lookupStableName(R1 "ptr") [];

    /* Is there already a StableName for this heap object?
     * stable_ptr_table is a pointer to an array of snEntry structs.
     */
    if ( snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) == NULL ) {
        sn_obj = Hp - SIZEOF_StgStableName + WDS(1);
        SET_HDR(sn_obj, stg_STABLE_NAME_info, W_[CCCS]);
        StgStableName_sn(sn_obj) = index;
        snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) = sn_obj;
    } else {
        sn_obj = snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry);
    }

    RET_P(sn_obj);
}

stg_makeStablePtrzh
{
    /* Args: R1 = a */
    W_ sp;
    MAYBE_GC(R1_PTR, stg_makeStablePtrzh);
    ("ptr" sp) = foreign "C" getStablePtr(R1 "ptr") [];
    RET_N(sp);
}

stg_deRefStablePtrzh
{
    /* Args: R1 = the stable ptr */
    W_ r, sp;
    sp = R1;
    r = snEntry_addr(W_[stable_ptr_table] + sp*SIZEOF_snEntry);
    RET_P(r);
}

/* -----------------------------------------------------------------------------
   Bytecode object primitives
   ------------------------------------------------------------------------- */

stg_newBCOzh
{
    /* R1 = instrs, R2 = literals, R3 = ptrs, R4 = arity, R5 = bitmap array */
    W_ bco, bitmap_arr, bytes, words;

    bitmap_arr = R5;

    words = BYTES_TO_WDS(SIZEOF_StgBCO) + BYTE_ARR_WDS(bitmap_arr);
    bytes = WDS(words);

    ALLOC_PRIM( bytes, R1_PTR&R2_PTR&R3_PTR&R5_PTR, stg_newBCOzh );

    bco = Hp - bytes + WDS(1);
    SET_HDR(bco, stg_BCO_info, W_[CCCS]);

    StgBCO_instrs(bco)   = R1;
    StgBCO_literals(bco) = R2;
    StgBCO_ptrs(bco)     = R3;
    StgBCO_arity(bco)    = HALF_W_(R4);
    StgBCO_size(bco)     = HALF_W_(words);

    // Copy the arity/bitmap info into the BCO
    W_ i;
    i = 0;
for:
    if (i < BYTE_ARR_WDS(bitmap_arr)) {
        StgBCO_bitmap(bco,i) = StgArrWords_payload(bitmap_arr,i);
        i = i + 1;
        goto for;
    }

    RET_P(bco);
}

stg_mkApUpd0zh
{
    // R1 = the BCO# for the AP
    //
    W_ ap;

    // This function is *only* used to wrap zero-arity BCOs in an
    // updatable wrapper (see ByteCodeLink.lhs). An AP thunk is always
    // saturated and always points directly to a FUN or BCO.
    ASSERT(%INFO_TYPE(%GET_STD_INFO(R1)) == HALF_W_(BCO) &&
           StgBCO_arity(R1) == HALF_W_(0));

    HP_CHK_GEN_TICKY(SIZEOF_StgAP, R1_PTR, stg_mkApUpd0zh);
    TICK_ALLOC_UP_THK(0, 0);
    CCCS_ALLOC(SIZEOF_StgAP);

    ap = Hp - SIZEOF_StgAP + WDS(1);
    SET_HDR(ap, stg_AP_info, W_[CCCS]);

    StgAP_n_args(ap) = HALF_W_(0);
    StgAP_fun(ap) = R1;

    RET_P(ap);
}

stg_unpackClosurezh
{
    /* args: R1 = closure to analyze */
    // TODO: Consider the absence of ptrs or nonptrs as a special case ?

    W_ info, ptrs, nptrs, p, ptrs_arr, nptrs_arr;
    info = %GET_STD_INFO(UNTAG(R1));

    // Some closures have non-standard layout, so we omit those here.
    W_ type;
    type = TO_W_(%INFO_TYPE(info));
    switch [0 .. N_CLOSURE_TYPES] type {
    case THUNK_SELECTOR : {
        ptrs = 1;
        nptrs = 0;
        goto out;
    }
    case THUNK, THUNK_1_0, THUNK_0_1, THUNK_2_0, THUNK_1_1,
         THUNK_0_2, THUNK_STATIC, AP, PAP, AP_STACK, BCO : {
        ptrs = 0;
        nptrs = 0;
        goto out;
    }
    default : {
        ptrs  = TO_W_(%INFO_PTRS(info));
        nptrs = TO_W_(%INFO_NPTRS(info));
        goto out;
    }}
out:

    W_ ptrs_arr_sz, ptrs_arr_cards, nptrs_arr_sz;
    nptrs_arr_sz = SIZEOF_StgArrWords + WDS(nptrs);
    ptrs_arr_cards = mutArrPtrsCardWords(ptrs);
    ptrs_arr_sz = SIZEOF_StgMutArrPtrs + WDS(ptrs) + WDS(ptrs_arr_cards);

    ALLOC_PRIM (ptrs_arr_sz + nptrs_arr_sz, R1_PTR, stg_unpackClosurezh);

    W_ clos;
    clos = UNTAG(R1);

    ptrs_arr  = Hp - nptrs_arr_sz - ptrs_arr_sz + WDS(1);
    nptrs_arr = Hp - nptrs_arr_sz + WDS(1);

    SET_HDR(ptrs_arr, stg_MUT_ARR_PTRS_FROZEN_info, W_[CCCS]);
    StgMutArrPtrs_ptrs(ptrs_arr) = ptrs;
    StgMutArrPtrs_size(ptrs_arr) = ptrs + ptrs_arr_cards;

    p = 0;
for:
    if (p < ptrs) {
        W_[ptrs_arr + SIZEOF_StgMutArrPtrs + WDS(p)] = StgClosure_payload(clos,p);
        p = p + 1;
        goto for;
    }
    /* We can leave the card table uninitialised, since the array is
       allocated in the nursery. The GC will fill it in if/when the array
       is promoted. */

    SET_HDR(nptrs_arr, stg_ARR_WORDS_info, W_[CCCS]);
    StgArrWords_bytes(nptrs_arr) = WDS(nptrs);
    p = 0;
for2:
    if (p < nptrs) {
        W_[BYTE_ARR_CTS(nptrs_arr) + WDS(p)] = StgClosure_payload(clos, p+ptrs);
        p = p + 1;
        goto for2;
    }
    RET_NPP(info, ptrs_arr, nptrs_arr);
}

/* -----------------------------------------------------------------------------
   Thread I/O blocking primitives
   -------------------------------------------------------------------------- */

/* Add a thread to the end of the blocked queue. (C-- version of the C
 * macro in Schedule.h)
 */
#define APPEND_TO_BLOCKED_QUEUE(tso)                    \
    ASSERT(StgTSO__link(tso) == END_TSO_QUEUE);         \
    if (W_[blocked_queue_hd] == END_TSO_QUEUE) {        \
        W_[blocked_queue_hd] = tso;                     \
    } else {                                            \
        foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl] "ptr", tso) []; \
    }                                                   \
    W_[blocked_queue_tl] = tso;
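
/* Descriptive note: the non-empty case goes through setTSOLink() rather
 * than writing StgTSO__link directly, because linking an old-generation
 * TSO to a new one is a mutation the generational GC must see;
 * setTSOLink dirties the TSO so the write barrier records it. The
 * head/tail writes are to static queue roots and need no barrier. */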

stg_waitReadzh
{
    /* args: R1 */
#ifdef THREADED_RTS
    foreign "C" barf("waitRead# on threaded RTS") never returns;
#else

    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
    StgTSO_block_info(CurrentTSO) = R1;
    // No locking - we're not going to use this interface in the
    // threaded RTS anyway.
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_noregs;
#endif
}

stg_waitWritezh
{
    /* args: R1 */
#ifdef THREADED_RTS
    foreign "C" barf("waitWrite# on threaded RTS") never returns;
#else

    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
    StgTSO_block_info(CurrentTSO) = R1;
    // No locking - we're not going to use this interface in the
    // threaded RTS anyway.
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_noregs;
#endif
}

STRING(stg_delayzh_malloc_str, "stg_delayzh")
stg_delayzh
{
#ifdef mingw32_HOST_OS
    W_ ares;
    CInt reqID;
#else
    W_ t, prev, target;
#endif

#ifdef THREADED_RTS
    foreign "C" barf("delay# on threaded RTS") never returns;
#else

    /* args: R1 (microsecond delay amount) */
    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnDelay::I16;

#ifdef mingw32_HOST_OS

    /* could probably allocate this on the heap instead */
    ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                              stg_delayzh_malloc_str);
    (reqID) = foreign "C" addDelayRequest(R1);
    StgAsyncIOResult_reqID(ares)   = reqID;
    StgAsyncIOResult_len(ares)     = 0;
    StgAsyncIOResult_errCode(ares) = 0;
    StgTSO_block_info(CurrentTSO)  = ares;

    /* Having all async-blocked threads reside on the blocked_queue
     * simplifies matters, so change the status to OnDoProc and put the
     * delayed thread on the blocked_queue.
     */
    StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_async_void;

#else

    W_ time;
    W_ divisor;
    (time) = foreign "C" getourtimeofday() [R1];
    divisor = TO_W_(RtsFlags_MiscFlags_tickInterval(RtsFlags));
    if (divisor == 0) {
        divisor = 50;
    }
    divisor = divisor * 1000;
    target = ((R1 + divisor - 1) / divisor) /* divide rounding up */
             + time + 1; /* Add 1 as getourtimeofday rounds down */
    StgTSO_block_info(CurrentTSO) = target;

    /* Insert the new thread in the sleeping queue. */
    prev = NULL;
    t = W_[sleeping_queue];
while:
    if (t != END_TSO_QUEUE && StgTSO_block_info(t) < target) {
        prev = t;
        t = StgTSO__link(t);
        goto while;
    }

    StgTSO__link(CurrentTSO) = t;
    if (prev == NULL) {
        W_[sleeping_queue] = CurrentTSO;
    } else {
        foreign "C" setTSOLink(MyCapability() "ptr", prev "ptr", CurrentTSO) [];
    }

    jump stg_block_noregs;
#endif
#endif /* !THREADED_RTS */
}

#ifdef mingw32_HOST_OS
STRING(stg_asyncReadzh_malloc_str, "stg_asyncReadzh")
stg_asyncReadzh
{
    W_ ares;
    CInt reqID;

#ifdef THREADED_RTS
    foreign "C" barf("asyncRead# on threaded RTS") never returns;
#else

    /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;

    /* could probably allocate this on the heap instead */
    ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                              stg_asyncReadzh_malloc_str)
                        [];
    (reqID) = foreign "C" addIORequest(R1, 0/*FALSE*/,R2,R3,R4 "ptr") [];
    StgAsyncIOResult_reqID(ares)   = reqID;
    StgAsyncIOResult_len(ares)     = 0;
    StgAsyncIOResult_errCode(ares) = 0;
    StgTSO_block_info(CurrentTSO)  = ares;
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_async;
#endif
}

STRING(stg_asyncWritezh_malloc_str, "stg_asyncWritezh")
stg_asyncWritezh
{
    W_ ares;
    CInt reqID;

#ifdef THREADED_RTS
    foreign "C" barf("asyncWrite# on threaded RTS") never returns;
#else

    /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;

    ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                              stg_asyncWritezh_malloc_str)
                        [];
    (reqID) = foreign "C" addIORequest(R1, 1/*TRUE*/,R2,R3,R4 "ptr") [];

    StgAsyncIOResult_reqID(ares)   = reqID;
    StgAsyncIOResult_len(ares)     = 0;
    StgAsyncIOResult_errCode(ares) = 0;
    StgTSO_block_info(CurrentTSO)  = ares;
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_async;
#endif
}

STRING(stg_asyncDoProczh_malloc_str, "stg_asyncDoProczh")
stg_asyncDoProczh
{
    W_ ares;
    CInt reqID;

#ifdef THREADED_RTS
    foreign "C" barf("asyncDoProc# on threaded RTS") never returns;
#else

    /* args: R1 = proc, R2 = param */
    ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
    StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;

    /* could probably allocate this on the heap instead */
    ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
                                              stg_asyncDoProczh_malloc_str)
                        [];
    (reqID) = foreign "C" addDoProcRequest(R1 "ptr",R2 "ptr") [];
    StgAsyncIOResult_reqID(ares)   = reqID;
    StgAsyncIOResult_len(ares)     = 0;
    StgAsyncIOResult_errCode(ares) = 0;
    StgTSO_block_info(CurrentTSO)  = ares;
    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
    jump stg_block_async;
#endif
}
#endif /* mingw32_HOST_OS */

/* -----------------------------------------------------------------------------
 * noDuplicate#
 *
 * noDuplicate# tries to ensure that none of the thunks under
 * evaluation by the current thread are also under evaluation by
 * another thread. It relies on *both* threads doing noDuplicate#;
 * the second one will get blocked if they are duplicating some work.
 *
 * The idea is that noDuplicate# is used within unsafePerformIO to
 * ensure that the IO operation is performed at most once.
 * noDuplicate# calls threadPaused which acquires an exclusive lock on
 * all the thunks currently under evaluation by the current thread.
 *
 * Consider the following scenario. There is a thunk A, whose
 * evaluation requires evaluating thunk B, where thunk B is an
 * unsafePerformIO. Two threads, 1 and 2, both enter A. Thread 2
 * is pre-empted before it enters B, and claims A by blackholing it
 * (in threadPaused). Thread 1 now enters B, and calls noDuplicate#.
 *
 *      Thread 1                      Thread 2
 *
 *   +-----------+                 +---------------+
 *   |    -------+-----> A <-------+-------        |
 *   |  update   |   BLACKHOLE     | marked_update |
 *   +-----------+                 +---------------+
 *   |           |                 |               |
 *        ...                            ...
 *   |           |                 +---------------+
 *   +-----------+
 *   |    -------+-----> B
 *   |  update   |   BLACKHOLE
 *   +-----------+
 *
 * At this point: A is a blackhole, owned by thread 2. noDuplicate#
 * calls threadPaused, which walks up the stack and
 *  - claims B on behalf of thread 1
 *  - then it reaches the update frame for A, which it sees is already
 *    a BLACKHOLE and is therefore owned by another thread. Since
 *    thread 1 is duplicating work, the computation up to the update
 *    frame for A is suspended, including thunk B.
 *  - thunk B, which is an unsafePerformIO, has now been reverted to
 *    an AP_STACK which could be duplicated - BAD!
 *  - The solution is as follows: before calling threadPaused, we
 *    leave a frame on the stack (stg_noDuplicate_info) that will call
 *    noDuplicate# again if the current computation is suspended and
 *    restarted.
 *
 * See the test program in concurrent/prog003 for a way to demonstrate
 * this. It needs to be run with +RTS -N3 or greater, and the bug
 * only manifests occasionally (once every 10 runs or so).
 * -------------------------------------------------------------------------- */
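
/* For context (a descriptive note, paraphrasing rather than quoting the
 * libraries): at the Haskell level, GHC.IO builds unsafePerformIO on
 * top of this primop roughly as "unsafePerformIO m =
 * unsafeDupablePerformIO (noDuplicate >> m)", where noDuplicate is the
 * IO wrapper around noDuplicate#. The protocol above therefore runs
 * before the effect, giving at-most-once execution even when several
 * threads race to force the same thunk. */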

INFO_TABLE_RET(stg_noDuplicate, RET_SMALL)
{
    Sp_adj(1);
    jump stg_noDuplicatezh;
}

stg_noDuplicatezh
{
    STK_CHK_GEN( WDS(1), NO_PTRS, stg_noDuplicatezh );
    // leave noDuplicate frame in case the current
    // computation is suspended and restarted (see above).
    Sp_adj(-1);
    Sp(0) = stg_noDuplicate_info;

    SAVE_THREAD_STATE();
    ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
    foreign "C" threadPaused (MyCapability() "ptr", CurrentTSO "ptr") [];

    if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
        jump stg_threadFinished;
    } else {
        LOAD_THREAD_STATE();
        ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
        // remove the stg_noDuplicate frame if it is still there.
        if (Sp(0) == stg_noDuplicate_info) {
            Sp_adj(1);
        }
        jump %ENTRY_CODE(Sp(0));
    }
}

/* -----------------------------------------------------------------------------
   Miscellaneous primitives
   -------------------------------------------------------------------------- */

stg_getApStackValzh
{
    W_ ap_stack, offset, val, ok;

    /* args: R1 = AP_STACK, R2 = offset */
    ap_stack = R1;
    offset   = R2;

    if (%INFO_PTR(ap_stack) == stg_AP_STACK_info) {
        ok  = 1;
        val = StgAP_STACK_payload(ap_stack,offset);
    } else {
        ok  = 0;
        val = R1;
    }
    RET_NP(ok,val);
}

// Write the cost center stack of the first argument on stderr; return
// the second. Possibly only makes sense for already evaluated
// things?
stg_traceCcszh
{
    W_ ccs;

#ifdef PROFILING
    ccs = StgHeader_ccs(UNTAG(R1));
    foreign "C" fprintCCS_stderr(ccs "ptr") [R2];
#endif

    R1 = R2;
    ENTER();
}

stg_getSparkzh
{
    W_ spark;

#ifndef THREADED_RTS
    RET_NP(0,ghczmprim_GHCziBool_False_closure);
#else
    (spark) = foreign "C" findSpark(MyCapability());
    if (spark != 0) {
        RET_NP(1,spark);
    } else {
        RET_NP(0,ghczmprim_GHCziBool_False_closure);
    }
#endif
}

stg_traceEventzh
{
    W_ msg;
    msg = R1;

#if defined(TRACING) || defined(DEBUG)

    foreign "C" traceUserMsg(MyCapability() "ptr", msg "ptr") [];

#elif defined(DTRACE)

    W_ enabled;

    // We should go through the macro HASKELLEVENT_USER_MSG_ENABLED from
    // RtsProbes.h, but that header file includes unistd.h, which doesn't
    // work in Cmm.
    (enabled) = foreign "C" __dtrace_isenabled$HaskellEvent$user__msg$v1() [];
    if (enabled != 0) {
        foreign "C" dtraceUserMsgWrapper(MyCapability() "ptr", msg "ptr") [];
    }

#endif
    jump %ENTRY_CODE(Sp(0));
}