1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
5 * Out-of-line primitive operations
7 * This file contains the implementations of all the primitive
8 * operations ("primops") which are not expanded inline. See
9 * ghc/compiler/prelude/primops.txt.pp for a list of all the primops;
10 * this file contains code for most of those with the attribute
13 * Entry convention: the entry convention for a primop is that all the
14 * args are in Stg registers (R1, R2, etc.). This is to make writing
15 * the primops easier. (see compiler/codeGen/CgCallConv.hs).
17 * Return convention: results from a primop are generally returned
18 * using the ordinary unboxed tuple return convention. The C-- parser
19 * implements the RET_xxxx() macros to perform unboxed-tuple returns
20 * based on the prevailing return convention.
22 * This file is written in a subset of C--, extended with various
23 * features specific to GHC. It is compiled by GHC directly. For the
24 * syntax of .cmm files, see the parser in ghc/compiler/cmm/CmmParse.y.
26 * ---------------------------------------------------------------------------*/
31 import pthread_mutex_lock;
32 import pthread_mutex_unlock;
34 import base_ControlziExceptionziBase_nestedAtomically_closure;
35 import EnterCriticalSection;
36 import LeaveCriticalSection;
37 import ghczmprim_GHCziBool_False_closure;
39 /*-----------------------------------------------------------------------------
42 Basically just new*Array - the others are all inline macros.
44 The size arg is always passed in R1, and the result returned in R1.
46 The slow entry point is for returning from a heap check; the saved
47 size argument must be re-loaded from the stack.
48 -------------------------------------------------------------------------- */
50 /* for objects that are *less* than the size of a word, make sure we
51 * round up to the nearest word for the size of the array.
56 W_ words, payload_words, n, p;
57 MAYBE_GC(NO_PTRS,newByteArrayzh_fast);
59 payload_words = ROUNDUP_BYTES_TO_WDS(n);
60 words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
61 ("ptr" p) = foreign "C" allocateLocal(MyCapability() "ptr",words) [];
62 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
63 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
64 StgArrWords_words(p) = payload_words;
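/* For orientation, the Haskell-level type of the primop implemented above
 * (quoted from GHC.Prim as a reference, not code that lives in this file):
 *
 *     newByteArray# :: Int# -> State# s -> (# State# s, MutableByteArray# s #)
 */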
69 #define BA_MASK (BA_ALIGN-1)
71 newPinnedByteArrayzh_fast
73 W_ words, bytes, payload_words, p;
75 MAYBE_GC(NO_PTRS,newPinnedByteArrayzh_fast);
77 /* payload_words is what we will tell the profiler we had to allocate */
78 payload_words = ROUNDUP_BYTES_TO_WDS(bytes);
79 /* When we actually allocate memory, we need to allow space for the
81 bytes = bytes + SIZEOF_StgArrWords;
82 /* And we want to align to BA_ALIGN bytes, so we need to allow space
83 to shift up to BA_ALIGN - 1 bytes: */
84 bytes = bytes + BA_ALIGN - 1;
85 /* Now we convert to a number of words: */
86 words = ROUNDUP_BYTES_TO_WDS(bytes);
88 ("ptr" p) = foreign "C" allocatePinned(words) [];
89 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
91 /* Now we need to move p forward so that the payload is aligned
93 p = p + ((-p - SIZEOF_StgArrWords) & BA_MASK);
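/* A worked example of the adjustment above (illustrative only; the real
 * values of BA_ALIGN and SIZEOF_StgArrWords are platform-dependent).
 * Suppose BA_ALIGN = 16, SIZEOF_StgArrWords = 8, and allocatePinned
 * returned p = 0x1010.  The payload would start at p + 8 = 0x1018, which
 * is not 16-byte aligned.  The adjustment is
 *
 *     (-p - SIZEOF_StgArrWords) & BA_MASK  =  (-0x1018) & 0xF  =  8
 *
 * so p becomes 0x1018 and the payload starts at 0x1020, a multiple of
 * BA_ALIGN.  The BA_ALIGN - 1 extra bytes requested above guarantee the
 * shifted object still fits inside the allocation.
 */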
95 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
96 StgArrWords_words(p) = payload_words;
100 newAlignedPinnedByteArrayzh_fast
102 W_ words, bytes, payload_words, p, alignment;
104 MAYBE_GC(NO_PTRS,newAlignedPinnedByteArrayzh_fast);
108 /* payload_words is what we will tell the profiler we had to allocate */
109 payload_words = ROUNDUP_BYTES_TO_WDS(bytes);
111 /* When we actually allocate memory, we need to allow space for the
113 bytes = bytes + SIZEOF_StgArrWords;
114 /* And we want to align to <alignment> bytes, so we need to allow space
115 to shift up to <alignment - 1> bytes: */
116 bytes = bytes + alignment - 1;
117 /* Now we convert to a number of words: */
118 words = ROUNDUP_BYTES_TO_WDS(bytes);
120 ("ptr" p) = foreign "C" allocatePinned(words) [];
121 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
123 /* Now we need to move p forward so that the payload is aligned
124 to <alignment> bytes. Note that we are assuming that
125 <alignment> is a power of 2, which is technically not guaranteed */
126 p = p + ((-p - SIZEOF_StgArrWords) & (alignment - 1));
128 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
129 StgArrWords_words(p) = payload_words;
135 W_ words, n, init, arr, p;
136 /* Args: R1 = words, R2 = initialisation value */
139 MAYBE_GC(R2_PTR,newArrayzh_fast);
141 words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + n;
142 ("ptr" arr) = foreign "C" allocateLocal(MyCapability() "ptr",words) [R2];
143 TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(n), 0);
145 SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, W_[CCCS]);
146 StgMutArrPtrs_ptrs(arr) = n;
148 // Initialise all elements of the array with the value in R2
150 p = arr + SIZEOF_StgMutArrPtrs;
152 if (p < arr + WDS(words)) {
161 unsafeThawArrayzh_fast
163 // SUBTLETY TO DO WITH THE OLD GEN MUTABLE LIST
165 // A MUT_ARR_PTRS lives on the mutable list, but a MUT_ARR_PTRS_FROZEN
166 // normally doesn't. However, when we freeze a MUT_ARR_PTRS, we leave
167 // it on the mutable list for the GC to remove (removing something from
168 // the mutable list is not easy, because the mut_list is only singly-linked).
170 // So that we can tell whether a MUT_ARR_PTRS_FROZEN is on the mutable list,
171 // when we freeze it we set the info ptr to be MUT_ARR_PTRS_FROZEN0
172 // to indicate that it is still on the mutable list.
174 // So, when we thaw a MUT_ARR_PTRS_FROZEN, we must cope with two cases:
175 // either it is on a mut_list, or it isn't. We adopt the convention that
176 // the closure type is MUT_ARR_PTRS_FROZEN0 if it is on the mutable list,
177 // and MUT_ARR_PTRS_FROZEN otherwise. In fact it wouldn't matter if
178 // we put it on the mutable list more than once, but it would get scavenged
179 // multiple times during GC, which would be unnecessarily slow.
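// At the Haskell level this freeze/thaw machinery backs, for example,
// Data.Array.Unsafe.unsafeFreeze and unsafeThaw via the primops (types
// quoted from GHC.Prim for orientation):
//
//     unsafeFreezeArray# :: MutableArray# s a -> State# s -> (# State# s, Array# a #)
//     unsafeThawArray#   :: Array# a -> State# s -> (# State# s, MutableArray# s a #)
//
// Neither operation copies the array; only the info pointer (and the
// mutable-list bookkeeping described above) changes.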
181 if (StgHeader_info(R1) != stg_MUT_ARR_PTRS_FROZEN0_info) {
182 SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
183 recordMutable(R1, R1);
184 // must be done after SET_INFO, because it ASSERTs closure_MUTABLE()
187 SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
192 /* -----------------------------------------------------------------------------
194 -------------------------------------------------------------------------- */
199 /* Args: R1 = initialisation value */
201 ALLOC_PRIM( SIZEOF_StgMutVar, R1_PTR, newMutVarzh_fast);
203 mv = Hp - SIZEOF_StgMutVar + WDS(1);
204 SET_HDR(mv,stg_MUT_VAR_DIRTY_info,W_[CCCS]);
205 StgMutVar_var(mv) = R1;
210 atomicModifyMutVarzh_fast
212 W_ mv, f, z, x, y, r, h;
213 /* Args: R1 :: MutVar#, R2 :: a -> (a,b) */
215 /* If x is the current contents of the MutVar#, then
216 We want to make the new contents point to
220 and the return value is
224 obviously we can share (f x).
226 z = [stg_ap_2 f x] (max (HS + 2) MIN_UPD_SIZE)
227 y = [stg_sel_0 z] (max (HS + 1) MIN_UPD_SIZE)
228 r = [stg_sel_1 z] (max (HS + 1) MIN_UPD_SIZE)
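//  A rough Haskell-level picture of the three thunks built below (a sketch,
//  not real library code).  If x is the value currently in the MutVar#:
//
//      z = f x        -- shared application thunk        (stg_ap_2 f x)
//      y = fst z      -- becomes the new contents        (stg_sel_0 z)
//      r = snd z      -- is returned to the caller       (stg_sel_1 z)
//
//  Only these thunks are allocated here; f is not evaluated by the primop,
//  and the MutVar# is swung from x to y with a CAS, retrying on contention.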
232 #define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
233 #define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),WDS(MIN_UPD_SIZE-1))
235 #define THUNK_1_SIZE (SIZEOF_StgThunkHeader + WDS(1))
236 #define TICK_ALLOC_THUNK_1() TICK_ALLOC_UP_THK(WDS(1),0)
240 #define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(MIN_UPD_SIZE))
241 #define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),WDS(MIN_UPD_SIZE-2))
243 #define THUNK_2_SIZE (SIZEOF_StgThunkHeader + WDS(2))
244 #define TICK_ALLOC_THUNK_2() TICK_ALLOC_UP_THK(WDS(2),0)
247 #define SIZE (THUNK_2_SIZE + THUNK_1_SIZE + THUNK_1_SIZE)
249 HP_CHK_GEN_TICKY(SIZE, R1_PTR & R2_PTR, atomicModifyMutVarzh_fast);
254 TICK_ALLOC_THUNK_2();
255 CCCS_ALLOC(THUNK_2_SIZE);
256 z = Hp - THUNK_2_SIZE + WDS(1);
257 SET_HDR(z, stg_ap_2_upd_info, W_[CCCS]);
258 LDV_RECORD_CREATE(z);
259 StgThunk_payload(z,0) = f;
261 TICK_ALLOC_THUNK_1();
262 CCCS_ALLOC(THUNK_1_SIZE);
263 y = z - THUNK_1_SIZE;
264 SET_HDR(y, stg_sel_0_upd_info, W_[CCCS]);
265 LDV_RECORD_CREATE(y);
266 StgThunk_payload(y,0) = z;
268 TICK_ALLOC_THUNK_1();
269 CCCS_ALLOC(THUNK_1_SIZE);
270 r = y - THUNK_1_SIZE;
271 SET_HDR(r, stg_sel_1_upd_info, W_[CCCS]);
272 LDV_RECORD_CREATE(r);
273 StgThunk_payload(r,0) = z;
276 x = StgMutVar_var(mv);
277 StgThunk_payload(z,1) = x;
279 (h) = foreign "C" cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var, x, y) [];
280 if (h != x) { goto retry; }
282 StgMutVar_var(mv) = y;
285 if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
286 foreign "C" dirty_MUT_VAR(BaseReg "ptr", mv "ptr") [];
292 /* -----------------------------------------------------------------------------
293 Weak Pointer Primitives
294 -------------------------------------------------------------------------- */
296 STRING(stg_weak_msg,"New weak pointer at %p\n")
302 R3 = finalizer (or NULL)
307 R3 = stg_NO_FINALIZER_closure;
310 ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR & R3_PTR, mkWeakzh_fast );
312 w = Hp - SIZEOF_StgWeak + WDS(1);
313 SET_HDR(w, stg_WEAK_info, W_[CCCS]);
315 // We don't care about cfinalizer here.
316 // Should StgWeak_cfinalizer(w) be stg_NO_FINALIZER_closure or something else?
320 StgWeak_value(w) = R2;
321 StgWeak_finalizer(w) = R3;
322 StgWeak_cfinalizer(w) = stg_NO_FINALIZER_closure;
324 StgWeak_link(w) = W_[weak_ptr_list];
325 W_[weak_ptr_list] = w;
327 IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);
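/* At the Haskell level the fields set above correspond to System.Mem.Weak
 * (a sketch of the correspondence, not code from this file):
 *
 *     mkWeak :: k -> v -> Maybe (IO ()) -> IO (Weak v)
 *     mkWeak key value (Just finalizer)   -- R1 = key, R2 = value, R3 = finalizer
 *     mkWeak key value Nothing            -- R3 arrives as NULL and is replaced
 *                                         -- by stg_NO_FINALIZER_closure above
 */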
332 mkWeakForeignEnvzh_fast
338 R5 = has environment (0 or 1)
341 W_ w, payload_words, words, p;
343 W_ key, val, fptr, ptr, flag, eptr;
352 ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR, mkWeakForeignEnvzh_fast );
354 w = Hp - SIZEOF_StgWeak + WDS(1);
355 SET_HDR(w, stg_WEAK_info, W_[CCCS]);
358 words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
359 ("ptr" p) = foreign "C" allocateLocal(MyCapability() "ptr", words) [];
361 TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
362 SET_HDR(p, stg_ARR_WORDS_info, W_[CCCS]);
364 StgArrWords_words(p) = payload_words;
365 StgArrWords_payload(p,0) = fptr;
366 StgArrWords_payload(p,1) = ptr;
367 StgArrWords_payload(p,2) = eptr;
368 StgArrWords_payload(p,3) = flag;
370 // We don't care about the value here.
371 // Should StgWeak_value(w) be stg_NO_FINALIZER_closure or something else?
373 StgWeak_key(w) = key;
374 StgWeak_value(w) = val;
375 StgWeak_finalizer(w) = stg_NO_FINALIZER_closure;
376 StgWeak_cfinalizer(w) = p;
378 StgWeak_link(w) = W_[weak_ptr_list];
379 W_[weak_ptr_list] = w;
381 IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);
395 if (GET_INFO(w) == stg_DEAD_WEAK_info) {
396 RET_NP(0,stg_NO_FINALIZER_closure);
402 // A weak pointer is inherently used, so we do not need to call
403 // LDV_recordDead_FILL_SLOP_DYNAMIC():
404 // LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)w);
405 // or, LDV_recordDead():
406 // LDV_recordDead((StgClosure *)w, sizeofW(StgWeak) - sizeofW(StgProfHeader));
407 // Furthermore, when PROFILING is turned on, dead weak pointers are exactly as
408 // large as weak pointers, so there is no need to fill the slop, either.
409 // See stg_DEAD_WEAK_info in StgMiscClosures.hc.
413 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
415 SET_INFO(w,stg_DEAD_WEAK_info);
416 LDV_RECORD_CREATE(w);
418 f = StgWeak_finalizer(w);
419 arr = StgWeak_cfinalizer(w);
421 StgDeadWeak_link(w) = StgWeak_link(w);
423 if (arr != stg_NO_FINALIZER_closure) {
424 foreign "C" runCFinalizer(StgArrWords_payload(arr,0),
425 StgArrWords_payload(arr,1),
426 StgArrWords_payload(arr,2),
427 StgArrWords_payload(arr,3)) [];
430 /* return the finalizer */
431 if (f == stg_NO_FINALIZER_closure) {
432 RET_NP(0,stg_NO_FINALIZER_closure);
444 if (GET_INFO(w) == stg_WEAK_info) {
446 val = StgWeak_value(w);
454 /* -----------------------------------------------------------------------------
455 Floating point operations.
456 -------------------------------------------------------------------------- */
458 decodeFloatzuIntzh_fast
465 STK_CHK_GEN( WDS(2), NO_PTRS, decodeFloatzuIntzh_fast );
467 mp_tmp1 = Sp - WDS(1);
468 mp_tmp_w = Sp - WDS(2);
470 /* arguments: F1 = Float# */
473 /* Perform the operation */
474 foreign "C" __decodeFloat_Int(mp_tmp1 "ptr", mp_tmp_w "ptr", arg) [];
476 /* returns: (Int# (mantissa), Int# (exponent)) */
477 RET_NN(W_[mp_tmp1], W_[mp_tmp_w]);
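/* For reference, the library-level decodeFloat is built on this primop and
 * returns (mantissa, exponent) with  fromIntegral m * 2^e == x.  An
 * illustrative example, assuming IEEE single precision:
 *
 *     decodeFloat (1.0 :: Float)  ==  (8388608, -23)   -- 2^23 * 2^(-23) == 1.0
 */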
480 decodeDoublezu2Intzh_fast
489 STK_CHK_GEN( WDS(4), NO_PTRS, decodeDoublezu2Intzh_fast );
491 mp_tmp1 = Sp - WDS(1);
492 mp_tmp2 = Sp - WDS(2);
493 mp_result1 = Sp - WDS(3);
494 mp_result2 = Sp - WDS(4);
496 /* arguments: D1 = Double# */
499 /* Perform the operation */
500 foreign "C" __decodeDouble_2Int(mp_tmp1 "ptr", mp_tmp2 "ptr",
501 mp_result1 "ptr", mp_result2 "ptr",
505 (Int# (mant sign), Word# (mant high), Word# (mant low), Int# (expn)) */
506 RET_NNNN(W_[mp_tmp1], W_[mp_tmp2], W_[mp_result1], W_[mp_result2]);
509 /* -----------------------------------------------------------------------------
510 * Concurrency primitives
511 * -------------------------------------------------------------------------- */
515 /* args: R1 = closure to spark */
517 MAYBE_GC(R1_PTR, forkzh_fast);
523 ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
524 RtsFlags_GcFlags_initialStkSize(RtsFlags),
527 /* start blocked if the current thread is blocked */
528 StgTSO_flags(threadid) =
529 StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
530 (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));
532 foreign "C" scheduleThread(MyCapability() "ptr", threadid "ptr") [];
534 // context switch soon, but not immediately: we don't want every
535 // forkIO to force a context-switch.
536 Capability_context_switch(MyCapability()) = 1 :: CInt;
543 /* args: R1 = cpu, R2 = closure to spark */
545 MAYBE_GC(R2_PTR, forkOnzh_fast);
553 ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
554 RtsFlags_GcFlags_initialStkSize(RtsFlags),
557 /* start blocked if the current thread is blocked */
558 StgTSO_flags(threadid) =
559 StgTSO_flags(threadid) | (StgTSO_flags(CurrentTSO) &
560 (TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32));
562 foreign "C" scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr") [];
564 // context switch soon, but not immediately: we don't want every
565 // forkIO to force a context-switch.
566 Capability_context_switch(MyCapability()) = 1 :: CInt;
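// Haskell-level counterparts, for orientation (GHC.Conc at the time of
// writing; a sketch of the correspondence only):
//
//     forkIO   :: IO () -> IO ThreadId          -- forkzh
//     forkOnIO :: Int -> IO () -> IO ThreadId   -- forkOnzh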
573 jump stg_yield_noregs;
588 foreign "C" labelThread(R1 "ptr", R2 "ptr") [];
590 jump %ENTRY_CODE(Sp(0));
593 isCurrentThreadBoundzh_fast
597 (r) = foreign "C" isThreadBound(CurrentTSO) [];
603 /* args: R1 :: ThreadId# */
611 if (TO_W_(StgTSO_what_next(tso)) == ThreadRelocated) {
612 tso = StgTSO__link(tso);
616 what_next = TO_W_(StgTSO_what_next(tso));
617 why_blocked = TO_W_(StgTSO_why_blocked(tso));
618 // Note: these two reads are not atomic, so they might end up
619 // being inconsistent. It doesn't matter, since we
620 // only return one or the other. If we wanted to return the
621 // contents of block_info too, then we'd have to do some synchronisation.
623 if (what_next == ThreadComplete) {
624 ret = 16; // NB. magic, matches up with GHC.Conc.threadStatus
626 if (what_next == ThreadKilled) {
635 /* -----------------------------------------------------------------------------
637 * -------------------------------------------------------------------------- */
641 // Catch retry frame ------------------------------------------------------------
643 INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
644 #if defined(PROFILING)
645 W_ unused1, W_ unused2,
647 W_ unused3, P_ unused4, P_ unused5)
649 W_ r, frame, trec, outer;
652 trec = StgTSO_trec(CurrentTSO);
653 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
654 (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
656 /* Succeeded (either first branch or second branch) */
657 StgTSO_trec(CurrentTSO) = outer;
658 Sp = Sp + SIZEOF_StgCatchRetryFrame;
659 jump %ENTRY_CODE(Sp(SP_OFF));
661 /* Did not commit: re-execute */
663 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
664 StgTSO_trec(CurrentTSO) = new_trec;
665 if (StgCatchRetryFrame_running_alt_code(frame) != 0::I32) {
666 R1 = StgCatchRetryFrame_alt_code(frame);
668 R1 = StgCatchRetryFrame_first_code(frame);
675 // Atomically frame ------------------------------------------------------------
677 INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
678 #if defined(PROFILING)
679 W_ unused1, W_ unused2,
681 P_ unused3, P_ unused4)
683 W_ frame, trec, valid, next_invariant, q, outer;
686 trec = StgTSO_trec(CurrentTSO);
687 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
689 if (outer == NO_TREC) {
690 /* First time back at the atomically frame -- pick up invariants */
691 ("ptr" q) = foreign "C" stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr") [];
692 StgAtomicallyFrame_next_invariant_to_check(frame) = q;
695 /* Second/subsequent time back at the atomically frame -- abort the
696 * tx that's checking the invariant and move on to the next one */
697 StgTSO_trec(CurrentTSO) = outer;
698 q = StgAtomicallyFrame_next_invariant_to_check(frame);
699 StgInvariantCheckQueue_my_execution(q) = trec;
700 foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
701 /* Don't free trec -- it's linked from q and will be stashed in the
702 * invariant if we eventually commit. */
703 q = StgInvariantCheckQueue_next_queue_entry(q);
704 StgAtomicallyFrame_next_invariant_to_check(frame) = q;
708 q = StgAtomicallyFrame_next_invariant_to_check(frame);
710 if (q != END_INVARIANT_CHECK_QUEUE) {
711 /* We can't commit yet: another invariant to check */
712 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [];
713 StgTSO_trec(CurrentTSO) = trec;
715 next_invariant = StgInvariantCheckQueue_invariant(q);
716 R1 = StgAtomicInvariant_code(next_invariant);
721 /* We've got no more invariants to check, try to commit */
722 (valid) = foreign "C" stmCommitTransaction(MyCapability() "ptr", trec "ptr") [];
724 /* Transaction was valid: commit succeeded */
725 StgTSO_trec(CurrentTSO) = NO_TREC;
726 Sp = Sp + SIZEOF_StgAtomicallyFrame;
727 jump %ENTRY_CODE(Sp(SP_OFF));
729 /* Transaction was not valid: try again */
730 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
731 StgTSO_trec(CurrentTSO) = trec;
732 StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
733 R1 = StgAtomicallyFrame_code(frame);
739 INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
740 #if defined(PROFILING)
741 W_ unused1, W_ unused2,
743 P_ unused3, P_ unused4)
745 W_ frame, trec, valid;
749 /* The TSO is currently waiting: should we stop waiting? */
750 (valid) = foreign "C" stmReWait(MyCapability() "ptr", CurrentTSO "ptr") [];
752 /* Previous attempt is still valid: no point trying again yet */
753 jump stg_block_noregs;
755 /* Previous attempt is no longer valid: try again */
756 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
757 StgTSO_trec(CurrentTSO) = trec;
758 StgHeader_info(frame) = stg_atomically_frame_info;
759 R1 = StgAtomicallyFrame_code(frame);
764 // STM catch frame --------------------------------------------------------------
768 /* Catch frames are very similar to update frames, but when entering
769 * one we just pop the frame off the stack and perform the correct
770 * kind of return to the activation record underneath us on the stack.
773 INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
774 #if defined(PROFILING)
775 W_ unused1, W_ unused2,
777 P_ unused3, P_ unused4)
779 W_ r, frame, trec, outer;
781 trec = StgTSO_trec(CurrentTSO);
782 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
783 (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
785 /* Commit succeeded */
786 StgTSO_trec(CurrentTSO) = outer;
787 Sp = Sp + SIZEOF_StgCatchSTMFrame;
792 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
793 StgTSO_trec(CurrentTSO) = new_trec;
794 R1 = StgCatchSTMFrame_code(frame);
800 // Primop definition ------------------------------------------------------------
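// For orientation, the STM primops defined below sit underneath these
// library-level operations (GHC.Conc / Control.Concurrent.STM; a sketch of
// the correspondence, with types elided where they have changed over time):
//
//     atomically :: STM a -> IO a                  -- atomicallyzh
//     catchSTM   -- exception handler around STM   -- catchSTMzh
//     orElse     :: STM a -> STM a -> STM a        -- catchRetryzh
//     retry      :: STM a                          -- retryzh
//     check      -- register a data invariant      -- checkzh
//     newTVar / readTVar / writeTVar               -- newTVarzh etc.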
808 // stmStartTransaction may allocate
809 MAYBE_GC (R1_PTR, atomicallyzh_fast);
811 /* Args: R1 = m :: STM a */
812 STK_CHK_GEN(SIZEOF_StgAtomicallyFrame + WDS(1), R1_PTR, atomicallyzh_fast);
814 old_trec = StgTSO_trec(CurrentTSO);
816 /* Nested transactions are not allowed; raise an exception */
817 if (old_trec != NO_TREC) {
818 R1 = base_ControlziExceptionziBase_nestedAtomically_closure;
822 /* Set up the atomically frame */
823 Sp = Sp - SIZEOF_StgAtomicallyFrame;
826 SET_HDR(frame,stg_atomically_frame_info, W_[CCCS]);
827 StgAtomicallyFrame_code(frame) = R1;
828 StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
830 /* Start the memory transaction */
831 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", old_trec "ptr") [R1];
832 StgTSO_trec(CurrentTSO) = new_trec;
834 /* Apply R1 to the realworld token */
843 /* Args: R1 :: STM a */
844 /* Args: R2 :: Exception -> STM a */
845 STK_CHK_GEN(SIZEOF_StgCatchSTMFrame + WDS(1), R1_PTR & R2_PTR, catchSTMzh_fast);
847 /* Set up the catch frame */
848 Sp = Sp - SIZEOF_StgCatchSTMFrame;
851 SET_HDR(frame, stg_catch_stm_frame_info, W_[CCCS]);
852 StgCatchSTMFrame_handler(frame) = R2;
853 StgCatchSTMFrame_code(frame) = R1;
855 /* Start a nested transaction to run the body of the try block in */
858 cur_trec = StgTSO_trec(CurrentTSO);
859 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", cur_trec "ptr");
860 StgTSO_trec(CurrentTSO) = new_trec;
862 /* Apply R1 to the realworld token */
873 // stmStartTransaction may allocate
874 MAYBE_GC (R1_PTR & R2_PTR, catchRetryzh_fast);
876 /* Args: R1 :: STM a */
877 /* Args: R2 :: STM a */
878 STK_CHK_GEN(SIZEOF_StgCatchRetryFrame + WDS(1), R1_PTR & R2_PTR, catchRetryzh_fast);
880 /* Start a nested transaction within which to run the first code */
881 trec = StgTSO_trec(CurrentTSO);
882 ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [R1,R2];
883 StgTSO_trec(CurrentTSO) = new_trec;
885 /* Set up the catch-retry frame */
886 Sp = Sp - SIZEOF_StgCatchRetryFrame;
889 SET_HDR(frame, stg_catch_retry_frame_info, W_[CCCS]);
890 StgCatchRetryFrame_running_alt_code(frame) = 0 :: CInt; // false;
891 StgCatchRetryFrame_first_code(frame) = R1;
892 StgCatchRetryFrame_alt_code(frame) = R2;
894 /* Apply R1 to the realworld token */
907 MAYBE_GC (NO_PTRS, retryzh_fast); // STM operations may allocate
909 // Find the enclosing ATOMICALLY_FRAME or CATCH_RETRY_FRAME
911 StgTSO_sp(CurrentTSO) = Sp;
912 (frame_type) = foreign "C" findRetryFrameHelper(CurrentTSO "ptr") [];
913 Sp = StgTSO_sp(CurrentTSO);
915 trec = StgTSO_trec(CurrentTSO);
916 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
918 if (frame_type == CATCH_RETRY_FRAME) {
919 // The retry reaches a CATCH_RETRY_FRAME before the atomic frame
920 ASSERT(outer != NO_TREC);
921 // Abort the transaction attempting the current branch
922 foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
923 foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
924 if (StgCatchRetryFrame_running_alt_code(frame) == 0::I32) {
925 // Retry in the first branch: try the alternative
926 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
927 StgTSO_trec(CurrentTSO) = trec;
928 StgCatchRetryFrame_running_alt_code(frame) = 1 :: CInt; // true;
929 R1 = StgCatchRetryFrame_alt_code(frame);
932 // Retry in the alternative code: propagate the retry
933 StgTSO_trec(CurrentTSO) = outer;
934 Sp = Sp + SIZEOF_StgCatchRetryFrame;
935 goto retry_pop_stack;
939 // We've reached the ATOMICALLY_FRAME: attempt to wait
940 ASSERT(frame_type == ATOMICALLY_FRAME);
941 if (outer != NO_TREC) {
942 // We called retry while checking invariants, so abort the current
943 // invariant check (merging its TVar accesses into the parent's read
944 // set so we'll wait on them)
945 foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
946 foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
948 StgTSO_trec(CurrentTSO) = trec;
949 ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
951 ASSERT(outer == NO_TREC);
953 (r) = foreign "C" stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr") [];
955 // Transaction was valid: stmWait put us on the TVars' queues, we now block
956 StgHeader_info(frame) = stg_atomically_waiting_frame_info;
958 // Fix up the stack in the unregisterised case: the return convention is different.
959 R3 = trec; // passing to stmWaitUnblock()
960 jump stg_block_stmwait;
962 // Transaction was not valid: retry immediately
963 ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
964 StgTSO_trec(CurrentTSO) = trec;
965 R1 = StgAtomicallyFrame_code(frame);
976 /* Args: R1 = invariant closure */
977 MAYBE_GC (R1_PTR, checkzh_fast);
979 trec = StgTSO_trec(CurrentTSO);
981 foreign "C" stmAddInvariantToCheck(MyCapability() "ptr",
985 jump %ENTRY_CODE(Sp(0));
994 /* Args: R1 = initialisation value */
996 MAYBE_GC (R1_PTR, newTVarzh_fast);
998 ("ptr" tv) = foreign "C" stmNewTVar(MyCapability() "ptr", new_value "ptr") [];
1009 /* Args: R1 = TVar closure */
1011 MAYBE_GC (R1_PTR, readTVarzh_fast); // Call to stmReadTVar may allocate
1012 trec = StgTSO_trec(CurrentTSO);
1014 ("ptr" result) = foreign "C" stmReadTVar(MyCapability() "ptr", trec "ptr", tvar "ptr") [];
1024 result = StgTVar_current_value(R1);
1025 if (%INFO_PTR(result) == stg_TREC_HEADER_info) {
1037 /* Args: R1 = TVar closure */
1038 /* R2 = New value */
1040 MAYBE_GC (R1_PTR & R2_PTR, writeTVarzh_fast); // Call to stmWriteTVar may allocate
1041 trec = StgTSO_trec(CurrentTSO);
1044 foreign "C" stmWriteTVar(MyCapability() "ptr", trec "ptr", tvar "ptr", new_value "ptr") [];
1046 jump %ENTRY_CODE(Sp(0));
1050 /* -----------------------------------------------------------------------------
1053 * take & putMVar work as follows. Firstly, an important invariant:
1055 * If the MVar is full, then the blocking queue contains only
1056 * threads blocked on putMVar, and if the MVar is empty then the
1057 * blocking queue contains only threads blocked on takeMVar.
1060 * MVar empty : then add ourselves to the blocking queue
1061 * MVar full : remove the value from the MVar, and
1062 * blocking queue empty : return
1063 * blocking queue non-empty : perform the first blocked putMVar
1064 * from the queue, and wake up the
1065 * thread (MVar is now full again)
1067 * putMVar is just the dual of the above algorithm.
1069 * How do we "perform a putMVar"? Well, we have to fiddle around with
1070 * the stack of the thread waiting to do the putMVar. See
1071 * stg_block_putmvar and stg_block_takemvar in HeapStackCheck.c for
1072 * the stack layout, and the PerformPut and PerformTake macros below.
1074 * It is important that a blocked take or put is woken up with the
1075 * take/put already performed, because otherwise there would be a
1076 * small window of vulnerability where the thread could receive an
1077 * exception and never perform its take or put, and we'd end up with a
1080 * -------------------------------------------------------------------------- */
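/* For orientation, the primops below implement Control.Concurrent.MVar
 * (a sketch of the correspondence only):
 *
 *     newEmptyMVar :: IO (MVar a)              -- newMVarzh
 *     takeMVar     :: MVar a -> IO a           -- takeMVarzh  (blocks when empty)
 *     putMVar      :: MVar a -> a -> IO ()     -- putMVarzh   (blocks when full)
 *     tryTakeMVar  :: MVar a -> IO (Maybe a)   -- tryTakeMVarzh, never blocks
 *     tryPutMVar   :: MVar a -> a -> IO Bool   -- tryPutMVarzh, never blocks
 */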
1084 /* args: R1 = MVar closure */
1086 if (StgMVar_value(R1) == stg_END_TSO_QUEUE_closure) {
1098 ALLOC_PRIM ( SIZEOF_StgMVar, NO_PTRS, newMVarzh_fast );
1100 mvar = Hp - SIZEOF_StgMVar + WDS(1);
1101 SET_HDR(mvar,stg_MVAR_DIRTY_info,W_[CCCS]);
1102 // MVARs start dirty: generation 0 has no mutable list
1103 StgMVar_head(mvar) = stg_END_TSO_QUEUE_closure;
1104 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1105 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1110 #define PerformTake(tso, value) \
1111 W_[StgTSO_sp(tso) + WDS(1)] = value; \
1112 W_[StgTSO_sp(tso) + WDS(0)] = stg_gc_unpt_r1_info;
1114 #define PerformPut(tso,lval) \
1115 StgTSO_sp(tso) = StgTSO_sp(tso) + WDS(3); \
1116 lval = W_[StgTSO_sp(tso) - WDS(1)];
1120 W_ mvar, val, info, tso;
1122 /* args: R1 = MVar closure */
1125 #if defined(THREADED_RTS)
1126 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
1128 info = GET_INFO(mvar);
1131 if (info == stg_MVAR_CLEAN_info) {
1132 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr") [];
1135 /* If the MVar is empty, put ourselves on its blocking queue,
1136 * and wait until we're woken up.
1138 if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
1139 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1140 StgMVar_head(mvar) = CurrentTSO;
1142 foreign "C" setTSOLink(MyCapability() "ptr",
1143 StgMVar_tail(mvar) "ptr",
1146 StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
1147 StgTSO_block_info(CurrentTSO) = mvar;
1148 // write barrier for throwTo(), which looks at block_info
1149 // if why_blocked==BlockedOnMVar.
1150 prim %write_barrier() [];
1151 StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
1152 StgMVar_tail(mvar) = CurrentTSO;
1155 jump stg_block_takemvar;
1158 /* we got the value... */
1159 val = StgMVar_value(mvar);
1161 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure)
1163 /* There are putMVar(s) waiting...
1164 * wake up the first thread on the queue
1166 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1168 /* actually perform the putMVar for the thread that we just woke up */
1169 tso = StgMVar_head(mvar);
1170 PerformPut(tso,StgMVar_value(mvar));
1172 if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
1173 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1176 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1177 StgMVar_head(mvar) "ptr", 1) [];
1178 StgMVar_head(mvar) = tso;
1180 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1181 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1184 #if defined(THREADED_RTS)
1185 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1187 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1193 /* No further putMVars, MVar is now empty */
1194 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1196 #if defined(THREADED_RTS)
1197 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1199 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1209 W_ mvar, val, info, tso;
1211 /* args: R1 = MVar closure */
1215 #if defined(THREADED_RTS)
1216 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
1218 info = GET_INFO(mvar);
1221 if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
1222 #if defined(THREADED_RTS)
1223 unlockClosure(mvar, info);
1225 /* HACK: we need a pointer to pass back,
1226 * so we abuse NO_FINALIZER_closure
1228 RET_NP(0, stg_NO_FINALIZER_closure);
1231 if (info == stg_MVAR_CLEAN_info) {
1232 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
1235 /* we got the value... */
1236 val = StgMVar_value(mvar);
1238 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
1240 /* There are putMVar(s) waiting...
1241 * wake up the first thread on the queue
1243 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1245 /* actually perform the putMVar for the thread that we just woke up */
1246 tso = StgMVar_head(mvar);
1247 PerformPut(tso,StgMVar_value(mvar));
1248 if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
1249 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1252 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1253 StgMVar_head(mvar) "ptr", 1) [];
1254 StgMVar_head(mvar) = tso;
1256 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1257 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1259 #if defined(THREADED_RTS)
1260 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1262 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1267 /* No further putMVars, MVar is now empty */
1268 StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
1269 #if defined(THREADED_RTS)
1270 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1272 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1282 W_ mvar, val, info, tso;
1284 /* args: R1 = MVar, R2 = value */
1288 #if defined(THREADED_RTS)
1289 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
1291 info = GET_INFO(mvar);
1294 if (info == stg_MVAR_CLEAN_info) {
1295 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
1298 if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
1299 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1300 StgMVar_head(mvar) = CurrentTSO;
1302 foreign "C" setTSOLink(MyCapability() "ptr",
1303 StgMVar_tail(mvar) "ptr",
1306 StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
1307 StgTSO_block_info(CurrentTSO) = mvar;
1308 // write barrier for throwTo(), which looks at block_info
1309 // if why_blocked==BlockedOnMVar.
1310 prim %write_barrier() [];
1311 StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
1312 StgMVar_tail(mvar) = CurrentTSO;
1316 jump stg_block_putmvar;
1319 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
1321 /* There are takeMVar(s) waiting: wake up the first one
1323 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1325 /* actually perform the takeMVar */
1326 tso = StgMVar_head(mvar);
1327 PerformTake(tso, val);
1328 if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
1329 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1332 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1333 StgMVar_head(mvar) "ptr", 1) [];
1334 StgMVar_head(mvar) = tso;
1336 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1337 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1340 #if defined(THREADED_RTS)
1341 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1343 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1345 jump %ENTRY_CODE(Sp(0));
1349 /* No further takes, the MVar is now full. */
1350 StgMVar_value(mvar) = val;
1352 #if defined(THREADED_RTS)
1353 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1355 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1357 jump %ENTRY_CODE(Sp(0));
1360 /* ToDo: yield afterward for better communication performance? */
1368 /* args: R1 = MVar, R2 = value */
1371 #if defined(THREADED_RTS)
1372 ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [R2];
1374 info = GET_INFO(mvar);
1377 if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
1378 #if defined(THREADED_RTS)
1379 unlockClosure(mvar, info);
1384 if (info == stg_MVAR_CLEAN_info) {
1385 foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
1388 if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
1390 /* There are takeMVar(s) waiting: wake up the first one
1392 ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
1394 /* actually perform the takeMVar */
1395 tso = StgMVar_head(mvar);
1396 PerformTake(tso, R2);
1397 if ((TO_W_(StgTSO_flags(tso)) & TSO_DIRTY) == 0) {
1398 foreign "C" dirty_TSO(MyCapability() "ptr", tso "ptr") [];
1401 ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
1402 StgMVar_head(mvar) "ptr", 1) [];
1403 StgMVar_head(mvar) = tso;
1405 if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
1406 StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
1409 #if defined(THREADED_RTS)
1410 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1412 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1417 /* No further takes, the MVar is now full. */
1418 StgMVar_value(mvar) = R2;
1420 #if defined(THREADED_RTS)
1421 unlockClosure(mvar, stg_MVAR_DIRTY_info);
1423 SET_INFO(mvar,stg_MVAR_DIRTY_info);
1428 /* ToDo: yield afterward for better communication performance? */
1432 /* -----------------------------------------------------------------------------
1433 Stable pointer primitives
1434 ------------------------------------------------------------------------- */
1436 makeStableNamezh_fast
1440 ALLOC_PRIM( SIZEOF_StgStableName, R1_PTR, makeStableNamezh_fast );
1442 (index) = foreign "C" lookupStableName(R1 "ptr") [];
1444 /* Is there already a StableName for this heap object?
1445 * stable_ptr_table is a pointer to an array of snEntry structs.
1447 if ( snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) == NULL ) {
1448 sn_obj = Hp - SIZEOF_StgStableName + WDS(1);
1449 SET_HDR(sn_obj, stg_STABLE_NAME_info, W_[CCCS]);
1450 StgStableName_sn(sn_obj) = index;
1451 snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry) = sn_obj;
1453 sn_obj = snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry);
1460 makeStablePtrzh_fast
1464 MAYBE_GC(R1_PTR, makeStablePtrzh_fast);
1465 ("ptr" sp) = foreign "C" getStablePtr(R1 "ptr") [];
1469 deRefStablePtrzh_fast
1471 /* Args: R1 = the stable ptr */
1474 r = snEntry_addr(W_[stable_ptr_table] + sp*SIZEOF_snEntry);
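/* Haskell-level view, for orientation (Foreign.StablePtr and
 * System.Mem.StableName are the real wrappers; this is only a sketch):
 *
 *     newStablePtr   :: a -> IO (StablePtr a)    -- makeStablePtrzh
 *     deRefStablePtr :: StablePtr a -> IO a      -- deRefStablePtrzh
 *     makeStableName :: a -> IO (StableName a)   -- makeStableNamezh
 *
 * Two StableNames made from the same heap object compare equal, which is
 * what the sn_obj entry in stable_ptr_table above guarantees.
 */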
1478 /* -----------------------------------------------------------------------------
1479 Bytecode object primitives
1480 ------------------------------------------------------------------------- */
1490 W_ bco, bitmap_arr, bytes, words;
1494 words = BYTES_TO_WDS(SIZEOF_StgBCO) + StgArrWords_words(bitmap_arr);
1497 ALLOC_PRIM( bytes, R1_PTR&R2_PTR&R3_PTR&R5_PTR, newBCOzh_fast );
1499 bco = Hp - bytes + WDS(1);
1500 SET_HDR(bco, stg_BCO_info, W_[CCCS]);
1502 StgBCO_instrs(bco) = R1;
1503 StgBCO_literals(bco) = R2;
1504 StgBCO_ptrs(bco) = R3;
1505 StgBCO_arity(bco) = HALF_W_(R4);
1506 StgBCO_size(bco) = HALF_W_(words);
1508 // Copy the arity/bitmap info into the BCO
1512 if (i < StgArrWords_words(bitmap_arr)) {
1513 StgBCO_bitmap(bco,i) = StgArrWords_payload(bitmap_arr,i);
1524 // R1 = the BCO# for the AP
1528 // This function is *only* used to wrap zero-arity BCOs in an
1529 // updatable wrapper (see ByteCodeLink.lhs). An AP thunk is always
1530 // saturated and always points directly to a FUN or BCO.
1531 ASSERT(%INFO_TYPE(%GET_STD_INFO(R1)) == HALF_W_(BCO) &&
1532 StgBCO_arity(R1) == HALF_W_(0));
1534 HP_CHK_GEN_TICKY(SIZEOF_StgAP, R1_PTR, mkApUpd0zh_fast);
1535 TICK_ALLOC_UP_THK(0, 0);
1536 CCCS_ALLOC(SIZEOF_StgAP);
1538 ap = Hp - SIZEOF_StgAP + WDS(1);
1539 SET_HDR(ap, stg_AP_info, W_[CCCS]);
1541 StgAP_n_args(ap) = HALF_W_(0);
1547 unpackClosurezh_fast
1549 /* args: R1 = closure to analyze */
1550 // TODO: Consider the absence of ptrs or nonptrs as a special case ?
1552 W_ info, ptrs, nptrs, p, ptrs_arr, nptrs_arr;
1553 info = %GET_STD_INFO(UNTAG(R1));
1555 // Some closures have non-standard layout, so we omit those here.
1557 type = TO_W_(%INFO_TYPE(info));
1558 switch [0 .. N_CLOSURE_TYPES] type {
1559 case THUNK_SELECTOR : {
1564 case THUNK, THUNK_1_0, THUNK_0_1, THUNK_2_0, THUNK_1_1,
1565 THUNK_0_2, THUNK_STATIC, AP, PAP, AP_STACK, BCO : {
1571 ptrs = TO_W_(%INFO_PTRS(info));
1572 nptrs = TO_W_(%INFO_NPTRS(info));
1577 W_ ptrs_arr_sz, nptrs_arr_sz;
1578 nptrs_arr_sz = SIZEOF_StgArrWords + WDS(nptrs);
1579 ptrs_arr_sz = SIZEOF_StgMutArrPtrs + WDS(ptrs);
1581 ALLOC_PRIM (ptrs_arr_sz + nptrs_arr_sz, R1_PTR, unpackClosurezh_fast);
1586 ptrs_arr = Hp - nptrs_arr_sz - ptrs_arr_sz + WDS(1);
1587 nptrs_arr = Hp - nptrs_arr_sz + WDS(1);
1589 SET_HDR(ptrs_arr, stg_MUT_ARR_PTRS_FROZEN_info, W_[CCCS]);
1590 StgMutArrPtrs_ptrs(ptrs_arr) = ptrs;
1594 W_[ptrs_arr + SIZEOF_StgMutArrPtrs + WDS(p)] = StgClosure_payload(clos,p);
1599 SET_HDR(nptrs_arr, stg_ARR_WORDS_info, W_[CCCS]);
1600 StgArrWords_words(nptrs_arr) = nptrs;
1604 W_[BYTE_ARR_CTS(nptrs_arr) + WDS(p)] = StgClosure_payload(clos, p+ptrs);
1608 RET_NPP(info, ptrs_arr, nptrs_arr);
1611 /* -----------------------------------------------------------------------------
1612 Thread I/O blocking primitives
1613 -------------------------------------------------------------------------- */
1615 /* Add a thread to the end of the blocked queue. (C-- version of the C
1616 * macro in Schedule.h).
1618 #define APPEND_TO_BLOCKED_QUEUE(tso) \
1619 ASSERT(StgTSO__link(tso) == END_TSO_QUEUE); \
1620 if (W_[blocked_queue_hd] == END_TSO_QUEUE) { \
1621 W_[blocked_queue_hd] = tso; \
1623 foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl] "ptr", tso) []; \
1625 W_[blocked_queue_tl] = tso;
1631 foreign "C" barf("waitRead# on threaded RTS") never returns;
1634 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
1635 StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
1636 StgTSO_block_info(CurrentTSO) = R1;
1637 // No locking - we're not going to use this interface in the
1638 // threaded RTS anyway.
1639 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
1640 jump stg_block_noregs;
1648 foreign "C" barf("waitWrite# on threaded RTS") never returns;
1651 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
1652 StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
1653 StgTSO_block_info(CurrentTSO) = R1;
1654 // No locking - we're not going to use this interface in the
1655 // threaded RTS anyway.
1656 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
1657 jump stg_block_noregs;
1662 STRING(stg_delayzh_malloc_str, "delayzh_fast")
1665 #ifdef mingw32_HOST_OS
1673 foreign "C" barf("delay# on threaded RTS") never returns;
1676 /* args: R1 (microsecond delay amount) */
1677 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
1678 StgTSO_why_blocked(CurrentTSO) = BlockedOnDelay::I16;
1680 #ifdef mingw32_HOST_OS
1682 /* could probably allocate this on the heap instead */
1683 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
1684 stg_delayzh_malloc_str);
1685 (reqID) = foreign "C" addDelayRequest(R1);
1686 StgAsyncIOResult_reqID(ares) = reqID;
1687 StgAsyncIOResult_len(ares) = 0;
1688 StgAsyncIOResult_errCode(ares) = 0;
1689 StgTSO_block_info(CurrentTSO) = ares;
1691 /* Having all async-blocked threads reside on the blocked_queue
1692 * simplifies matters, so change the status to OnDoProc and put the
1693 * delayed thread on the blocked_queue.
1695 StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
1696 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
1697 jump stg_block_async_void;
1703 (time) = foreign "C" getourtimeofday() [R1];
1704 divisor = TO_W_(RtsFlags_MiscFlags_tickInterval(RtsFlags));
1708 divisor = divisor * 1000;
1709 target = ((R1 + divisor - 1) / divisor) /* divide rounding up */
1710 + time + 1; /* Add 1 as getourtimeofday rounds down */
1711 StgTSO_block_info(CurrentTSO) = target;
1713 /* Insert the new thread in the sleeping queue. */
1715 t = W_[sleeping_queue];
1717 if (t != END_TSO_QUEUE && StgTSO_block_info(t) < target) {
1719 t = StgTSO__link(t);
1723 StgTSO__link(CurrentTSO) = t;
1725 W_[sleeping_queue] = CurrentTSO;
1727 foreign "C" setTSOLink(MyCapability() "ptr", prev "ptr", CurrentTSO) [];
1729 jump stg_block_noregs;
1731 #endif /* !THREADED_RTS */
1735 #ifdef mingw32_HOST_OS
1736 STRING(stg_asyncReadzh_malloc_str, "asyncReadzh_fast")
1743 foreign "C" barf("asyncRead# on threaded RTS") never returns;
1746 /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
1747 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
1748 StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
1750 /* could probably allocate this on the heap instead */
1751 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
1752 stg_asyncReadzh_malloc_str)
1754 (reqID) = foreign "C" addIORequest(R1, 0/*FALSE*/,R2,R3,R4 "ptr") [];
1755 StgAsyncIOResult_reqID(ares) = reqID;
1756 StgAsyncIOResult_len(ares) = 0;
1757 StgAsyncIOResult_errCode(ares) = 0;
1758 StgTSO_block_info(CurrentTSO) = ares;
1759 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
1760 jump stg_block_async;
1764 STRING(stg_asyncWritezh_malloc_str, "asyncWritezh_fast")
1771 foreign "C" barf("asyncWrite# on threaded RTS") never returns;
1774 /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
1775 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
1776 StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
1778 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
1779 stg_asyncWritezh_malloc_str)
1781 (reqID) = foreign "C" addIORequest(R1, 1/*TRUE*/,R2,R3,R4 "ptr") [];
1783 StgAsyncIOResult_reqID(ares) = reqID;
1784 StgAsyncIOResult_len(ares) = 0;
1785 StgAsyncIOResult_errCode(ares) = 0;
1786 StgTSO_block_info(CurrentTSO) = ares;
1787 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
1788 jump stg_block_async;
1792 STRING(stg_asyncDoProczh_malloc_str, "asyncDoProczh_fast")
1799 foreign "C" barf("asyncDoProc# on threaded RTS") never returns;
1802 /* args: R1 = proc, R2 = param */
1803 ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
1804 StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
1806 /* could probably allocate this on the heap instead */
1807 ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
1808 stg_asyncDoProczh_malloc_str)
1810 (reqID) = foreign "C" addDoProcRequest(R1 "ptr",R2 "ptr") [];
1811 StgAsyncIOResult_reqID(ares) = reqID;
1812 StgAsyncIOResult_len(ares) = 0;
1813 StgAsyncIOResult_errCode(ares) = 0;
1814 StgTSO_block_info(CurrentTSO) = ares;
1815 APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
1816 jump stg_block_async;
1821 // noDuplicate# tries to ensure that none of the thunks under
1822 // evaluation by the current thread are also under evaluation by
1823 // another thread. It relies on *both* threads doing noDuplicate#;
1824 // the second one will get blocked if they are duplicating some work.
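// For context (a sketch of the library-level use, not code from this
// file): GHC.IO defines, roughly,
//
//     unsafePerformIO m = unsafeDupablePerformIO (noDuplicate >> m)
//
// so noDuplicate# is what makes the non-"Dupable" variant safe when two
// threads race to evaluate the same thunk.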
1827 SAVE_THREAD_STATE();
1828 ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
1829 foreign "C" threadPaused (MyCapability() "ptr", CurrentTSO "ptr") [];
1831 if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
1832 jump stg_threadFinished;
1834 LOAD_THREAD_STATE();
1835 ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
1836 jump %ENTRY_CODE(Sp(0));
1840 getApStackValzh_fast
1842 W_ ap_stack, offset, val, ok;
1844 /* args: R1 = AP_STACK, R2 = offset */
1848 if (%INFO_PTR(ap_stack) == stg_AP_STACK_info) {
1850 val = StgAP_STACK_payload(ap_stack,offset);
1858 /* -----------------------------------------------------------------------------
1860 -------------------------------------------------------------------------- */
1862 // Write the cost center stack of the first argument on stderr; return
1863 // the second. Possibly only makes sense for already evaluated
1870 ccs = StgHeader_ccs(UNTAG(R1));
1871 foreign "C" fprintCCS_stderr(ccs "ptr") [R2];
1882 #ifndef THREADED_RTS
1883 RET_NP(0,ghczmprim_GHCziBool_False_closure);
1885 (spark) = foreign "C" findSpark(MyCapability());
1889 RET_NP(0,ghczmprim_GHCziBool_False_closure);