* at all, it won't yield. Hopefully this won't be a problem in practice.
*/
+/* Common prologue for handing control back to the scheduler: set the
+ * TSO's what_next field, stash the reason for stopping in the register
+ * table's rRet field, and pass BaseReg back in R1.
+ * NB: R1 must be assigned *last*, because the `why' argument may itself
+ * arrive in R1 (see the StackOverflow case below, which does
+ * PRE_RETURN(R1,ThreadRunGHC)).
+ */
+#define PRE_RETURN(why,what_next) \
+ StgTSO_what_next(CurrentTSO) = what_next::I16; \
+ StgRegTable_rRet(BaseReg) = why; \
+ R1 = BaseReg;
+
/* Remember that the return address is *removed* when returning to a
* ThreadRunGHC thread.
*/
R1 = StackOverflow; \
} \
sched: \
- StgTSO_what_next(CurrentTSO) = ThreadRunGHC::I16; \
+ PRE_RETURN(R1,ThreadRunGHC); \
jump stg_returnToSched;
-#define RETURN_TO_SCHED(why,what_next) \
-  StgTSO_what_next(CurrentTSO) = what_next::I16; \
-  R1 = why; \
+/* The *_GENERIC macros below return to the scheduler with the
+ * appropriate reason (HeapOverflow / ThreadBlocked / ThreadYielding)
+ * recorded via PRE_RETURN, then jump to the common stg_returnToSched
+ * entry point.
+ */
+#define HP_GENERIC \
+   PRE_RETURN(HeapOverflow, ThreadRunGHC) \
 jump stg_returnToSched;
-#define RETURN_TO_SCHED_BUT_FIRST(why,what_next,cont) \
-  StgTSO_what_next(CurrentTSO) = what_next::I16; \
-  R1 = why; \
-  R2 = cont; \
+#define BLOCK_GENERIC \
+   PRE_RETURN(ThreadBlocked, ThreadRunGHC) \
+  jump stg_returnToSched;
+
+#define YIELD_GENERIC \
+  PRE_RETURN(ThreadYielding, ThreadRunGHC) \
+  jump stg_returnToSched;
+
+/* Like BLOCK_GENERIC, but additionally passes a code fragment in R2
+ * which stg_returnToSchedButFirst runs just before we return to the
+ * scheduler (see the stg_block_*_finally fragments below).
+ */
+#define BLOCK_BUT_FIRST(c) \
+  PRE_RETURN(ThreadBlocked, ThreadRunGHC) \
+  R2 = c; \
 jump stg_returnToSchedButFirst;
-#define HP_GENERIC RETURN_TO_SCHED(HeapOverflow, ThreadRunGHC)
-#define YIELD_GENERIC RETURN_TO_SCHED(ThreadYielding, ThreadRunGHC)
-#define YIELD_TO_INTERPRETER RETURN_TO_SCHED(ThreadYielding, ThreadInterpret)
-#define BLOCK_GENERIC RETURN_TO_SCHED(ThreadBlocked, ThreadRunGHC)
-#define BLOCK_BUT_FIRST(c) RETURN_TO_SCHED_BUT_FIRST(ThreadBlocked, ThreadRunGHC, c)
+/* Yield with what_next = ThreadInterpret; note this jumps to the
+ * NotPaused variant of the return entry, unlike the macros above —
+ * presumably skipping the thread-paused bookkeeping done on the
+ * stg_returnToSched path.  TODO(review): confirm against the
+ * definition of stg_returnToSchedNotPaused.
+ */
+#define YIELD_TO_INTERPRETER \
+  PRE_RETURN(ThreadYielding, ThreadInterpret) \
+  jump stg_returnToSchedNotPaused;
/* -----------------------------------------------------------------------------
Heap checks in thunks/functions.
// code fragment executed just before we return to the scheduler
stg_block_takemvar_finally
{
-#ifdef SMP
+#ifdef THREADED_RTS
foreign "C" unlockClosure(R3 "ptr", stg_EMPTY_MVAR_info);
#endif
jump StgReturn;
// code fragment executed just before we return to the scheduler
stg_block_putmvar_finally
{
-#ifdef SMP
+#ifdef THREADED_RTS
foreign "C" unlockClosure(R3 "ptr", stg_FULL_MVAR_info);
#endif
jump StgReturn;
BLOCK_BUT_FIRST(stg_block_putmvar_finally);
}
+// code fragment executed just before we return to the scheduler
+// (installed as the R2 continuation by BLOCK_BUT_FIRST in
+// stg_block_blackhole below, and run via stg_returnToSchedButFirst)
+stg_block_blackhole_finally
+{
+#if defined(THREADED_RTS)
+    // The last thing we do is release sched_lock, which is
+    // preventing other threads from accessing blackhole_queue and
+    // picking up this thread before we are finished with it.
+    foreign "C" RELEASE_LOCK(sched_mutex "ptr");
+#endif
+    jump StgReturn;
+}
+
+// Block the current thread on a black hole.  We push an stg_enter
+// frame saving R1 (presumably the black-hole closure — confirm with
+// callers) so that the closure is entered again when the thread is
+// resumed, then return to the scheduler as ThreadBlocked, running
+// stg_block_blackhole_finally on the way out.
+stg_block_blackhole
+{
+    Sp_adj(-2);
+    Sp(1) = R1;
+    Sp(0) = stg_enter_info;
+    BLOCK_BUT_FIRST(stg_block_blackhole_finally);
+}
+
#ifdef mingw32_HOST_OS
INFO_TABLE_RET( stg_block_async, 0/*framesize*/, 0/*bitmap*/, RET_SMALL )
{
}
#endif
+
+/* -----------------------------------------------------------------------------
+ STM-specific waiting
+ -------------------------------------------------------------------------- */
+
+// code fragment executed just before we return to the scheduler:
+// releases the STM wait lock via stmWaitUnlock.  R3 is passed through
+// from the blocking site — NOTE(review): confirm it holds the pointer
+// that stmWaitUnlock expects (set up by the caller of
+// stg_block_stmwait, outside this file's view).
+stg_block_stmwait_finally
+{
+  foreign "C" stmWaitUnlock(MyCapability() "ptr", R3 "ptr");
+  jump StgReturn;
+}
+
+// Block the current thread waiting on an STM condition;
+// stg_block_stmwait_finally runs just before we hand control back to
+// the scheduler, releasing the STM wait lock.
+stg_block_stmwait
+{
+  BLOCK_BUT_FIRST(stg_block_stmwait_finally);
+}