* at all, it won't yield. Hopefully this won't be a problem in practice.
*/
+#define PRE_RETURN(why,what_next) \
+ StgTSO_what_next(CurrentTSO) = what_next::I16; \
+ StgRegTable_rRet(BaseReg) = why; \
+ R1 = BaseReg;
+
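The interesting part of this change: the reason for returning to the
scheduler now travels in the register table (StgRegTable_rRet) rather
than in R1, and R1 instead carries BaseReg back, so that when StgRun
returns, the scheduler can recover its Capability from the register
table and then read the blocking reason out of rRet.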
/* Remember that the return address is *removed* when returning to a
* ThreadRunGHC thread.
*/
R1 = StackOverflow; \
} \
sched: \
- StgTSO_what_next(CurrentTSO) = ThreadRunGHC::I16; \
+ PRE_RETURN(R1,ThreadRunGHC); \
jump stg_returnToSched;
-#define PRE_RETURN(why,what_next) \
- StgTSO_what_next(CurrentTSO) = what_next::I16; \
- R1 = why;
-
#define HP_GENERIC \
PRE_RETURN(HeapOverflow, ThreadRunGHC) \
jump stg_returnToSched;
// code fragment executed just before we return to the scheduler
stg_block_takemvar_finally
{
-#ifdef SMP
+#ifdef THREADED_RTS
foreign "C" unlockClosure(R3 "ptr", stg_EMPTY_MVAR_info);
#endif
jump StgReturn;
// code fragment executed just before we return to the scheduler
stg_block_putmvar_finally
{
-#ifdef SMP
+#ifdef THREADED_RTS
foreign "C" unlockClosure(R3 "ptr", stg_FULL_MVAR_info);
#endif
jump StgReturn;
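
For orientation: these _finally fragments run on the blocking path of
the MVar primops; in the threaded RTS they unlock the MVar closure that
the primop locked before the blocked thread is handed back to the
scheduler. An illustrative Haskell program (not from the patch) that
exercises the takeMVar blocking path:

    import Control.Concurrent

    main :: IO ()
    main = do
      v <- newEmptyMVar
      _ <- forkIO (putMVar v (42 :: Int))
      x <- takeMVar v   -- blocks via stg_block_takemvar if v is still empty
      print x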
// code fragment executed just before we return to the scheduler
stg_block_blackhole_finally
{
-#if defined(SMP)
+#if defined(THREADED_RTS)
// The last thing we do is release sched_lock, which is
// preventing other threads from accessing blackhole_queue and
// picking up this thread before we are finished with it.
}
#endif
+
+/* -----------------------------------------------------------------------------
+ STM-specific waiting
+ -------------------------------------------------------------------------- */
+
+stg_block_stmwait_finally
+{
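+ // R3 is the transaction record (StgTRecHeader) of the waiting transaction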
+ foreign "C" stmWaitUnlock(MyCapability() "ptr", R3 "ptr");
+ jump StgReturn;
+}
+
+stg_block_stmwait
+{
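+ // block this thread, running stg_block_stmwait_finally just before we return to the scheduler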
+ BLOCK_BUT_FIRST(stg_block_stmwait_finally);
+}
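
A thread reaches stg_block_stmwait from the retry path of an STM
transaction: stmWait has already put the thread on the wait queues of
the TVars it read, and the unlock has to happen in a _finally fragment
so that it runs only after the thread's state has been safely saved.
At the source level this is Control.Concurrent.STM's retry; an
illustrative example (takeItem is a made-up helper, not from the
patch):

    import Control.Concurrent.STM

    -- Blocks (via retry, hence stg_block_stmwait) until the list is
    -- non-empty, then pops its head.
    takeItem :: TVar [a] -> STM a
    takeItem tv = do
      xs <- readTVar tv
      case xs of
        []       -> retry
        (x:rest) -> do writeTVar tv rest; return x

    main :: IO ()
    main = do
      tv <- newTVarIO [1 :: Int]
      x  <- atomically (takeItem tv)
      print x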