* at all, it won't yield. Hopefully this won't be a problem in practice.
*/
+#define PRE_RETURN(why,what_next) \
+ StgTSO_what_next(CurrentTSO) = what_next::I16; \
+ StgRegTable_rRet(BaseReg) = why; \
+ R1 = BaseReg;
+
/* Remember that the return address is *removed* when returning to a
* ThreadRunGHC thread.
*/
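
/* For reference: PRE_RETURN only records why we stopped and what to do
 * next; the actual state save happens at a shared return point.  A
 * minimal sketch of what stg_returnToSched presumably looks like,
 * inferred from its uses below (the threadPaused() call is an
 * assumption, not something visible in this hunk):
 *
 *   stg_returnToSched
 *   {
 *     SAVE_THREAD_STATE();
 *     foreign "C" threadPaused(CurrentTSO "ptr");  // assumed bookkeeping
 *     jump StgReturn;
 *   }
 *
 * stg_returnToSchedNotPaused presumably skips the threadPaused() step,
 * hence its name.
 */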
R1 = StackOverflow; \
} \
sched: \
- SAVE_THREAD_STATE(); \
- StgTSO_what_next(CurrentTSO) = ThreadRunGHC::I16; \
- jump StgReturn;
+ PRE_RETURN(R1, ThreadRunGHC) \
+ jump stg_returnToSched;
-#define RETURN_TO_SCHED(why,what_next) \
- SAVE_THREAD_STATE(); \
- StgTSO_what_next(CurrentTSO) = what_next::I16; \
- R1 = why; \
- jump StgReturn;
+#define HP_GENERIC \
+ PRE_RETURN(HeapOverflow, ThreadRunGHC) \
+ jump stg_returnToSched;
+
+#define BLOCK_GENERIC \
+ PRE_RETURN(ThreadBlocked, ThreadRunGHC) \
+ jump stg_returnToSched;
-#define HP_GENERIC RETURN_TO_SCHED(HeapOverflow, ThreadRunGHC)
-#define YIELD_GENERIC RETURN_TO_SCHED(ThreadYielding, ThreadRunGHC)
-#define YIELD_TO_INTERPRETER RETURN_TO_SCHED(ThreadYielding, ThreadInterpret)
-#define BLOCK_GENERIC RETURN_TO_SCHED(ThreadBlocked, ThreadRunGHC)
+#define YIELD_GENERIC \
+ PRE_RETURN(ThreadYielding, ThreadRunGHC) \
+ jump stg_returnToSched;
+
+#define BLOCK_BUT_FIRST(c) \
+ PRE_RETURN(ThreadBlocked, ThreadRunGHC) \
+ R2 = c; \
+ jump stg_returnToSchedButFirst;
+
+#define YIELD_TO_INTERPRETER \
+ PRE_RETURN(ThreadYielding, ThreadInterpret) \
+ jump stg_returnToSchedNotPaused;
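+
+/* BLOCK_BUT_FIRST hands the address of a finishing fragment to the
+ * scheduler return path in R2.  A minimal sketch of what
+ * stg_returnToSchedButFirst presumably does (an assumption based on
+ * the macros above, not the definitive implementation): save the
+ * thread state, then jump to the fragment instead of StgReturn:
+ *
+ *   stg_returnToSchedButFirst
+ *   {
+ *     SAVE_THREAD_STATE();
+ *     jump R2;   // each fragment ends with "jump StgReturn"
+ *   }
+ */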
/* -----------------------------------------------------------------------------
Heap checks in thunks/functions.
Sp(2) = R1;
Sp(1) = size;
Sp(0) = stg_gc_fun_info;
- // DEBUG_ONLY(foreign "C" fprintf(stderr, "stg_fun_gc_gen(ARG_GEN)"););
+ // DEBUG_ONLY(foreign "C" debugBelch("stg_fun_gc_gen(ARG_GEN)"););
GC_GENERIC
} else {
jump W_[stg_stack_save_entries + WDS(type)];
Sp(5) = R3; \
Sp(4) = R2; \
Sp(3) = R1; \
- Sp(2) = R10.w; /* return address */ \
+ Sp(2) = R10; /* return address */ \
Sp(1) = R9; /* liveness mask */ \
Sp(0) = stg_gc_gen_info;
jump takeMVarzh_fast;
}
+// code fragment executed just before we return to the scheduler
+stg_block_takemvar_finally
+{
+#ifdef THREADED_RTS
+ foreign "C" unlockClosure(R3 "ptr", stg_EMPTY_MVAR_info);
+#endif
+ jump StgReturn;
+}
+
stg_block_takemvar
{
Sp_adj(-2);
Sp(1) = R1;
Sp(0) = stg_block_takemvar_info;
- BLOCK_GENERIC;
+ R3 = R1;
+ BLOCK_BUT_FIRST(stg_block_takemvar_finally);
}
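
/* Why the unlock is deferred to the finally fragment: the MVar must
 * stay locked until SAVE_THREAD_STATE() has run on the return path,
 * otherwise another thread could complete a putMVar and try to wake
 * this TSO while its state is only half saved.  R3 carries the MVar
 * because the fragment runs after Sp and Hp have been stashed away,
 * and R3 is assumed to survive the path through
 * stg_returnToSchedButFirst.
 */
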
INFO_TABLE_RET( stg_block_putmvar, 2/*framesize*/, 0/*bitmap*/, RET_SMALL )
jump putMVarzh_fast;
}
+// code fragment executed just before we return to the scheduler
+stg_block_putmvar_finally
+{
+#ifdef THREADED_RTS
+ foreign "C" unlockClosure(R3 "ptr", stg_FULL_MVAR_info);
+#endif
+ jump StgReturn;
+}
+
stg_block_putmvar
{
Sp_adj(-3);
Sp(2) = R2;
Sp(1) = R1;
Sp(0) = stg_block_putmvar_info;
- BLOCK_GENERIC;
+ R3 = R1;
+ BLOCK_BUT_FIRST(stg_block_putmvar_finally);
+}
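+
+/* Note that the put side restores stg_FULL_MVAR_info rather than
+ * stg_EMPTY_MVAR_info: we only block in putMVar when the MVar is
+ * already full, so it must still look full while we wait. */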
+
+// code fragment executed just before we return to the scheduler
+stg_block_blackhole_finally
+{
+#ifdef THREADED_RTS
+ // The last thing we do is release sched_mutex, which is
+ // preventing other threads from accessing blackhole_queue and
+ // picking up this thread before we are finished with it.
+ foreign "C" RELEASE_LOCK(sched_mutex "ptr");
+#endif
+ jump StgReturn;
+}
+
+stg_block_blackhole
+{
+ Sp_adj(-2);
+ Sp(1) = R1;
+ Sp(0) = stg_enter_info;
+ BLOCK_BUT_FIRST(stg_block_blackhole_finally);
}
#ifdef mingw32_HOST_OS
}
#endif
+
+/* -----------------------------------------------------------------------------
+ STM-specific waiting
+ -------------------------------------------------------------------------- */
+
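+// code fragment executed just before we return to the scheduler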
+stg_block_stmwait_finally
+{
+ foreign "C" stmWaitUnlock(MyCapability() "ptr", R3 "ptr");
+ jump StgReturn;
+}
+
+stg_block_stmwait
+{
+ BLOCK_BUT_FIRST(stg_block_stmwait_finally);
+}
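+
+/* Unlike the MVar cases above, stg_block_stmwait pushes no stack frame
+ * of its own: presumably the caller has already set up the stack and
+ * loaded R3 with the value to hand to stmWaitUnlock().  (Assumption:
+ * the caller is not visible in this hunk.) */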