* at all, it won't yield. Hopefully this won't be a problem in practice.
*/
+#define PRE_RETURN(why,what_next) \
+ StgTSO_what_next(CurrentTSO) = what_next::I16; \
+ StgRegTable_rRet(BaseReg) = why; \
+ R1 = BaseReg;
+
/* Remember that the return address is *removed* when returning to a
* ThreadRunGHC thread.
*/
CLOSE_NURSERY(); \
CurrentNursery = bdescr_link(CurrentNursery); \
OPEN_NURSERY(); \
- if (CInt[context_switch] != 0) { \
+ if (CInt[context_switch] != 0 :: CInt) { \
R1 = ThreadYielding; \
goto sched; \
} else { \
R1 = StackOverflow; \
} \
sched: \
- SAVE_THREAD_STATE(); \
- StgTSO_what_next(CurrentTSO) = ThreadRunGHC::I16; \
- jump StgReturn;
+ PRE_RETURN(R1,ThreadRunGHC); \
+ jump stg_returnToSched;
-#define RETURN_TO_SCHED(why,what_next) \
- SAVE_THREAD_STATE(); \
- StgTSO_what_next(CurrentTSO) = what_next::I16; \
- R1 = why; \
- jump StgReturn;
+#define HP_GENERIC \
+ PRE_RETURN(HeapOverflow, ThreadRunGHC) \
+ jump stg_returnToSched;
+
+#define BLOCK_GENERIC \
+ PRE_RETURN(ThreadBlocked, ThreadRunGHC) \
+ jump stg_returnToSched;
-#define HP_GENERIC RETURN_TO_SCHED(HeapOverflow, ThreadRunGHC)
-#define YIELD_GENERIC RETURN_TO_SCHED(ThreadYielding, ThreadRunGHC)
-#define YIELD_TO_INTERPRETER RETURN_TO_SCHED(ThreadYielding, ThreadInterpret)
-#define BLOCK_GENERIC RETURN_TO_SCHED(ThreadBlocked, ThreadRunGHC)
+#define YIELD_GENERIC \
+ PRE_RETURN(ThreadYielding, ThreadRunGHC) \
+ jump stg_returnToSched;
+
+#define BLOCK_BUT_FIRST(c) \
+ PRE_RETURN(ThreadBlocked, ThreadRunGHC) \
+ R2 = c; \
+ jump stg_returnToSchedButFirst;
+
+#define YIELD_TO_INTERPRETER \
+ PRE_RETURN(ThreadYielding, ThreadInterpret) \
+ jump stg_returnToSchedNotPaused;
/* -----------------------------------------------------------------------------
Heap checks in thunks/functions.
GC_GENERIC
}
-#ifdef SMP
-stg_gc_enter_1_hponly
-{
- Sp_adj(-1);
- Sp(0) = R1;
- R1 = HeapOverflow;
- SAVE_THREAD_STATE();
- TSO_what_next(CurrentTSO) = ThreadRunGHC::I16;
- jump StgReturn;
-}
-#endif
-
#if defined(GRAN)
/*
ToDo: merge the block and yield macros, calling something like BLOCK(N)
size = BITMAP_SIZE(StgFunInfoExtra_bitmap(info));
} else {
if (type == ARG_GEN_BIG) {
+#ifdef TABLES_NEXT_TO_CODE
+ // with TABLES_NEXT_TO_CODE, the bitmap field is stored as an offset relative to the entry code, not a pointer
+ size = StgLargeBitmap_size( StgFunInfoExtra_bitmap(info)
+ + %GET_ENTRY(R1) /* ### */ );
+#else
size = StgLargeBitmap_size( StgFunInfoExtra_bitmap(info) );
+#endif
} else {
size = BITMAP_SIZE(W_[stg_arg_bitmaps + WDS(type)]);
}
Sp(2) = R1;
Sp(1) = size;
Sp(0) = stg_gc_fun_info;
- // DEBUG_ONLY(foreign "C" fprintf(stderr, "stg_fun_gc_gen(ARG_GEN)"););
+ // DEBUG_ONLY(foreign "C" debugBelch("stg_fun_gc_gen(ARG_GEN)"););
GC_GENERIC
} else {
jump W_[stg_stack_save_entries + WDS(type)];
// jumps to stg_gc_noregs after saving stuff
}
-#endif // !NO_ARG_REGS
+#endif /* !NO_ARG_REGS */
}
/* -----------------------------------------------------------------------------
Sp(5) = R3; \
Sp(4) = R2; \
Sp(3) = R1; \
- Sp(2) = R10.w; /* return address */ \
+ Sp(2) = R10; /* return address */ \
Sp(1) = R9; /* liveness mask */ \
Sp(0) = stg_gc_gen_info;
jump takeMVarzh_fast;
}
+// code fragment executed just before we return to the scheduler
+stg_block_takemvar_finally
+{
+#ifdef THREADED_RTS
+ foreign "C" unlockClosure(R3 "ptr", stg_EMPTY_MVAR_info);
+#endif
+ jump StgReturn;
+}
+
stg_block_takemvar
{
Sp_adj(-2);
Sp(1) = R1;
Sp(0) = stg_block_takemvar_info;
- BLOCK_GENERIC;
+ R3 = R1;
+ BLOCK_BUT_FIRST(stg_block_takemvar_finally);
}
INFO_TABLE_RET( stg_block_putmvar, 2/*framesize*/, 0/*bitmap*/, RET_SMALL )
jump putMVarzh_fast;
}
+// code fragment executed just before we return to the scheduler
+stg_block_putmvar_finally
+{
+#ifdef THREADED_RTS
+ foreign "C" unlockClosure(R3 "ptr", stg_FULL_MVAR_info);
+#endif
+ jump StgReturn;
+}
+
stg_block_putmvar
{
Sp_adj(-3);
Sp(2) = R2;
Sp(1) = R1;
Sp(0) = stg_block_putmvar_info;
- BLOCK_GENERIC;
+ R3 = R1;
+ BLOCK_BUT_FIRST(stg_block_putmvar_finally);
+}
+
+// code fragment executed just before we return to the scheduler
+stg_block_blackhole_finally
+{
+#if defined(THREADED_RTS)
+ // The last thing we do is release sched_mutex, which is
+ // preventing other threads from accessing blackhole_queue and
+ // picking up this thread before we are finished with it.
+ foreign "C" RELEASE_LOCK(sched_mutex "ptr");
+#endif
+ jump StgReturn;
}
-#ifdef mingw32_TARGET_OS
+stg_block_blackhole
+{
+ Sp_adj(-2);
+ Sp(1) = R1;
+ Sp(0) = stg_enter_info;
+ BLOCK_BUT_FIRST(stg_block_blackhole_finally);
+}
+
+#ifdef mingw32_HOST_OS
INFO_TABLE_RET( stg_block_async, 0/*framesize*/, 0/*bitmap*/, RET_SMALL )
{
W_ ares;
BLOCK_GENERIC;
}
+/* Used by the threadDelay implementation; it would be desirable to get rid
+ * of this void return continuation, whose only job is to free() the result.
+ */
+INFO_TABLE_RET( stg_block_async_void, 0/*framesize*/, 0/*bitmap*/, RET_SMALL )
+{
+ W_ ares;
+
+ ares = StgTSO_block_info(CurrentTSO);
+ StgTSO_block_info(CurrentTSO) = NULL;
+ foreign "C" free(ares "ptr");
+ Sp_adj(1);
+ jump %ENTRY_CODE(Sp(0));
+}
+
+stg_block_async_void
+{
+ Sp_adj(-1);
+ Sp(0) = stg_block_async_void_info;
+ BLOCK_GENERIC;
+}
+
#endif
+
+/* -----------------------------------------------------------------------------
+ STM-specific waiting
+ -------------------------------------------------------------------------- */
+
+stg_block_stmwait_finally
+{
+ foreign "C" stmWaitUnlock(MyCapability() "ptr", R3 "ptr");
+ jump StgReturn;
+}
+
+stg_block_stmwait
+{
+ BLOCK_BUT_FIRST(stg_block_stmwait_finally);
+}