- Move the call to threadPaused() from the scheduler into STG land,
and put it in a new code fragment (stg_returnToSched) that we pass
through every time we return from STG to the scheduler. Also,
SAVE_THREAD_STATE() is now done in stg_returnToSched, which might save
a little code space (at the expense of an extra jump on every return
to the scheduler).
- SMP: when blocking on an MVar, we now wait until the thread has been
made fully safe and placed on the blocked queue of the MVar before
we unlock the MVar. This closes a race whereby another OS thread could
begin waking us up before the current TSO had been properly tidied up.
Fixes one cause of crashes when using MVars with SMP. I still have a
deadlock problem to track down.
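
The required ordering can be sketched outside the RTS with plain POSIX
threads; the names below (ToyMVar, taker, putter) are invented for the
illustration and none of this is RTS code. Holding the toy MVar's lock
while the blocked thread puts itself on the wait queue plays the role of
keeping the real MVar locked until the TSO has been tidied up and placed
on the MVar's blocked queue, so the waker can never observe a
half-prepared waiter.

/* toy_mvar.c: a stand-alone model of the blocking order, not RTS code. */
#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;          /* plays the role of the MVar closure lock */
    pthread_cond_t  woken;
    int             full;          /* 1 = FULL_MVAR, 0 = EMPTY_MVAR           */
    int             value;
    int             waiter_queued; /* stand-in for "TSO tidied and enqueued"  */
} ToyMVar;

static ToyMVar mv = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0
};

/* takeMVar on an empty MVar: enqueue and make ourselves safe BEFORE the
 * lock is given up.  pthread_cond_wait releases the mutex only once the
 * caller is on the wait queue, which is the property we want for the
 * MVar's blocked-TSO queue. */
static void *taker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&mv.lock);            /* "lockClosure(mvar)"            */
    if (!mv.full) {
        mv.waiter_queued = 1;                /* "threadPaused(); put on queue" */
        while (!mv.full)
            pthread_cond_wait(&mv.woken, &mv.lock);
    }
    printf("taker got %d\n", mv.value);
    mv.full = 0;                             /* MVar is empty again            */
    pthread_mutex_unlock(&mv.lock);
    return NULL;
}

/* putMVar: the waker must take the same lock, so it can only see a waiter
 * that is fully queued; it can never start waking a half-tidied thread. */
static void *putter(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&mv.lock);
    mv.value = 42;
    mv.full  = 1;
    if (mv.waiter_queued)
        pthread_cond_signal(&mv.woken);
    pthread_mutex_unlock(&mv.lock);          /* "unlockClosure(mvar, FULL)"    */
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;
    pthread_create(&t1, NULL, taker, NULL);
    pthread_create(&t2, NULL, putter, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}

Build with: cc -pthread toy_mvar.c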
RTS_ENTRY(stg_stop_thread_ret);
RTS_FUN(stg_returnToStackTop);
-RTS_FUN(stg_enterStackTop);
+RTS_FUN(stg_returnToSched);
+RTS_FUN(stg_returnToSchedButFirst);
RTS_FUN(stg_init_finish);
RTS_FUN(stg_init);
R1 = StackOverflow; \
} \
sched: \
StgTSO_what_next(CurrentTSO) = ThreadRunGHC::I16; \
+ jump stg_returnToSched;
#define RETURN_TO_SCHED(why,what_next) \
StgTSO_what_next(CurrentTSO) = what_next::I16; \
R1 = why; \
+ jump stg_returnToSched;
+
+#define RETURN_TO_SCHED_BUT_FIRST(why,what_next,cont) \
+ StgTSO_what_next(CurrentTSO) = what_next::I16; \
+ R1 = why; \
+ R2 = cont; \
+ jump stg_returnToSchedButFirst;
#define HP_GENERIC RETURN_TO_SCHED(HeapOverflow, ThreadRunGHC)
#define YIELD_GENERIC RETURN_TO_SCHED(ThreadYielding, ThreadRunGHC)
#define YIELD_TO_INTERPRETER RETURN_TO_SCHED(ThreadYielding, ThreadInterpret)
#define BLOCK_GENERIC RETURN_TO_SCHED(ThreadBlocked, ThreadRunGHC)
+#define BLOCK_BUT_FIRST(c) RETURN_TO_SCHED_BUT_FIRST(ThreadBlocked, ThreadRunGHC, c)
/* -----------------------------------------------------------------------------
Heap checks in thunks/functions.
+// code fragment executed just before we return to the scheduler
+stg_block_takemvar_finally
+{
+#ifdef SMP
+ foreign "C" unlockClosure(R3 "ptr", stg_EMPTY_MVAR_info);
+#endif
+ jump StgReturn;
+}
+
stg_block_takemvar
{
Sp_adj(-2);
Sp(1) = R1;
Sp(0) = stg_block_takemvar_info;
+ R3 = R1;
+ BLOCK_BUT_FIRST(stg_block_takemvar_finally);
}
INFO_TABLE_RET( stg_block_putmvar, 2/*framesize*/, 0/*bitmap*/, RET_SMALL )
+// code fragment executed just before we return to the scheduler
+stg_block_putmvar_finally
+{
+#ifdef SMP
+ foreign "C" unlockClosure(R3 "ptr", stg_FULL_MVAR_info);
+#endif
+ jump StgReturn;
+}
+
stg_block_putmvar
{
Sp_adj(-3);
Sp(2) = R2;
Sp(1) = R1;
Sp(0) = stg_block_putmvar_info;
+ R3 = R1;
+ BLOCK_BUT_FIRST(stg_block_putmvar_finally);
StgTSO_sp(tso) = StgTSO_sp(tso) + WDS(3); \
lval = W_[StgTSO_sp(tso) - WDS(1)];
+/*
+ * Only in threaded mode: we have to be careful when manipulating another thread's TSO,
+ * because the scheduler might also be manipulating it.
+ */
+#if defined(RTS_SUPPORTS_THREADS)
+#define ACQUIRE_SCHED_LOCK foreign "C" ACQUIRE_LOCK(sched_mutex "ptr");
+#define RELEASE_SCHED_LOCK foreign "C" RELEASE_LOCK(sched_mutex "ptr");
+#else
+#define ACQUIRE_SCHED_LOCK
+#define RELEASE_SCHED_LOCK
+#endif
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
-#if defined(SMP)
- SET_INFO(mvar,stg_EMPTY_MVAR_info);
-#endif
-
jump stg_block_takemvar;
}
if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure)
{
+
+ ACQUIRE_SCHED_LOCK;
+
/* There are putMVar(s) waiting...
* wake up the first thread on the queue
*/
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
- "ptr" tso = foreign "C" unblockOne(StgMVar_head(mvar),mvar);
+ "ptr" tso = foreign "C" unblockOneLocked(StgMVar_head(mvar),mvar);
StgMVar_head(mvar) = tso;
#else
- "ptr" tso = foreign "C" unblockOne(StgMVar_head(mvar) "ptr");
+ "ptr" tso = foreign "C" unblockOneLocked(StgMVar_head(mvar) "ptr");
StgMVar_head(mvar) = tso;
#endif
+
+ RELEASE_SCHED_LOCK;
+
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
#if defined(SMP)
- SET_INFO(mvar,stg_FULL_MVAR_info);
+ foreign "C" unlockClosure(mvar "ptr", stg_FULL_MVAR_info);
/* No further putMVars, MVar is now empty */
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
- /* unlocks the closure in the SMP case */
+#if defined(SMP)
+ foreign "C" unlockClosure(mvar "ptr", stg_EMPTY_MVAR_info);
+#else
SET_INFO(mvar,stg_EMPTY_MVAR_info);
+#endif
if (info == stg_EMPTY_MVAR_info) {
#if defined(SMP)
- SET_INFO(mvar,stg_EMPTY_MVAR_info);
+ foreign "C" unlockClosure(mvar "ptr", stg_EMPTY_MVAR_info);
#endif
/* HACK: we need a pointer to pass back,
* so we abuse NO_FINALIZER_closure
val = StgMVar_value(mvar);
if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
+
+ ACQUIRE_SCHED_LOCK;
+
/* There are putMVar(s) waiting...
* wake up the first thread on the queue
*/
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
- "ptr" tso = foreign "C" unblockOne(StgMVar_head(mvar) "ptr", mvar "ptr");
+ "ptr" tso = foreign "C" unblockOneLocked(StgMVar_head(mvar) "ptr", mvar "ptr");
StgMVar_head(mvar) = tso;
#else
- "ptr" tso = foreign "C" unblockOne(StgMVar_head(mvar) "ptr");
+ "ptr" tso = foreign "C" unblockOneLocked(StgMVar_head(mvar) "ptr");
StgMVar_head(mvar) = tso;
#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
#if defined(SMP)
- SET_INFO(mvar,stg_FULL_MVAR_info);
+ foreign "C" unlockClosure(mvar "ptr", stg_FULL_MVAR_info);
#endif
}
else
{
/* No further putMVars, MVar is now empty */
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
+#if defined(SMP)
+ foreign "C" unlockClosure(mvar "ptr", stg_EMPTY_MVAR_info);
+#else
SET_INFO(mvar,stg_EMPTY_MVAR_info);
+#endif
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
-#if defined(SMP)
- SET_INFO(mvar,stg_FULL_MVAR_info);
-#endif
jump stg_block_putmvar;
}
if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
+
+ ACQUIRE_SCHED_LOCK;
+
/* There are takeMVar(s) waiting: wake up the first one
*/
ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
- "ptr" tso = foreign "C" unblockOne(StgMVar_head(mvar) "ptr",mvar "ptr");
+ "ptr" tso = foreign "C" unblockOneLocked(StgMVar_head(mvar) "ptr",mvar "ptr");
StgMVar_head(mvar) = tso;
#else
- "ptr" tso = foreign "C" unblockOne(StgMVar_head(mvar) "ptr");
+ "ptr" tso = foreign "C" unblockOneLocked(StgMVar_head(mvar) "ptr");
StgMVar_head(mvar) = tso;
#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
#if defined(SMP)
- SET_INFO(mvar,stg_EMPTY_MVAR_info);
+ foreign "C" unlockClosure(mvar "ptr", stg_EMPTY_MVAR_info);
#endif
jump %ENTRY_CODE(Sp(0));
}
{
/* No further takes, the MVar is now full. */
StgMVar_value(mvar) = R2;
- /* unlocks the MVar in the SMP case */
- SET_INFO(mvar,stg_FULL_MVAR_info);
+#if defined(SMP)
+ foreign "C" unlockClosure(mvar "ptr", stg_FULL_MVAR_info);
+#else
+ SET_INFO(mvar,stg_FULL_MVAR_info);
+#endif
jump %ENTRY_CODE(Sp(0));
}
if (info == stg_FULL_MVAR_info) {
#if defined(SMP)
- SET_INFO(mvar,stg_FULL_MVAR_info);
+ foreign "C" unlockClosure(mvar "ptr", stg_FULL_MVAR_info);
#endif
RET_N(0);
}
if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
+
+ ACQUIRE_SCHED_LOCK;
+
/* There are takeMVar(s) waiting: wake up the first one
*/
ASSERT(StgTSO_why_blocked(StgMVar_head(mvar)) == BlockedOnMVar::I16);
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
- "ptr" tso = foreign "C" unblockOne(StgMVar_head(mvar) "ptr",mvar "ptr");
+ "ptr" tso = foreign "C" unblockOneLocked(StgMVar_head(mvar) "ptr",mvar "ptr");
StgMVar_head(mvar) = tso;
#else
- "ptr" tso = foreign "C" unblockOne(StgMVar_head(mvar) "ptr");
+ "ptr" tso = foreign "C" unblockOneLocked(StgMVar_head(mvar) "ptr");
StgMVar_head(mvar) = tso;
#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
#if defined(SMP)
- SET_INFO(mvar,stg_EMPTY_MVAR_info);
+ foreign "C" unlockClosure(mvar "ptr", stg_EMPTY_MVAR_info);
#endif
jump %ENTRY_CODE(Sp(0));
}
{
/* No further takes, the MVar is now full. */
StgMVar_value(mvar) = R2;
- /* unlocks the MVar in the SMP case */
- SET_INFO(mvar,stg_FULL_MVAR_info);
+#if defined(SMP)
+ foreign "C" unlockClosure(mvar "ptr", stg_FULL_MVAR_info);
+#else
+ SET_INFO(mvar,stg_FULL_MVAR_info);
+#endif
jump %ENTRY_CODE(Sp(0));
}
CurrentTSO = event->tso;
#endif
- IF_DEBUG(scheduler, printAllThreads());
-
#if defined(RTS_SUPPORTS_THREADS)
// Yield the capability to higher-priority tasks if necessary.
//
case ThreadBlocked:
scheduleHandleThreadBlocked(t);
break;
case ThreadFinished:
// they are unreachable and will therefore be sent an
// exception. Any threads thus released will be immediately
// runnable.
GarbageCollect(GetRoots,rtsTrue);
recent_activity = ACTIVITY_DONE_GC;
if ( !EMPTY_RUN_QUEUE() ) return;
IF_DEBUG(scheduler,
debugBelch("--<< thread %ld (%s) stopped: HeapOverflow\n",
(long)t->id, whatNext_strs[t->what_next]));
#if defined(GRAN)
ASSERT(!is_on_queue(t,CurrentProc));
#elif defined(PARALLEL_HASKELL)
/* just adjust the stack for this thread, then pop it back
* on the run queue.
*/
{
/* enlarge the stack */
StgTSO *new_t = threadStackOverflow(t);
#if defined(GRAN)
ASSERT(!is_on_queue(t,CurrentProc));
emitSchedule = rtsTrue;
#else /* !GRAN */
- /* don't need to do anything. Either the thread is blocked on
- * I/O, in which case we'll have called addToBlockedQueue
- * previously, or it's blocked on an MVar or Blackhole, in which
- * case it'll be on the relevant queue already.
- */
+
+ // We don't need to do anything. The thread is blocked, and it
+ // has tidied up its stack and placed itself on whatever queue
+ // it needs to be on.
+
+#if !defined(SMP)
ASSERT(t->why_blocked != NotBlocked);
+ // This might not be true under SMP: we don't have
+ // exclusive access to this TSO, so someone might have
+ // woken it up by now. This actually happens: try
+ // conc023 +RTS -N2.
+#endif
+
IF_DEBUG(scheduler,
debugBelch("--<< thread %d (%s) stopped: ",
t->id, whatNext_strs[t->what_next]);
// so this happens periodically:
scheduleCheckBlackHoles();
+ IF_DEBUG(scheduler, printAllThreads());
+
/* everybody back, start the GC.
* Could do it in this thread, or signal a condition var
* to do it in another thread. Either way, we need to
*/
#if defined(GRAN) || defined(PAR)
StgBlockingQueueElement *unblockOne(StgBlockingQueueElement *bqe, StgClosure *node);
+StgBlockingQueueElement *unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node);
#else
StgTSO *unblockOne(StgTSO *tso);
+StgTSO *unblockOneLocked(StgTSO *tso);
Start a thread from the scheduler by returning to the address on
the top of the stack. This is used for all entries to STG code
from C land.
+
+ On the way back, we (usually) pass through stg_returnToSched which saves
+ the thread's state away nicely.
-------------------------------------------------------------------------- */
stg_returnToStackTop
jump %ENTRY_CODE(Sp(0));
}
+stg_returnToSched
+{
+ SAVE_THREAD_STATE();
+ foreign "C" threadPaused(CurrentTSO);
+ jump StgReturn;
+}
+
+// A variant of stg_returnToSched, but instead of returning directly to the
+// scheduler, we jump to the code fragment pointed to by R2. This lets us
+// perform some final actions after making the thread safe, such as unlocking
+// the MVar on which we are about to block in SMP mode.
+stg_returnToSchedButFirst
+{
+ SAVE_THREAD_STATE();
+ foreign "C" threadPaused(CurrentTSO);
+ jump R2;
+}
+
/* -----------------------------------------------------------------------------
Strict IO application - performing an IO action and entering its result.