/* ---------------------------------------------------------------------------
- * $Id: Schedule.c,v 1.134 2002/03/12 13:57:11 simonmar Exp $
+ * $Id: Schedule.c,v 1.147 2002/07/10 09:28:56 simonmar Exp $
*
* (c) The GHC Team, 1998-2000
*
#include "Stats.h"
#include "Itimer.h"
#include "Prelude.h"
+#include "ThreadLabels.h"
#ifdef PROFILING
#include "Proftimer.h"
#include "ProfHeap.h"
#include "OSThreads.h"
#include "Task.h"
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
#include <stdarg.h>
//@node Variables and Data structures, Prototypes, Includes, Main scheduling code
rtsBool interrupted;
/* Next thread ID to allocate.
- * Locks required: sched_mutex
+ * Locks required: thread_id_mutex
*/
//@cindex next_thread_id
StgThreadID next_thread_id = 1;
rtsBool ready_to_gc;
+/*
+ * Set to TRUE when entering a shutdown state (via shutdownHaskellAndExit()) --
+ * in an MT setting, needed to signal that a worker thread shouldn't hang around
+ * in the scheduler when it is out of work.
+ */
+static rtsBool shutting_down_scheduler = rtsFalse;
+
void addToBlockedQueue ( StgTSO *tso );
static void schedule ( void );
void interruptStgRts ( void );
-#if defined(GRAN)
-static StgTSO * createThread_ ( nat size, rtsBool have_lock, StgInt pri );
-#else
-static StgTSO * createThread_ ( nat size, rtsBool have_lock );
-#endif
static void detectBlackHoles ( void );
Mutex sched_mutex = INIT_MUTEX_VAR;
Mutex term_mutex = INIT_MUTEX_VAR;
+/*
+ * A heavyweight solution to the problem of protecting
+ * the thread ID counter (next_thread_id) from concurrent update.
+ */
+Mutex thread_id_mutex = INIT_MUTEX_VAR;
+
+
# if defined(SMP)
static Condition gc_pending_cond = INIT_COND_VAR;
nat await_death;
ACQUIRE_LOCK(&sched_mutex);
#if defined(RTS_SUPPORTS_THREADS)
- /* Check to see whether there are any worker threads
- waiting to deposit external call results. If so,
- yield our capability */
- yieldToReturningWorker(&sched_mutex, cap);
-
waitForWorkCapability(&sched_mutex, &cap, rtsFalse);
+#else
+ /* simply initialise it in the non-threaded case */
+ grabCapability(&cap);
#endif
#if defined(GRAN)
IF_DEBUG(scheduler, printAllThreads());
+#if defined(RTS_SUPPORTS_THREADS)
+ /* Check to see whether there are any worker threads
+ waiting to deposit external call results. If so,
+ yield our capability */
+ yieldToReturningWorker(&sched_mutex, &cap);
+#endif
+
/* If we're interrupted (the user pressed ^C, or some other
* termination condition occurred), kill all the currently running
* threads.
*prev = m->link;
m->stat = Success;
broadcastCondition(&m->wakeup);
+#ifdef DEBUG
+ removeThreadLabel((StgWord)m->tso);
+#endif
break;
case ThreadKilled:
if (m->ret) *(m->ret) = NULL;
m->stat = Killed;
}
broadcastCondition(&m->wakeup);
+#ifdef DEBUG
+ removeThreadLabel((StgWord)m->tso);
+#endif
break;
default:
break;
StgMainThread *m = main_threads;
if (m->tso->what_next == ThreadComplete
|| m->tso->what_next == ThreadKilled) {
+#ifdef DEBUG
+ removeThreadLabel((StgWord)m->tso);
+#endif
main_threads = main_threads->link;
if (m->tso->what_next == ThreadComplete) {
/* we finished successfully, fill in the return value */
/* check for signals each time around the scheduler */
#ifndef mingw32_TARGET_OS
if (signals_pending()) {
+ RELEASE_LOCK(&sched_mutex); /* ToDo: kill */
startSignalHandlers();
+ ACQUIRE_LOCK(&sched_mutex);
}
#endif
* for signals to arrive rather than bombing out with a
* deadlock.
*/
+#if defined(RTS_SUPPORTS_THREADS)
+ if ( 0 ) { /* hmm.. what to do? Simply waiting for a signal
+	       when there are no runnable threads (or I/O-suspended
+	       ones) leads nowhere quickly.
+	       For now, simply shut down when we reach this
+	       condition.
+
+ ToDo: define precisely under what conditions
+ the Scheduler should shut down in an MT setting.
+ */
+#else
if ( anyUserHandlers() ) {
+#endif
IF_DEBUG(scheduler,
sched_belch("still deadlocked, waiting for signals..."));
if (interrupted) { continue; }
if (signals_pending()) {
+ RELEASE_LOCK(&sched_mutex);
startSignalHandlers();
+ ACQUIRE_LOCK(&sched_mutex);
}
ASSERT(!EMPTY_RUN_QUEUE());
goto not_deadlocked;
/* ToDo: revisit conditions (and mechanism) for shutting
down a multi-threaded world */
IF_DEBUG(scheduler, sched_belch("all done, i think...shutting down."));
- shutdownHaskellAndExit(0);
+ RELEASE_LOCK(&sched_mutex);
+ shutdownHaskell();
+ return;
#endif
}
not_deadlocked:
if ( EMPTY_RUN_QUEUE() ) {
/* Give up our capability */
releaseCapability(cap);
+
+ /* If we're in the process of shutting down (& running a
+  * batch of finalisers), don't wait around.
+ */
+ if ( shutting_down_scheduler ) {
+ RELEASE_LOCK(&sched_mutex);
+ return;
+ }
IF_DEBUG(scheduler, sched_belch("thread %d: waiting for work", osThreadId()));
waitForWorkCapability(&sched_mutex, &cap, rtsTrue);
IF_DEBUG(scheduler, sched_belch("thread %d: work now available", osThreadId()));
-#if 0
- while ( EMPTY_RUN_QUEUE() ) {
- waitForWorkCapability(&sched_mutex, &cap);
- IF_DEBUG(scheduler, sched_belch("thread %d: work now available", osThreadId()));
- }
-#endif
}
#endif
IF_DEBUG(sanity,checkTSO(t));
#endif
- grabCapability(&cap);
cap->r.rCurrentTSO = t;
/* context switches are now initiated by the timer signal, unless
}
cap->r.rCurrentNursery->u.back = bd;
- // initialise it as a nursery block
- bd->step = g0s0;
- bd->gen_no = 0;
- bd->flags = 0;
- bd->free = bd->start;
+ // initialise it as a nursery block. We initialise the
+ // step, gen_no, and flags field of *every* sub-block in
+ // this large block, because this is easier than making
+ // sure that we always find the block head of a large
+ // block whenever we call Bdescr() (eg. evacuate() and
+ // isAlive() in the GC would both have to do this, at
+ // least).
+ {
+ bdescr *x;
+ for (x = bd; x < bd + blocks; x++) {
+ x->step = g0s0;
+ x->gen_no = 0;
+ x->flags = 0;
+ x->free = x->start;
+ }
+ }
// don't forget to update the block count in g0s0.
g0s0->n_blocks += blocks;
default:
barf("schedule: invalid thread return code %d", (int)ret);
}
-
-#if defined(RTS_SUPPORTS_THREADS)
- /* I don't understand what this re-grab is doing -- sof */
- grabCapability(&cap);
-#endif
#ifdef PROFILING
if (RtsFlags.ProfFlags.profileInterval==0 || performHeapProfile) {
}
/* ---------------------------------------------------------------------------
+ * Singleton fork(). Do not copy any running threads.
+ * ------------------------------------------------------------------------- */
+
+StgInt forkProcess(StgTSO* tso) {
+
+#ifndef mingw32_TARGET_OS
+ pid_t pid;
+ StgTSO* t,*next;
+
+ IF_DEBUG(scheduler,sched_belch("forking!"));
+
+ pid = fork();
+ if (pid) { /* parent */
+
+ /* just return the pid */
+
+ } else { /* child */
+ /* wipe all other threads */
+ run_queue_hd = tso;
+ tso->link = END_TSO_QUEUE;
+
+ /* DO NOT TOUCH THE QUEUES directly because most of the code around
+    us is picky about finding the thread still in its queue when
+    handling deleteThread() */
+
+ for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->link;
+ if (t->id != tso->id) {
+ deleteThread(t);
+ }
+ }
+ }
+ return pid;
+#else /* mingw32 */
+ barf("forkProcess#: primop not implemented for mingw32, sorry! (%u)\n", tso->id);
+ /* pointlessly printing out the TSO's 'id' to avoid a CC 'unused' warning. */
+ return -1;
+#endif /* mingw32 */
+}
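+
+/* Note (illustrative only, not part of the change itself): in the child,
+ * fork() returns 0, so forkProcess() returns 0 there and 'tso' is left as
+ * the only thread on the run queue; the parent simply gets the child's
+ * pid back.
+ */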
+
+/* ---------------------------------------------------------------------------
* deleteAllThreads(): kill all the live threads.
*
* This is used when we catch a user interrupt (^C), before performing
* any necessary cleanups and running finalizers.
+ *
+ * Locks: sched_mutex held.
* ------------------------------------------------------------------------- */
void deleteAllThreads ( void )
StgInt
suspendThread( StgRegTable *reg,
rtsBool concCall
-#if !defined(RTS_SUPPORTS_THREADS)
+#if !defined(RTS_SUPPORTS_THREADS) && !defined(DEBUG)
STG_UNUSED
#endif
)
ACQUIRE_LOCK(&sched_mutex);
IF_DEBUG(scheduler,
- sched_belch("thread %d did a _ccall_gc", cap->r.rCurrentTSO->id));
+ sched_belch("thread %d did a _ccall_gc (is_concurrent: %d)", cap->r.rCurrentTSO->id,concCall));
threadPaused(cap->r.rCurrentTSO);
cap->r.rCurrentTSO->link = suspended_ccalling_threads;
#if defined(RTS_SUPPORTS_THREADS)
/* Wait for permission to re-enter the RTS with the result. */
if ( concCall ) {
+ ACQUIRE_LOCK(&sched_mutex);
grabReturnCapability(&sched_mutex, &cap);
} else {
grabCapability(&cap);
/* Reset blocking status */
tso->why_blocked = NotBlocked;
- RELEASE_LOCK(&sched_mutex);
-
cap->r.rCurrentTSO = tso;
+ RELEASE_LOCK(&sched_mutex);
return &cap->r;
}
return tso->id;
}
+#ifdef DEBUG
+void labelThread(StgTSO *tso, char *label)
+{
+ int len;
+ void *buf;
+
+ /* Caveat: Once set, you can only set the thread name to "" */
+ len = strlen(label)+1;
+ buf = malloc(len);
+ if (buf == NULL) {
+   fprintf(stderr,"insufficient memory for labelThread!\n");
+   return;
+ }
+ strncpy(buf,label,len);
+ /* Update will free the old memory for us */
+ updateThreadLabel((StgWord)tso,buf);
+}
+#endif /* DEBUG */
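+
+/* For reference: a sketch only; the real declarations live in
+ * ThreadLabels.h and are not part of this hunk.  The label table used
+ * above is assumed to be keyed on the TSO address (an StgWord):
+ *
+ *   void  updateThreadLabel ( StgWord key, void *data );
+ *   void *lookupThreadLabel ( StgWord key );
+ *   void  removeThreadLabel ( StgWord key );
+ *
+ * updateThreadLabel() frees any previous label for the key (see the
+ * "Update will free the old memory" note above); lookupThreadLabel()
+ * returns NULL for an unlabelled thread.
+ */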
+
/* ---------------------------------------------------------------------------
Create a new thread.
#if defined(GRAN)
/* currently pri (priority) is only used in a GRAN setup -- HWL */
StgTSO *
-createThread(nat stack_size, StgInt pri)
-{
- return createThread_(stack_size, rtsFalse, pri);
-}
-
-static StgTSO *
-createThread_(nat size, rtsBool have_lock, StgInt pri)
-{
+createThread(nat size, StgInt pri)
#else
StgTSO *
-createThread(nat stack_size)
-{
- return createThread_(stack_size, rtsFalse);
-}
-
-static StgTSO *
-createThread_(nat size, rtsBool have_lock)
-{
+createThread(nat size)
#endif
+{
StgTSO *tso;
nat stack_size;
* protect the increment operation on next_thread_id.
* In future, we could use an atomic increment instead.
*/
- if (!have_lock) { ACQUIRE_LOCK(&sched_mutex); }
+ ACQUIRE_LOCK(&thread_id_mutex);
tso->id = next_thread_id++;
- if (!have_lock) { RELEASE_LOCK(&sched_mutex); }
+ RELEASE_LOCK(&thread_id_mutex);
tso->why_blocked = NotBlocked;
tso->blocked_exceptions = NULL;
}
else
{ threadsCreated++;
- tso = createThread_(RtsFlags.GcFlags.initialStkSize, rtsTrue);
+ tso = createThread(RtsFlags.GcFlags.initialStkSize);
if (tso==END_TSO_QUEUE)
barf("createSparkThread: Cannot create TSO");
#if defined(DIST)
}
#endif
+static SchedulerStatus waitThread_(/*out*/StgMainThread* m
+#if defined(THREADED_RTS)
+ , rtsBool blockWaiting
+#endif
+ );
+
+
/* ---------------------------------------------------------------------------
* scheduleThread()
*
void scheduleThread(StgTSO* tso)
{
- return scheduleThread_(tso, rtsFalse);
+ scheduleThread_(tso, rtsFalse);
}
-void scheduleExtThread(StgTSO* tso)
+SchedulerStatus
+scheduleWaitThread(StgTSO* tso, /*[out]*/HaskellObj* ret)
{
- return scheduleThread_(tso, rtsTrue);
+ StgMainThread *m;
+
+ m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
+ m->tso = tso;
+ m->ret = ret;
+ m->stat = NoStatus;
+#if defined(RTS_SUPPORTS_THREADS)
+ initCondition(&m->wakeup);
+#endif
+
+ /* Put the thread on the main-threads list prior to scheduling the TSO.
+ Failure to do so introduces a race condition in the MT case (as
+ identified by Wolfgang Thaller), whereby the new task/OS thread
+ created by scheduleThread_() would complete before the thread
+ that spawned it managed to put 'itself' on the main-threads list.
+ The upshot of it all was that the worker thread wouldn't get to
+ signal the completion of its work item for the main thread to
+ see (==> it got stuck waiting.) -- sof 6/02.
+ */
+ ACQUIRE_LOCK(&sched_mutex);
+ IF_DEBUG(scheduler, sched_belch("== scheduler: waiting for thread (%d)\n", tso->id));
+
+ m->link = main_threads;
+ main_threads = m;
+
+ /* Inefficient (scheduleThread_() acquires it again right away),
+ * but obviously correct.
+ */
+ RELEASE_LOCK(&sched_mutex);
+
+ scheduleThread_(tso, rtsTrue);
+#if defined(THREADED_RTS)
+ return waitThread_(m, rtsTrue);
+#else
+ return waitThread_(m);
+#endif
}
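+
+/* Illustrative only (a sketch, not part of this patch): a caller in the
+ * RTS API that wants to run one thread to completion and collect its
+ * result would combine the pieces above roughly as follows:
+ *
+ *   HaskellObj      result;
+ *   SchedulerStatus stat;
+ *   StgTSO *tso = createThread(RtsFlags.GcFlags.initialStkSize);
+ *   ... push the closure to be evaluated onto tso's stack ...
+ *   stat = scheduleWaitThread(tso, &result);
+ *
+ * scheduleWaitThread() registers the main-thread record, enqueues the
+ * TSO via scheduleThread_(), and then blocks in waitThread_() until the
+ * scheduler reports Success or Killed.
+ */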
/* ---------------------------------------------------------------------------
* the scheduler. */
initMutex(&sched_mutex);
initMutex(&term_mutex);
+ initMutex(&thread_id_mutex);
initCondition(&thread_ready_cond);
#endif
#if defined(RTS_SUPPORTS_THREADS)
stopTaskManager();
#endif
+ shutting_down_scheduler = rtsTrue;
}
/* -----------------------------------------------------------------------------
SchedulerStatus
waitThread(StgTSO *tso, /*out*/StgClosure **ret)
{
-#if defined(THREADED_RTS)
- return waitThread_(tso,ret, rtsFalse);
-#else
- return waitThread_(tso,ret);
-#endif
-}
-
-SchedulerStatus
-waitThread_(StgTSO *tso,
- /*out*/StgClosure **ret
-#if defined(THREADED_RTS)
- , rtsBool blockWaiting
-#endif
- )
-{
StgMainThread *m;
- SchedulerStatus stat;
- ACQUIRE_LOCK(&sched_mutex);
-
m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
-
m->tso = tso;
m->ret = ret;
m->stat = NoStatus;
initCondition(&m->wakeup);
#endif
+ /* see scheduleWaitThread() comment */
+ ACQUIRE_LOCK(&sched_mutex);
+ IF_DEBUG(scheduler, sched_belch("== scheduler: waiting for thread (%d)\n", tso->id));
m->link = main_threads;
main_threads = m;
+ RELEASE_LOCK(&sched_mutex);
+
+#if defined(THREADED_RTS)
+ return waitThread_(m, rtsFalse);
+#else
+ return waitThread_(m);
+#endif
+}
+
+static
+SchedulerStatus
+waitThread_(StgMainThread* m
+#if defined(THREADED_RTS)
+ , rtsBool blockWaiting
+#endif
+ )
+{
+ SchedulerStatus stat;
IF_DEBUG(scheduler, sched_belch("== scheduler: new main thread (%d)\n", m->tso->id));
* gets to enter the RTS directly without going via another
* task/thread.
*/
- RELEASE_LOCK(&sched_mutex);
schedule();
ASSERT(m->stat != NoStatus);
} else
# endif
{
- IF_DEBUG(scheduler, sched_belch("sfoo"));
+ ACQUIRE_LOCK(&sched_mutex);
do {
waitCondition(&m->wakeup, &sched_mutex);
} while (m->stat == NoStatus);
void
GetRoots(evac_fn evac)
{
- StgMainThread *m;
-
#if defined(GRAN)
{
nat i;
NB: only the type of the blocking queue is different in GranSim and GUM
the operations on the queue-elements are the same
long live polymorphism!
+
+ Locks: sched_mutex is held upon entry and exit.
+
*/
static void
unblockThread(StgTSO *tso)
{
StgBlockingQueueElement *t, **last;
- ACQUIRE_LOCK(&sched_mutex);
switch (tso->why_blocked) {
case NotBlocked:
tso->why_blocked = NotBlocked;
tso->block_info.closure = NULL;
PUSH_ON_RUN_QUEUE(tso);
- RELEASE_LOCK(&sched_mutex);
}
#else
static void
unblockThread(StgTSO *tso)
{
StgTSO *t, **last;
+
+ /* To avoid locking unnecessarily. */
+ if (tso->why_blocked == NotBlocked) {
+ return;
+ }
- ACQUIRE_LOCK(&sched_mutex);
switch (tso->why_blocked) {
- case NotBlocked:
- return; /* not blocked */
-
case BlockedOnMVar:
ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
{
tso->why_blocked = NotBlocked;
tso->block_info.closure = NULL;
PUSH_ON_RUN_QUEUE(tso);
- RELEASE_LOCK(&sched_mutex);
}
#endif
* CATCH_FRAME on the stack. In either case, we strip the entire
* stack and replace the thread with a zombie.
*
+ * Locks: sched_mutex held upon entry and exit.
+ *
* -------------------------------------------------------------------------- */
void
}
void
+raiseAsyncWithLock(StgTSO *tso, StgClosure *exception)
+{
+ /* When raising async exceptions from contexts where sched_mutex isn't
+    held, use raiseAsyncWithLock(). */
+ ACQUIRE_LOCK(&sched_mutex);
+ raiseAsync(tso,exception);
+ RELEASE_LOCK(&sched_mutex);
+}
+
+void
raiseAsync(StgTSO *tso, StgClosure *exception)
{
StgUpdateFrame* su = tso->su;
/* Remove it from any blocking queues */
unblockThread(tso);
+ IF_DEBUG(scheduler, sched_belch("raising exception in thread %ld.", tso->id));
/* The stack freezing code assumes there's a closure pointer on
* the top of the stack. This isn't always the case with compiled
* code, so we have to push a dummy closure on the top which just
* top of the CATCH_FRAME ready to enter.
*/
if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
+#ifdef PROFILING
StgCatchFrame *cf = (StgCatchFrame *)su;
+#endif
StgClosure *raise;
/* we've got an exception to raise, so let's pass it to the
up and sent a signal: BlockedOnDeadMVar if the thread was blocked
on an MVar, or NonTermination if the thread was blocked on a Black
Hole.
+
+ Locks: sched_mutex isn't held upon entry or exit.
-------------------------------------------------------------------------- */
void
switch (tso->why_blocked) {
case BlockedOnMVar:
case BlockedOnException:
+ /* Called by GC - sched_mutex lock is currently held. */
raiseAsync(tso,(StgClosure *)BlockedOnDeadMVar_closure);
break;
case BlockedOnBlackHole:
*
* This is only done in a deadlock situation in order to avoid
* performance overhead in the normal case.
+ *
+ * Locks: sched_mutex is held upon entry and exit.
* -------------------------------------------------------------------------- */
static void
printAllThreads(void)
{
StgTSO *t;
+ void *label;
# if defined(GRAN)
char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN];
# endif
for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
- fprintf(stderr, "\tthread %d ", t->id);
+ fprintf(stderr, "\tthread %d @ %p ", t->id, (void *)t);
+ label = lookupThreadLabel((StgWord)t);
+ if (label) fprintf(stderr,"[\"%s\"] ",(char *)label);
printThreadStatus(t);
fprintf(stderr,"\n");
}
#endif
vfprintf(stderr, s, ap);
fprintf(stderr, "\n");
+ va_end(ap);
}
#endif /* DEBUG */