/* ---------------------------------------------------------------------------
- * $Id: Schedule.c,v 1.138 2002/04/23 06:34:27 sof Exp $
*
- * (c) The GHC Team, 1998-2000
+ * (c) The GHC Team, 1998-2004
*
* Scheduler
*
*
* WAY Name CPP flag What's it for
* --------------------------------------
- * mp GUM PAR Parallel execution on a distributed memory machine
+ * mp GUM PAR Parallel execution on a distrib. memory machine
* s SMP SMP Parallel execution on a shared memory machine
* mg GranSim GRAN Simulation of parallel execution
* md GUM/GdH DIST Distributed execution (based on GUM)
*
* --------------------------------------------------------------------------*/
-//@node Main scheduling code, , ,
-//@section Main scheduling code
-
/*
- * Version with scheduler monitor support for SMPs (WAY=s):
-
- This design provides a high-level API to create and schedule threads etc.
- as documented in the SMP design document.
-
- It uses a monitor design controlled by a single mutex to exercise control
- over accesses to shared data structures, and builds on the Posix threads
- library.
-
- The majority of state is shared. In order to keep essential per-task state,
- there is a Capability structure, which contains all the information
- needed to run a thread: its STG registers, a pointer to its TSO, a
- nursery etc. During STG execution, a pointer to the capability is
- kept in a register (BaseReg).
-
- In a non-SMP build, there is one global capability, namely MainRegTable.
-
- SDM & KH, 10/99
-
* Version with support for distributed memory parallelism aka GUM (WAY=mp):
   The main scheduling loop in GUM iterates until a finish message is
   received; at that point this instance of the RTS shuts down.

 * Version with support for simulating parallel execution aka GranSim (WAY=mg):

   The main scheduling code in GranSim is event driven, i.e. it iterates
   over the events in the global event queue.  -- HWL
*/
-//@menu
-//* Includes::
-//* Variables and Data structures::
-//* Main scheduling loop::
-//* Suspend and Resume::
-//* Run queue code::
-//* Garbage Collextion Routines::
-//* Blocking Queue Routines::
-//* Exception Handling Routines::
-//* Debugging Routines::
-//* Index::
-//@end menu
-
-//@node Includes, Variables and Data structures, Main scheduling code, Main scheduling code
-//@subsection Includes
-
#include "PosixSource.h"
#include "Rts.h"
#include "SchedAPI.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
+#include "BlockAlloc.h"
#include "Storage.h"
#include "StgRun.h"
-#include "StgStartup.h"
#include "Hooks.h"
+#define COMPILING_SCHEDULER
#include "Schedule.h"
#include "StgMiscClosures.h"
#include "Storage.h"
#include "Interpreter.h"
#include "Exception.h"
#include "Printer.h"
-#include "Main.h"
#include "Signals.h"
#include "Sanity.h"
#include "Stats.h"
-#include "Itimer.h"
+#include "Timer.h"
#include "Prelude.h"
+#include "ThreadLabels.h"
+#include "LdvProfile.h"
+#include "Updates.h"
#ifdef PROFILING
#include "Proftimer.h"
#include "ProfHeap.h"
#include <unistd.h>
#endif
+#include <string.h>
+#include <stdlib.h>
#include <stdarg.h>
-//@node Variables and Data structures, Prototypes, Includes, Main scheduling code
-//@subsection Variables and Data structures
+#ifdef HAVE_ERRNO_H
+#include <errno.h>
+#endif
+
+#ifdef THREADED_RTS
+#define USED_IN_THREADED_RTS
+#else
+#define USED_IN_THREADED_RTS STG_UNUSED
+#endif
+
+#ifdef RTS_SUPPORTS_THREADS
+#define USED_WHEN_RTS_SUPPORTS_THREADS
+#else
+#define USED_WHEN_RTS_SUPPORTS_THREADS STG_UNUSED
+#endif
/* Main thread queue.
* Locks required: sched_mutex.
*/
-StgMainThread *main_threads;
+StgMainThread *main_threads = NULL;
/* Thread queues.
* Locks required: sched_mutex.
#else /* !GRAN */
-StgTSO *run_queue_hd, *run_queue_tl;
-StgTSO *blocked_queue_hd, *blocked_queue_tl;
-StgTSO *sleeping_queue; /* perhaps replace with a hash table? */
+StgTSO *run_queue_hd = NULL;
+StgTSO *run_queue_tl = NULL;
+StgTSO *blocked_queue_hd = NULL;
+StgTSO *blocked_queue_tl = NULL;
+StgTSO *sleeping_queue = NULL; /* perhaps replace with a hash table? */
#endif
/* Linked list of all threads.
* Used for detecting garbage collected threads.
*/
-StgTSO *all_threads;
+StgTSO *all_threads = NULL;
/* When a thread performs a safe C call (_ccall_GC, using old
* terminology), it gets put on the suspended_ccalling_threads
*/
/* flag set by signal handler to precipitate a context switch */
-//@cindex context_switch
-nat context_switch;
+int context_switch = 0;
/* if this flag is set as well, give up execution */
-//@cindex interrupted
-rtsBool interrupted;
+rtsBool interrupted = rtsFalse;
/* Next thread ID to allocate.
- * Locks required: sched_mutex
+ * Locks required: thread_id_mutex
*/
-//@cindex next_thread_id
-StgThreadID next_thread_id = 1;
+static StgThreadID next_thread_id = 1;
/*
* Pointers to the state of the current thread.
/* The smallest stack size that makes any sense is:
* RESERVED_STACK_WORDS (so we can get back from the stack overflow)
* + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
- * + 1 (the realworld token for an IO thread)
* + 1 (the closure to enter)
+ * + 1 (stg_ap_v_ret)
+ * + 1 (spare slot req'd by stg_ap_v_ret)
*
* A thread with this stack will bomb immediately with a stack
* overflow, which will increase its stack size.
*/
-#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
+#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 3)
#if defined(GRAN)
*/
StgTSO dummy_tso;
-rtsBool ready_to_gc;
+static rtsBool ready_to_gc;
+
+/*
+ * Set to TRUE when entering a shutdown state (via shutdownHaskellAndExit()) --
+ * in an MT setting, needed to signal that a worker thread shouldn't hang around
+ * in the scheduler when it is out of work.
+ */
+static rtsBool shutting_down_scheduler = rtsFalse;
void addToBlockedQueue ( StgTSO *tso );
-static void schedule ( void );
+static void schedule ( StgMainThread *mainThread, Capability *initialCapability );
void interruptStgRts ( void );
-#if defined(GRAN)
-static StgTSO * createThread_ ( nat size, rtsBool have_lock, StgInt pri );
-#else
-static StgTSO * createThread_ ( nat size, rtsBool have_lock );
-#endif
+#if !defined(PAR) && !defined(RTS_SUPPORTS_THREADS)
static void detectBlackHoles ( void );
-
-#ifdef DEBUG
-static void sched_belch(char *s, ...);
#endif
#if defined(RTS_SUPPORTS_THREADS)
Mutex sched_mutex = INIT_MUTEX_VAR;
Mutex term_mutex = INIT_MUTEX_VAR;
-# if defined(SMP)
-static Condition gc_pending_cond = INIT_COND_VAR;
-nat await_death;
-# endif
-
#endif /* RTS_SUPPORTS_THREADS */
#if defined(PAR)
#endif
#if DEBUG
-char *whatNext_strs[] = {
- "ThreadEnterGHC",
+static char *whatNext_strs[] = {
+ "(unknown)",
"ThreadRunGHC",
- "ThreadEnterInterp",
+ "ThreadInterpret",
"ThreadKilled",
+ "ThreadRelocated",
"ThreadComplete"
};
-
-char *threadReturnCode_strs[] = {
- "HeapOverflow", /* might also be StackOverflow */
- "StackOverflow",
- "ThreadYielding",
- "ThreadBlocked",
- "ThreadFinished"
-};
#endif
#if defined(PAR)
StgTSO * activateSpark (rtsSpark spark);
#endif
-/*
- * The thread state for the main thread.
-// ToDo: check whether not needed any more
-StgTSO *MainTSO;
- */
+/* ----------------------------------------------------------------------------
+ * Starting Tasks
+ * ------------------------------------------------------------------------- */
+
+#if defined(RTS_SUPPORTS_THREADS)
+static rtsBool startingWorkerThread = rtsFalse;
-#if defined(PAR) || defined(RTS_SUPPORTS_THREADS)
static void taskStart(void);
static void
taskStart(void)
{
- schedule();
+ ACQUIRE_LOCK(&sched_mutex);
+ startingWorkerThread = rtsFalse;
+ schedule(NULL,NULL);
+ RELEASE_LOCK(&sched_mutex);
}
-#endif
-
-
-
-//@node Main scheduling loop, Suspend and Resume, Prototypes, Main scheduling code
-//@subsection Main scheduling loop
+void
+startSchedulerTaskIfNecessary(void)
+{
+ if(run_queue_hd != END_TSO_QUEUE
+ || blocked_queue_hd != END_TSO_QUEUE
+ || sleeping_queue != END_TSO_QUEUE)
+ {
+ if(!startingWorkerThread)
+ { // we don't want to start another worker thread
+ // just because the last one hasn't yet reached the
+ // "waiting for capability" state
+ startingWorkerThread = rtsTrue;
+ if(!startTask(taskStart))
+ {
+ startingWorkerThread = rtsFalse;
+ }
+ }
+ }
+}
+#endif
/* ---------------------------------------------------------------------------
Main scheduling loop.
This is not the ugliest code you could imagine, but it's bloody close.
------------------------------------------------------------------------ */
-//@cindex schedule
static void
-schedule( void )
+schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
+ Capability *initialCapability )
{
StgTSO *t;
Capability *cap;
# endif
#endif
rtsBool was_interrupted = rtsFalse;
+ nat prev_what_next;
- ACQUIRE_LOCK(&sched_mutex);
-
+ // Pre-condition: sched_mutex is held.
+ // We might have a capability, passed in as initialCapability.
+ cap = initialCapability;
+
#if defined(RTS_SUPPORTS_THREADS)
- waitForWorkCapability(&sched_mutex, &cap, rtsFalse);
+ //
+ // in the threaded case, the capability is either passed in via the
+ // initialCapability parameter, or initialized inside the scheduler
+ // loop
+ //
+ IF_DEBUG(scheduler,
+ sched_belch("### NEW SCHEDULER LOOP (main thr: %p, cap: %p)",
+ mainThread, initialCapability);
+ );
#else
- /* simply initialise it in the non-threaded case */
+ // simply initialise it in the non-threaded case
grabCapability(&cap);
#endif
CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
IF_DEBUG(gran,
- fprintf(stderr, "GRAN: Init CurrentTSO (in schedule) = %p\n", CurrentTSO);
+ debugBelch("GRAN: Init CurrentTSO (in schedule) = %p\n", CurrentTSO);
G_TSO(CurrentTSO, 5));
if (RtsFlags.GranFlags.Light) {
while (!receivedFinish) { /* set by processMessages */
/* when receiving PP_FINISH message */
-#else
- while (1) {
-
-#endif
+#else // everything except GRAN and PAR
- IF_DEBUG(scheduler, printAllThreads());
+ while (1) {
-#if defined(RTS_SUPPORTS_THREADS)
- /* Check to see whether there are any worker threads
- waiting to deposit external call results. If so,
- yield our capability */
- yieldToReturningWorker(&sched_mutex, &cap);
#endif
- /* If we're interrupted (the user pressed ^C, or some other
- * termination condition occurred), kill all the currently running
- * threads.
- */
- if (interrupted) {
- IF_DEBUG(scheduler, sched_belch("interrupted"));
- deleteAllThreads();
- interrupted = rtsFalse;
- was_interrupted = rtsTrue;
- }
+ IF_DEBUG(scheduler, printAllThreads());
- /* Go through the list of main threads and wake up any
- * clients whose computations have finished. ToDo: this
- * should be done more efficiently without a linear scan
- * of the main threads list, somehow...
- */
#if defined(RTS_SUPPORTS_THREADS)
- {
- StgMainThread *m, **prev;
- prev = &main_threads;
- for (m = main_threads; m != NULL; m = m->link) {
- switch (m->tso->what_next) {
- case ThreadComplete:
- if (m->ret) {
- *(m->ret) = (StgClosure *)m->tso->sp[0];
- }
- *prev = m->link;
- m->stat = Success;
- broadcastCondition(&m->wakeup);
-#ifdef DEBUG
- free(m->tso->label);
-#endif
- break;
- case ThreadKilled:
- if (m->ret) *(m->ret) = NULL;
- *prev = m->link;
- if (was_interrupted) {
- m->stat = Interrupted;
- } else {
- m->stat = Killed;
- }
- broadcastCondition(&m->wakeup);
-#ifdef DEBUG
- free(m->tso->label);
-#endif
- break;
- default:
- break;
- }
+ // Yield the capability to higher-priority tasks if necessary.
+ //
+ if (cap != NULL) {
+ yieldCapability(&cap);
}
- }
-
-#else /* not threaded */
-# if defined(PAR)
- /* in GUM do this only on the Main PE */
- if (IAmMainThread)
-# endif
- /* If our main thread has finished or been killed, return.
- */
- {
- StgMainThread *m = main_threads;
- if (m->tso->what_next == ThreadComplete
- || m->tso->what_next == ThreadKilled) {
-#ifdef DEBUG
- free(m->tso->label);
-#endif
- main_threads = main_threads->link;
- if (m->tso->what_next == ThreadComplete) {
- /* we finished successfully, fill in the return value */
- if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
- m->stat = Success;
- return;
- } else {
- if (m->ret) { *(m->ret) = NULL; };
- if (was_interrupted) {
- m->stat = Interrupted;
- } else {
- m->stat = Killed;
- }
- return;
- }
+ // If we do not currently hold a capability, we wait for one
+ //
+ if (cap == NULL) {
+ waitForCapability(&sched_mutex, &cap,
+ mainThread ? &mainThread->bound_thread_cond : NULL);
}
- }
-#endif
-
- /* Top up the run queue from our spark pool. We try to make the
- * number of threads in the run queue equal to the number of
- * free capabilities.
- *
- * Disable spark support in SMP for now, non-essential & requires
- * a little bit of work to make it compile cleanly. -- sof 1/02.
- */
-#if 0 /* defined(SMP) */
- {
- nat n = getFreeCapabilities();
- StgTSO *tso = run_queue_hd;
- /* Count the run queue */
- while (n > 0 && tso != END_TSO_QUEUE) {
- tso = tso->link;
- n--;
- }
+ // We now have a capability...
+#endif
- for (; n > 0; n--) {
- StgClosure *spark;
- spark = findSpark(rtsFalse);
- if (spark == NULL) {
- break; /* no more sparks in the pool */
- } else {
- /* I'd prefer this to be done in activateSpark -- HWL */
- /* tricky - it needs to hold the scheduler lock and
- * not try to re-acquire it -- SDM */
- createSparkThread(spark);
- IF_DEBUG(scheduler,
- sched_belch("==^^ turning spark of closure %p into a thread",
- (StgClosure *)spark));
- }
- }
- /* We need to wake up the other tasks if we just created some
- * work for them.
- */
- if (getFreeCapabilities() - n > 1) {
- signalCondition( &thread_ready_cond );
- }
+ //
+ // If we're interrupted (the user pressed ^C, or some other
+ // termination condition occurred), kill all the currently running
+ // threads.
+ //
+ if (interrupted) {
+ IF_DEBUG(scheduler, sched_belch("interrupted"));
+ interrupted = rtsFalse;
+ was_interrupted = rtsTrue;
+#if defined(RTS_SUPPORTS_THREADS)
+ // In the threaded RTS, deadlock detection doesn't work,
+ // so just exit right away.
+ errorBelch("interrupted");
+ releaseCapability(cap);
+ RELEASE_LOCK(&sched_mutex);
+ shutdownHaskellAndExit(EXIT_SUCCESS);
+#else
+ deleteAllThreads();
+#endif
}
-#endif // SMP
- /* check for signals each time around the scheduler */
-#ifndef mingw32_TARGET_OS
+#if defined(RTS_USER_SIGNALS)
+ // check for signals each time around the scheduler
if (signals_pending()) {
RELEASE_LOCK(&sched_mutex); /* ToDo: kill */
startSignalHandlers();
}
#endif
- /* Check whether any waiting threads need to be woken up. If the
- * run queue is empty, and there are no other tasks running, we
- * can wait indefinitely for something to happen.
- * ToDo: what if another client comes along & requests another
- * main thread?
- */
- if ( !EMPTY_QUEUE(blocked_queue_hd) || !EMPTY_QUEUE(sleeping_queue) ) {
- awaitEvent( EMPTY_RUN_QUEUE()
-#if defined(SMP)
- && allFreeCapabilities()
+ //
+ // Check whether any waiting threads need to be woken up. If the
+ // run queue is empty, and there are no other tasks running, we
+ // can wait indefinitely for something to happen.
+ //
+ if ( !EMPTY_QUEUE(blocked_queue_hd) || !EMPTY_QUEUE(sleeping_queue) )
+ {
+#if defined(RTS_SUPPORTS_THREADS)
+ // We shouldn't be here...
+ barf("schedule: awaitEvent() in threaded RTS");
#endif
- );
+ awaitEvent( EMPTY_RUN_QUEUE() );
}
- /* we can be interrupted while waiting for I/O... */
+ // we can be interrupted while waiting for I/O...
if (interrupted) continue;
/*
* If no threads are black holed, we have a deadlock situation, so
* inform all the main threads.
*/
-#ifndef PAR
- if ( EMPTY_THREAD_QUEUES()
-#if defined(RTS_SUPPORTS_THREADS)
- && EMPTY_QUEUE(suspended_ccalling_threads)
-#endif
-#ifdef SMP
- && allFreeCapabilities()
-#endif
- )
+#if !defined(PAR) && !defined(RTS_SUPPORTS_THREADS)
+ if ( EMPTY_THREAD_QUEUES() )
{
IF_DEBUG(scheduler, sched_belch("deadlocked, forcing major GC..."));
-#if defined(THREADED_RTS)
- /* and SMP mode ..? */
- releaseCapability(cap);
-#endif
+
// Garbage collection can release some new threads due to
// either (a) finalizers or (b) threads resurrected because
- // they are about to be send BlockedOnDeadMVar. Any threads
- // thus released will be immediately runnable.
+ // they are unreachable and will therefore be sent an
+ // exception. Any threads thus released will be immediately
+ // runnable.
GarbageCollect(GetRoots,rtsTrue);
-
- if ( !EMPTY_RUN_QUEUE() ) { goto not_deadlocked; }
-
- IF_DEBUG(scheduler,
- sched_belch("still deadlocked, checking for black holes..."));
- detectBlackHoles();
-
if ( !EMPTY_RUN_QUEUE() ) { goto not_deadlocked; }
-#ifndef mingw32_TARGET_OS
+#if defined(RTS_USER_SIGNALS)
/* If we have user-installed signal handlers, then wait
* for signals to arrive rather then bombing out with a
* deadlock.
*/
-#if defined(RTS_SUPPORTS_THREADS)
- if ( 0 ) { /* hmm..what to do? Simply stop waiting for
- a signal with no runnable threads (or I/O
- suspended ones) leads nowhere quick.
- For now, simply shut down when we reach this
- condition.
-
- ToDo: define precisely under what conditions
- the Scheduler should shut down in an MT setting.
- */
-#else
if ( anyUserHandlers() ) {
-#endif
IF_DEBUG(scheduler,
sched_belch("still deadlocked, waiting for signals..."));
*/
{
StgMainThread *m;
-#if defined(RTS_SUPPORTS_THREADS)
- for (m = main_threads; m != NULL; m = m->link) {
- switch (m->tso->why_blocked) {
- case BlockedOnBlackHole:
- raiseAsync(m->tso, (StgClosure *)NonTermination_closure);
- break;
- case BlockedOnException:
- case BlockedOnMVar:
- raiseAsync(m->tso, (StgClosure *)Deadlock_closure);
- break;
- default:
- barf("deadlock: main thread blocked in a strange way");
- }
- }
-#else
m = main_threads;
switch (m->tso->why_blocked) {
case BlockedOnBlackHole:
- raiseAsync(m->tso, (StgClosure *)NonTermination_closure);
- break;
case BlockedOnException:
case BlockedOnMVar:
- raiseAsync(m->tso, (StgClosure *)Deadlock_closure);
+ raiseAsync(m->tso, (StgClosure *)NonTermination_closure);
break;
default:
barf("deadlock: main thread blocked in a strange way");
}
-#endif
}
-
-#if defined(RTS_SUPPORTS_THREADS)
- /* ToDo: revisit conditions (and mechanism) for shutting
- down a multi-threaded world */
- IF_DEBUG(scheduler, sched_belch("all done, i think...shutting down."));
- shutdownHaskellAndExit(0);
-#endif
}
not_deadlocked:
+#elif defined(RTS_SUPPORTS_THREADS)
+ // ToDo: add deadlock detection in threaded RTS
#elif defined(PAR)
- /* ToDo: add deadlock detection in GUM (similar to SMP) -- HWL */
+ // ToDo: add deadlock detection in GUM (similar to SMP) -- HWL
#endif
-#if defined(SMP)
- /* If there's a GC pending, don't do anything until it has
- * completed.
- */
- if (ready_to_gc) {
- IF_DEBUG(scheduler,sched_belch("waiting for GC"));
- waitCondition( &gc_pending_cond, &sched_mutex );
- }
-#endif
-
#if defined(RTS_SUPPORTS_THREADS)
- /* block until we've got a thread on the run queue and a free
- * capability.
- *
- */
if ( EMPTY_RUN_QUEUE() ) {
- /* Give up our capability */
- releaseCapability(cap);
- IF_DEBUG(scheduler, sched_belch("thread %d: waiting for work", osThreadId()));
- waitForWorkCapability(&sched_mutex, &cap, rtsTrue);
- IF_DEBUG(scheduler, sched_belch("thread %d: work now available", osThreadId()));
-#if 0
- while ( EMPTY_RUN_QUEUE() ) {
- waitForWorkCapability(&sched_mutex, &cap);
- IF_DEBUG(scheduler, sched_belch("thread %d: work now available", osThreadId()));
- }
-#endif
+ continue; // nothing to do
}
#endif
if (!RtsFlags.GranFlags.Light)
handleIdlePEs();
- IF_DEBUG(gran, fprintf(stderr, "GRAN: switch by event-type\n"));
+ IF_DEBUG(gran, debugBelch("GRAN: switch by event-type\n"));
/* main event dispatcher in GranSim */
switch (event->evttype) {
/* Should just be continuing execution */
case ContinueThread:
- IF_DEBUG(gran, fprintf(stderr, "GRAN: doing ContinueThread\n"));
+ IF_DEBUG(gran, debugBelch("GRAN: doing ContinueThread\n"));
/* ToDo: check assertion
ASSERT(run_queue_hd != (StgTSO*)NULL &&
run_queue_hd != END_TSO_QUEUE);
/* Ignore ContinueThreads for fetching threads (if synchr comm) */
if (!RtsFlags.GranFlags.DoAsyncFetch &&
procStatus[CurrentProc]==Fetching) {
- belch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]",
+ debugBelch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]\n",
CurrentTSO->id, CurrentTSO, CurrentProc);
goto next_thread;
}
/* Ignore ContinueThreads for completed threads */
if (CurrentTSO->what_next == ThreadComplete) {
- belch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)",
+ debugBelch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)\n",
CurrentTSO->id, CurrentTSO, CurrentProc);
goto next_thread;
}
/* Ignore ContinueThreads for threads that are being migrated */
if (PROCS(CurrentTSO)==Nowhere) {
- belch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)",
+ debugBelch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)\n",
CurrentTSO->id, CurrentTSO, CurrentProc);
goto next_thread;
}
/* The thread should be at the beginning of the run queue */
if (CurrentTSO!=run_queue_hds[CurrentProc]) {
- belch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread",
+ debugBelch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread\n",
CurrentTSO->id, CurrentTSO, CurrentProc);
break; // run the thread anyway
}
/* This point was scheduler_loop in the old RTS */
- IF_DEBUG(gran, belch("GRAN: after main switch"));
+ IF_DEBUG(gran, debugBelch("GRAN: after main switch\n"));
TimeOfLastEvent = CurrentTime[CurrentProc];
TimeOfNextEvent = get_time_of_next_event();
IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK
// CurrentTSO = ThreadQueueHd;
- IF_DEBUG(gran, belch("GRAN: time of next event is: %ld",
+ IF_DEBUG(gran, debugBelch("GRAN: time of next event is: %ld\n",
TimeOfNextEvent));
if (RtsFlags.GranFlags.Light)
EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice;
IF_DEBUG(gran,
- belch("GRAN: end of time-slice is %#lx", EndOfTimeSlice));
+ debugBelch("GRAN: end of time-slice is %#lx\n", EndOfTimeSlice));
/* in a GranSim setup the TSO stays on the run queue */
t = CurrentTSO;
/* Take a thread from the run queue. */
- t = POP_RUN_QUEUE(); // take_off_run_queue(t);
+ POP_RUN_QUEUE(t); // take_off_run_queue(t);
IF_DEBUG(gran,
- fprintf(stderr, "GRAN: About to run current thread, which is\n");
+ debugBelch("GRAN: About to run current thread, which is\n");
G_TSO(t,5));
context_switch = 0; // turned on via GranYield, checking events and time slice
if (spark != (rtsSpark) NULL) {
tso = activateSpark(spark); /* turn the spark into a thread */
IF_PAR_DEBUG(schedule,
- belch("==== schedule: Created TSO %d (%p); %d threads active",
+ debugBelch("==== schedule: Created TSO %d (%p); %d threads active\n",
tso->id, tso, advisory_thread_count));
if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
- belch("==^^ failed to activate spark");
+ debugBelch("==^^ failed to activate spark\n");
goto next_thread;
} /* otherwise fall through & pick-up new tso */
} else {
IF_PAR_DEBUG(verbose,
- belch("==^^ no local sparks (spark pool contains only NFs: %d)",
+ debugBelch("==^^ no local sparks (spark pool contains only NFs: %d)\n",
spark_queue_len(pool)));
goto next_thread;
}
*/
TIME now = msTime() /*CURRENT_TIME*/;
IF_PAR_DEBUG(verbose,
- belch("-- now=%ld", now));
+ debugBelch("-- now=%ld\n", now));
IF_PAR_DEBUG(verbose,
if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
(last_fish_arrived_at!=0 &&
last_fish_arrived_at+RtsFlags.ParFlags.fishDelay > now)) {
- belch("--$$ delaying FISH until %ld (last fish %ld, delay %ld, now %ld)",
+ debugBelch("--$$ delaying FISH until %ld (last fish %ld, delay %ld, now %ld)\n",
last_fish_arrived_at+RtsFlags.ParFlags.fishDelay,
last_fish_arrived_at,
RtsFlags.ParFlags.fishDelay, now);
ASSERT(run_queue_hd != END_TSO_QUEUE);
/* Take a thread from the run queue, if we have work */
- t = POP_RUN_QUEUE(); // take_off_run_queue(END_TSO_QUEUE);
+ POP_RUN_QUEUE(t); // take_off_run_queue(END_TSO_QUEUE);
IF_DEBUG(sanity,checkTSO(t));
/* ToDo: write something to the log-file
pool = &(MainRegTable.rSparks); // generalise to cap = &MainRegTable
IF_DEBUG(scheduler,
- belch("--=^ %d threads, %d sparks on [%#x]",
+ debugBelch("--=^ %d threads, %d sparks on [%#x]\n",
run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
# if 1
# endif
#else /* !GRAN && !PAR */
- /* grab a thread from the run queue */
+ // grab a thread from the run queue
ASSERT(run_queue_hd != END_TSO_QUEUE);
- t = POP_RUN_QUEUE();
+ POP_RUN_QUEUE(t);
+
// Sanity check the thread we're about to run. This can be
// expensive if there is lots of thread switching going on...
IF_DEBUG(sanity,checkTSO(t));
#endif
-
+
+#ifdef THREADED_RTS
+ {
+ StgMainThread *m = t->main;
+
+ if(m)
+ {
+ if(m == mainThread)
+ {
+ IF_DEBUG(scheduler,
+ sched_belch("### Running thread %d in bound thread", t->id));
+ // yes, the Haskell thread is bound to the current native thread
+ }
+ else
+ {
+ IF_DEBUG(scheduler,
+ sched_belch("### thread %d bound to another OS thread", t->id));
+ // no, bound to a different Haskell thread: pass to that thread
+ PUSH_ON_RUN_QUEUE(t);
+ passCapability(&m->bound_thread_cond);
+ continue;
+ }
+ }
+ else
+ {
+ if(mainThread != NULL)
+ // The thread we want to run is bound.
+ {
+ IF_DEBUG(scheduler,
+ sched_belch("### this OS thread cannot run thread %d", t->id));
+ // no, the current native thread is bound to a different
+ // Haskell thread, so pass it to any worker thread
+ PUSH_ON_RUN_QUEUE(t);
+ passCapabilityToWorker();
+ continue;
+ }
+ }
+ }
+#endif
+
cap->r.rCurrentTSO = t;
/* context switches are now initiated by the timer signal, unless
* the user specified "context switch as often as possible", with
* +RTS -C0
*/
- if (
-#ifdef PROFILING
- RtsFlags.ProfFlags.profileInterval == 0 ||
-#endif
- (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
+ if ((RtsFlags.ConcFlags.ctxtSwitchTicks == 0
&& (run_queue_hd != END_TSO_QUEUE
|| blocked_queue_hd != END_TSO_QUEUE
|| sleeping_queue != END_TSO_QUEUE)))
context_switch = 1;
- else
- context_switch = 0;
+
+run_thread:
RELEASE_LOCK(&sched_mutex);
- IF_DEBUG(scheduler, sched_belch("-->> Running TSO %ld (%p) %s ...",
- t->id, t, whatNext_strs[t->what_next]));
+ IF_DEBUG(scheduler, sched_belch("-->> running thread %ld %s ...",
+ (long)t->id, whatNext_strs[t->what_next]));
#ifdef PROFILING
startHeapProfTimer();
/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Run the current thread
*/
- switch (cap->r.rCurrentTSO->what_next) {
+ prev_what_next = t->what_next;
+
+ errno = t->saved_errno;
+
+ switch (prev_what_next) {
+
case ThreadKilled:
case ThreadComplete:
/* Thread already finished, return to scheduler. */
ret = ThreadFinished;
break;
- case ThreadEnterGHC:
- ret = StgRun((StgFunPtr) stg_enterStackTop, &cap->r);
- break;
+
case ThreadRunGHC:
ret = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
break;
- case ThreadEnterInterp:
+
+ case ThreadInterpret:
ret = interpretBCO(cap);
break;
+
default:
barf("schedule: invalid what_next field");
}
+
+ // The TSO might have moved, so find the new location:
+ t = cap->r.rCurrentTSO;
+
+ // And save the current errno in this thread.
+ t->saved_errno = errno;
+
/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
/* Costs for the scheduler are assigned to CCS_SYSTEM */
#endif
ACQUIRE_LOCK(&sched_mutex);
-
-#ifdef SMP
- IF_DEBUG(scheduler,fprintf(stderr,"scheduler (task %ld): ", osThreadId()););
+
+#ifdef RTS_SUPPORTS_THREADS
+ IF_DEBUG(scheduler,debugBelch("sched (task %p): ", osThreadId()););
#elif !defined(GRAN) && !defined(PAR)
- IF_DEBUG(scheduler,fprintf(stderr,"scheduler: "););
+ IF_DEBUG(scheduler,debugBelch("sched: "););
#endif
- t = cap->r.rCurrentTSO;
#if defined(PAR)
/* HACK 675: if the last thread didn't yield, make sure to print a
#endif
// did the task ask for a large block?
- if (cap->r.rHpAlloc > BLOCK_SIZE_W) {
+ if (cap->r.rHpAlloc > BLOCK_SIZE) {
// if so, get one and push it on the front of the nursery.
bdescr *bd;
nat blocks;
- blocks = (nat)BLOCK_ROUND_UP(cap->r.rHpAlloc * sizeof(W_)) / BLOCK_SIZE;
+ blocks = (nat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
- IF_DEBUG(scheduler,belch("--<< thread %ld (%p; %s) stopped: requesting a large block (size %d)",
- t->id, t,
- whatNext_strs[t->what_next], blocks));
+ IF_DEBUG(scheduler,debugBelch("--<< thread %ld (%s) stopped: requesting a large block (size %d)\n",
+ (long)t->id, whatNext_strs[t->what_next], blocks));
// don't do this if it would push us over the
// alloc_blocks_lim limit; we'll GC first.
}
cap->r.rCurrentNursery->u.back = bd;
- // initialise it as a nursery block
- bd->step = g0s0;
- bd->gen_no = 0;
- bd->flags = 0;
- bd->free = bd->start;
+ // initialise it as a nursery block. We initialise the
+ // step, gen_no, and flags field of *every* sub-block in
+ // this large block, because this is easier than making
+ // sure that we always find the block head of a large
+ // block whenever we call Bdescr() (eg. evacuate() and
+ // isAlive() in the GC would both have to do this, at
+ // least).
+ {
+ bdescr *x;
+ for (x = bd; x < bd + blocks; x++) {
+ x->step = g0s0;
+ x->gen_no = 0;
+ x->flags = 0;
+ }
+ }
// don't forget to update the block count in g0s0.
g0s0->n_blocks += blocks;
+ // This assert can be a killer if the app is doing lots
+ // of large block allocations.
ASSERT(countBlocks(g0s0->blocks) == g0s0->n_blocks);
// now update the nursery to point to the new block
* maybe set context_switch and wait till they all pile in,
* then have them wait on a GC condition variable.
*/
- IF_DEBUG(scheduler,belch("--<< thread %ld (%p; %s) stopped: HeapOverflow",
- t->id, t, whatNext_strs[t->what_next]));
+ IF_DEBUG(scheduler,debugBelch("--<< thread %ld (%s) stopped: HeapOverflow\n",
+ (long)t->id, whatNext_strs[t->what_next]));
threadPaused(t);
#if defined(GRAN)
ASSERT(!is_on_queue(t,CurrentProc));
// DumpGranEvent(GR_DESCHEDULE, t);
globalParStats.tot_stackover++;
#endif
- IF_DEBUG(scheduler,belch("--<< thread %ld (%p; %s) stopped, StackOverflow",
- t->id, t, whatNext_strs[t->what_next]));
+ IF_DEBUG(scheduler,debugBelch("--<< thread %ld (%s) stopped, StackOverflow\n",
+ (long)t->id, whatNext_strs[t->what_next]));
/* just adjust the stack for this thread, then pop it back
* on the run queue.
*/
threadPaused(t);
{
- StgMainThread *m;
/* enlarge the stack */
StgTSO *new_t = threadStackOverflow(t);
* main thread stack. It better not be on any other queues...
* (it shouldn't be).
*/
- for (m = main_threads; m != NULL; m = m->link) {
- if (m->tso == t) {
- m->tso = new_t;
- }
+ if (t->main != NULL) {
+ t->main->tso = new_t;
}
- threadPaused(new_t);
PUSH_ON_RUN_QUEUE(new_t);
}
break;
case ThreadYielding:
+ // Reset the context switch flag. We don't do this just before
+ // running the thread, because that would mean we would lose ticks
+ // during GC, which can lead to unfair scheduling (a thread hogs
+ // the CPU because the tick always arrives during GC). This way
+ // penalises threads that do a lot of allocation, but that seems
+ // better than the alternative.
+ context_switch = 0;
+
#if defined(GRAN)
IF_DEBUG(gran,
DumpGranEvent(GR_DESCHEDULE, t));
* GC is finished.
*/
IF_DEBUG(scheduler,
- if (t->what_next == ThreadEnterInterp) {
- /* ToDo: or maybe a timer expired when we were in Hugs?
- * or maybe someone hit ctrl-C
- */
- belch("--<< thread %ld (%p; %s) stopped to switch to Hugs",
- t->id, t, whatNext_strs[t->what_next]);
+ if (t->what_next != prev_what_next) {
+ debugBelch("--<< thread %ld (%s) stopped to switch evaluators\n",
+ (long)t->id, whatNext_strs[t->what_next]);
} else {
- belch("--<< thread %ld (%p; %s) stopped, yielding",
- t->id, t, whatNext_strs[t->what_next]);
+ debugBelch("--<< thread %ld (%s) stopped, yielding\n",
+ (long)t->id, whatNext_strs[t->what_next]);
}
);
- threadPaused(t);
-
IF_DEBUG(sanity,
- //belch("&& Doing sanity check on yielding TSO %ld.", t->id);
+ //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
checkTSO(t));
ASSERT(t->link == END_TSO_QUEUE);
+
+ // Shortcut if we're just switching evaluators: don't bother
+ // doing stack squeezing (which can be expensive), just run the
+ // thread.
+ if (t->what_next != prev_what_next) {
+ goto run_thread;
+ }
+
+ threadPaused(t);
+
#if defined(GRAN)
ASSERT(!is_on_queue(t,CurrentProc));
IF_DEBUG(sanity,
- //belch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
+ //debugBelch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
checkThreadQsSanity(rtsTrue));
#endif
+
#if defined(PAR)
if (RtsFlags.ParFlags.doFairScheduling) {
/* this does round-robin scheduling; good for concurrency */
PUSH_ON_RUN_QUEUE(t);
}
#else
- /* this does round-robin scheduling; good for concurrency */
+ // this does round-robin scheduling; good for concurrency
APPEND_TO_RUN_QUEUE(t);
#endif
+
#if defined(GRAN)
/* add a ContinueThread event to actually process the thread */
new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
ContinueThread,
t, (StgClosure*)NULL, (rtsSpark*)NULL);
IF_GRAN_DEBUG(bq,
- belch("GRAN: eventq and runnableq after adding yielded thread to queue again:");
+ debugBelch("GRAN: eventq and runnableq after adding yielded thread to queue again:\n");
G_EVENTQ(0);
G_CURR_THREADQ(0));
#endif /* GRAN */
break;
-
+
case ThreadBlocked:
#if defined(GRAN)
IF_DEBUG(scheduler,
- belch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
+ debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: \n",
t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
*/
#elif defined(PAR)
IF_DEBUG(scheduler,
- belch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: ",
+ debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: \n",
t->id, t, whatNext_strs[t->what_next], t->block_info.closure));
IF_PAR_DEBUG(bq,
* case it'll be on the relevant queue already.
*/
IF_DEBUG(scheduler,
- fprintf(stderr, "--<< thread %d (%p) stopped: ", t->id, t);
+ debugBelch("--<< thread %d (%s) stopped: ",
+ t->id, whatNext_strs[t->what_next]);
printThreadBlockage(t);
- fprintf(stderr, "\n"));
+ debugBelch("\n"));
/* Only for dumping event to log file
ToDo: do I need this in GranSim, too?
#endif
threadPaused(t);
break;
-
+
case ThreadFinished:
/* Need to check whether this was a main thread, and if so, signal
* the task that started it with the return value. If we have no
/* We also end up here if the thread kills itself with an
* uncaught exception, see Exception.hc.
*/
- IF_DEBUG(scheduler,belch("--++ thread %d (%p) finished", t->id, t));
+ IF_DEBUG(scheduler,debugBelch("--++ thread %d (%s) finished\n",
+ t->id, whatNext_strs[t->what_next]));
#if defined(GRAN)
endThread(t, CurrentProc); // clean-up the thread
#elif defined(PAR)
!RtsFlags.ParFlags.ParStats.Suppressed)
DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);
#endif
+
+ //
+ // Check whether the thread that just completed was a main
+ // thread, and if so return with the result.
+ //
+ // There is an assumption here that all thread completion goes
+ // through this point; we need to make sure that if a thread
+      // through this point; we need to make sure that if a thread
+      // ends up in the ThreadKilled state, it stays on the run
+ //
+ if (
+#if defined(RTS_SUPPORTS_THREADS)
+ mainThread != NULL
+#else
+ mainThread->tso == t
+#endif
+ )
+ {
+ // We are a bound thread: this must be our thread that just
+ // completed.
+ ASSERT(mainThread->tso == t);
+
+ if (t->what_next == ThreadComplete) {
+ if (mainThread->ret) {
+ // NOTE: return val is tso->sp[1] (see StgStartup.hc)
+ *(mainThread->ret) = (StgClosure *)mainThread->tso->sp[1];
+ }
+ mainThread->stat = Success;
+ } else {
+ if (mainThread->ret) {
+ *(mainThread->ret) = NULL;
+ }
+ if (was_interrupted) {
+ mainThread->stat = Interrupted;
+ } else {
+ mainThread->stat = Killed;
+ }
+ }
+#ifdef DEBUG
+ removeThreadLabel((StgWord)mainThread->tso->id);
+#endif
+ if (mainThread->prev == NULL) {
+ main_threads = mainThread->link;
+ } else {
+ mainThread->prev->link = mainThread->link;
+ }
+ if (mainThread->link != NULL) {
+	      mainThread->link->prev = mainThread->prev;
+ }
+ releaseCapability(cap);
+ return;
+ }
+
+#ifdef RTS_SUPPORTS_THREADS
+ ASSERT(t->main == NULL);
+#else
+ if (t->main != NULL) {
+ // Must be a main thread that is not the topmost one. Leave
+ // it on the run queue until the stack has unwound to the
+ // point where we can deal with this. Leaving it on the run
+ // queue also ensures that the garbage collector knows about
+ // this thread and its return value (it gets dropped from the
+ // all_threads list so there's no other way to find it).
+ APPEND_TO_RUN_QUEUE(t);
+ }
+#endif
break;
-
+
default:
barf("schedule: invalid thread return code %d", (int)ret);
}
#ifdef PROFILING
- if (RtsFlags.ProfFlags.profileInterval==0 || performHeapProfile) {
+ // When we have +RTS -i0 and we're heap profiling, do a census at
+ // every GC. This lets us get repeatable runs for debugging.
+ if (performHeapProfile ||
+ (RtsFlags.ProfFlags.profileInterval==0 &&
+ RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
GarbageCollect(GetRoots, rtsTrue);
heapCensus();
performHeapProfile = rtsFalse;
}
#endif
- if (ready_to_gc
-#ifdef SMP
- && allFreeCapabilities()
-#endif
- ) {
+ if (ready_to_gc) {
/* everybody back, start the GC.
* Could do it in this thread, or signal a condition var
* to do it in another thread. Either way, we need to
#endif
GarbageCollect(GetRoots,rtsFalse);
ready_to_gc = rtsFalse;
-#ifdef SMP
- broadcastCondition(&gc_pending_cond);
-#endif
#if defined(GRAN)
/* add a ContinueThread event to continue execution of current thread */
new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
ContinueThread,
t, (StgClosure*)NULL, (rtsSpark*)NULL);
IF_GRAN_DEBUG(bq,
- fprintf(stderr, "GRAN: eventq and runnableq after Garbage collection:\n");
+ debugBelch("GRAN: eventq and runnableq after Garbage collection:\n\n");
G_EVENTQ(0);
G_CURR_THREADQ(0));
#endif /* GRAN */
} /* end of while(1) */
IF_PAR_DEBUG(verbose,
- belch("== Leaving schedule() after having received Finish"));
+ debugBelch("== Leaving schedule() after having received Finish\n"));
}
/* ---------------------------------------------------------------------------
- * Singleton fork(). Do not copy any running threads.
+ * rtsSupportsBoundThreads(): is the RTS built to support bound threads?
+ * used by Control.Concurrent for error checking.
* ------------------------------------------------------------------------- */
+
+StgBool
+rtsSupportsBoundThreads(void)
+{
+#ifdef THREADED_RTS
+ return rtsTrue;
+#else
+ return rtsFalse;
+#endif
+}
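/* A hedged sketch of the intended error checking (the message text and the
 * call site are assumptions, not part of this file): a caller that needs
 * bound threads, e.g. a forkOS-style entry point, can test the flag first:
 *
 *     if (!rtsSupportsBoundThreads()) {
 *         errorBelch("this operation requires the threaded RTS");
 *     }
 */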
+
+/* ---------------------------------------------------------------------------
+ * isThreadBound(tso): check whether tso is bound to an OS thread.
+ * ------------------------------------------------------------------------- */
+
+StgBool
+isThreadBound(StgTSO* tso USED_IN_THREADED_RTS)
+{
+#ifdef THREADED_RTS
+ return (tso->main != NULL);
+#endif
+ return rtsFalse;
+}
-StgInt forkProcess(StgTSO* tso) {
+/* ---------------------------------------------------------------------------
+ * Singleton fork(). Do not copy any running threads.
+ * ------------------------------------------------------------------------- */
#ifndef mingw32_TARGET_OS
+#define FORKPROCESS_PRIMOP_SUPPORTED
+#endif
+
+#ifdef FORKPROCESS_PRIMOP_SUPPORTED
+static void
+deleteThreadImmediately(StgTSO *tso);
+#endif
+StgInt
+forkProcess(HsStablePtr *entry
+#ifndef FORKPROCESS_PRIMOP_SUPPORTED
+ STG_UNUSED
+#endif
+ )
+{
+#ifdef FORKPROCESS_PRIMOP_SUPPORTED
pid_t pid;
StgTSO* t,*next;
+ StgMainThread *m;
+ SchedulerStatus rc;
IF_DEBUG(scheduler,sched_belch("forking!"));
+ rts_lock(); // This not only acquires sched_mutex, it also
+ // makes sure that no other threads are running
pid = fork();
+
if (pid) { /* parent */
/* just return the pid */
+ rts_unlock();
+ return pid;
} else { /* child */
- /* wipe all other threads */
- run_queue_hd = tso;
- tso->link = END_TSO_QUEUE;
-
- /* DO NOT TOUCH THE QUEUES directly because most of the code around
- us is picky about finding the threat still in its queue when
- handling the deleteThread() */
+
+
+ // delete all threads
+ run_queue_hd = run_queue_tl = END_TSO_QUEUE;
+
+ for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->link;
- for (t = all_threads; t != END_TSO_QUEUE; t = next) {
- next = t->link;
- if (t->id != tso->id) {
- deleteThread(t);
+ // don't allow threads to catch the ThreadKilled exception
+ deleteThreadImmediately(t);
}
+
+ // wipe the main thread list
+ while((m = main_threads) != NULL) {
+ main_threads = m->link;
+# ifdef THREADED_RTS
+ closeCondition(&m->bound_thread_cond);
+# endif
+ stgFree(m);
+ }
+
+ rc = rts_evalStableIO(entry, NULL); // run the action
+ rts_checkSchedStatus("forkProcess",rc);
+
+ rts_unlock();
+
+ hs_exit(); // clean up and exit
+ stg_exit(0);
}
- }
- return pid;
-#else /* mingw32 */
- barf("forkProcess#: primop not implemented for mingw32, sorry!");
+#else /* !FORKPROCESS_PRIMOP_SUPPORTED */
+ barf("forkProcess#: primop not supported, sorry!\n");
return -1;
-#endif /* mingw32 */
+#endif
}
/* ---------------------------------------------------------------------------
* Locks: sched_mutex held.
* ------------------------------------------------------------------------- */
-void deleteAllThreads ( void )
+void
+deleteAllThreads ( void )
{
StgTSO* t, *next;
IF_DEBUG(scheduler,sched_belch("deleting all threads"));
next = t->global_link;
deleteThread(t);
}
- run_queue_hd = run_queue_tl = END_TSO_QUEUE;
- blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
- sleeping_queue = END_TSO_QUEUE;
+
+ // The run queue now contains a bunch of ThreadKilled threads. We
+ // must not throw these away: the main thread(s) will be in there
+ // somewhere, and the main scheduler loop has to deal with it.
+ // Also, the run queue is the only thing keeping these threads from
+ // being GC'd, and we don't want the "main thread has been GC'd" panic.
+
+ ASSERT(blocked_queue_hd == END_TSO_QUEUE);
+ ASSERT(sleeping_queue == END_TSO_QUEUE);
}
/* startThread and insertThread are now in GranSim.c -- HWL */
-//@node Suspend and Resume, Run queue code, Main scheduling loop, Main scheduling code
-//@subsection Suspend and Resume
-
/* ---------------------------------------------------------------------------
* Suspending & resuming Haskell threads.
*
* ------------------------------------------------------------------------- */
StgInt
-suspendThread( StgRegTable *reg,
- rtsBool concCall
-#if !defined(RTS_SUPPORTS_THREADS) && !defined(DEBUG)
- STG_UNUSED
-#endif
- )
+suspendThread( StgRegTable *reg )
{
nat tok;
Capability *cap;
+ int saved_errno = errno;
/* assume that *reg is a pointer to the StgRegTable part
* of a Capability.
*/
- cap = (Capability *)((void *)reg - sizeof(StgFunTable));
+ cap = (Capability *)((void *)((unsigned char*)reg - sizeof(StgFunTable)));
ACQUIRE_LOCK(&sched_mutex);
IF_DEBUG(scheduler,
- sched_belch("thread %d did a _ccall_gc (is_concurrent: %d)", cap->r.rCurrentTSO->id,concCall));
+ sched_belch("thread %d did a _ccall_gc", cap->r.rCurrentTSO->id));
+
+ // XXX this might not be necessary --SDM
+ cap->r.rCurrentTSO->what_next = ThreadRunGHC;
threadPaused(cap->r.rCurrentTSO);
cap->r.rCurrentTSO->link = suspended_ccalling_threads;
suspended_ccalling_threads = cap->r.rCurrentTSO;
-#if defined(RTS_SUPPORTS_THREADS)
- cap->r.rCurrentTSO->why_blocked = BlockedOnCCall;
-#endif
+ if(cap->r.rCurrentTSO->blocked_exceptions == NULL) {
+ cap->r.rCurrentTSO->why_blocked = BlockedOnCCall;
+ cap->r.rCurrentTSO->blocked_exceptions = END_TSO_QUEUE;
+ } else {
+ cap->r.rCurrentTSO->why_blocked = BlockedOnCCall_NoUnblockExc;
+ }
/* Use the thread ID as the token; it should be unique */
tok = cap->r.rCurrentTSO->id;
#if defined(RTS_SUPPORTS_THREADS)
/* Preparing to leave the RTS, so ensure there's a native thread/task
waiting to take over.
-
- ToDo: optimise this and only create a new task if there's a need
- for one (i.e., if there's only one Concurrent Haskell thread alive,
- there's no need to create a new task).
*/
- IF_DEBUG(scheduler, sched_belch("worker thread (%d): leaving RTS", tok));
- if (concCall) {
- startTask(taskStart);
- }
+ IF_DEBUG(scheduler, sched_belch("worker (token %d): leaving RTS", tok));
#endif
- /* Other threads _might_ be available for execution; signal this */
- THREAD_RUNNABLE();
RELEASE_LOCK(&sched_mutex);
+
+ errno = saved_errno;
return tok;
}
StgRegTable *
-resumeThread( StgInt tok,
- rtsBool concCall
-#if !defined(RTS_SUPPORTS_THREADS)
- STG_UNUSED
-#endif
- )
+resumeThread( StgInt tok )
{
StgTSO *tso, **prev;
Capability *cap;
+ int saved_errno = errno;
#if defined(RTS_SUPPORTS_THREADS)
/* Wait for permission to re-enter the RTS with the result. */
- if ( concCall ) {
- ACQUIRE_LOCK(&sched_mutex);
- grabReturnCapability(&sched_mutex, &cap);
- } else {
- grabCapability(&cap);
- }
+ ACQUIRE_LOCK(&sched_mutex);
+ waitForReturnCapability(&sched_mutex, &cap);
+
+ IF_DEBUG(scheduler, sched_belch("worker (token %d): re-entering RTS", tok));
#else
grabCapability(&cap);
#endif
barf("resumeThread: thread not found");
}
tso->link = END_TSO_QUEUE;
+
+ if(tso->why_blocked == BlockedOnCCall) {
+ awakenBlockedQueueNoLock(tso->blocked_exceptions);
+ tso->blocked_exceptions = NULL;
+ }
+
/* Reset blocking status */
tso->why_blocked = NotBlocked;
cap->r.rCurrentTSO = tso;
RELEASE_LOCK(&sched_mutex);
+ errno = saved_errno;
return &cap->r;
}
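/* A hedged sketch of how suspendThread()/resumeThread() bracket a "safe"
 * foreign call in generated code; `foo', `a', `b' and `r' are hypothetical,
 * and `reg' stands for the current register table (BaseReg):
 *
 *     StgInt tok;
 *     tok = suspendThread(reg);   // leave the RTS; another task may run
 *     r   = foo(a, b);            // the C call proper, no RTS lock held
 *     reg = resumeThread(tok);    // wait for a capability and re-enter
 *
 * The token is simply the suspended TSO's thread id, which resumeThread()
 * uses to find the TSO on suspended_ccalling_threads.
 */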
* instances of Eq/Ord for ThreadIds.
* ------------------------------------------------------------------------ */
-int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
+int
+cmp_thread(StgPtr tso1, StgPtr tso2)
{
- StgThreadID id1 = tso1->id;
- StgThreadID id2 = tso2->id;
+ StgThreadID id1 = ((StgTSO *)tso1)->id;
+ StgThreadID id2 = ((StgTSO *)tso2)->id;
if (id1 < id2) return (-1);
if (id1 > id2) return 1;
*
* This is used in the implementation of Show for ThreadIds.
* ------------------------------------------------------------------------ */
-int rts_getThreadId(const StgTSO *tso)
+int
+rts_getThreadId(StgPtr tso)
{
- return tso->id;
+ return ((StgTSO *)tso)->id;
}
#ifdef DEBUG
-void labelThread(StgTSO *tso, char *label)
+void
+labelThread(StgPtr tso, char *label)
{
int len;
void *buf;
/* Caveat: Once set, you can only set the thread name to "" */
len = strlen(label)+1;
- buf = realloc(tso->label,len);
- if (buf == NULL) {
- fprintf(stderr,"insufficient memory for labelThread!\n");
- free(tso->label);
- } else
- strncpy(buf,label,len);
- tso->label = buf;
+ buf = stgMallocBytes(len * sizeof(char), "Schedule.c:labelThread()");
+ strncpy(buf,label,len);
+ /* Update will free the old memory for us */
+ updateThreadLabel(((StgTSO *)tso)->id,buf);
}
#endif /* DEBUG */
currently pri (priority) is only used in a GRAN setup -- HWL
------------------------------------------------------------------------ */
-//@cindex createThread
#if defined(GRAN)
/* currently pri (priority) is only used in a GRAN setup -- HWL */
StgTSO *
-createThread(nat stack_size, StgInt pri)
-{
- return createThread_(stack_size, rtsFalse, pri);
-}
-
-static StgTSO *
-createThread_(nat size, rtsBool have_lock, StgInt pri)
-{
+createThread(nat size, StgInt pri)
#else
StgTSO *
-createThread(nat stack_size)
-{
- return createThread_(stack_size, rtsFalse);
-}
-
-static StgTSO *
-createThread_(nat size, rtsBool have_lock)
-{
+createThread(nat size)
#endif
+{
StgTSO *tso;
nat stack_size;
/* check that no more than RtsFlags.ParFlags.maxThreads threads are created */
if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads) {
threadsIgnored++;
- belch("{createThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)",
+ debugBelch("{createThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)\n",
RtsFlags.ParFlags.maxThreads, advisory_thread_count);
return END_TSO_QUEUE;
}
#if defined(GRAN)
SET_GRAN_HDR(tso, ThisPE);
#endif
- tso->what_next = ThreadEnterGHC;
-#ifdef DEBUG
- tso->label = NULL;
-#endif
+ // Always start with the compiled code evaluator
+ tso->what_next = ThreadRunGHC;
- /* tso->id needs to be unique. For now we use a heavyweight mutex to
- * protect the increment operation on next_thread_id.
- * In future, we could use an atomic increment instead.
- */
-#ifdef SMP
- if (!have_lock) { ACQUIRE_LOCK(&sched_mutex); }
-#endif
tso->id = next_thread_id++;
-#ifdef SMP
- if (!have_lock) { RELEASE_LOCK(&sched_mutex); }
-#endif
-
tso->why_blocked = NotBlocked;
tso->blocked_exceptions = NULL;
+ tso->saved_errno = 0;
+ tso->main = NULL;
+
tso->stack_size = stack_size;
tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
- TSO_STRUCT_SIZEW;
/* put a stop frame on the stack */
tso->sp -= sizeofW(StgStopFrame);
SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
- tso->su = (StgUpdateFrame*)tso->sp;
+ tso->link = END_TSO_QUEUE;
// ToDo: check this
#if defined(GRAN)
- tso->link = END_TSO_QUEUE;
/* uses more flexible routine in GranSim */
insertThread(tso, CurrentProc);
#else
// collect parallel global statistics (currently done together with GC stats)
if (RtsFlags.ParFlags.ParStats.Global &&
RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
- //fprintf(stderr, "Creating thread %d @ %11.2f\n", tso->id, usertime());
+ //debugBelch("Creating thread %d @ %11.2f\n", tso->id, usertime());
globalParStats.tot_threads_created++;
}
#endif
#if defined(GRAN)
IF_GRAN_DEBUG(pri,
- belch("==__ schedule: Created TSO %d (%p);",
+ sched_belch("==__ schedule: Created TSO %d (%p);",
CurrentProc, tso, tso->id));
#elif defined(PAR)
IF_PAR_DEBUG(verbose,
- belch("==__ schedule: Created TSO %d (%p); %d threads active",
- tso->id, tso, advisory_thread_count));
+ sched_belch("==__ schedule: Created TSO %d (%p); %d threads active",
+ (long)tso->id, tso, advisory_thread_count));
#else
IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words",
- tso->id, tso->stack_size));
+ (long)tso->id, (long)tso->stack_size));
#endif
return tso;
}
}
else
{ threadsCreated++;
- tso = createThread_(RtsFlags.GcFlags.initialStkSize, rtsTrue);
+ tso = createThread(RtsFlags.GcFlags.initialStkSize);
if (tso==END_TSO_QUEUE)
barf("createSparkThread: Cannot create TSO");
#if defined(DIST)
ToDo: fix for SMP (needs to acquire SCHED_MUTEX!)
*/
#if defined(PAR)
-//@cindex activateSpark
StgTSO *
activateSpark (rtsSpark spark)
{
if (RtsFlags.ParFlags.ParStats.Full) {
//ASSERT(run_queue_hd == END_TSO_QUEUE); // I think ...
IF_PAR_DEBUG(verbose,
- belch("==^^ activateSpark: turning spark of closure %p (%s) into a thread",
+ debugBelch("==^^ activateSpark: turning spark of closure %p (%s) into a thread\n",
(StgClosure *)spark, info_type((StgClosure *)spark)));
}
// ToDo: fwd info on local/global spark to thread -- HWL
}
#endif
+static SchedulerStatus waitThread_(/*out*/StgMainThread* m,
+ Capability *initialCapability
+ );
+
+
/* ---------------------------------------------------------------------------
* scheduleThread()
*
* on this thread's stack before the scheduler is invoked.
* ------------------------------------------------------------------------ */
-static void scheduleThread_ (StgTSO* tso, rtsBool createTask);
+static void scheduleThread_ (StgTSO* tso);
void
-scheduleThread_(StgTSO *tso
- , rtsBool createTask
-#if !defined(THREADED_RTS)
- STG_UNUSED
-#endif
- )
+scheduleThread_(StgTSO *tso)
+{
+ // The thread goes at the *end* of the run-queue, to avoid possible
+ // starvation of any threads already on the queue.
+ APPEND_TO_RUN_QUEUE(tso);
+ threadRunnable();
+}
+
+void
+scheduleThread(StgTSO* tso)
{
ACQUIRE_LOCK(&sched_mutex);
+ scheduleThread_(tso);
+ RELEASE_LOCK(&sched_mutex);
+}
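/* A hedged sketch of the usual creation/scheduling sequence (for example on
 * behalf of a forkIO-style primop); pushing the closure to be evaluated onto
 * the new thread's stack is elided and assumed to happen before scheduling:
 *
 *     StgTSO *tso;
 *     tso = createThread(RtsFlags.GcFlags.initialStkSize);
 *     // ... push the closure to run onto tso's stack ...
 *     scheduleThread(tso);        // append to run queue, wake a worker
 */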
- /* Put the new thread on the head of the runnable queue. The caller
- * better push an appropriate closure on this thread's stack
- * beforehand. In the SMP case, the thread may start running as
- * soon as we release the scheduler lock below.
- */
- PUSH_ON_RUN_QUEUE(tso);
-#if defined(THREADED_RTS)
- /* If main() is scheduling a thread, don't bother creating a
- * new task.
- */
- if ( createTask ) {
- startTask(taskStart);
- }
+#if defined(RTS_SUPPORTS_THREADS)
+static Condition bound_cond_cache;
+static int bound_cond_cache_full = 0;
#endif
- THREAD_RUNNABLE();
-#if 0
- IF_DEBUG(scheduler,printTSO(tso));
+
+SchedulerStatus
+scheduleWaitThread(StgTSO* tso, /*[out]*/HaskellObj* ret,
+ Capability *initialCapability)
+{
+ // Precondition: sched_mutex must be held
+ StgMainThread *m;
+
+ m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
+ m->tso = tso;
+ tso->main = m;
+ m->ret = ret;
+ m->stat = NoStatus;
+ m->link = main_threads;
+ m->prev = NULL;
+ if (main_threads != NULL) {
+ main_threads->prev = m;
+ }
+ main_threads = m;
+
+#if defined(RTS_SUPPORTS_THREADS)
+ // Allocating a new condition for each thread is expensive, so we
+ // cache one. This is a pretty feeble hack, but it helps speed up
+ // consecutive call-ins quite a bit.
+ if (bound_cond_cache_full) {
+ m->bound_thread_cond = bound_cond_cache;
+ bound_cond_cache_full = 0;
+ } else {
+ initCondition(&m->bound_thread_cond);
+ }
#endif
- RELEASE_LOCK(&sched_mutex);
-}
-void scheduleThread(StgTSO* tso)
-{
- return scheduleThread_(tso, rtsFalse);
-}
+    /* Put the thread on the main-threads list prior to scheduling the TSO.
+       Failing to do so introduces a race condition in the MT case (as
+       identified by Wolfgang Thaller), whereby the new task/OS thread
+       created by scheduleThread_() could complete before the thread
+       that spawned it managed to put 'itself' on the main-threads list.
+       The upshot is that the worker thread would never get to signal
+       the completion of its work item to the main thread, which would
+       then be stuck waiting.    -- sof 6/02.
+    */
+ IF_DEBUG(scheduler, sched_belch("waiting for thread (%d)", tso->id));
+
+ APPEND_TO_RUN_QUEUE(tso);
+ // NB. Don't call threadRunnable() here, because the thread is
+ // bound and only runnable by *this* OS thread, so waking up other
+ // workers will just slow things down.
-void scheduleExtThread(StgTSO* tso)
-{
- return scheduleThread_(tso, rtsTrue);
+ return waitThread_(m, initialCapability);
}
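/* A hedged usage sketch for scheduleWaitThread(), roughly what an
 * rts_evalIO()-style entry point does; the closure-pushing step and the
 * NULL initialCapability are assumptions for illustration only:
 *
 *     StgTSO *tso;
 *     HaskellObj ret;
 *     SchedulerStatus stat;
 *
 *     ACQUIRE_LOCK(&sched_mutex);          // precondition (see above)
 *     tso = createThread(RtsFlags.GcFlags.initialStkSize);
 *     // ... push the IO action onto tso's stack ...
 *     stat = scheduleWaitThread(tso, &ret, NULL);
 *     RELEASE_LOCK(&sched_mutex);
 */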
/* ---------------------------------------------------------------------------
*
* ------------------------------------------------------------------------ */
-#ifdef SMP
-static void
-term_handler(int sig STG_UNUSED)
-{
- stat_workerStop();
- ACQUIRE_LOCK(&term_mutex);
- await_death--;
- RELEASE_LOCK(&term_mutex);
- shutdownThread();
-}
-#endif
-
void
initScheduler(void)
{
* the scheduler. */
initMutex(&sched_mutex);
initMutex(&term_mutex);
-
- initCondition(&thread_ready_cond);
#endif
-#if defined(SMP)
- initCondition(&gc_pending_cond);
-#endif
-
-#if defined(RTS_SUPPORTS_THREADS)
ACQUIRE_LOCK(&sched_mutex);
-#endif
-
- /* Install the SIGHUP handler */
-#if defined(SMP)
- {
- struct sigaction action,oact;
-
- action.sa_handler = term_handler;
- sigemptyset(&action.sa_mask);
- action.sa_flags = 0;
- if (sigaction(SIGTERM, &action, &oact) != 0) {
- barf("can't install TERM handler");
- }
- }
-#endif
/* A capability holds the state a native thread needs in
* order to execute STG code. At least one capability is
#if defined(RTS_SUPPORTS_THREADS)
/* start our haskell execution tasks */
-# if defined(SMP)
- startTaskManager(RtsFlags.ParFlags.nNodes, taskStart);
-# else
startTaskManager(0,taskStart);
-# endif
#endif
#if /* defined(SMP) ||*/ defined(PAR)
initSparkPools();
#endif
-#if defined(RTS_SUPPORTS_THREADS)
RELEASE_LOCK(&sched_mutex);
-#endif
-
}
void
#if defined(RTS_SUPPORTS_THREADS)
stopTaskManager();
#endif
+ shutting_down_scheduler = rtsTrue;
}
-/* -----------------------------------------------------------------------------
+/* ----------------------------------------------------------------------------
Managing the per-task allocation areas.
Each capability comes with an allocation area. These are
fixed-length block lists into which allocation can be done.
ToDo: no support for two-space collection at the moment???
- -------------------------------------------------------------------------- */
-
-/* -----------------------------------------------------------------------------
- * waitThread is the external interface for running a new computation
- * and waiting for the result.
- *
- * In the non-SMP case, we create a new main thread, push it on the
- * main-thread stack, and invoke the scheduler to run it. The
- * scheduler will return when the top main thread on the stack has
- * completed or died, and fill in the necessary fields of the
- * main_thread structure.
- *
- * In the SMP case, we create a main thread as before, but we then
- * create a new condition variable and sleep on it. When our new
- * main thread has completed, we'll be woken up and the status/result
- * will be in the main_thread struct.
- * -------------------------------------------------------------------------- */
-
-int
-howManyThreadsAvail ( void )
-{
- int i = 0;
- StgTSO* q;
- for (q = run_queue_hd; q != END_TSO_QUEUE; q = q->link)
- i++;
- for (q = blocked_queue_hd; q != END_TSO_QUEUE; q = q->link)
- i++;
- for (q = sleeping_queue; q != END_TSO_QUEUE; q = q->link)
- i++;
- return i;
-}
-
-void
-finishAllThreads ( void )
-{
- do {
- while (run_queue_hd != END_TSO_QUEUE) {
- waitThread ( run_queue_hd, NULL);
- }
- while (blocked_queue_hd != END_TSO_QUEUE) {
- waitThread ( blocked_queue_hd, NULL);
- }
- while (sleeping_queue != END_TSO_QUEUE) {
- waitThread ( blocked_queue_hd, NULL);
- }
- } while
- (blocked_queue_hd != END_TSO_QUEUE ||
- run_queue_hd != END_TSO_QUEUE ||
- sleeping_queue != END_TSO_QUEUE);
-}
-
-SchedulerStatus
-waitThread(StgTSO *tso, /*out*/StgClosure **ret)
-{
- IF_DEBUG(scheduler, sched_belch("== scheduler: waiting for thread (%d)\n", tso->id));
-#if defined(THREADED_RTS)
- return waitThread_(tso,ret, rtsFalse);
-#else
- return waitThread_(tso,ret);
-#endif
-}
+ ------------------------------------------------------------------------- */
+static
SchedulerStatus
-waitThread_(StgTSO *tso,
- /*out*/StgClosure **ret
-#if defined(THREADED_RTS)
- , rtsBool blockWaiting
-#endif
- )
+waitThread_(StgMainThread* m, Capability *initialCapability)
{
- StgMainThread *m;
SchedulerStatus stat;
- ACQUIRE_LOCK(&sched_mutex);
- IF_DEBUG(scheduler, sched_belch("== scheduler: waiting for thread (%d)\n", tso->id));
-
- m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
-
- m->tso = tso;
- m->ret = ret;
- m->stat = NoStatus;
-#if defined(RTS_SUPPORTS_THREADS)
- initCondition(&m->wakeup);
-#endif
-
- m->link = main_threads;
- main_threads = m;
-
- IF_DEBUG(scheduler, sched_belch("== scheduler: new main thread (%d)\n", m->tso->id));
+ // Precondition: sched_mutex must be held.
+ IF_DEBUG(scheduler, sched_belch("new main thread (%d)", m->tso->id));
-#if defined(RTS_SUPPORTS_THREADS)
-
-# if defined(THREADED_RTS)
- if (!blockWaiting) {
- /* In the threaded case, the OS thread that called main()
- * gets to enter the RTS directly without going via another
- * task/thread.
- */
- RELEASE_LOCK(&sched_mutex);
- schedule();
- ASSERT(m->stat != NoStatus);
- } else
-# endif
- {
- IF_DEBUG(scheduler, sched_belch("sfoo"));
- do {
- waitCondition(&m->wakeup, &sched_mutex);
- } while (m->stat == NoStatus);
- }
-#elif defined(GRAN)
+#if defined(GRAN)
/* GranSim specific init */
CurrentTSO = m->tso; // the TSO to run
procStatus[MainProc] = Busy; // status of main PE
CurrentProc = MainProc; // PE to run it on
-
- schedule();
+ schedule(m,initialCapability);
#else
- RELEASE_LOCK(&sched_mutex);
- schedule();
+ schedule(m,initialCapability);
ASSERT(m->stat != NoStatus);
#endif
stat = m->stat;
#if defined(RTS_SUPPORTS_THREADS)
- closeCondition(&m->wakeup);
-#endif
-
- IF_DEBUG(scheduler, fprintf(stderr, "== scheduler: main thread (%d) finished\n",
- m->tso->id));
- free(m);
-
-#if defined(THREADED_RTS)
- if (blockWaiting)
-#endif
- RELEASE_LOCK(&sched_mutex);
-
- return stat;
-}
-
-//@node Run queue code, Garbage Collextion Routines, Suspend and Resume, Main scheduling code
-//@subsection Run queue code
-
-#if 0
-/*
- NB: In GranSim we have many run queues; run_queue_hd is actually a macro
- unfolding to run_queue_hds[CurrentProc], thus CurrentProc is an
- implicit global variable that has to be correct when calling these
- fcts -- HWL
-*/
-
-/* Put the new thread on the head of the runnable queue.
- * The caller of createThread better push an appropriate closure
- * on this thread's stack before the scheduler is invoked.
- */
-static /* inline */ void
-add_to_run_queue(tso)
-StgTSO* tso;
-{
- ASSERT(tso!=run_queue_hd && tso!=run_queue_tl);
- tso->link = run_queue_hd;
- run_queue_hd = tso;
- if (run_queue_tl == END_TSO_QUEUE) {
- run_queue_tl = tso;
- }
-}
-
-/* Put the new thread at the end of the runnable queue. */
-static /* inline */ void
-push_on_run_queue(tso)
-StgTSO* tso;
-{
- ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
- ASSERT(run_queue_hd!=NULL && run_queue_tl!=NULL);
- ASSERT(tso!=run_queue_hd && tso!=run_queue_tl);
- if (run_queue_hd == END_TSO_QUEUE) {
- run_queue_hd = tso;
+ // Free the condition variable, returning it to the cache if possible.
+ if (!bound_cond_cache_full) {
+ bound_cond_cache = m->bound_thread_cond;
+ bound_cond_cache_full = 1;
} else {
- run_queue_tl->link = tso;
+ closeCondition(&m->bound_thread_cond);
}
- run_queue_tl = tso;
-}
-
-/*
- Should be inlined because it's used very often in schedule. The tso
- argument is actually only needed in GranSim, where we want to have the
- possibility to schedule *any* TSO on the run queue, irrespective of the
- actual ordering. Therefore, if tso is not the nil TSO then we traverse
- the run queue and dequeue the tso, adjusting the links in the queue.
-*/
-//@cindex take_off_run_queue
-static /* inline */ StgTSO*
-take_off_run_queue(StgTSO *tso) {
- StgTSO *t, *prev;
+#endif
- /*
- qetlaHbogh Qu' ngaSbogh ghomDaQ {tso} yIteq!
+ IF_DEBUG(scheduler, sched_belch("main thread (%d) finished", m->tso->id));
+ stgFree(m);
- if tso is specified, unlink that tso from the run_queue (doesn't have
- to be at the beginning of the queue); GranSim only
- */
- if (tso!=END_TSO_QUEUE) {
- /* find tso in queue */
- for (t=run_queue_hd, prev=END_TSO_QUEUE;
- t!=END_TSO_QUEUE && t!=tso;
- prev=t, t=t->link)
- /* nothing */ ;
- ASSERT(t==tso);
- /* now actually dequeue the tso */
- if (prev!=END_TSO_QUEUE) {
- ASSERT(run_queue_hd!=t);
- prev->link = t->link;
- } else {
- /* t is at beginning of thread queue */
- ASSERT(run_queue_hd==t);
- run_queue_hd = t->link;
- }
- /* t is at end of thread queue */
- if (t->link==END_TSO_QUEUE) {
- ASSERT(t==run_queue_tl);
- run_queue_tl = prev;
- } else {
- ASSERT(run_queue_tl!=t);
- }
- t->link = END_TSO_QUEUE;
- } else {
- /* take tso from the beginning of the queue; std concurrent code */
- t = run_queue_hd;
- if (t != END_TSO_QUEUE) {
- run_queue_hd = t->link;
- t->link = END_TSO_QUEUE;
- if (run_queue_hd == END_TSO_QUEUE) {
- run_queue_tl = END_TSO_QUEUE;
- }
- }
- }
- return t;
+ // Postcondition: sched_mutex still held
+ return stat;
}
-#endif /* 0 */
-
-//@node Garbage Collextion Routines, Blocking Queue Routines, Run queue code, Main scheduling code
-//@subsection Garbage Collextion Routines
-
/* ---------------------------------------------------------------------------
Where are the roots that we know about?
*/
void
-GetRoots(evac_fn evac)
+GetRoots( evac_fn evac )
{
#if defined(GRAN)
{
#if defined(PAR) || defined(GRAN)
markSparkQueue(evac);
#endif
+
+#if defined(RTS_USER_SIGNALS)
+ // mark the signal handlers (signals should be already blocked)
+ markSignalHandlers(evac);
+#endif
}
/* -----------------------------------------------------------------------------
This needs to be protected by the GC condition variable above. KH.
-------------------------------------------------------------------------- */
-void (*extra_roots)(evac_fn);
+static void (*extra_roots)(evac_fn);
void
performGC(void)
static StgTSO *
threadStackOverflow(StgTSO *tso)
{
- nat new_stack_size, new_tso_size, diff, stack_words;
+ nat new_stack_size, new_tso_size, stack_words;
StgPtr new_sp;
StgTSO *dest;
if (tso->stack_size >= tso->max_stack_size) {
IF_DEBUG(gc,
- belch("@@ threadStackOverflow of TSO %d (%p): stack too large (now %ld; max is %ld",
- tso->id, tso, tso->stack_size, tso->max_stack_size);
+ debugBelch("@@ threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)\n",
+ (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
/* If we're debugging, just print out the top of the stack */
printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
tso->sp+64)));
new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
- IF_DEBUG(scheduler, fprintf(stderr,"== scheduler: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
+ IF_DEBUG(scheduler, debugBelch("== sched: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
dest = (StgTSO *)allocate(new_tso_size);
TICK_ALLOC_TSO(new_stack_size,0);
memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
/* relocate the stack pointers... */
- diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
- dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
- dest->sp = new_sp;
+ dest->sp = new_sp;
dest->stack_size = new_stack_size;
- /* and relocate the update frame list */
- relocate_stack(dest, diff);
-
/* Mark the old TSO as relocated. We have to check for relocated
* TSOs in the garbage collector and any primops that deal with TSOs.
*
- * It's important to set the sp and su values to just beyond the end
+ * It's important to set the sp value to just beyond the end
* of the stack, so we don't attempt to scavenge any part of the
* dead TSO's stack.
*/
tso->what_next = ThreadRelocated;
tso->link = dest;
tso->sp = (P_)&(tso->stack[tso->stack_size]);
- tso->su = (StgUpdateFrame *)tso->sp;
tso->why_blocked = NotBlocked;
dest->mut_link = NULL;
IF_PAR_DEBUG(verbose,
- belch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld",
+ debugBelch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld\n",
tso->id, tso, tso->stack_size);
/* If we're debugging, just print out the top of the stack */
printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
return dest;
}
-//@node Blocking Queue Routines, Exception Handling Routines, Garbage Collextion Routines, Main scheduling code
-//@subsection Blocking Queue Routines
-
/* ---------------------------------------------------------------------------
Wake up a queue that was blocked on some resource.
------------------------------------------------------------------------ */
#if defined(GRAN)
-static inline void
+STATIC_INLINE void
unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
{
}
#elif defined(PAR)
-static inline void
+STATIC_INLINE void
unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
{
/* write RESUME events to log file and
}
/* the thread-queue-overhead is accounted for in either Resume or UnblockThread */
IF_GRAN_DEBUG(bq,
- fprintf(stderr," %s TSO %d (%p) [PE %d] (block_info.closure=%p) (next=%p) ,",
+ debugBelch(" %s TSO %d (%p) [PE %d] (block_info.closure=%p) (next=%p) ,",
(node_loc==tso_loc ? "Local" : "Global"),
tso->id, tso, CurrentProc, tso->block_info.closure, tso->link));
tso->block_info.closure = NULL;
- IF_DEBUG(scheduler,belch("-- Waking up thread %ld (%p)",
+ IF_DEBUG(scheduler,debugBelch("-- Waking up thread %ld (%p)\n",
tso->id, tso));
}
#elif defined(PAR)
ASSERT(((StgTSO *)bqe)->why_blocked != NotBlocked);
/* if it's a TSO just push it onto the run_queue */
next = bqe->link;
- // ((StgTSO *)bqe)->link = END_TSO_QUEUE; // debugging?
- PUSH_ON_RUN_QUEUE((StgTSO *)bqe);
- THREAD_RUNNABLE();
+ ((StgTSO *)bqe)->link = END_TSO_QUEUE; // debugging?
+ APPEND_TO_RUN_QUEUE((StgTSO *)bqe);
+ threadRunnable();
unblockCount(bqe, node);
/* reset blocking status after dumping event */
((StgTSO *)bqe)->why_blocked = NotBlocked;
(StgClosure *)bqe);
# endif
}
- IF_PAR_DEBUG(bq, fprintf(stderr, ", %p (%s)", bqe, info_type((StgClosure*)bqe)));
+ IF_PAR_DEBUG(bq, debugBelch(", %p (%s)\n", bqe, info_type((StgClosure*)bqe)));
return next;
}
ASSERT(tso->why_blocked != NotBlocked);
tso->why_blocked = NotBlocked;
next = tso->link;
- PUSH_ON_RUN_QUEUE(tso);
- THREAD_RUNNABLE();
- IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
+ tso->link = END_TSO_QUEUE;
+ APPEND_TO_RUN_QUEUE(tso);
+ threadRunnable();
+ IF_DEBUG(scheduler,sched_belch("waking up thread %ld", (long)tso->id));
return next;
}
#endif
#if defined(GRAN) || defined(PAR)
-inline StgBlockingQueueElement *
+INLINE_ME StgBlockingQueueElement *
unblockOne(StgBlockingQueueElement *bqe, StgClosure *node)
{
ACQUIRE_LOCK(&sched_mutex);
return bqe;
}
#else
-inline StgTSO *
+INLINE_ME StgTSO *
unblockOne(StgTSO *tso)
{
ACQUIRE_LOCK(&sched_mutex);
nat len = 0;
IF_GRAN_DEBUG(bq,
- belch("##-_ AwBQ for node %p on PE %d @ %ld by TSO %d (%p): ", \
+ debugBelch("##-_ AwBQ for node %p on PE %d @ %ld by TSO %d (%p): \n", \
node, CurrentProc, CurrentTime[CurrentProc],
CurrentTSO->id, CurrentTSO));
*/
if (CurrentProc!=node_loc) {
IF_GRAN_DEBUG(bq,
- belch("## node %p is on PE %d but CurrentProc is %d (TSO %d); assuming fake fetch and adjusting bitmask (old: %#x)",
+ debugBelch("## node %p is on PE %d but CurrentProc is %d (TSO %d); assuming fake fetch and adjusting bitmask (old: %#x)\n",
node, node_loc, CurrentProc, CurrentTSO->id,
// CurrentTSO, where_is(CurrentTSO),
node->header.gran.procs));
node->header.gran.procs = (node->header.gran.procs) | PE_NUMBER(CurrentProc);
IF_GRAN_DEBUG(bq,
- belch("## new bitmask of node %p is %#x",
+ debugBelch("## new bitmask of node %p is %#x\n",
node, node->header.gran.procs));
if (RtsFlags.GranFlags.GranSimStats.Global) {
globalGranStats.tot_fake_fetches++;
((StgRBH *)node)->mut_link = (StgMutClosure *)((StgRBHSave *)bqe)->payload[1];
IF_GRAN_DEBUG(bq,
- belch("## Filled in RBH_Save for %p (%s) at end of AwBQ",
+ debugBelch("## Filled in RBH_Save for %p (%s) at end of AwBQ\n",
node, info_type(node)));
}
globalGranStats.tot_awbq++; // total no. of bqs awakened
}
IF_GRAN_DEBUG(bq,
- fprintf(stderr,"## BQ Stats of %p: [%d entries] %s\n",
+ debugBelch("## BQ Stats of %p: [%d entries] %s\n",
node, len, (bqe!=END_BQ_QUEUE) ? "RBH" : ""));
}
#elif defined(PAR)
ACQUIRE_LOCK(&sched_mutex);
IF_PAR_DEBUG(verbose,
- belch("##-_ AwBQ for node %p on [%x]: ",
+ debugBelch("##-_ AwBQ for node %p on [%x]: \n",
node, mytid));
#ifdef DIST
//RFP
if(get_itbl(q)->type == CONSTR || q==END_BQ_QUEUE) {
- IF_PAR_DEBUG(verbose, belch("## ... nothing to unblock so lets just return. RFP (BUG?)"));
+ IF_PAR_DEBUG(verbose, debugBelch("## ... nothing to unblock so let's just return. RFP (BUG?)\n"));
return;
}
#endif
}
#else /* !GRAN && !PAR */
+
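+/* As awakenBlockedQueue below, but without taking sched_mutex;
+   presumably intended for callers that already hold it (it walks the
+   queue with unblockOneLocked). */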
+void
+awakenBlockedQueueNoLock(StgTSO *tso)
+{
+ while (tso != END_TSO_QUEUE) {
+ tso = unblockOneLocked(tso);
+ }
+}
+
void
awakenBlockedQueue(StgTSO *tso)
{
}
#endif
-//@node Exception Handling Routines, Debugging Routines, Blocking Queue Routines, Main scheduling code
-//@subsection Exception Handling Routines
-
/* ---------------------------------------------------------------------------
Interrupt execution
- usually called inside a signal handler so it mustn't do anything fancy.
case BlockedOnRead:
case BlockedOnWrite:
+#if defined(mingw32_TARGET_OS)
+ case BlockedOnDoProc:
+#endif
{
/* take TSO off blocked_queue */
StgBlockingQueueElement *prev = NULL;
goto done;
}
}
- barf("unblockThread (I/O): TSO not found");
+ barf("unblockThread (delay): TSO not found");
}
default:
case BlockedOnRead:
case BlockedOnWrite:
+#if defined(mingw32_TARGET_OS)
+ case BlockedOnDoProc:
+#endif
{
StgTSO *prev = NULL;
for (t = blocked_queue_hd; t != END_TSO_QUEUE;
goto done;
}
}
- barf("unblockThread (I/O): TSO not found");
+ barf("unblockThread (delay): TSO not found");
}
default:
tso->link = END_TSO_QUEUE;
tso->why_blocked = NotBlocked;
tso->block_info.closure = NULL;
- PUSH_ON_RUN_QUEUE(tso);
+ APPEND_TO_RUN_QUEUE(tso);
}
#endif
* the top of the stack.
*
* How exactly do we save all the active computations? We create an
- * AP_UPD for every UpdateFrame on the stack. Entering one of these
- * AP_UPDs pushes everything from the corresponding update frame
+ * AP_STACK for every UpdateFrame on the stack. Entering one of these
+ * AP_STACKs pushes everything from the corresponding update frame
* upwards onto the stack. (Actually, it pushes everything up to the
- * next update frame plus a pointer to the next AP_UPD object.
- * Entering the next AP_UPD object pushes more onto the stack until we
- * reach the last AP_UPD object - at which point the stack should look
+ * next update frame plus a pointer to the next AP_STACK object.)
+ * Entering the next AP_STACK object pushes more onto the stack until we
+ * reach the last AP_STACK object - at which point the stack should look
* exactly as it did when we killed the TSO and we can continue
* execution by entering the closure on top of the stack.
*
raiseAsync(tso,NULL);
}
+#ifdef FORKPROCESS_PRIMOP_SUPPORTED
+static void
+deleteThreadImmediately(StgTSO *tso)
+{ // for forkProcess only:
+ // delete thread without giving it a chance to catch the KillThread exception
+
+ if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
+ return;
+ }
+
+ if (tso->why_blocked != BlockedOnCCall &&
+ tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
+ unblockThread(tso);
+ }
+
+ tso->what_next = ThreadKilled;
+}
+#endif
+
void
raiseAsyncWithLock(StgTSO *tso, StgClosure *exception)
{
void
raiseAsync(StgTSO *tso, StgClosure *exception)
{
- StgUpdateFrame* su = tso->su;
- StgPtr sp = tso->sp;
+ StgRetInfoTable *info;
+ StgPtr sp;
- /* Thread already dead? */
- if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
- return;
- }
-
- IF_DEBUG(scheduler, sched_belch("raising exception in thread %ld.", tso->id));
-
- /* Remove it from any blocking queues */
- unblockThread(tso);
-
- IF_DEBUG(scheduler, sched_belch("raising exception in thread %ld.", tso->id));
- /* The stack freezing code assumes there's a closure pointer on
- * the top of the stack. This isn't always the case with compiled
- * code, so we have to push a dummy closure on the top which just
- * returns to the next return address on the stack.
- */
- if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
- *(--sp) = (W_)&stg_dummy_ret_closure;
- }
-
- while (1) {
- nat words = ((P_)su - (P_)sp) - 1;
- nat i;
- StgAP_UPD * ap;
-
- /* If we find a CATCH_FRAME, and we've got an exception to raise,
- * then build the THUNK raise(exception), and leave it on
- * top of the CATCH_FRAME ready to enter.
- */
- if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
-#ifdef PROFILING
- StgCatchFrame *cf = (StgCatchFrame *)su;
-#endif
- StgClosure *raise;
-
- /* we've got an exception to raise, so let's pass it to the
- * handler in this frame.
- */
- raise = (StgClosure *)allocate(sizeofW(StgClosure)+1);
- TICK_ALLOC_SE_THK(1,0);
- SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
- raise->payload[0] = exception;
-
- /* throw away the stack from Sp up to the CATCH_FRAME.
- */
- sp = (P_)su - 1;
-
- /* Ensure that async excpetions are blocked now, so we don't get
- * a surprise exception before we get around to executing the
- * handler.
- */
- if (tso->blocked_exceptions == NULL) {
- tso->blocked_exceptions = END_TSO_QUEUE;
- }
-
- /* Put the newly-built THUNK on top of the stack, ready to execute
- * when the thread restarts.
- */
- sp[0] = (W_)raise;
- tso->sp = sp;
- tso->su = su;
- tso->what_next = ThreadEnterGHC;
- IF_DEBUG(sanity, checkTSO(tso));
- return;
+ // Thread already dead?
+ if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
+ return;
}
- /* First build an AP_UPD consisting of the stack chunk above the
- * current update frame, with the top word on the stack as the
- * fun field.
- */
- ap = (StgAP_UPD *)allocate(AP_sizeW(words));
+ IF_DEBUG(scheduler,
+ sched_belch("raising exception in thread %ld.", (long)tso->id));
- ASSERT(words >= 0);
+ // Remove it from any blocking queues
+ unblockThread(tso);
+
+ sp = tso->sp;
- ap->n_args = words;
- ap->fun = (StgClosure *)sp[0];
- sp++;
- for(i=0; i < (nat)words; ++i) {
- ap->payload[i] = (StgClosure *)*sp++;
+ // The stack freezing code assumes there's a closure pointer on
+ // the top of the stack, so we have to arrange that this is the case...
+ //
+ if (sp[0] == (W_)&stg_enter_info) {
+ sp++;
+ } else {
+ sp--;
+ sp[0] = (W_)&stg_dummy_ret_closure;
}
-
- switch (get_itbl(su)->type) {
-
- case UPDATE_FRAME:
- {
- SET_HDR(ap,&stg_AP_UPD_info,su->header.prof.ccs /* ToDo */);
- TICK_ALLOC_UP_THK(words+1,0);
-
- IF_DEBUG(scheduler,
- fprintf(stderr, "scheduler: Updating ");
- printPtr((P_)su->updatee);
- fprintf(stderr, " with ");
- printObj((StgClosure *)ap);
- );
-
- /* Replace the updatee with an indirection - happily
- * this will also wake up any threads currently
- * waiting on the result.
- *
- * Warning: if we're in a loop, more than one update frame on
- * the stack may point to the same object. Be careful not to
- * overwrite an IND_OLDGEN in this case, because we'll screw
- * up the mutable lists. To be on the safe side, don't
- * overwrite any kind of indirection at all. See also
- * threadSqueezeStack in GC.c, where we have to make a similar
- * check.
- */
- if (!closure_IND(su->updatee)) {
- UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
- }
- su = su->link;
- sp += sizeofW(StgUpdateFrame) -1;
- sp[0] = (W_)ap; /* push onto stack */
- break;
- }
- case CATCH_FRAME:
- {
- StgCatchFrame *cf = (StgCatchFrame *)su;
- StgClosure* o;
-
- /* We want a PAP, not an AP_UPD. Fortunately, the
- * layout's the same.
- */
- SET_HDR(ap,&stg_PAP_info,su->header.prof.ccs /* ToDo */);
- TICK_ALLOC_UPD_PAP(words+1,0);
+ while (1) {
+ nat i;
+
+ // 1. Let the top of the stack be the "current closure"
+ //
+ // 2. Walk up the stack until we find either an UPDATE_FRAME or a
+ // CATCH_FRAME.
+ //
+ // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
+ // current closure applied to the chunk of stack up to (but not
+ // including) the update frame. This closure becomes the "current
+ // closure". Go back to step 2.
+ //
+ // 4. If it's a CATCH_FRAME, then leave the exception handler on
+ // top of the stack applied to the exception.
+ //
+ // 5. If it's a STOP_FRAME, then kill the thread.
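+ //
+ // (Informally: each UPDATE_FRAME's updatee is overwritten with an
+ // indirection to an AP_STACK capturing the stack chunk above that
+ // frame, and when a CATCH_FRAME is reached a 'raise exception'
+ // thunk is left on top of it, ready to enter when the thread
+ // resumes.)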
- /* now build o = FUN(catch,ap,handler) */
- o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
- TICK_ALLOC_FUN(2,0);
- SET_HDR(o,&stg_catch_info,su->header.prof.ccs /* ToDo */);
- o->payload[0] = (StgClosure *)ap;
- o->payload[1] = cf->handler;
+ StgPtr frame;
- IF_DEBUG(scheduler,
- fprintf(stderr, "scheduler: Built ");
- printObj((StgClosure *)o);
- );
-
- /* pop the old handler and put o on the stack */
- su = cf->link;
- sp += sizeofW(StgCatchFrame) - 1;
- sp[0] = (W_)o;
- break;
- }
-
- case SEQ_FRAME:
- {
- StgSeqFrame *sf = (StgSeqFrame *)su;
- StgClosure* o;
+ frame = sp + 1;
+ info = get_ret_itbl((StgClosure *)frame);
- SET_HDR(ap,&stg_PAP_info,su->header.prof.ccs /* ToDo */);
- TICK_ALLOC_UPD_PAP(words+1,0);
+ while (info->i.type != UPDATE_FRAME
+ && (info->i.type != CATCH_FRAME || exception == NULL)
+ && info->i.type != STOP_FRAME) {
+ frame += stack_frame_sizeW((StgClosure *)frame);
+ info = get_ret_itbl((StgClosure *)frame);
+ }
- /* now build o = FUN(seq,ap) */
- o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
- TICK_ALLOC_SE_THK(1,0);
- SET_HDR(o,&stg_seq_info,su->header.prof.ccs /* ToDo */);
- o->payload[0] = (StgClosure *)ap;
+ switch (info->i.type) {
+
+ case CATCH_FRAME:
+ // If we find a CATCH_FRAME, and we've got an exception to raise,
+ // then build the THUNK raise(exception), and leave it on
+ // top of the CATCH_FRAME ready to enter.
+ //
+ {
+#ifdef PROFILING
+ StgCatchFrame *cf = (StgCatchFrame *)frame;
+#endif
+ StgClosure *raise;
+
+ // we've got an exception to raise, so let's pass it to the
+ // handler in this frame.
+ //
+ raise = (StgClosure *)allocate(sizeofW(StgClosure)+1);
+ TICK_ALLOC_SE_THK(1,0);
+ SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
+ raise->payload[0] = exception;
+
+ // throw away the stack from Sp up to the CATCH_FRAME.
+ //
+ sp = frame - 1;
+
+ /* Ensure that async exceptions are blocked now, so we don't get
+ * a surprise exception before we get around to executing the
+ * handler.
+ */
+ if (tso->blocked_exceptions == NULL) {
+ tso->blocked_exceptions = END_TSO_QUEUE;
+ }
+
+ /* Put the newly-built THUNK on top of the stack, ready to execute
+ * when the thread restarts.
+ */
+ sp[0] = (W_)raise;
+ sp[-1] = (W_)&stg_enter_info;
+ tso->sp = sp-1;
+ tso->what_next = ThreadRunGHC;
+ IF_DEBUG(sanity, checkTSO(tso));
+ return;
+ }
- IF_DEBUG(scheduler,
- fprintf(stderr, "scheduler: Built ");
- printObj((StgClosure *)o);
- );
+ case UPDATE_FRAME:
+ {
+ StgAP_STACK * ap;
+ nat words;
+
+ // First build an AP_STACK consisting of the stack chunk above the
+ // current update frame, with the top word on the stack as the
+ // fun field.
+ //
+ words = frame - sp - 1;
+ ap = (StgAP_STACK *)allocate(PAP_sizeW(words));
+
+ ap->size = words;
+ ap->fun = (StgClosure *)sp[0];
+ sp++;
+ for(i=0; i < (nat)words; ++i) {
+ ap->payload[i] = (StgClosure *)*sp++;
+ }
+
+ SET_HDR(ap,&stg_AP_STACK_info,
+ ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
+ TICK_ALLOC_UP_THK(words+1,0);
+
+ IF_DEBUG(scheduler,
+ debugBelch("sched: Updating ");
+ printPtr((P_)((StgUpdateFrame *)frame)->updatee);
+ debugBelch(" with ");
+ printObj((StgClosure *)ap);
+ );
+
+ // Replace the updatee with an indirection - happily
+ // this will also wake up any threads currently
+ // waiting on the result.
+ //
+ // Warning: if we're in a loop, more than one update frame on
+ // the stack may point to the same object. Be careful not to
+ // overwrite an IND_OLDGEN in this case, because we'll screw
+ // up the mutable lists. To be on the safe side, don't
+ // overwrite any kind of indirection at all. See also
+ // threadSqueezeStack in GC.c, where we have to make a similar
+ // check.
+ //
+ if (!closure_IND(((StgUpdateFrame *)frame)->updatee)) {
+ // revert the black hole
+ UPD_IND_NOLOCK(((StgUpdateFrame *)frame)->updatee,
+ (StgClosure *)ap);
+ }
+ sp += sizeofW(StgUpdateFrame) - 1;
+ sp[0] = (W_)ap; // push onto stack
+ break;
+ }
- /* pop the old handler and put o on the stack */
- su = sf->link;
- sp += sizeofW(StgSeqFrame) - 1;
- sp[0] = (W_)o;
- break;
- }
-
- case STOP_FRAME:
- /* We've stripped the entire stack, the thread is now dead. */
- sp += sizeofW(StgStopFrame) - 1;
- sp[0] = (W_)exception; /* save the exception */
- tso->what_next = ThreadKilled;
- tso->su = (StgUpdateFrame *)(sp+1);
- tso->sp = sp;
- return;
+ case STOP_FRAME:
+ // We've stripped the entire stack, the thread is now dead.
+ sp += sizeofW(StgStopFrame);
+ tso->what_next = ThreadKilled;
+ tso->sp = sp;
+ return;
+
+ default:
+ barf("raiseAsync");
+ }
+ }
+ barf("raiseAsync");
+}
- default:
- barf("raiseAsync");
+/* -----------------------------------------------------------------------------
+ raiseExceptionHelper
+
+ This function is called by the raise# primitive, just so that we can
+ move some of the tricky bits of raising an exception from C-- into
+ C. Who knows, it might be a useful, reusable thing here too.
+ -------------------------------------------------------------------------- */
+
+StgWord
+raiseExceptionHelper (StgTSO *tso, StgClosure *exception)
+{
+ StgClosure *raise_closure = NULL;
+ StgPtr p, next;
+ StgRetInfoTable *info;
+ //
+ // This closure represents the expression 'raise# E' where E
+ // is the exception being raised. It is used to overwrite all the
+ // thunks which are currently under evaluation.
+ //
+
+ //
+ // LDV profiling: stg_raise_info has THUNK as its closure
+ // type. Since a THUNK takes at least MIN_UPD_SIZE words in its
+ // payload, MIN_UPD_SIZE is more appropriate than 1. It seems that
+ // 1 does not cause any problem unless profiling is performed.
+ // However, when LDV profiling goes on, we need to linearly scan
+ // the small object pool, where raise_closure is stored, so we should
+ // use MIN_UPD_SIZE.
+ //
+ // raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
+ // sizeofW(StgClosure)+1);
+ //
+
+ //
+ // Walk up the stack, looking for the catch frame. On the way,
+ // we update any closures pointed to from update frames with the
+ // raise closure that we just built.
+ //
+ p = tso->sp;
+ while(1) {
+ info = get_ret_itbl((StgClosure *)p);
+ next = p + stack_frame_sizeW((StgClosure *)p);
+ switch (info->i.type) {
+
+ case UPDATE_FRAME:
+ // Only create raise_closure if we need to.
+ if (raise_closure == NULL) {
+ raise_closure =
+ (StgClosure *)allocate(sizeofW(StgClosure)+MIN_UPD_SIZE);
+ SET_HDR(raise_closure, &stg_raise_info, CCCS);
+ raise_closure->payload[0] = exception;
+ }
+ UPD_IND(((StgUpdateFrame *)p)->updatee,raise_closure);
+ p = next;
+ continue;
+
+ case CATCH_FRAME:
+ tso->sp = p;
+ return CATCH_FRAME;
+
+ case STOP_FRAME:
+ tso->sp = p;
+ return STOP_FRAME;
+
+ default:
+ p = next;
+ continue;
+ }
}
- }
- barf("raiseAsync");
}
/* -----------------------------------------------------------------------------
}
}
-/* -----------------------------------------------------------------------------
- * Blackhole detection: if we reach a deadlock, test whether any
- * threads are blocked on themselves. Any threads which are found to
- * be self-blocked get sent a NonTermination exception.
- *
- * This is only done in a deadlock situation in order to avoid
- * performance overhead in the normal case.
- *
- * Locks: sched_mutex is held upon entry and exit.
- * -------------------------------------------------------------------------- */
-
-static void
-detectBlackHoles( void )
-{
- StgTSO *t = all_threads;
- StgUpdateFrame *frame;
- StgClosure *blocked_on;
-
- for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
-
- while (t->what_next == ThreadRelocated) {
- t = t->link;
- ASSERT(get_itbl(t)->type == TSO);
- }
-
- if (t->why_blocked != BlockedOnBlackHole) {
- continue;
- }
-
- blocked_on = t->block_info.closure;
-
- for (frame = t->su; ; frame = frame->link) {
- switch (get_itbl(frame)->type) {
-
- case UPDATE_FRAME:
- if (frame->updatee == blocked_on) {
- /* We are blocking on one of our own computations, so
- * send this thread the NonTermination exception.
- */
- IF_DEBUG(scheduler,
- sched_belch("thread %d is blocked on itself", t->id));
- raiseAsync(t, (StgClosure *)NonTermination_closure);
- goto done;
- }
- else {
- continue;
- }
-
- case CATCH_FRAME:
- case SEQ_FRAME:
- continue;
-
- case STOP_FRAME:
- break;
- }
- break;
- }
-
- done: ;
- }
-}
-
-//@node Debugging Routines, Index, Exception Handling Routines, Main scheduling code
-//@subsection Debugging Routines
-
-/* -----------------------------------------------------------------------------
- Debugging: why is a thread blocked
- -------------------------------------------------------------------------- */
-
-#ifdef DEBUG
+/* ----------------------------------------------------------------------------
+ * Debugging: why is a thread blocked
+ * [Also provides useful information when debugging threaded programs
+ * at the Haskell source code level, so enable outside of DEBUG. --sof 7/02]
+ ------------------------------------------------------------------------- */
+static
void
printThreadBlockage(StgTSO *tso)
{
switch (tso->why_blocked) {
case BlockedOnRead:
- fprintf(stderr,"is blocked on read from fd %d", tso->block_info.fd);
+ debugBelch("is blocked on read from fd %d", tso->block_info.fd);
break;
case BlockedOnWrite:
- fprintf(stderr,"is blocked on write to fd %d", tso->block_info.fd);
+ debugBelch("is blocked on write to fd %d", tso->block_info.fd);
break;
+#if defined(mingw32_TARGET_OS)
+ case BlockedOnDoProc:
+ debugBelch("is blocked on proc (request: %d)", tso->block_info.async_result->reqID);
+ break;
+#endif
case BlockedOnDelay:
- fprintf(stderr,"is blocked until %d", tso->block_info.target);
+ debugBelch("is blocked until %d", tso->block_info.target);
break;
case BlockedOnMVar:
- fprintf(stderr,"is blocked on an MVar");
+ debugBelch("is blocked on an MVar");
break;
case BlockedOnException:
- fprintf(stderr,"is blocked on delivering an exception to thread %d",
+ debugBelch("is blocked on delivering an exception to thread %d",
tso->block_info.tso->id);
break;
case BlockedOnBlackHole:
- fprintf(stderr,"is blocked on a black hole");
+ debugBelch("is blocked on a black hole");
break;
case NotBlocked:
- fprintf(stderr,"is not blocked");
+ debugBelch("is not blocked");
break;
#if defined(PAR)
case BlockedOnGA:
- fprintf(stderr,"is blocked on global address; local FM_BQ is %p (%s)",
+ debugBelch("is blocked on global address; local FM_BQ is %p (%s)",
tso->block_info.closure, info_type(tso->block_info.closure));
break;
case BlockedOnGA_NoSend:
- fprintf(stderr,"is blocked on global address (no send); local FM_BQ is %p (%s)",
+ debugBelch("is blocked on global address (no send); local FM_BQ is %p (%s)",
tso->block_info.closure, info_type(tso->block_info.closure));
break;
#endif
-#if defined(RTS_SUPPORTS_THREADS)
case BlockedOnCCall:
- fprintf(stderr,"is blocked on an external call");
+ debugBelch("is blocked on an external call");
+ break;
+ case BlockedOnCCall_NoUnblockExc:
+ debugBelch("is blocked on an external call (exceptions were already blocked)");
break;
-#endif
default:
barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%d)",
tso->why_blocked, tso->id, tso);
}
}
+static
void
printThreadStatus(StgTSO *tso)
{
switch (tso->what_next) {
case ThreadKilled:
- fprintf(stderr,"has been killed");
+ debugBelch("has been killed");
break;
case ThreadComplete:
- fprintf(stderr,"has completed");
+ debugBelch("has completed");
break;
default:
printThreadBlockage(tso);
ullong_format_string(TIME_ON_PROC(CurrentProc),
time_string, rtsFalse/*no commas!*/);
- sched_belch("all threads at [%s]:", time_string);
+ debugBelch("all threads at [%s]:\n", time_string);
# elif defined(PAR)
char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN];
ullong_format_string(CURRENT_TIME,
time_string, rtsFalse/*no commas!*/);
- sched_belch("all threads at [%s]:", time_string);
+ debugBelch("all threads at [%s]:\n", time_string);
# else
- sched_belch("all threads:");
+ debugBelch("all threads:\n");
# endif
for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
- fprintf(stderr, "\tthread %d ", t->id);
- if (t->label) fprintf(stderr,"[\"%s\"] ",t->label);
+ debugBelch("\tthread %d @ %p ", t->id, (void *)t);
+#if defined(DEBUG)
+ {
+ void *label = lookupThreadLabel(t->id);
+ if (label) debugBelch("[\"%s\"] ",(char *)label);
+ }
+#endif
printThreadStatus(t);
- fprintf(stderr,"\n");
+ debugBelch("\n");
}
}
+#ifdef DEBUG
+
/*
Print a whole blocking queue attached to node (debugging only).
*/
-//@cindex print_bq
# if defined(PAR)
void
print_bq (StgClosure *node)
StgTSO *tso;
rtsBool end;
- fprintf(stderr,"## BQ of closure %p (%s): ",
+ debugBelch("## BQ of closure %p (%s): ",
node, info_type(node));
/* should cover all closures that may have a blocking queue */
switch (get_itbl(bqe)->type) {
case TSO:
- fprintf(stderr," TSO %u (%x),",
+ debugBelch(" TSO %u (%x),",
((StgTSO *)bqe)->id, ((StgTSO *)bqe));
break;
case BLOCKED_FETCH:
- fprintf(stderr," BF (node=%p, ga=((%x, %d, %x)),",
+ debugBelch(" BF (node=%p, ga=((%x, %d, %x)),",
((StgBlockedFetch *)bqe)->node,
((StgBlockedFetch *)bqe)->ga.payload.gc.gtid,
((StgBlockedFetch *)bqe)->ga.payload.gc.slot,
((StgBlockedFetch *)bqe)->ga.weight);
break;
case CONSTR:
- fprintf(stderr," %s (IP %p),",
+ debugBelch(" %s (IP %p),",
(get_itbl(bqe) == &stg_RBH_Save_0_info ? "RBH_Save_0" :
get_itbl(bqe) == &stg_RBH_Save_1_info ? "RBH_Save_1" :
get_itbl(bqe) == &stg_RBH_Save_2_info ? "RBH_Save_2" :
break;
}
} /* for */
- fputc('\n', stderr);
+ debugBelch("\n");
}
# elif defined(GRAN)
void
ASSERT(node!=(StgClosure*)NULL); // sanity check
node_loc = where_is(node);
- fprintf(stderr,"## BQ of closure %p (%s) on [PE %d]: ",
+ debugBelch("## BQ of closure %p (%s) on [PE %d]: ",
node, info_type(node), node_loc);
/*
tso_loc = where_is((StgClosure *)bqe);
switch (get_itbl(bqe)->type) {
case TSO:
- fprintf(stderr," TSO %d (%p) on [PE %d],",
+ debugBelch(" TSO %d (%p) on [PE %d],",
((StgTSO *)bqe)->id, (StgTSO *)bqe, tso_loc);
break;
case CONSTR:
- fprintf(stderr," %s (IP %p),",
+ debugBelch(" %s (IP %p),",
(get_itbl(bqe) == &stg_RBH_Save_0_info ? "RBH_Save_0" :
get_itbl(bqe) == &stg_RBH_Save_1_info ? "RBH_Save_1" :
get_itbl(bqe) == &stg_RBH_Save_2_info ? "RBH_Save_2" :
break;
}
} /* for */
- fputc('\n', stderr);
+ debugBelch("\n");
}
#else
/*
tso=tso->link) {
ASSERT(tso!=NULL && tso!=END_TSO_QUEUE); // sanity check
ASSERT(get_itbl(tso)->type == TSO); // guess what, sanity check
- fprintf(stderr," TSO %d (%p),", tso->id, tso);
+ debugBelch(" TSO %d (%p),", tso->id, tso);
}
- fputc('\n', stderr);
+ debugBelch("\n");
}
# endif
}
#endif
-static void
+void
sched_belch(char *s, ...)
{
va_list ap;
va_start(ap,s);
-#ifdef SMP
- fprintf(stderr, "scheduler (task %ld): ", osThreadId());
+#ifdef RTS_SUPPORTS_THREADS
+ debugBelch("sched (task %p): ", osThreadId());
#elif defined(PAR)
- fprintf(stderr, "== ");
+ debugBelch("== ");
#else
- fprintf(stderr, "scheduler: ");
+ debugBelch("sched: ");
#endif
- vfprintf(stderr, s, ap);
- fprintf(stderr, "\n");
+ vdebugBelch(s, ap);
+ debugBelch("\n");
+ va_end(ap);
}
#endif /* DEBUG */
-
-
-//@node Index, , Debugging Routines, Main scheduling code
-//@subsection Index
-
-//@index
-//* StgMainThread:: @cindex\s-+StgMainThread
-//* awaken_blocked_queue:: @cindex\s-+awaken_blocked_queue
-//* blocked_queue_hd:: @cindex\s-+blocked_queue_hd
-//* blocked_queue_tl:: @cindex\s-+blocked_queue_tl
-//* context_switch:: @cindex\s-+context_switch
-//* createThread:: @cindex\s-+createThread
-//* gc_pending_cond:: @cindex\s-+gc_pending_cond
-//* initScheduler:: @cindex\s-+initScheduler
-//* interrupted:: @cindex\s-+interrupted
-//* next_thread_id:: @cindex\s-+next_thread_id
-//* print_bq:: @cindex\s-+print_bq
-//* run_queue_hd:: @cindex\s-+run_queue_hd
-//* run_queue_tl:: @cindex\s-+run_queue_tl
-//* sched_mutex:: @cindex\s-+sched_mutex
-//* schedule:: @cindex\s-+schedule
-//* take_off_run_queue:: @cindex\s-+take_off_run_queue
-//* term_mutex:: @cindex\s-+term_mutex
-//@end index