#include "Updates.h"
#include "Proftimer.h"
#include "ProfHeap.h"
+#include "GC.h"
+#include "Weak.h"
+#include "EventLog.h"
/* PARALLEL_HASKELL includes go here */
rtsBool blackholes_need_checking = rtsFalse;
-/* flag set by signal handler to precipitate a context switch
- * LOCK: none (just an advisory flag)
+/* Set to true when the latest garbage collection failed to reclaim
+ * enough space, and the runtime should proceed to shut itself down in
+ * an orderly fashion (emitting profiling info etc.)
*/
-int context_switch = 0;
+rtsBool heap_overflow = rtsFalse;
/* flag that tracks whether we have done any execution in this time slice.
* LOCK: currently none, perhaps we should lock (but needs to be
* updated in the fast path of the scheduler).
+ *
+ * NB. must be StgWord, we do xchg() on it.
*/
-nat recent_activity = ACTIVITY_YES;
+volatile StgWord recent_activity = ACTIVITY_YES;
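+/* Sketch (not part of this patch) of the xchg() access the NB above
+ * refers to -- resetting the flag and observing its previous value
+ * atomically, as the scheduler does when waking from idle:
+ *
+ *   StgWord prev = xchg((P_)&recent_activity, ACTIVITY_YES);
+ *   if (prev == ACTIVITY_DONE_GC) startTimer();
+ *
+ * with a plain 'nat' this read-modify-write would not be atomic.
+ */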
/* if this flag is set as well, give up execution
- * LOCK: none (changes once, from false->true)
+ * LOCK: none (changes monotonically)
*/
-rtsBool sched_state = SCHED_RUNNING;
+volatile StgWord sched_state = SCHED_RUNNING;
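+/* The monotonic progression is:
+ *   SCHED_RUNNING -> SCHED_INTERRUPTING -> SCHED_SHUTTING_DOWN
+ * so an unlocked test such as (sched_state >= SCHED_INTERRUPTING)
+ * can never be invalidated by a later transition.
+ */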
/* This is used in `TSO.h' and gcc 2.96 insists that this variable actually
* exists - earlier gccs apparently didn't.
// scheduler clearer.
//
static void schedulePreLoop (void);
+static void scheduleFindWork (Capability *cap);
#if defined(THREADED_RTS)
-static void schedulePushWork(Capability *cap, Task *task);
+static void scheduleYield (Capability **pcap, Task *task);
#endif
static void scheduleStartSignalHandlers (Capability *cap);
static void scheduleCheckBlockedThreads (Capability *cap);
static void scheduleCheckWakeupThreads(Capability *cap USED_IF_NOT_THREADS);
static void scheduleCheckBlackHoles (Capability *cap);
static void scheduleDetectDeadlock (Capability *cap, Task *task);
+static void schedulePushWork(Capability *cap, Task *task);
#if defined(PARALLEL_HASKELL)
static rtsBool scheduleGetRemoteWork(Capability *cap);
static void scheduleSendPendingMessages(void);
+#endif
+#if defined(PARALLEL_HASKELL) || defined(THREADED_RTS)
static void scheduleActivateSpark(Capability *cap);
#endif
-static void schedulePostRunThread(StgTSO *t);
+static void schedulePostRunThread(Capability *cap, StgTSO *t);
static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
static void scheduleHandleStackOverflow( Capability *cap, Task *task,
StgTSO *t);
"### NEW SCHEDULER LOOP (task: %p, cap: %p)",
task, initialCapability);
+ if (running_finalizers) {
+ errorBelch("error: a C finalizer called back into Haskell.\n"
+ " use Foreign.Concurrent.newForeignPtr for Haskell finalizers.");
+ stg_exit(EXIT_FAILURE);
+ }
+
schedulePreLoop();
// -----------------------------------------------------------
while (TERMINATION_CONDITION) {
-#if defined(THREADED_RTS)
- if (first) {
- // don't yield the first time, we want a chance to run this
- // thread for a bit, even if there are others banging at the
- // door.
- first = rtsFalse;
- ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
- } else {
- // Yield the capability to higher-priority tasks if necessary.
- yieldCapability(&cap, task);
- }
-#endif
-
-#if defined(THREADED_RTS)
- schedulePushWork(cap,task);
-#endif
-
// Check whether we have re-entered the RTS from Haskell without
// going via suspendThread()/resumeThread (i.e. a 'safe' foreign
// call).
#endif
/* scheduleDoGC() deletes all the threads */
cap = scheduleDoGC(cap,task,rtsFalse);
- break;
+
+	// after scheduleDoGC(), we must be shutting down.  Either some
+	// other Capability did the final GC, or we did it above; either
+	// way we can fall through to the SCHED_SHUTTING_DOWN case now.
+ ASSERT(sched_state == SCHED_SHUTTING_DOWN);
+ // fall through
+
case SCHED_SHUTTING_DOWN:
debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN");
// If we are a worker, just exit. If we're a bound thread
barf("sched_state: %d", sched_state);
}
-#if defined(THREADED_RTS)
- // If the run queue is empty, take a spark and turn it into a thread.
- {
- if (emptyRunQueue(cap)) {
- StgClosure *spark;
- spark = findSpark(cap);
- if (spark != NULL) {
- debugTrace(DEBUG_sched,
- "turning spark of closure %p into a thread",
- (StgClosure *)spark);
- createSparkThread(cap,spark);
- }
- }
- }
-#endif // THREADED_RTS
+ scheduleFindWork(cap);
- scheduleStartSignalHandlers(cap);
-
- // Only check the black holes here if we've nothing else to do.
- // During normal execution, the black hole list only gets checked
- // at GC time, to avoid repeatedly traversing this possibly long
- // list each time around the scheduler.
- if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
-
- scheduleCheckWakeupThreads(cap);
-
- scheduleCheckBlockedThreads(cap);
+ /* work pushing, currently relevant only for THREADED_RTS:
+ (pushes threads, wakes up idle capabilities for stealing) */
+ schedulePushWork(cap,task);
#if defined(PARALLEL_HASKELL)
- /* message processing and work distribution goes here */
-
- /* if messages have been buffered... a NOOP in THREADED_RTS */
- scheduleSendPendingMessages();
-
- /* If the run queue is empty,...*/
- if (emptyRunQueue(cap)) {
- /* ...take one of our own sparks and turn it into a thread */
- scheduleActivateSpark(cap);
-
- /* if this did not work, try to steal a spark from someone else */
- if (emptyRunQueue(cap)) {
- receivedFinish = scheduleGetRemoteWork(cap);
- continue; // a new round, (hopefully) with new work
- /*
- in GUM, this a) sends out a FISH and returns IF no fish is
- out already
- b) (blocking) awaits and receives messages
-
- in Eden, this is only the blocking receive, as b) in GUM.
- */
- }
- }
-
/* since we perform a blocking receive and continue otherwise,
either we never reach here or we definitely have work! */
// from here: non-empty run queue
above, waits for messages as well! */
processMessages(cap, &receivedFinish);
}
-#endif // PARALLEL_HASKELL
+#endif // PARALLEL_HASKELL: non-empty run queue!
scheduleDetectDeadlock(cap,task);
+
#if defined(THREADED_RTS)
cap = task->cap; // reload cap, it might have changed
#endif
//
// win32: might be here due to awaitEvent() being abandoned
// as a result of a console event having been delivered.
- if ( emptyRunQueue(cap) ) {
+
+#if defined(THREADED_RTS)
+ if (first)
+ {
+ // XXX: ToDo
+ // // don't yield the first time, we want a chance to run this
+ // // thread for a bit, even if there are others banging at the
+ // // door.
+ // first = rtsFalse;
+ // ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
+ }
+
+ yield:
+ scheduleYield(&cap,task);
+ if (emptyRunQueue(cap)) continue; // look for work again
+#endif
+
#if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
+ if ( emptyRunQueue(cap) ) {
ASSERT(sched_state >= SCHED_INTERRUPTING);
-#endif
- continue; // nothing to do
}
+#endif
//
// Get a thread to run
}
#endif
+ // If we're shutting down, and this thread has not yet been
+ // killed, kill it now. This sometimes happens when a finalizer
+ // thread is created by the final GC, or a thread previously
+ // in a foreign call returns.
+ if (sched_state >= SCHED_INTERRUPTING &&
+ !(t->what_next == ThreadComplete || t->what_next == ThreadKilled)) {
+ deleteThread(cap,t);
+ }
+
/* context switches are initiated by the timer signal, unless
* the user specified "context switch as often as possible", with
* +RTS -C0
*/
if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
&& !emptyThreadQueues(cap)) {
- context_switch = 1;
+ cap->context_switch = 1;
}
run_thread:
ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
ASSERT(t->cap == cap);
+ ASSERT(t->bound ? t->bound->cap == cap : 1);
prev_what_next = t->what_next;
}
#endif
+ postEvent(cap, EVENT_RUN_THREAD, t->id, 0);
+
switch (prev_what_next) {
case ThreadKilled:
t->saved_winerror = GetLastError();
#endif
+ postEvent (cap, EVENT_STOP_THREAD, t->id, ret);
+
#if defined(THREADED_RTS)
// If ret is ThreadBlocked, and this Task is bound to the TSO that
// blocked, we are in limbo - the TSO is now owned by whatever it
debugTrace(DEBUG_sched,
"--<< thread %lu (%s) stopped: blocked",
(unsigned long)t->id, whatNext_strs[t->what_next]);
- continue;
+ goto yield;
}
#endif
CCCS = CCS_SYSTEM;
#endif
- schedulePostRunThread(t);
+ schedulePostRunThread(cap,t);
t = threadStackUnderflow(task,t);
}
/* -----------------------------------------------------------------------------
+ * scheduleFindWork()
+ *
+ * Search for work to do, and handle messages from elsewhere.
+ * -------------------------------------------------------------------------- */
+
+static void
+scheduleFindWork (Capability *cap)
+{
+ scheduleStartSignalHandlers(cap);
+
+ // Only check the black holes here if we've nothing else to do.
+ // During normal execution, the black hole list only gets checked
+ // at GC time, to avoid repeatedly traversing this possibly long
+ // list each time around the scheduler.
+ if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
+
+ scheduleCheckWakeupThreads(cap);
+
+ scheduleCheckBlockedThreads(cap);
+
+#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
+ if (emptyRunQueue(cap)) { scheduleActivateSpark(cap); }
+#endif
+
+#if defined(PARALLEL_HASKELL)
+ // if messages have been buffered...
+ scheduleSendPendingMessages();
+#endif
+
+#if defined(PARALLEL_HASKELL)
+ if (emptyRunQueue(cap)) {
+ receivedFinish = scheduleGetRemoteWork(cap);
+ continue; // a new round, (hopefully) with new work
+ /*
+ in GUM, this a) sends out a FISH and returns IF no fish is
+ out already
+ b) (blocking) awaits and receives messages
+
+ in Eden, this is only the blocking receive, as b) in GUM.
+ */
+ }
+#endif
+}
+
+#if defined(THREADED_RTS)
+STATIC_INLINE rtsBool
+shouldYieldCapability (Capability *cap, Task *task)
+{
+ // we need to yield this capability to someone else if..
+ // - another thread is initiating a GC
+ // - another Task is returning from a foreign call
+ // - the thread at the head of the run queue cannot be run
+ // by this Task (it is bound to another Task, or it is unbound
+  //   and this task is bound).
+ return (waiting_for_gc ||
+ cap->returning_tasks_hd != NULL ||
+ (!emptyRunQueue(cap) && (task->tso == NULL
+ ? cap->run_queue_hd->bound != NULL
+ : cap->run_queue_hd->bound != task)));
+}
+
+// This is the single place where a Task goes to sleep. There are
+// two reasons it might need to sleep:
+// - there are no threads to run
+// - we need to yield this Capability to someone else
+// (see shouldYieldCapability())
+//
+// Careful: the scheduler loop is quite delicate. Make sure you run
+// the tests in testsuite/concurrent (all ways) after modifying this,
+// and also check the benchmarks in nofib/parallel for regressions.
+
+static void
+scheduleYield (Capability **pcap, Task *task)
+{
+ Capability *cap = *pcap;
+
+ // if we have work, and we don't need to give up the Capability, continue.
+ if (!shouldYieldCapability(cap,task) &&
+ (!emptyRunQueue(cap) ||
+ !emptyWakeupQueue(cap) ||
+ blackholes_need_checking ||
+ sched_state >= SCHED_INTERRUPTING))
+ return;
+
+ // otherwise yield (sleep), and keep yielding if necessary.
+ do {
+ yieldCapability(&cap,task);
+ }
+ while (shouldYieldCapability(cap,task));
+
+    // note there may still be no threads on the run queue at this
+    // point; the caller has to check.
+
+ *pcap = cap;
+ return;
+}
+#endif
+
+/* -----------------------------------------------------------------------------
* schedulePushWork()
*
* Push work to other Capabilities if we have some.
* -------------------------------------------------------------------------- */
-#if defined(THREADED_RTS)
static void
schedulePushWork(Capability *cap USED_IF_THREADS,
Task *task USED_IF_THREADS)
{
+  /* This code is not for PARALLEL_HASKELL; the call is kept general
+     because future GUM versions might use pushing in a distributed
+     setup */
+#if defined(THREADED_RTS)
+
Capability *free_caps[n_capabilities], *cap0;
nat i, n_free_caps;
// Check whether we have more threads on our run queue, or sparks
// in our pool, that we could hand to another Capability.
- if ((emptyRunQueue(cap) || cap->run_queue_hd->_link == END_TSO_QUEUE)
- && sparkPoolSizeCap(cap) < 2) {
- return;
+ if (cap->run_queue_hd == END_TSO_QUEUE) {
+ if (sparkPoolSizeCap(cap) < 2) return;
+ } else {
+ if (cap->run_queue_hd->_link == END_TSO_QUEUE &&
+ sparkPoolSizeCap(cap) < 1) return;
}
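+    // (Reading of the test above: keep one unit of work for ourselves.
+    // With an empty run queue we push only if we have at least two
+    // sparks; with exactly one thread, only if we have at least one
+    // spark; two or more threads are always worth sharing.)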
// First grab as many free Capabilities as we can.
StgTSO *prev, *t, *next;
rtsBool pushed_to_all;
- debugTrace(DEBUG_sched, "excess threads on run queue and %d free capabilities, sharing...", n_free_caps);
+ debugTrace(DEBUG_sched,
+ "cap %d: %s and %d free capabilities, sharing...",
+ cap->no,
+ (!emptyRunQueue(cap) && cap->run_queue_hd->_link != END_TSO_QUEUE)?
+ "excess threads on run queue":"sparks to share (>=2)",
+ n_free_caps);
i = 0;
pushed_to_all = rtsFalse;
} else {
debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
appendToRunQueue(free_caps[i],t);
+
+ postEvent (cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
+
if (t->bound) { t->bound->cap = free_caps[i]; }
t->cap = free_caps[i];
i++;
cap->run_queue_tl = prev;
}
+#ifdef SPARK_PUSHING
+  /* JB: I left this code in place; it would work but is not necessary */
+
// If there are some free capabilities that we didn't push any
// threads to, then try to push a spark to each one.
if (!pushed_to_all) {
// i is the next free capability to push to
for (; i < n_free_caps; i++) {
if (emptySparkPoolCap(free_caps[i])) {
- spark = findSpark(cap);
+ spark = tryStealSpark(cap->sparks);
if (spark != NULL) {
debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
newSpark(&(free_caps[i]->r), spark);
}
}
}
+#endif /* SPARK_PUSHING */
// release the capabilities
for (i = 0; i < n_free_caps; i++) {
task->cap = free_caps[i];
- releaseCapability(free_caps[i]);
+ releaseAndWakeupCapability(free_caps[i]);
}
}
task->cap = cap; // reset to point to our Capability.
+
+#endif /* THREADED_RTS */
+
}
-#endif
/* ----------------------------------------------------------------------------
* Start any pending signal handlers
{
ACQUIRE_LOCK(&sched_mutex);
if ( blackholes_need_checking ) {
- checkBlackHoles(cap);
blackholes_need_checking = rtsFalse;
+	// important that we reset the flag *before* checking the
+	// blackhole queue, otherwise we could lose a wakeup: a thread
+	// woken here may immediately run on another Capability and
+	// block on a new blackhole (setting the flag again), and
+	// resetting the flag *after* the check would discard that
+	// request.
+ checkBlackHoles(cap);
}
RELEASE_LOCK(&sched_mutex);
}
// they are unreachable and will therefore be sent an
// exception. Any threads thus released will be immediately
// runnable.
- cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/);
+ cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/);
+	// when force_major == rtsTrue, scheduleDoGC sets
+ // recent_activity to ACTIVITY_DONE_GC and turns off the timer
+ // signal.
- recent_activity = ACTIVITY_DONE_GC;
- // disable timer signals (see #1623)
- stopTimer();
-
if ( !emptyRunQueue(cap) ) return;
#if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
* Send pending messages (PARALLEL_HASKELL only)
* ------------------------------------------------------------------------- */
-static StgTSO *
+#if defined(PARALLEL_HASKELL)
+static void
scheduleSendPendingMessages(void)
{
-#if defined(PARALLEL_HASKELL)
# if defined(PAR) // global Mem.Mgmt., omit for now
if (PendingFetches != END_BF_QUEUE) {
// packets which have become too old...
sendOldBuffers();
}
-#endif
}
+#endif
/* ----------------------------------------------------------------------------
- * Activate spark threads (PARALLEL_HASKELL only)
+ * Activate spark threads (PARALLEL_HASKELL and THREADED_RTS)
* ------------------------------------------------------------------------- */
-#if defined(PARALLEL_HASKELL)
+#if defined(PARALLEL_HASKELL) || defined(THREADED_RTS)
static void
scheduleActivateSpark(Capability *cap)
{
- StgClosure *spark;
-
-/* We only want to stay here if the run queue is empty and we want some
- work. We try to turn a spark into a thread, and add it to the run
- queue, from where it will be picked up in the next iteration of the
- scheduler loop.
-*/
- if (!emptyRunQueue(cap))
- /* In the threaded RTS, another task might have pushed a thread
- on our run queue in the meantime ? But would need a lock.. */
- return;
-
- spark = findSpark(cap); // defined in Sparks.c
-
- if (spark != NULL) {
- debugTrace(DEBUG_sched,
- "turning spark of closure %p into a thread",
- (StgClosure *)spark);
- createSparkThread(cap,spark); // defined in Sparks.c
+ if (anySparks())
+ {
+ createSparkThread(cap);
+ debugTrace(DEBUG_sched, "creating a spark thread");
}
}
-#endif // PARALLEL_HASKELL
+#endif // PARALLEL_HASKELL || THREADED_RTS
/* ----------------------------------------------------------------------------
* Get work from a remote node (PARALLEL_HASKELL only)
* ------------------------------------------------------------------------- */
#if defined(PARALLEL_HASKELL)
-static rtsBool
-scheduleGetRemoteWork(Capability *cap)
+static rtsBool /* return value used in PARALLEL_HASKELL only */
+scheduleGetRemoteWork (Capability *cap STG_UNUSED)
{
#if defined(PARALLEL_HASKELL)
rtsBool receivedFinish = rtsFalse;
#endif /* PARALLEL_HASKELL */
}
-#endif // PARALLEL_HASKELL
+#endif // PARALLEL_HASKELL
/* ----------------------------------------------------------------------------
* After running a thread...
* ------------------------------------------------------------------------- */
static void
-schedulePostRunThread (StgTSO *t)
+schedulePostRunThread (Capability *cap, StgTSO *t)
{
// We have to be able to catch transactions that are in an
// infinite loop as a result of seeing an inconsistent view of
// ATOMICALLY_FRAME, aborting the (nested)
// transaction, and saving the stack of any
// partially-evaluated thunks on the heap.
- throwToSingleThreaded_(&capabilities[0], t,
- NULL, rtsTrue, NULL);
+ throwToSingleThreaded_(cap, t, NULL, rtsTrue);
ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
}
"--<< thread %ld (%s) stopped: HeapOverflow",
(long)t->id, whatNext_strs[t->what_next]);
- if (context_switch) {
+ if (cap->r.rHpLim == NULL || cap->context_switch) {
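+	    // (rHpLim == NULL here means something zeroed HpLim behind
+	    // our back -- the standard trick to force the running
+	    // thread's next heap check to fail so that it returns to
+	    // the scheduler.)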
// Sometimes we miss a context switch, e.g. when calling
// primitives in a tight loop, MAYBE_GC() doesn't check the
// context switch flag, and we end up waiting for a GC.
// See #1984, and concurrent/should_run/1984
- context_switch = 0;
+ cap->context_switch = 0;
addToRunQueue(cap,t);
} else {
pushOnRunQueue(cap,t);
// the CPU because the tick always arrives during GC). This way
// penalises threads that do a lot of allocation, but that seems
// better than the alternative.
- context_switch = 0;
+ cap->context_switch = 0;
/* put the thread back on the run queue. Then, if we're ready to
* GC, check whether this is the last task to stop. If so, wake
debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished",
(unsigned long)t->id, whatNext_strs[t->what_next]);
+ // blocked exceptions can now complete, even if the thread was in
+ // blocked mode (see #2910). This unconditionally calls
+ // lockTSO(), which ensures that we don't miss any threads that
+ // are engaged in throwTo() with this thread as a target.
+ awakenBlockedExceptionQueue (cap, t);
+
//
// Check whether the thread that just completed was a bound
// thread, and if so return with the result.
*(task->ret) = NULL;
}
if (sched_state >= SCHED_INTERRUPTING) {
- task->stat = Interrupted;
+ if (heap_overflow) {
+ task->stat = HeapExhausted;
+ } else {
+ task->stat = Interrupted;
+ }
} else {
task->stat = Killed;
}
static Capability *
scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
{
- StgTSO *t;
rtsBool heap_census;
#ifdef THREADED_RTS
- static volatile StgWord waiting_for_gc;
- rtsBool was_waiting;
+    /* waiting_for_gc is a volatile StgWord that now lives in
+       Capability.c */
+ rtsBool gc_type, prev_pending_gc;
nat i;
#endif
+ if (sched_state == SCHED_SHUTTING_DOWN) {
+ // The final GC has already been done, and the system is
+ // shutting down. We'll probably deadlock if we try to GC
+ // now.
+ return cap;
+ }
+
#ifdef THREADED_RTS
+ if (sched_state < SCHED_INTERRUPTING
+ && RtsFlags.ParFlags.parGcEnabled
+ && N >= RtsFlags.ParFlags.parGcGen
+ && ! oldest_gen->steps[0].mark)
+ {
+ gc_type = PENDING_GC_PAR;
+ } else {
+ gc_type = PENDING_GC_SEQ;
+ }
+
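+    // (Reading of the condition above, assuming the usual flag
+    // mapping: with e.g. +RTS -qg1, parGcGen == 1, so young-generation
+    // collections (N == 0) stay sequential and only older generations
+    // are collected in parallel; an oldest generation using the mark
+    // phase also forces sequential GC.)
+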
// In order to GC, there must be no threads running Haskell code.
// Therefore, the GC thread needs to hold *all* the capabilities,
// and release them after the GC has completed.
// actually did the GC. But it's quite hard to arrange for all
// the other tasks to sleep and stay asleep.
//
-
- was_waiting = cas(&waiting_for_gc, 0, 1);
- if (was_waiting) {
+
+ /* Other capabilities are prevented from running yet more Haskell
+ threads if waiting_for_gc is set. Tested inside
+ yieldCapability() and releaseCapability() in Capability.c */
+
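+    /* cas() returns the previous value of waiting_for_gc: 0 means we
+       won the race and own the GC request; anything non-zero means
+       another task requested a GC first, so we just yield until that
+       GC has happened. */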
+ prev_pending_gc = cas(&waiting_for_gc, 0, gc_type);
+ if (prev_pending_gc) {
do {
- debugTrace(DEBUG_sched, "someone else is trying to GC...");
- if (cap) yieldCapability(&cap,task);
+ debugTrace(DEBUG_sched, "someone else is trying to GC (%d)...",
+ prev_pending_gc);
+ ASSERT(cap);
+ yieldCapability(&cap,task);
} while (waiting_for_gc);
return cap; // NOTE: task->cap might have changed here
}
- for (i=0; i < n_capabilities; i++) {
- debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
- if (cap != &capabilities[i]) {
- Capability *pcap = &capabilities[i];
- // we better hope this task doesn't get migrated to
- // another Capability while we're waiting for this one.
- // It won't, because load balancing happens while we have
- // all the Capabilities, but even so it's a slightly
- // unsavoury invariant.
- task->cap = pcap;
- context_switch = 1;
- waitForReturnCapability(&pcap, task);
- if (pcap != &capabilities[i]) {
- barf("scheduleDoGC: got the wrong capability");
- }
- }
+ setContextSwitches();
+
+ // The final shutdown GC is always single-threaded, because it's
+ // possible that some of the Capabilities have no worker threads.
+
+ if (gc_type == PENDING_GC_SEQ)
+ {
+ postEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
+ // single-threaded GC: grab all the capabilities
+ for (i=0; i < n_capabilities; i++) {
+ debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
+ if (cap != &capabilities[i]) {
+ Capability *pcap = &capabilities[i];
+ // we better hope this task doesn't get migrated to
+ // another Capability while we're waiting for this one.
+ // It won't, because load balancing happens while we have
+ // all the Capabilities, but even so it's a slightly
+ // unsavoury invariant.
+ task->cap = pcap;
+ waitForReturnCapability(&pcap, task);
+ if (pcap != &capabilities[i]) {
+ barf("scheduleDoGC: got the wrong capability");
+ }
+ }
+ }
}
+ else
+ {
+ // multi-threaded GC: make sure all the Capabilities donate one
+ // GC thread each.
+ postEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
+ debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
- waiting_for_gc = rtsFalse;
+ waitForGcThreads(cap);
+ }
#endif
// so this happens periodically:
IF_DEBUG(scheduler, printAllThreads());
+delete_threads_and_gc:
/*
* We now have all the capabilities; if we're in an interrupting
* state, then we should take the opportunity to delete all the
* threads in the system.
*/
- if (sched_state >= SCHED_INTERRUPTING) {
- deleteAllThreads(&capabilities[0]);
+ if (sched_state == SCHED_INTERRUPTING) {
+ deleteAllThreads(cap);
sched_state = SCHED_SHUTTING_DOWN;
}
heap_census = scheduleNeedHeapProfile(rtsTrue);
- /* everybody back, start the GC.
- * Could do it in this thread, or signal a condition var
- * to do it in another thread. Either way, we need to
- * broadcast on gc_pending_cond afterward.
- */
#if defined(THREADED_RTS)
+ postEvent(cap, EVENT_GC_START, 0, 0);
debugTrace(DEBUG_sched, "doing GC");
+ // reset waiting_for_gc *before* GC, so that when the GC threads
+ // emerge they don't immediately re-enter the GC.
+ waiting_for_gc = 0;
+ GarbageCollect(force_major || heap_census, gc_type, cap);
+#else
+ GarbageCollect(force_major || heap_census, 0, cap);
#endif
- GarbageCollect(force_major || heap_census);
-
+ postEvent(cap, EVENT_GC_END, 0, 0);
+
+ if (recent_activity == ACTIVITY_INACTIVE && force_major)
+ {
+ // We are doing a GC because the system has been idle for a
+ // timeslice and we need to check for deadlock. Record the
+ // fact that we've done a GC and turn off the timer signal;
+ // it will get re-enabled if we run any threads after the GC.
+ recent_activity = ACTIVITY_DONE_GC;
+ stopTimer();
+ }
+ else
+ {
+ // the GC might have taken long enough for the timer to set
+ // recent_activity = ACTIVITY_INACTIVE, but we aren't
+ // necessarily deadlocked:
+ recent_activity = ACTIVITY_YES;
+ }
+
+#if defined(THREADED_RTS)
+ if (gc_type == PENDING_GC_PAR)
+ {
+ releaseGCThreads(cap);
+ }
+#endif
+
if (heap_census) {
debugTrace(DEBUG_sched, "performing heap census");
heapCensus();
performHeapProfile = rtsFalse;
}
+ if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
+ // GC set the heap_overflow flag, so we should proceed with
+ // an orderly shutdown now. Ultimately we want the main
+ // thread to return to its caller with HeapExhausted, at which
+ // point the caller should call hs_exit(). The first step is
+ // to delete all the threads.
+ //
+ // Another way to do this would be to raise an exception in
+ // the main thread, which we really should do because it gives
+ // the program a chance to clean up. But how do we find the
+ // main thread? It should presumably be the same one that
+ // gets ^C exceptions, but that's all done on the Haskell side
+ // (GHC.TopHandler).
+ sched_state = SCHED_INTERRUPTING;
+ goto delete_threads_and_gc;
+ }
+
+#ifdef SPARKBALANCE
+ /* JB
+ Once we are all together... this would be the place to balance all
+ spark pools. No concurrent stealing or adding of new sparks can
+ occur. Should be defined in Sparks.c. */
+ balanceSparkPoolsCaps(n_capabilities, capabilities);
+#endif
+
#if defined(THREADED_RTS)
- // release our stash of capabilities.
- for (i = 0; i < n_capabilities; i++) {
- if (cap != &capabilities[i]) {
- task->cap = &capabilities[i];
- releaseCapability(&capabilities[i]);
- }
+ if (gc_type == PENDING_GC_SEQ) {
+ // release our stash of capabilities.
+ for (i = 0; i < n_capabilities; i++) {
+ if (cap != &capabilities[i]) {
+ task->cap = &capabilities[i];
+ releaseCapability(&capabilities[i]);
+ }
+ }
}
if (cap) {
task->cap = cap;
task = cap->running_task;
tso = cap->r.rCurrentTSO;
+ postEvent(cap, EVENT_STOP_THREAD, tso->id, THREAD_SUSPENDED_FOREIGN_CALL);
debugTrace(DEBUG_sched,
"thread %lu did a safe foreign call",
(unsigned long)cap->r.rCurrentTSO->id);
suspendTask(cap,task);
cap->in_haskell = rtsFalse;
- releaseCapability_(cap);
+ releaseCapability_(cap,rtsFalse);
RELEASE_LOCK(&cap->lock);
tso = task->suspended_tso;
task->suspended_tso = NULL;
tso->_link = END_TSO_QUEUE; // no write barrier reqd
+
+ postEvent(cap, EVENT_RUN_THREAD, tso->id, 0);
debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
if (tso->why_blocked == BlockedOnCCall) {
- awakenBlockedExceptionQueue(cap,tso);
+ // avoid locking the TSO if we don't have to
+ if (tso->blocked_exceptions != END_TSO_QUEUE) {
+ awakenBlockedExceptionQueue(cap,tso);
+ }
tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
}
if (cpu == cap->no) {
appendToRunQueue(cap,tso);
} else {
- migrateThreadToCapability_lock(&capabilities[cpu],tso);
+ postEvent (cap, EVENT_MIGRATE_THREAD, tso->id, capabilities[cpu].no);
+ wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
}
#else
appendToRunQueue(cap,tso);
* ------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
-void
+void OSThreadProcAttr
workerStart(Task *task)
{
Capability *cap;
cap = task->cap;
RELEASE_LOCK(&task->lock);
+ if (RtsFlags.ParFlags.setAffinity) {
+ setThreadAffinity(cap->no, n_capabilities);
+ }
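+    // (Assuming the striped mapping used by setThreadAffinity():
+    // capability i is allowed to run on CPUs i, i + n_capabilities,
+    // i + 2*n_capabilities, ..., so capabilities spread across cores.)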
+
// set the thread-local pointer to the Task:
taskEnter(task);
// schedule() runs without a lock.
cap = schedule(cap,task);
- // On exit from schedule(), we have a Capability.
- releaseCapability(cap);
+ // On exit from schedule(), we have a Capability, but possibly not
+ // the same one we started with.
+
+ // During shutdown, the requirement is that after all the
+ // Capabilities are shut down, all workers that are shutting down
+ // have finished workerTaskStop(). This is why we hold on to
+ // cap->lock until we've finished workerTaskStop() below.
+ //
+ // There may be workers still involved in foreign calls; those
+ // will just block in waitForReturnCapability() because the
+ // Capability has been shut down.
+ //
+ ACQUIRE_LOCK(&cap->lock);
+ releaseCapability_(cap,rtsFalse);
workerTaskStop(task);
+ RELEASE_LOCK(&cap->lock);
}
#endif
blackhole_queue = END_TSO_QUEUE;
- context_switch = 0;
sched_state = SCHED_RUNNING;
recent_activity = ACTIVITY_YES;
}
#endif
- trace(TRACE_sched, "start: %d capabilities", n_capabilities);
-
RELEASE_LOCK(&sched_mutex);
}
{
Task *task = NULL;
-#if defined(THREADED_RTS)
ACQUIRE_LOCK(&sched_mutex);
task = newBoundTask();
RELEASE_LOCK(&sched_mutex);
-#endif
// If we haven't killed all the threads yet, do it now.
if (sched_state < SCHED_SHUTTING_DOWN) {
sched_state = SCHED_INTERRUPTING;
- scheduleDoGC(NULL,task,rtsFalse);
+ waitForReturnCapability(&task->cap,task);
+ scheduleDoGC(task->cap,task,rtsFalse);
+ releaseCapability(task->cap);
}
sched_state = SCHED_SHUTTING_DOWN;
shutdownCapability(&capabilities[i], task, wait_foreign);
}
boundTaskExiting(task);
- stopTaskManager();
}
-#else
- freeCapability(&MainCapability);
#endif
}
void
freeScheduler( void )
{
- freeTaskManager();
- if (n_capabilities != 1) {
- stgFree(capabilities);
+ nat still_running;
+
+ ACQUIRE_LOCK(&sched_mutex);
+ still_running = freeTaskManager();
+ // We can only free the Capabilities if there are no Tasks still
+ // running. We might have a Task about to return from a foreign
+ // call into waitForReturnCapability(), for example (actually,
+ // this should be the *only* thing that a still-running Task can
+ // do at this point, and it will block waiting for the
+ // Capability).
+ if (still_running == 0) {
+ freeCapabilities();
+ if (n_capabilities != 1) {
+ stgFree(capabilities);
+ }
}
+ RELEASE_LOCK(&sched_mutex);
#if defined(THREADED_RTS)
closeMutex(&sched_mutex);
#endif
performGC_(rtsBool force_major)
{
Task *task;
+
// We must grab a new Task here, because the existing Task may be
// associated with a particular Capability, and chained onto the
// suspended_ccalling_tasks queue.
ACQUIRE_LOCK(&sched_mutex);
task = newBoundTask();
RELEASE_LOCK(&sched_mutex);
- scheduleDoGC(NULL,task,force_major);
+
+ waitForReturnCapability(&task->cap,task);
+ scheduleDoGC(task->cap,task,force_major);
+ releaseCapability(task->cap);
boundTaskExiting(task);
}
}
/* Try to double the current stack size. If that takes us over the
- * maximum stack size for this thread, then use the maximum instead.
- * Finally round up so the TSO ends up as a whole number of blocks.
+ * maximum stack size for this thread, then use the maximum instead
+ * (that is, unless we're already at or over the max size and we
+ * can't raise the StackOverflow exception (see above), in which
+ * case just double the size). Finally round up so the TSO ends up as
+ * a whole number of blocks.
*/
- new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+ if (tso->stack_size >= tso->max_stack_size) {
+ new_stack_size = tso->stack_size * 2;
+ } else {
+ new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+ }
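+  /* Illustrative numbers: a 4096-word stack with max_stack_size 65536
+     doubles to 8192; a thread already at 65536 that cannot take the
+     StackOverflow exception doubles past the limit to 131072. */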
new_tso_size = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
TSO_STRUCT_SIZE)/sizeof(W_);
new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
{
bdescr *bd, *new_bd;
- lnat new_tso_size_w, tso_size_w;
+ lnat free_w, tso_size_w;
StgTSO *new_tso;
tso_size_w = tso_sizeW(tso);
// while we are moving the TSO:
lockClosure((StgClosure *)tso);
- new_tso_size_w = round_to_mblocks(tso_size_w/2);
-
- debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
- tso->id, tso_size_w, new_tso_size_w);
+ // this is the number of words we'll free
+ free_w = round_to_mblocks(tso_size_w/2);
bd = Bdescr((StgPtr)tso);
- new_bd = splitLargeBlock(bd, new_tso_size_w / BLOCK_SIZE_W);
- new_bd->free = bd->free;
+ new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
bd->free = bd->start + TSO_STRUCT_SIZEW;
new_tso = (StgTSO *)new_bd->start;
memcpy(new_tso,tso,TSO_STRUCT_SIZE);
- new_tso->stack_size = new_tso_size_w - TSO_STRUCT_SIZEW;
+ new_tso->stack_size = new_bd->free - new_tso->stack;
+
+ debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
+ (long)tso->id, tso_size_w, tso_sizeW(new_tso));
tso->what_next = ThreadRelocated;
tso->_link = new_tso; // no write barrier reqd: same generation
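+    // (The convention relied on here: holders of a stale TSO pointer
+    // must check for ThreadRelocated and chase the links, e.g.
+    //     while (tso->what_next == ThreadRelocated) tso = tso->_link;
+    // before using the TSO.)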
interruptStgRts(void)
{
sched_state = SCHED_INTERRUPTING;
- context_switch = 1;
+ setContextSwitches();
wakeUpRts();
}
if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
IF_DEBUG(sanity,checkTSO(t));
t = unblockOne(cap, t);
- // urk, the threads migrate to the current capability
- // here, but we'd like to keep them on the original one.
*prev = t;
any_woke_up = rtsTrue;
} else {