X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSchedule.c;h=97923022b44b44f727bc59a62b118f989d5332ac;hb=de75026f5a48d3d052135a973ab4dff76c5b20f5;hp=7dd063423fe3e123770291254b2efba3ed874b55;hpb=117dc73b8a8f6a4184a825a96c372480377f4680;p=ghc-hetmet.git

diff --git a/rts/Schedule.c b/rts/Schedule.c
index 7dd0634..9792302 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -31,6 +31,9 @@
 #include "Updates.h"
 #include "Proftimer.h"
 #include "ProfHeap.h"
+#include "GC.h"
+#include "Weak.h"
+#include "EventLog.h"
 
 /* PARALLEL_HASKELL includes go here */
 
@@ -89,6 +92,12 @@ StgTSO *blackhole_queue = NULL;
  */
 rtsBool blackholes_need_checking = rtsFalse;
 
+/* Set to true when the latest garbage collection failed to reclaim
+ * enough space, and the runtime should proceed to shut itself down in
+ * an orderly fashion (emitting profiling info etc.)
+ */
+rtsBool heap_overflow = rtsFalse;
+
 /* flag that tracks whether we have done any execution in this time slice.
  * LOCK: currently none, perhaps we should lock (but needs to be
  * updated in the fast path of the scheduler).
@@ -274,6 +283,14 @@ schedule (Capability *initialCapability, Task *task)
               "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
               task, initialCapability);
 
+    if (running_finalizers) {
+        errorBelch("error: a C finalizer called back into Haskell.\n"
+                   "   This was previously allowed, but is disallowed in GHC 6.10.2 and later.\n"
+                   "   To create finalizers that may call back into Haskell, use\n"
+                   "   Foreign.Concurrent.newForeignPtr instead of Foreign.newForeignPtr.");
+        stg_exit(EXIT_FAILURE);
+    }
+
   schedulePreLoop();
 
   // -----------------------------------------------------------
@@ -525,6 +542,8 @@ run_thread:
     }
 #endif
 
+    postEvent(cap, EVENT_RUN_THREAD, t->id, 0);
+
     switch (prev_what_next) {
 
     case ThreadKilled:
@@ -573,6 +592,8 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif
 
+    postEvent (cap, EVENT_STOP_THREAD, t->id, ret);
+
 #if defined(THREADED_RTS)
     // If ret is ThreadBlocked, and this Task is bound to the TSO that
     // blocked, we are in limbo - the TSO is now owned by whatever it
@@ -730,6 +751,7 @@ scheduleYield (Capability **pcap, Task *task)
     // if we have work, and we don't need to give up the Capability, continue.
     if (!shouldYieldCapability(cap,task) &&
         (!emptyRunQueue(cap) ||
+         !emptyWakeupQueue(cap) ||
          blackholes_need_checking ||
          sched_state >= SCHED_INTERRUPTING))
         return;
@@ -770,9 +792,11 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
     // Check whether we have more threads on our run queue, or sparks
     // in our pool, that we could hand to another Capability.
-    if ((emptyRunQueue(cap) || cap->run_queue_hd->_link == END_TSO_QUEUE)
-        && sparkPoolSizeCap(cap) < 2) {
-        return;
+    if (cap->run_queue_hd == END_TSO_QUEUE) {
+        if (sparkPoolSizeCap(cap) < 2) return;
+    } else {
+        if (cap->run_queue_hd->_link == END_TSO_QUEUE &&
+            sparkPoolSizeCap(cap) < 1) return;
     }
 
     // First grab as many free Capabilities as we can.
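
The rewritten entry test in schedulePushWork() above splits one compound condition into two cases: with an empty run queue we only push work when at least two sparks are available (one is kept for ourselves), while with exactly one runnable thread a single spare spark already justifies handing work to another Capability. A minimal standalone restatement of that decision, using plain counts and a hypothetical surplusWork() helper rather than the real Capability fields:

    #include <stdbool.h>

    /* Hypothetical restatement of the new schedulePushWork() entry test.
     * nRunnable stands in for the run-queue length and nSparks for
     * sparkPoolSizeCap(cap); neither name exists in the RTS. */
    static bool surplusWork(int nRunnable, int nSparks)
    {
        if (nRunnable == 0) {
            return nSparks >= 2;   /* keep one spark for ourselves */
        } else if (nRunnable == 1) {
            return nSparks >= 1;   /* our thread runs; any spark is spare */
        } else {
            return true;           /* two or more threads: always share */
        }
    }

schedulePushWork() proceeds to grab free Capabilities only in the cases where this predicate would return true.
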
@@ -835,6 +859,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
             } else {
                 debugTrace(DEBUG_sched, "pushing thread %lu to capability %d",
                            (unsigned long)t->id, free_caps[i]->no);
                 appendToRunQueue(free_caps[i],t);
+
+                postEvent (cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
+
                 if (t->bound) { t->bound->cap = free_caps[i]; }
                 t->cap = free_caps[i];
                 i++;
@@ -856,6 +883,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                     spark = tryStealSpark(cap->sparks);
                     if (spark != NULL) {
                         debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
+
+                        postEvent(free_caps[i], EVENT_STEAL_SPARK, t->id, cap->no);
+
                         newSpark(&(free_caps[i]->r), spark);
                     }
                 }
@@ -1251,7 +1281,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
                "--<< thread %ld (%s) stopped: HeapOverflow",
                (long)t->id, whatNext_strs[t->what_next]);
 
-    if (cap->context_switch) {
+    if (cap->r.rHpLim == NULL || cap->context_switch) {
         // Sometimes we miss a context switch, e.g. when calling
         // primitives in a tight loop, MAYBE_GC() doesn't check the
         // context switch flag, and we end up waiting for a GC.
@@ -1393,6 +1423,12 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
     debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished",
                (unsigned long)t->id, whatNext_strs[t->what_next]);
 
+    // blocked exceptions can now complete, even if the thread was in
+    // blocked mode (see #2910).  This unconditionally calls
+    // lockTSO(), which ensures that we don't miss any threads that
+    // are engaged in throwTo() with this thread as a target.
+    awakenBlockedExceptionQueue (cap, t);
+
     //
     // Check whether the thread that just completed was a bound
     // thread, and if so return with the result.
@@ -1435,7 +1471,11 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
                       *(task->ret) = NULL;
                   }
                   if (sched_state >= SCHED_INTERRUPTING) {
-                      task->stat = Interrupted;
+                      if (heap_overflow) {
+                          task->stat = HeapExhausted;
+                      } else {
+                          task->stat = Interrupted;
+                      }
                   } else {
                       task->stat = Killed;
                   }
@@ -1478,7 +1518,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
 #ifdef THREADED_RTS
     /* extern static volatile StgWord waiting_for_gc;
        lives inside capability.c */
-    rtsBool was_waiting;
+    rtsBool gc_type, prev_pending_gc;
     nat i;
 #endif
@@ -1490,6 +1530,16 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     }
 
 #ifdef THREADED_RTS
+    if (sched_state < SCHED_INTERRUPTING
+        && RtsFlags.ParFlags.parGcEnabled
+        && N >= RtsFlags.ParFlags.parGcGen
+        && ! oldest_gen->steps[0].mark)
+    {
+        gc_type = PENDING_GC_PAR;
+    } else {
+        gc_type = PENDING_GC_SEQ;
+    }
+
     // In order to GC, there must be no threads running Haskell code.
     // Therefore, the GC thread needs to hold *all* the capabilities,
     // and release them after the GC has completed.
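
Both pending-GC constants have to be nonzero for the handshake that follows: waiting_for_gc doubles as a flag ("is a GC pending?") and a record of which collector was requested, and cas(&waiting_for_gc, 0, gc_type) can only play both roles if neither PENDING_GC_SEQ nor PENDING_GC_PAR is 0. A plausible encoding, shown only for illustration (the real definitions live alongside the Capability code, and the exact values here are an assumption):

    /* Assumed values; the RTS defines these elsewhere.  What matters is
     * that 0 means "no GC pending" and any nonzero value identifies the
     * kind of collection that has been requested. */
    #define PENDING_GC_SEQ 1   /* stop everything, one thread collects */
    #define PENDING_GC_PAR 2   /* every Capability donates a GC thread */
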
@@ -1500,39 +1550,57 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     // actually did the GC.  But it's quite hard to arrange for all
     // the other tasks to sleep and stay asleep.
     //
-
+
     /* Other capabilities are prevented from running yet more Haskell
        threads if waiting_for_gc is set. Tested inside
        yieldCapability() and releaseCapability() in Capability.c */
 
-    was_waiting = cas(&waiting_for_gc, 0, 1);
-    if (was_waiting) {
+    prev_pending_gc = cas(&waiting_for_gc, 0, gc_type);
+    if (prev_pending_gc) {
         do {
-            debugTrace(DEBUG_sched, "someone else is trying to GC...");
-            if (cap) yieldCapability(&cap,task);
+            debugTrace(DEBUG_sched, "someone else is trying to GC (%d)...",
+                       prev_pending_gc);
+            ASSERT(cap);
+            yieldCapability(&cap,task);
         } while (waiting_for_gc);
         return cap;  // NOTE: task->cap might have changed here
     }
 
     setContextSwitches();
-    for (i=0; i < n_capabilities; i++) {
-        debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
-        if (cap != &capabilities[i]) {
-            Capability *pcap = &capabilities[i];
-            // we better hope this task doesn't get migrated to
-            // another Capability while we're waiting for this one.
-            // It won't, because load balancing happens while we have
-            // all the Capabilities, but even so it's a slightly
-            // unsavoury invariant.
-            task->cap = pcap;
-            waitForReturnCapability(&pcap, task);
-            if (pcap != &capabilities[i]) {
-                barf("scheduleDoGC: got the wrong capability");
-            }
-        }
+
+    // The final shutdown GC is always single-threaded, because it's
+    // possible that some of the Capabilities have no worker threads.
+
+    if (gc_type == PENDING_GC_SEQ)
+    {
+        postEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
+        // single-threaded GC: grab all the capabilities
+        for (i=0; i < n_capabilities; i++) {
+            debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
+            if (cap != &capabilities[i]) {
+                Capability *pcap = &capabilities[i];
+                // we better hope this task doesn't get migrated to
+                // another Capability while we're waiting for this one.
+                // It won't, because load balancing happens while we have
+                // all the Capabilities, but even so it's a slightly
+                // unsavoury invariant.
+                task->cap = pcap;
+                waitForReturnCapability(&pcap, task);
+                if (pcap != &capabilities[i]) {
+                    barf("scheduleDoGC: got the wrong capability");
+                }
+            }
+        }
     }
+    else
+    {
+        // multi-threaded GC: make sure all the Capabilities donate one
+        // GC thread each.
+        postEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
+        debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
 
-    waiting_for_gc = rtsFalse;
+        waitForGcThreads(cap);
+    }
 #endif
 
     // so this happens periodically:
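
The cas() above is a small leader election: the first task to swap waiting_for_gc from 0 to its (nonzero) gc_type becomes the task that orchestrates the GC, and every task that loses the race gives up its Capability until the leader resets the flag. A compressed sketch of the same idiom, with __sync_val_compare_and_swap standing in for the RTS's cas() and a spin loop standing in for yieldCapability() (both substitutions are mine, not how the RTS waits):

    /* gc_pending plays the role of waiting_for_gc; 0 means no GC requested. */
    static volatile unsigned long gc_pending = 0;

    /* Returns 1 if the caller won the race and must run (and later
     * acknowledge) the GC, 0 if someone else is already collecting. */
    static int tryToLeadGC(unsigned long gc_type)
    {
        unsigned long prev = __sync_val_compare_and_swap(&gc_pending, 0, gc_type);
        if (prev != 0) {
            while (gc_pending) { /* lost: wait for the leader to finish */ }
            return 0;
        }
        return 1;  /* won: caller must reset gc_pending when the GC is done */
    }

Note the reset discipline the real code follows: the leader clears waiting_for_gc just before the collection proper starts (see the next hunk), so that GC worker threads emerging from the collector don't mistake the still-set flag for a brand-new GC request.
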
@@ -1540,34 +1608,78 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
 
     IF_DEBUG(scheduler, printAllThreads());
 
+delete_threads_and_gc:
     /*
      * We now have all the capabilities; if we're in an interrupting
      * state, then we should take the opportunity to delete all the
      * threads in the system.
      */
-    if (sched_state >= SCHED_INTERRUPTING) {
-        deleteAllThreads(&capabilities[0]);
+    if (sched_state == SCHED_INTERRUPTING) {
+        deleteAllThreads(cap);
         sched_state = SCHED_SHUTTING_DOWN;
     }
 
     heap_census = scheduleNeedHeapProfile(rtsTrue);
 
-    /* everybody back, start the GC.
-     * Could do it in this thread, or signal a condition var
-     * to do it in another thread.  Either way, we need to
-     * broadcast on gc_pending_cond afterward.
-     */
 #if defined(THREADED_RTS)
+    postEvent(cap, EVENT_GC_START, 0, 0);
     debugTrace(DEBUG_sched, "doing GC");
+    // reset waiting_for_gc *before* GC, so that when the GC threads
+    // emerge they don't immediately re-enter the GC.
+    waiting_for_gc = 0;
+    GarbageCollect(force_major || heap_census, gc_type, cap);
+#else
+    GarbageCollect(force_major || heap_census, 0, cap);
 #endif
-    GarbageCollect(force_major || heap_census);
-
+    postEvent(cap, EVENT_GC_END, 0, 0);
+
+    if (recent_activity == ACTIVITY_INACTIVE && force_major)
+    {
+        // We are doing a GC because the system has been idle for a
+        // timeslice and we need to check for deadlock.  Record the
+        // fact that we've done a GC and turn off the timer signal;
+        // it will get re-enabled if we run any threads after the GC.
+        recent_activity = ACTIVITY_DONE_GC;
+        stopTimer();
+    }
+    else
+    {
+        // the GC might have taken long enough for the timer to set
+        // recent_activity = ACTIVITY_INACTIVE, but we aren't
+        // necessarily deadlocked:
+        recent_activity = ACTIVITY_YES;
+    }
+
+#if defined(THREADED_RTS)
+    if (gc_type == PENDING_GC_PAR)
+    {
+        releaseGCThreads(cap);
+    }
+#endif
+
     if (heap_census) {
         debugTrace(DEBUG_sched, "performing heap census");
         heapCensus();
         performHeapProfile = rtsFalse;
     }
 
+    if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
+        // GC set the heap_overflow flag, so we should proceed with
+        // an orderly shutdown now.  Ultimately we want the main
+        // thread to return to its caller with HeapExhausted, at which
+        // point the caller should call hs_exit().  The first step is
+        // to delete all the threads.
+        //
+        // Another way to do this would be to raise an exception in
+        // the main thread, which we really should do because it gives
+        // the program a chance to clean up.  But how do we find the
+        // main thread?  It should presumably be the same one that
+        // gets ^C exceptions, but that's all done on the Haskell side
+        // (GHC.TopHandler).
+        sched_state = SCHED_INTERRUPTING;
+        goto delete_threads_and_gc;
+    }
+
 #ifdef SPARKBALANCE
     /* JB
        Once we are all together... this would be the place to balance all
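
The heap_overflow branch is worth dwelling on: the collector never aborts the process itself, it only raises a flag, and the scheduler translates that flag into its ordinary interrupt machinery, so profiling output, heap censuses and other exit-time work still happen. A condensed sketch of the resulting control flow (not RTS code; the names mirror the diff, and deleteAllThreads/GarbageCollect are elided to comments):

    /* Sketch of the orderly heap-overflow shutdown driven by scheduleDoGC(). */
    enum SchedState { RUNNING, INTERRUPTING, SHUTTING_DOWN };
    static enum SchedState sched = RUNNING;
    static int heap_exhausted = 0;   /* plays the role of heap_overflow */

    static void gcCycle(void)
    {
    delete_threads_and_gc:
        if (sched == INTERRUPTING) {
            /* deleteAllThreads(...); */
            sched = SHUTTING_DOWN;
        }
        /* GarbageCollect(...);  may set heap_exhausted */
        if (heap_exhausted && sched < INTERRUPTING) {
            sched = INTERRUPTING;
            goto delete_threads_and_gc;   /* one more pass to kill threads */
        }
        /* the main task later reports HeapExhausted to its caller,
         * which is expected to call hs_exit() */
    }
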
@@ -1576,23 +1688,15 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     balanceSparkPoolsCaps(n_capabilities, capabilities);
 #endif
 
-    if (force_major)
-    {
-        // We've just done a major GC and we don't need the timer
-        // signal turned on any more (#1623).
-        // NB. do this *before* releasing the Capabilities, to avoid
-        // deadlocks!
-        recent_activity = ACTIVITY_DONE_GC;
-        stopTimer();
-    }
-
 #if defined(THREADED_RTS)
-    // release our stash of capabilities.
-    for (i = 0; i < n_capabilities; i++) {
-        if (cap != &capabilities[i]) {
-            task->cap = &capabilities[i];
-            releaseCapability(&capabilities[i]);
-        }
+    if (gc_type == PENDING_GC_SEQ) {
+        // release our stash of capabilities.
+        for (i = 0; i < n_capabilities; i++) {
+            if (cap != &capabilities[i]) {
+                task->cap = &capabilities[i];
+                releaseCapability(&capabilities[i]);
+            }
+        }
     }
     if (cap) {
         task->cap = cap;
@@ -1843,6 +1947,7 @@ suspendThread (StgRegTable *reg)
   task = cap->running_task;
   tso = cap->r.rCurrentTSO;
 
+  postEvent(cap, EVENT_STOP_THREAD, tso->id, THREAD_SUSPENDED_FOREIGN_CALL);
   debugTrace(DEBUG_sched,
              "thread %lu did a safe foreign call",
              (unsigned long)cap->r.rCurrentTSO->id);
@@ -1914,10 +2019,15 @@ resumeThread (void *task_)
     tso = task->suspended_tso;
     task->suspended_tso = NULL;
     tso->_link = END_TSO_QUEUE; // no write barrier reqd
+
+    postEvent(cap, EVENT_RUN_THREAD, tso->id, 0);
+
     debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
 
     if (tso->why_blocked == BlockedOnCCall) {
-        awakenBlockedExceptionQueue(cap,tso);
+        // avoid locking the TSO if we don't have to
+        if (tso->blocked_exceptions != END_TSO_QUEUE) {
+            awakenBlockedExceptionQueue(cap,tso);
+        }
         tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
     }
@@ -1967,6 +2077,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
         appendToRunQueue(cap,tso);
     } else {
+        postEvent (cap, EVENT_MIGRATE_THREAD, tso->id, capabilities[cpu].no);
         wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
     }
 #else
@@ -2019,6 +2130,10 @@ workerStart(Task *task)
     cap = task->cap;
     RELEASE_LOCK(&task->lock);
 
+    if (RtsFlags.ParFlags.setAffinity) {
+        setThreadAffinity(cap->no, n_capabilities);
+    }
+
     // set the thread-local pointer to the Task:
     taskEnter(task);
@@ -2106,8 +2221,6 @@ initScheduler(void)
     }
 #endif
 
-    trace(TRACE_sched, "start: %d capabilities", n_capabilities);
-
     RELEASE_LOCK(&sched_mutex);
 }
@@ -2122,16 +2235,16 @@ exitScheduler(
 {
     Task *task = NULL;
 
-#if defined(THREADED_RTS)
     ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
     RELEASE_LOCK(&sched_mutex);
-#endif
 
     // If we haven't killed all the threads yet, do it now.
     if (sched_state < SCHED_SHUTTING_DOWN) {
         sched_state = SCHED_INTERRUPTING;
-        scheduleDoGC(NULL,task,rtsFalse);
+        waitForReturnCapability(&task->cap,task);
+        scheduleDoGC(task->cap,task,rtsFalse);
+        releaseCapability(task->cap);
     }
     sched_state = SCHED_SHUTTING_DOWN;
@@ -2184,13 +2297,17 @@ static void
 performGC_(rtsBool force_major)
 {
     Task *task;
+
     // We must grab a new Task here, because the existing Task may be
     // associated with a particular Capability, and chained onto the
     // suspended_ccalling_tasks queue.
     ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
     RELEASE_LOCK(&sched_mutex);
-    scheduleDoGC(NULL,task,force_major);
+
+    waitForReturnCapability(&task->cap,task);
+    scheduleDoGC(task->cap,task,force_major);
+    releaseCapability(task->cap);
     boundTaskExiting(task);
 }
@@ -2432,6 +2549,10 @@ checkBlackHoles (Capability *cap)
     prev = &blackhole_queue;
     t = blackhole_queue;
     while (t != END_TSO_QUEUE) {
+        if (t->what_next == ThreadRelocated) {
+            t = t->_link;
+            continue;
+        }
         ASSERT(t->why_blocked == BlockedOnBlackHole);
         type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
         if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
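
One last pattern worth naming: the resumeThread() hunk above replaces an unconditional awakenBlockedExceptionQueue() (which takes the TSO lock) with a cheap unlocked emptiness check first. That is reasonable here because the resuming task owns the TSO at this point; whether such a check-then-lock shortcut is valid always depends on who else can write the field. A generic sketch of the idiom, with a pthread mutex standing in for lockTSO() (all names here are illustrative, not RTS API):

    #include <pthread.h>
    #include <stddef.h>

    typedef struct Waiter { struct Waiter *next; } Waiter;

    typedef struct {
        pthread_mutex_t lock;
        Waiter *waiters;      /* NULL plays the role of END_TSO_QUEUE */
    } WaitQueue;

    static void wakeAllIfAny(WaitQueue *q)
    {
        if (q->waiters == NULL)
            return;                        /* fast path: skip the lock */
        pthread_mutex_lock(&q->lock);
        while (q->waiters != NULL) {
            /* ... wake q->waiters ... */
            q->waiters = q->waiters->next;
        }
        pthread_mutex_unlock(&q->lock);
    }
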