X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSchedule.c;h=978adb89c8d7d9c050499a032d812122e2a770be;hb=fbc3fc411a2b619f638612dcaf322983c7a403c3;hp=33715b1ecd55ef2baac3818b59c7c502989ecaf2;hpb=5a5acb3698aa4ffdd738c301fa722afe12a1f3de;p=ghc-hetmet.git

diff --git a/rts/Schedule.c b/rts/Schedule.c
index 33715b1..978adb8 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -32,6 +32,7 @@
 #include "Proftimer.h"
 #include "ProfHeap.h"
 #include "GC.h"
+#include "Weak.h"
 
 /* PARALLEL_HASKELL includes go here */
 
@@ -281,6 +282,12 @@ schedule (Capability *initialCapability, Task *task)
             "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
             task, initialCapability);
 
+  if (running_finalizers) {
+      errorBelch("error: a C finalizer called back into Haskell.\n"
+                 "   use Foreign.Concurrent.newForeignPtr for Haskell finalizers.");
+      stg_exit(EXIT_FAILURE);
+  }
+
   schedulePreLoop();
 
   // -----------------------------------------------------------
@@ -777,9 +784,11 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
     // Check whether we have more threads on our run queue, or sparks
     // in our pool, that we could hand to another Capability.
 
-    if ((emptyRunQueue(cap) || cap->run_queue_hd->_link == END_TSO_QUEUE)
-        && sparkPoolSizeCap(cap) < 2) {
-        return;
+    if (cap->run_queue_hd == END_TSO_QUEUE) {
+        if (sparkPoolSizeCap(cap) < 2) return;
+    } else {
+        if (cap->run_queue_hd->_link == END_TSO_QUEUE &&
+            sparkPoolSizeCap(cap) < 1) return;
     }
 
     // First grab as many free Capabilities as we can.
@@ -1400,6 +1409,12 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
     debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished",
               (unsigned long)t->id, whatNext_strs[t->what_next]);
 
+    // blocked exceptions can now complete, even if the thread was in
+    // blocked mode (see #2910).  This unconditionally calls
+    // lockTSO(), which ensures that we don't miss any threads that
+    // are engaged in throwTo() with this thread as a target.
+    awakenBlockedExceptionQueue (cap, t);
+
     //
     // Check whether the thread that just completed was a bound
     // thread, and if so return with the result.
@@ -1590,6 +1605,21 @@ delete_threads_and_gc:
 
     heap_census = scheduleNeedHeapProfile(rtsTrue);
 
+    if (recent_activity == ACTIVITY_INACTIVE && force_major)
+    {
+        // We are doing a GC because the system has been idle for a
+        // timeslice and we need to check for deadlock.  Record the
+        // fact that we've done a GC and turn off the timer signal;
+        // it will get re-enabled if we run any threads after the GC.
+        //
+        // Note: this is done before GC, because after GC there might
+        // be threads already running (GarbageCollect() releases the
+        // GC threads when it completes), so we risk turning off the
+        // timer signal when it should really be on.
+        recent_activity = ACTIVITY_DONE_GC;
+        stopTimer();
+    }
+
 #if defined(THREADED_RTS)
     debugTrace(DEBUG_sched, "doing GC");
     // reset waiting_for_gc *before* GC, so that when the GC threads
@@ -1631,16 +1661,6 @@ delete_threads_and_gc:
     balanceSparkPoolsCaps(n_capabilities, capabilities);
 #endif
 
-    if (force_major)
-    {
-        // We've just done a major GC and we don't need the timer
-        // signal turned on any more (#1623).
-        // NB. do this *before* releasing the Capabilities, to avoid
-        // deadlocks!
-        recent_activity = ACTIVITY_DONE_GC;
-        stopTimer();
-    }
-
 #if defined(THREADED_RTS)
     if (gc_type == PENDING_GC_SEQ) {
         // release our stash of capabilities.
@@ -1974,7 +1994,10 @@ resumeThread (void *task_)
     debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
 
     if (tso->why_blocked == BlockedOnCCall) {
-        awakenBlockedExceptionQueue(cap,tso);
+        // avoid locking the TSO if we don't have to
+        if (tso->blocked_exceptions != END_TSO_QUEUE) {
+            awakenBlockedExceptionQueue(cap,tso);
+        }
         tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
     }
 
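
Context for the running_finalizers guard above: the patch aborts the program when a C finalizer calls back into Haskell, and its error message directs users to Foreign.Concurrent.newForeignPtr instead. A minimal Haskell sketch of that recommended pattern follows; the names attachFinalizer, ptr, and freeResource are illustrative and not part of this patch:

    import Foreign.Ptr (Ptr)
    import Foreign.ForeignPtr (ForeignPtr)
    import qualified Foreign.Concurrent as FC

    -- Foreign.Concurrent.newForeignPtr takes an ordinary IO action and
    -- runs it as Haskell code when the ForeignPtr is collected, so the
    -- finalizer may freely call back into Haskell.  By contrast, a
    -- FinalizerPtr registered through Foreign.ForeignPtr.newForeignPtr
    -- is a C function, and after this patch a callback into Haskell
    -- from such a finalizer terminates the program with the error
    -- message shown in the first hunk.
    attachFinalizer :: Ptr a -> IO () -> IO (ForeignPtr a)
    attachFinalizer ptr freeResource = FC.newForeignPtr ptr freeResource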