X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSchedule.c;h=040d16f25afd0eb7cbc70ef6aa947bf3a2c60fcc;hb=304e7fb703e7afddc1ef9be6aab6505e36b63b06;hp=d22d48fb8f2b4e4b0d52092029d9b55cf551ad7e;hpb=ea1ad23f58b5e45731b47a1d686c0dd73766e1b7;p=ghc-hetmet.git

diff --git a/rts/Schedule.c b/rts/Schedule.c
index d22d48f..040d16f 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -32,6 +32,7 @@
 #include "Proftimer.h"
 #include "ProfHeap.h"
 #include "GC.h"
+#include "Weak.h"
 
 /* PARALLEL_HASKELL includes go here */
 
@@ -281,6 +282,12 @@ schedule (Capability *initialCapability, Task *task)
 	     "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
 	     task, initialCapability);
 
+  if (running_finalizers) {
+      errorBelch("error: a C finalizer called back into Haskell.\n"
+                 "   use Foreign.Concurrent.newForeignPtr for Haskell finalizers.");
+      stg_exit(EXIT_FAILURE);
+  }
+
   schedulePreLoop();
 
   // -----------------------------------------------------------
@@ -737,6 +744,7 @@ scheduleYield (Capability **pcap, Task *task)
     // if we have work, and we don't need to give up the Capability, continue.
     if (!shouldYieldCapability(cap,task) &&
         (!emptyRunQueue(cap) ||
+         !emptyWakeupQueue(cap) ||
          blackholes_need_checking ||
          sched_state >= SCHED_INTERRUPTING))
         return;
@@ -1260,7 +1268,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
 	       "--<< thread %ld (%s) stopped: HeapOverflow",
 	       (long)t->id, whatNext_strs[t->what_next]);
 
-    if (cap->context_switch) {
+    if (cap->r.rHpLim == NULL || cap->context_switch) {
         // Sometimes we miss a context switch, e.g. when calling
         // primitives in a tight loop, MAYBE_GC() doesn't check the
         // context switch flag, and we end up waiting for a GC.
@@ -1598,29 +1606,38 @@ delete_threads_and_gc:
 
     heap_census = scheduleNeedHeapProfile(rtsTrue);
 
+#if defined(THREADED_RTS)
+    debugTrace(DEBUG_sched, "doing GC");
+    // reset waiting_for_gc *before* GC, so that when the GC threads
+    // emerge they don't immediately re-enter the GC.
+    waiting_for_gc = 0;
+    GarbageCollect(force_major || heap_census, gc_type, cap);
+#else
+    GarbageCollect(force_major || heap_census, 0, cap);
+#endif
+
     if (recent_activity == ACTIVITY_INACTIVE && force_major)
     {
         // We are doing a GC because the system has been idle for a
         // timeslice and we need to check for deadlock.  Record the
         // fact that we've done a GC and turn off the timer signal;
         // it will get re-enabled if we run any threads after the GC.
-        //
-        // Note: this is done before GC, because after GC there might
-        // be threads already running (GarbageCollect() releases the
-        // GC threads when it completes), so we risk turning off the
-        // timer signal when it should really be on.
         recent_activity = ACTIVITY_DONE_GC;
         stopTimer();
     }
+    else
+    {
+        // the GC might have taken long enough for the timer to set
+        // recent_activity = ACTIVITY_INACTIVE, but we aren't
+        // necessarily deadlocked:
+        recent_activity = ACTIVITY_YES;
+    }
 
 #if defined(THREADED_RTS)
-    debugTrace(DEBUG_sched, "doing GC");
-    // reset waiting_for_gc *before* GC, so that when the GC threads
-    // emerge they don't immediately re-enter the GC.
-    waiting_for_gc = 0;
-    GarbageCollect(force_major || heap_census, gc_type, cap);
-#else
-    GarbageCollect(force_major || heap_census, 0, cap);
+    if (gc_type == PENDING_GC_PAR)
+    {
+        releaseGCThreads(cap);
+    }
 #endif
 
     if (heap_census) {
@@ -2195,22 +2212,16 @@ exitScheduler(
 {
     Task *task = NULL;
 
-#if defined(THREADED_RTS)
     ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
     RELEASE_LOCK(&sched_mutex);
-#endif
 
     // If we haven't killed all the threads yet, do it now.
     if (sched_state < SCHED_SHUTTING_DOWN) {
 	sched_state = SCHED_INTERRUPTING;
-#if defined(THREADED_RTS)
 	waitForReturnCapability(&task->cap,task);
 	scheduleDoGC(task->cap,task,rtsFalse);
 	releaseCapability(task->cap);
-#else
-	scheduleDoGC(&MainCapability,task,rtsFalse);
-#endif
     }
     sched_state = SCHED_SHUTTING_DOWN;
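
Note on the running_finalizers check added above: C finalizers are run by the
RTS at a point where re-entering Haskell is unsafe, so the scheduler now aborts
and points users at Foreign.Concurrent.  A minimal sketch of the sanctioned
alternative follows; Foreign.Concurrent.newForeignPtr is the real API named in
the error message, while mkBuffer, the "buffer freed" message, and the
4096-byte size are illustrative assumptions only, not part of this patch:

    import Foreign.ForeignPtr (ForeignPtr, withForeignPtr)
    import Foreign.Marshal.Alloc (mallocBytes, free)
    import qualified Foreign.Concurrent as FC

    -- Allocate n bytes and attach a Haskell finalizer (an ordinary IO
    -- action).  Foreign.Concurrent finalizers are run in a Haskell
    -- thread, so they may call back into Haskell freely -- unlike a C
    -- finalizer registered with Foreign.ForeignPtr.newForeignPtr,
    -- which the running_finalizers check above now rejects.
    mkBuffer :: Int -> IO (ForeignPtr a)
    mkBuffer n = do
        p <- mallocBytes n
        FC.newForeignPtr p (free p >> putStrLn "buffer freed")

    main :: IO ()
    main = do
        fp <- mkBuffer 4096
        withForeignPtr fp (\_ -> return ())  -- keep the buffer alive while in use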