X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSchedule.c;h=97923022b44b44f727bc59a62b118f989d5332ac;hb=de75026f5a48d3d052135a973ab4dff76c5b20f5;hp=040d16f25afd0eb7cbc70ef6aa947bf3a2c60fcc;hpb=304e7fb703e7afddc1ef9be6aab6505e36b63b06;p=ghc-hetmet.git

diff --git a/rts/Schedule.c b/rts/Schedule.c
index 040d16f..9792302 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -33,6 +33,7 @@
 #include "ProfHeap.h"
 #include "GC.h"
 #include "Weak.h"
+#include "EventLog.h"
 
 /* PARALLEL_HASKELL includes go here */
 
@@ -284,7 +285,9 @@ schedule (Capability *initialCapability, Task *task)
 
     if (running_finalizers) {
         errorBelch("error: a C finalizer called back into Haskell.\n"
-                   "   use Foreign.Concurrent.newForeignPtr for Haskell finalizers.");
+                   "   This was previously allowed, but is disallowed in GHC 6.10.2 and later.\n"
+                   "   To create finalizers that may call back into Haskell, use\n"
+                   "   Foreign.Concurrent.newForeignPtr instead of Foreign.newForeignPtr.");
         stg_exit(EXIT_FAILURE);
     }
 
@@ -539,6 +542,8 @@ run_thread:
     }
 #endif
 
+    postEvent(cap, EVENT_RUN_THREAD, t->id, 0);
+
     switch (prev_what_next) {
 
     case ThreadKilled:
@@ -587,6 +592,8 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif
 
+    postEvent (cap, EVENT_STOP_THREAD, t->id, ret);
+
 #if defined(THREADED_RTS)
     // If ret is ThreadBlocked, and this Task is bound to the TSO that
     // blocked, we are in limbo - the TSO is now owned by whatever it
@@ -852,6 +859,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
             } else {
                 debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
                 appendToRunQueue(free_caps[i],t);
+
+                postEvent (cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
+
                 if (t->bound) { t->bound->cap = free_caps[i]; }
                 t->cap = free_caps[i];
                 i++;
@@ -873,6 +883,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                     spark = tryStealSpark(cap->sparks);
                     if (spark != NULL) {
                         debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
+
+                        postEvent(free_caps[i], EVENT_STEAL_SPARK, t->id, cap->no);
+
                         newSpark(&(free_caps[i]->r), spark);
                     }
                 }
@@ -1560,6 +1573,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
 
     if (gc_type == PENDING_GC_SEQ)
     {
+        postEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
         // single-threaded GC: grab all the capabilities
         for (i=0; i < n_capabilities; i++) {
             debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
@@ -1582,6 +1596,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     {
         // multi-threaded GC: make sure all the Capabilities donate one
         // GC thread each.
+        postEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
         debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
 
         waitForGcThreads(cap);
@@ -1607,6 +1622,7 @@ delete_threads_and_gc:
     heap_census = scheduleNeedHeapProfile(rtsTrue);
 
 #if defined(THREADED_RTS)
+    postEvent(cap, EVENT_GC_START, 0, 0);
    debugTrace(DEBUG_sched, "doing GC");
    // reset waiting_for_gc *before* GC, so that when the GC threads
    // emerge they don't immediately re-enter the GC.
@@ -1615,6 +1631,7 @@ delete_threads_and_gc:
 #else
     GarbageCollect(force_major || heap_census, 0, cap);
 #endif
+    postEvent(cap, EVENT_GC_END, 0, 0);
 
     if (recent_activity == ACTIVITY_INACTIVE && force_major)
     {
@@ -1930,6 +1947,7 @@ suspendThread (StgRegTable *reg)
   task = cap->running_task;
   tso = cap->r.rCurrentTSO;
 
+  postEvent(cap, EVENT_STOP_THREAD, tso->id, THREAD_SUSPENDED_FOREIGN_CALL);
   debugTrace(DEBUG_sched,
              "thread %lu did a safe foreign call",
              (unsigned long)cap->r.rCurrentTSO->id);
@@ -2001,6 +2019,8 @@ resumeThread (void *task_)
     tso = task->suspended_tso;
     task->suspended_tso = NULL;
     tso->_link = END_TSO_QUEUE; // no write barrier reqd
+
+    postEvent(cap, EVENT_RUN_THREAD, tso->id, 0);
     debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
 
     if (tso->why_blocked == BlockedOnCCall) {
@@ -2057,6 +2077,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
         appendToRunQueue(cap,tso);
     } else {
+        postEvent (cap, EVENT_MIGRATE_THREAD, tso->id, capabilities[cpu].no);
         wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
     }
 #else
@@ -2109,6 +2130,10 @@ workerStart(Task *task)
     cap = task->cap;
     RELEASE_LOCK(&task->lock);
 
+    if (RtsFlags.ParFlags.setAffinity) {
+        setThreadAffinity(cap->no, n_capabilities);
+    }
+
     // set the thread-local pointer to the Task:
     taskEnter(task);
 
@@ -2196,8 +2221,6 @@ initScheduler(void)
   }
 #endif
 
-  trace(TRACE_sched, "start: %d capabilities", n_capabilities);
-
   RELEASE_LOCK(&sched_mutex);
 }
 
@@ -2526,6 +2549,10 @@ checkBlackHoles (Capability *cap)
     prev = &blackhole_queue;
     t = blackhole_queue;
     while (t != END_TSO_QUEUE) {
+        if (t->what_next == ThreadRelocated) {
+            t = t->_link;
+            continue;
+        }
         ASSERT(t->why_blocked == BlockedOnBlackHole);
         type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
         if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
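
Editor's note on the pattern above: every instrumentation point this patch adds posts a small fixed-shape record (capability, event tag, thread id, one payload word) at a scheduler state transition: thread run/stop, thread migration, spark steal, GC request/start/end. The standalone sketch below illustrates that calling shape with a toy logger. It is not GHC's EventLog.c, and every name in it (ToyEventTag, ToyEvent, toy_postEvent, toy.eventlog) is invented for illustration only.

/* toy_eventlog.c -- minimal sketch of the postEvent() calling pattern
 * seen in the patch above.  NOT GHC's EventLog implementation; all
 * names here are hypothetical.  Build: cc toy_eventlog.c -o toy_eventlog
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

typedef enum {                 /* tags mirroring the events posted above */
    EV_RUN_THREAD, EV_STOP_THREAD, EV_MIGRATE_THREAD, EV_STEAL_SPARK,
    EV_REQUEST_SEQ_GC, EV_REQUEST_PAR_GC, EV_GC_START, EV_GC_END
} ToyEventTag;

typedef struct {               /* one fixed-size log record */
    uint64_t timestamp_ns;     /* when the event happened */
    uint32_t cap_no;           /* capability that posted it */
    uint32_t tag;              /* a ToyEventTag value */
    uint64_t thread_id;        /* 0 for events with no thread */
    uint64_t payload;          /* e.g. target capability or stop reason */
} ToyEvent;

/* Append one record to the log.  A real logger would buffer events per
 * capability and flush blocks in the background to keep the hot path cheap. */
static void toy_postEvent(FILE *log, uint32_t cap_no, ToyEventTag tag,
                          uint64_t thread_id, uint64_t payload)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    ToyEvent ev = {
        .timestamp_ns = (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec,
        .cap_no    = cap_no,
        .tag       = (uint32_t)tag,
        .thread_id = thread_id,
        .payload   = payload,
    };
    fwrite(&ev, sizeof ev, 1, log);
}

int main(void)
{
    FILE *log = fopen("toy.eventlog", "wb");
    if (log == NULL) return 1;
    /* same call shape as the instrumentation points in the patch */
    toy_postEvent(log, 0, EV_RUN_THREAD,  1, 0);
    toy_postEvent(log, 0, EV_STOP_THREAD, 1, 3 /* a stop-reason code */);
    fclose(log);
    return 0;
}

The design point the patch reflects is that each call site passes only data the scheduler already has in hand (t->id, ret, free_caps[i]->no), so posting an event costs a single record append on the scheduler's hot path.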