X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSchedule.c;h=66af8be7cb5b6e02c03700102d102cd10b639aa3;hb=0a5613f40b0e32cf59966e6b56b807cdbe80aa7b;hp=72086e83d45fccd94db8e915186fb2587997968e;hpb=5308c0423b613e0d6a22996e2052731f0a371475;p=ghc-hetmet.git

diff --git a/rts/Schedule.c b/rts/Schedule.c
index 72086e8..66af8be 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -709,7 +709,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
     Capability *free_caps[n_capabilities], *cap0;
     nat i, n_free_caps;
 
-    // migration can be turned off with +RTS -qg
+    // migration can be turned off with +RTS -qm
     if (!RtsFlags.ParFlags.migrate) return;
 
     // Check whether we have more threads on our run queue, or sparks
@@ -779,7 +779,6 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                     setTSOLink(cap, prev, t);
                     prev = t;
                 } else {
-                    debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
                     appendToRunQueue(free_caps[i],t);
 
                     traceEventMigrateThread (cap, t, free_caps[i]->no);
@@ -1189,36 +1188,32 @@ scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
 static rtsBool
 scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
 {
-    // Reset the context switch flag.  We don't do this just before
-    // running the thread, because that would mean we would lose ticks
-    // during GC, which can lead to unfair scheduling (a thread hogs
-    // the CPU because the tick always arrives during GC).  This way
-    // penalises threads that do a lot of allocation, but that seems
-    // better than the alternative.
-    cap->context_switch = 0;
-
     /* put the thread back on the run queue.  Then, if we're ready to
      * GC, check whether this is the last task to stop.  If so, wake
      * up the GC thread.  getThread will block during a GC until the
      * GC is finished.
      */
 
-#ifdef DEBUG
-    if (t->what_next != prev_what_next) {
-        debugTrace(DEBUG_sched,
-                   "--<< thread %ld (%s) stopped to switch evaluators",
-                   (long)t->id, what_next_strs[t->what_next]);
-    }
-#endif
-
+    ASSERT(t->_link == END_TSO_QUEUE);
 
     // Shortcut if we're just switching evaluators: don't bother
     // doing stack squeezing (which can be expensive), just run the
     // thread.
-    if (t->what_next != prev_what_next) {
+    if (cap->context_switch == 0 && t->what_next != prev_what_next) {
+        debugTrace(DEBUG_sched,
+                   "--<< thread %ld (%s) stopped to switch evaluators",
+                   (long)t->id, what_next_strs[t->what_next]);
         return rtsTrue;
     }
 
+    // Reset the context switch flag.  We don't do this just before
+    // running the thread, because that would mean we would lose ticks
+    // during GC, which can lead to unfair scheduling (a thread hogs
+    // the CPU because the tick always arrives during GC).  This way
+    // penalises threads that do a lot of allocation, but that seems
+    // better than the alternative.
+    cap->context_switch = 0;
+
     IF_DEBUG(sanity,
              //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
              checkTSO(t));
@@ -1332,6 +1327,17 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
 #ifdef DEBUG
             removeThreadLabel((StgWord)task->tso->id);
 #endif
+
+            // We no longer consider this thread and task to be bound to
+            // each other.  The TSO lives on until it is GC'd, but the
+            // task is about to be released by the caller, and we don't
+            // want anyone following the pointer from the TSO to the
+            // defunct task (which might have already been
+            // re-used).  This was a real bug: the GC updated
+            // tso->bound->tso which lead to a deadlock.
+            t->bound = NULL;
+            task->tso = NULL;
+
             return rtsTrue; // tells schedule() to return
         }
 
@@ -1941,6 +1947,7 @@ Capability *
 scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
 {
     Task *task;
+    StgThreadID id;
 
     // We already created/initialised the Task
     task = cap->running_task;
@@ -1956,14 +1963,15 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
 
     appendToRunQueue(cap,tso);
 
-    debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
+    id = tso->id;
+    debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)id);
 
     cap = schedule(cap,task);
 
     ASSERT(task->stat != NoStatus);
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
-    debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id);
+    debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)id);
 
     return cap;
 }
@@ -2093,7 +2101,8 @@ exitScheduler(
     if (sched_state < SCHED_SHUTTING_DOWN) {
         sched_state = SCHED_INTERRUPTING;
         waitForReturnCapability(&task->cap,task);
-        scheduleDoGC(task->cap,task,rtsFalse);
+        scheduleDoGC(task->cap,task,rtsFalse);
+        ASSERT(task->tso == NULL);
         releaseCapability(task->cap);
     }
     sched_state = SCHED_SHUTTING_DOWN;
@@ -2103,6 +2112,7 @@ exitScheduler(
 
         nat i;
         for (i = 0; i < n_capabilities; i++) {
+            ASSERT(task->tso == NULL);
             shutdownCapability(&capabilities[i], task, wait_foreign);
         }
     }
@@ -2550,7 +2560,8 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
                 SET_HDR(raise_closure, &stg_raise_info, CCCS);
                 raise_closure->payload[0] = exception;
             }
-            UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
+            UPD_IND(cap, ((StgUpdateFrame *)p)->updatee,
+                    (StgClosure *)raise_closure);
             p = next;
             continue;
 
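The hunk in scheduleHandleThreadFinished above severs the TSO/Task binding in both directions before the Task is handed back by the caller. The small standalone C sketch below is not part of the patch; it uses simplified stand-in structs rather than the RTS's real StgTSO and Task (the field names bound and tso are only kept to mirror the change). It illustrates the invariant the fix enforces: once a bound thread has finished, neither side may still point at the other, so later code, such as the GC that used to update tso->bound->tso, can never follow a pointer into a Task that has already been recycled.

    #include <assert.h>
    #include <stddef.h>

    /* Simplified, illustrative stand-ins for the RTS's StgTSO and Task. */
    struct Task;

    typedef struct TSO {
        struct Task *bound;   /* Task this thread is bound to, or NULL */
    } TSO;

    typedef struct Task {
        TSO *tso;             /* bound thread, or NULL */
    } Task;

    /* When a bound thread finishes, break the link in both directions
     * before the Task is released for re-use, so nothing can follow
     * tso->bound into a defunct (possibly recycled) Task afterwards. */
    static void unbindFinishedThread(TSO *t, Task *task)
    {
        assert(t->bound == task && task->tso == t);
        t->bound  = NULL;
        task->tso = NULL;
    }

    int main(void)
    {
        TSO t;
        Task task;

        t.bound  = &task;     /* bind thread and task to each other */
        task.tso = &t;

        unbindFinishedThread(&t, &task);   /* the bound thread has finished */

        assert(t.bound == NULL && task.tso == NULL);
        return 0;
    }

The two ASSERT(task->tso == NULL) calls added to exitScheduler check the same property from the Task side during shutdown, after scheduleDoGC has returned.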