if (prev == ACTIVITY_DONE_GC) {
startTimer();
}
- } else {
+ } else if (recent_activity != ACTIVITY_INACTIVE) {
+ // If we reached ACTIVITY_INACTIVE, then don't reset it until
+ // we've done the GC. The thread running here might just be
+ // the IO manager thread that handle_tick() woke up via
+ // wakeUpRts().
recent_activity = ACTIVITY_YES;
}
#endif
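
The hunk above is part of the idle-GC machinery: handle_tick() (in Timer.c) notices prolonged inactivity, moves recent_activity to ACTIVITY_INACTIVE and wakes the RTS; the scheduler then performs one idle GC and parks in ACTIVITY_DONE_GC with the timer stopped; ordinary work later flips the state back to ACTIVITY_YES and restarts the timer. Below is a minimal single-threaded sketch of that state machine as it can be read from this code. The helper names tick(), maybe_idle_gc() and run_some_work(), and the two-tick delay, are illustrative only; the real RTS updates the state with an atomic xchg rather than a plain store.

    #include <stdio.h>

    enum activity { ACTIVITY_YES, ACTIVITY_MAYBE_NO, ACTIVITY_INACTIVE, ACTIVITY_DONE_GC };

    static enum activity recent_activity = ACTIVITY_YES;
    static int ticks_until_idle = 2;   /* stand-in for the idle-GC delay */
    static int timer_running    = 1;

    /* timer interrupt: detect inactivity and eventually wake the RTS for an idle GC */
    static void tick(void)
    {
        if (!timer_running) return;
        switch (recent_activity) {
        case ACTIVITY_YES:
            recent_activity = ACTIVITY_MAYBE_NO;      /* nothing ran since the last tick */
            ticks_until_idle = 2;
            break;
        case ACTIVITY_MAYBE_NO:
            if (--ticks_until_idle == 0) {
                recent_activity = ACTIVITY_INACTIVE;  /* wakeUpRts() would fire here */
                printf("tick: inactive, waking the RTS for an idle GC\n");
            }
            break;
        default:
            break;                                    /* INACTIVE / DONE_GC: nothing to do */
        }
    }

    /* scheduler side: perform the idle GC once, then stop the timer */
    static void maybe_idle_gc(void)
    {
        if (recent_activity == ACTIVITY_INACTIVE) {
            printf("scheduler: idle GC\n");
            recent_activity = ACTIVITY_DONE_GC;
            timer_running = 0;                        /* stopTimer() */
        }
    }

    /* scheduler side: about to run a thread -- mirrors the guarded reset above */
    static void run_some_work(void)
    {
        if (recent_activity == ACTIVITY_DONE_GC) {
            recent_activity = ACTIVITY_YES;
            timer_running = 1;                        /* startTimer() */
        } else if (recent_activity != ACTIVITY_INACTIVE) {
            /* don't clobber ACTIVITY_INACTIVE before the idle GC has happened:
               the thread running here may be the one woken just to perform it */
            recent_activity = ACTIVITY_YES;
        }
        printf("scheduler: running a thread\n");
    }

    int main(void)
    {
        tick(); tick(); tick();    /* stay idle long enough to trigger the idle GC */
        run_some_work();           /* e.g. the woken IO manager: state stays INACTIVE */
        maybe_idle_gc();           /* the idle GC runs, timer is stopped */
        run_some_work();           /* real work resumes: ACTIVITY_YES, timer restarted */
        return 0;
    }
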
- traceSchedEvent(cap, EVENT_RUN_THREAD, t, 0);
+ traceEventRunThread(cap, t);
switch (prev_what_next) {
t->saved_winerror = GetLastError();
#endif
- traceSchedEvent (cap, EVENT_STOP_THREAD, t, ret);
+ traceEventStopThread(cap, t, ret);
#if defined(THREADED_RTS)
// If ret is ThreadBlocked, and this Task is bound to the TSO that
Capability *free_caps[n_capabilities], *cap0;
nat i, n_free_caps;
- // migration can be turned off with +RTS -qg
+ // migration can be turned off with +RTS -qm
if (!RtsFlags.ParFlags.migrate) return;
// Check whether we have more threads on our run queue, or sparks
setTSOLink(cap, prev, t);
prev = t;
} else {
- debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
appendToRunQueue(free_caps[i],t);
- traceSchedEvent (cap, EVENT_MIGRATE_THREAD, t, free_caps[i]->no);
+ traceEventMigrateThread (cap, t, free_caps[i]->no);
if (t->bound) { t->bound->cap = free_caps[i]; }
t->cap = free_caps[i];
if (spark != NULL) {
debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
- traceSchedEvent(free_caps[i], EVENT_STEAL_SPARK, t, cap->no);
+ traceEventStealSpark(free_caps[i], t, cap->no);
newSpark(&(free_caps[i]->r), spark);
}
static rtsBool
scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
{
- // Reset the context switch flag. We don't do this just before
- // running the thread, because that would mean we would lose ticks
- // during GC, which can lead to unfair scheduling (a thread hogs
- // the CPU because the tick always arrives during GC). This way
- // penalises threads that do a lot of allocation, but that seems
- // better than the alternative.
- cap->context_switch = 0;
-
/* put the thread back on the run queue. Then, if we're ready to
* GC, check whether this is the last task to stop. If so, wake
* up the GC thread. getThread will block during a GC until the
* GC is finished.
*/
-#ifdef DEBUG
- if (t->what_next != prev_what_next) {
- debugTrace(DEBUG_sched,
- "--<< thread %ld (%s) stopped to switch evaluators",
- (long)t->id, what_next_strs[t->what_next]);
- }
-#endif
-
+
ASSERT(t->_link == END_TSO_QUEUE);
// Shortcut if we're just switching evaluators: don't bother
// doing stack squeezing (which can be expensive), just run the
// thread.
- if (t->what_next != prev_what_next) {
+ if (cap->context_switch == 0 && t->what_next != prev_what_next) {
+ debugTrace(DEBUG_sched,
+ "--<< thread %ld (%s) stopped to switch evaluators",
+ (long)t->id, what_next_strs[t->what_next]);
return rtsTrue;
}
+ // Reset the context switch flag. We don't do this just before
+ // running the thread, because that would mean we would lose ticks
+ // during GC, which can lead to unfair scheduling (a thread hogs
+ // the CPU because the tick always arrives during GC). This way
+ // penalises threads that do a lot of allocation, but that seems
+ // better than the alternative.
+ cap->context_switch = 0;
+
IF_DEBUG(sanity,
//debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
checkTSO(t));
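
The relocated reset above changes the order of operations in scheduleHandleYield: the pending-switch flag is now consulted before it is cleared, so the evaluator-switch fast path is taken only when no context switch is due, and a tick that arrived while the thread was inside the RTS (for example during GC) is not silently discarded. A compressed, stand-alone sketch of that control flow follows; handle_yield() is a hypothetical name, and only two of the what_next values are modelled.

    #include <stdio.h>

    static volatile int context_switch = 0;   /* set asynchronously by the timer */

    enum what_next { ThreadRunGHC, ThreadInterpret };

    /* returns nonzero when the yielding thread may keep running (fast path) */
    static int handle_yield(enum what_next now, enum what_next prev)
    {
        /* take the fast path only if no switch is pending and we are merely
           switching evaluators; the flag is read before it is reset */
        if (context_switch == 0 && now != prev) {
            return 1;
        }
        /* consume the pending switch here, after the check, so a tick that
           fired while the thread was inside the RTS still counts against it */
        context_switch = 0;
        return 0;   /* caller puts the thread back on the run queue */
    }

    int main(void)
    {
        printf("%d\n", handle_yield(ThreadInterpret, ThreadRunGHC)); /* 1: fast path */
        context_switch = 1;
        printf("%d\n", handle_yield(ThreadInterpret, ThreadRunGHC)); /* 0: switch wins */
        return 0;
    }
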
#ifdef DEBUG
removeThreadLabel((StgWord)task->tso->id);
#endif
+
+ // We no longer consider this thread and task to be bound to
+ // each other. The TSO lives on until it is GC'd, but the
+ // task is about to be released by the caller, and we don't
+ // want anyone following the pointer from the TSO to the
+ // defunct task (which might have already been
+ // re-used). This was a real bug: the GC updated
+ // tso->bound->tso which led to a deadlock.
+ t->bound = NULL;
+ task->tso = NULL;
+
return rtsTrue; // tells schedule() to return
}
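
The new unbinding above severs both halves of the TSO<->Task link when a bound thread finishes, so nothing can reach the Task (which the caller is about to release and which may be recycled) through the long-lived TSO. A small stand-alone illustration of that invariant follows; the struct names echo StgTSO/Task but carry none of the real fields, and visit_tso() is only a stand-in for a GC-style traversal.

    #include <stdlib.h>
    #include <assert.h>

    struct task;                    /* stand-ins for the RTS's Task and StgTSO */
    struct tso  { struct task *bound; };
    struct task { struct tso  *tso; };

    /* unbind both directions before the Task is recycled, so that nothing
       (e.g. a later GC traversal) can reach the defunct Task via the TSO */
    static void finish_bound_thread(struct tso *t, struct task *task)
    {
        t->bound  = NULL;
        task->tso = NULL;
    }

    /* a GC-like visitor: with the unbinding above it must tolerate bound == NULL
       instead of chasing a pointer into a freed or re-used Task */
    static void visit_tso(struct tso *t)
    {
        if (t->bound != NULL) {
            assert(t->bound->tso == t);   /* only safe while the binding is live */
        }
    }

    int main(void)
    {
        struct task *task = malloc(sizeof *task);
        struct tso  *t    = malloc(sizeof *t);
        task->tso = t; t->bound = task;

        finish_bound_thread(t, task);
        free(task);                 /* the Task goes away; the TSO lives on until GC'd */
        visit_tso(t);               /* no dangling dereference */
        free(t);
        return 0;
    }
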
if (gc_type == PENDING_GC_SEQ)
{
- traceSchedEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
+ traceEventRequestSeqGc(cap);
}
else
{
- traceSchedEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
+ traceEventRequestParGc(cap);
debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
}
heap_census = scheduleNeedHeapProfile(rtsTrue);
+ traceEventGcStart(cap);
#if defined(THREADED_RTS)
- traceSchedEvent(cap, EVENT_GC_START, 0, 0);
// reset waiting_for_gc *before* GC, so that when the GC threads
// emerge they don't immediately re-enter the GC.
waiting_for_gc = 0;
#else
GarbageCollect(force_major || heap_census, 0, cap);
#endif
- traceSchedEvent(cap, EVENT_GC_END, 0, 0);
+ traceEventGcEnd(cap);
if (recent_activity == ACTIVITY_INACTIVE && force_major)
{
task = cap->running_task;
tso = cap->r.rCurrentTSO;
- traceSchedEvent(cap, EVENT_STOP_THREAD, tso, THREAD_SUSPENDED_FOREIGN_CALL);
+ traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL);
// XXX this might not be necessary --SDM
tso->what_next = ThreadRunGHC;
task->suspended_tso = NULL;
tso->_link = END_TSO_QUEUE; // no write barrier reqd
- traceSchedEvent(cap, EVENT_RUN_THREAD, tso, tso->what_next);
+ traceEventRunThread(cap, tso);
if (tso->why_blocked == BlockedOnCCall) {
// avoid locking the TSO if we don't have to
if (cpu == cap->no) {
appendToRunQueue(cap,tso);
} else {
- traceSchedEvent (cap, EVENT_MIGRATE_THREAD, tso, capabilities[cpu].no);
+ traceEventMigrateThread (cap, tso, capabilities[cpu].no);
wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
}
#else
scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
{
Task *task;
+ StgThreadID id;
// We already created/initialised the Task
task = cap->running_task;
appendToRunQueue(cap,tso);
- debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
+ id = tso->id;
+ debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)id);
cap = schedule(cap,task);
ASSERT(task->stat != NoStatus);
ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
- debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id);
+ debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)id);
return cap;
}
if (sched_state < SCHED_SHUTTING_DOWN) {
sched_state = SCHED_INTERRUPTING;
waitForReturnCapability(&task->cap,task);
- scheduleDoGC(task->cap,task,rtsFalse);
+ scheduleDoGC(task->cap,task,rtsFalse);
+ ASSERT(task->tso == NULL);
releaseCapability(task->cap);
}
sched_state = SCHED_SHUTTING_DOWN;
nat i;
for (i = 0; i < n_capabilities; i++) {
+ ASSERT(task->tso == NULL);
shutdownCapability(&capabilities[i], task, wait_foreign);
}
}
SET_HDR(raise_closure, &stg_raise_info, CCCS);
raise_closure->payload[0] = exception;
}
- UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
+ UPD_IND(cap, ((StgUpdateFrame *)p)->updatee,
+ (StgClosure *)raise_closure);
p = next;
continue;