X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSchedule.c;h=c61bbc7f183d1126f62c202b2a6ce0c3a5dd00e1;hb=90686adf9d3dc7a09a51853df051bc4ea472d840;hp=6dbc7c41ad0d700845118dc97baeab2892d35151;hpb=dddbf3593436ac0355d907b7b759e1b44f4f3d0f;p=ghc-hetmet.git

diff --git a/rts/Schedule.c b/rts/Schedule.c
index 6dbc7c4..c61bbc7 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -17,7 +17,7 @@
 #include "Interpreter.h"
 #include "Printer.h"
 #include "RtsSignals.h"
-#include "Sanity.h"
+#include "sm/Sanity.h"
 #include "Stats.h"
 #include "STM.h"
 #include "Prelude.h"
@@ -463,12 +463,16 @@ run_thread:
         if (prev == ACTIVITY_DONE_GC) {
             startTimer();
         }
-    } else {
+    } else if (recent_activity != ACTIVITY_INACTIVE) {
+        // If we reached ACTIVITY_INACTIVE, then don't reset it until
+        // we've done the GC. The thread running here might just be
+        // the IO manager thread that handle_tick() woke up via
+        // wakeUpRts().
         recent_activity = ACTIVITY_YES;
     }
 #endif

-    traceSchedEvent(cap, EVENT_RUN_THREAD, t, 0);
+    traceEventRunThread(cap, t);

     switch (prev_what_next) {
@@ -518,7 +522,7 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif

-    traceSchedEvent (cap, EVENT_STOP_THREAD, t, ret);
+    traceEventStopThread(cap, t, ret);

 #if defined(THREADED_RTS)
     // If ret is ThreadBlocked, and this Task is bound to the TSO that
@@ -705,7 +709,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
     Capability *free_caps[n_capabilities], *cap0;
     nat i, n_free_caps;

-    // migration can be turned off with +RTS -qg
+    // migration can be turned off with +RTS -qm
     if (!RtsFlags.ParFlags.migrate) return;

     // Check whether we have more threads on our run queue, or sparks
@@ -775,10 +779,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                     setTSOLink(cap, prev, t);
                     prev = t;
                 } else {
-                    debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
                     appendToRunQueue(free_caps[i],t);
-                    traceSchedEvent (cap, EVENT_MIGRATE_THREAD, t, free_caps[i]->no);
+                    traceEventMigrateThread (cap, t, free_caps[i]->no);
                     if (t->bound) { t->bound->cap = free_caps[i]; }
                     t->cap = free_caps[i];
@@ -802,7 +805,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
             if (spark != NULL) {
                 debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);

-                traceSchedEvent(free_caps[i], EVENT_STEAL_SPARK, t, cap->no);
+                traceEventStealSpark(free_caps[i], t, cap->no);

                 newSpark(&(free_caps[i]->r), spark);
             }
@@ -1118,7 +1121,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
             {
                 bdescr *x;
                 for (x = bd; x < bd + blocks; x++) {
-                    initBdescr(x,cap->r.rNursery);
+                    initBdescr(x,g0,g0);
                     x->free = x->start;
                     x->flags = 0;
                 }
@@ -1328,6 +1331,17 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
 #ifdef DEBUG
         removeThreadLabel((StgWord)task->tso->id);
 #endif
+
+        // We no longer consider this thread and task to be bound to
+        // each other. The TSO lives on until it is GC'd, but the
+        // task is about to be released by the caller, and we don't
+        // want anyone following the pointer from the TSO to the
+        // defunct task (which might have already been
+        // re-used). This was a real bug: the GC updated
+        // tso->bound->tso which lead to a deadlock.
+        t->bound = NULL;
+        task->tso = NULL;
+
         return rtsTrue; // tells schedule() to return
@@ -1378,7 +1392,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     if (sched_state < SCHED_INTERRUPTING
         && RtsFlags.ParFlags.parGcEnabled
         && N >= RtsFlags.ParFlags.parGcGen
-        && ! oldest_gen->steps[0].mark)
+        && ! oldest_gen->mark)
     {
         gc_type = PENDING_GC_PAR;
     } else {
@@ -1418,11 +1432,11 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)

     if (gc_type == PENDING_GC_SEQ)
     {
-        traceSchedEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
+        traceEventRequestSeqGc(cap);
     }
     else
     {
-        traceSchedEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
+        traceEventRequestParGc(cap);
         debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
     }
@@ -1478,8 +1492,8 @@ delete_threads_and_gc:

     heap_census = scheduleNeedHeapProfile(rtsTrue);

+    traceEventGcStart(cap);
 #if defined(THREADED_RTS)
-    traceSchedEvent(cap, EVENT_GC_START, 0, 0);
     // reset waiting_for_gc *before* GC, so that when the GC threads
     // emerge they don't immediately re-enter the GC.
     waiting_for_gc = 0;
@@ -1487,7 +1501,7 @@ delete_threads_and_gc:
 #else
     GarbageCollect(force_major || heap_census, 0, cap);
 #endif
-    traceSchedEvent(cap, EVENT_GC_END, 0, 0);
+    traceEventGcEnd(cap);

     if (recent_activity == ACTIVITY_INACTIVE && force_major)
     {
@@ -1580,7 +1594,7 @@ forkProcess(HsStablePtr *entry
     pid_t pid;
     StgTSO* t,*next;
     Capability *cap;
-    nat s;
+    nat g;

 #if defined(THREADED_RTS)
     if (RtsFlags.ParFlags.nNodes > 1) {
@@ -1628,8 +1642,8 @@ forkProcess(HsStablePtr *entry
         // all Tasks, because they correspond to OS threads that are
         // now gone.

-        for (s = 0; s < total_steps; s++) {
-          for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
+        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+          for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
             if (t->what_next == ThreadRelocated) {
                 next = t->_link;
             } else {
@@ -1655,8 +1669,8 @@ forkProcess(HsStablePtr *entry

         // Empty the threads lists. Otherwise, the garbage
         // collector may attempt to resurrect some of these threads.
-        for (s = 0; s < total_steps; s++) {
-            all_steps[s].threads = END_TSO_QUEUE;
+        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+            generations[g].threads = END_TSO_QUEUE;
         }

         // Wipe the task list, except the current Task.
@@ -1710,19 +1724,19 @@ deleteAllThreads ( Capability *cap )
     // NOTE: only safe to call if we own all capabilities.

     StgTSO* t, *next;
-    nat s;
+    nat g;

     debugTrace(DEBUG_sched,"deleting all threads");
-    for (s = 0; s < total_steps; s++) {
-      for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
-        if (t->what_next == ThreadRelocated) {
-            next = t->_link;
-        } else {
-            next = t->global_link;
-            deleteThread(cap,t);
-        }
-      }
-    }
+    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+        for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
+            if (t->what_next == ThreadRelocated) {
+                next = t->_link;
+            } else {
+                next = t->global_link;
+                deleteThread(cap,t);
+            }
+        }
+    }

     // The run queue now contains a bunch of ThreadKilled threads. We
     // must not throw these away: the main thread(s) will be in there
@@ -1806,7 +1820,7 @@ suspendThread (StgRegTable *reg)
     task = cap->running_task;
     tso = cap->r.rCurrentTSO;

-    traceSchedEvent(cap, EVENT_STOP_THREAD, tso, THREAD_SUSPENDED_FOREIGN_CALL);
+    traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL);

     // XXX this might not be necessary --SDM
     tso->what_next = ThreadRunGHC;
@@ -1869,7 +1883,7 @@ resumeThread (void *task_)
     task->suspended_tso = NULL;
     tso->_link = END_TSO_QUEUE; // no write barrier reqd

-    traceSchedEvent(cap, EVENT_RUN_THREAD, tso, tso->what_next);
+    traceEventRunThread(cap, tso);

     if (tso->why_blocked == BlockedOnCCall) {
         // avoid locking the TSO if we don't have to
@@ -1925,7 +1939,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
         appendToRunQueue(cap,tso);
     } else {
-        traceSchedEvent (cap, EVENT_MIGRATE_THREAD, tso, capabilities[cpu].no);
+        traceEventMigrateThread (cap, tso, capabilities[cpu].no);
         wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
     }
 #else
@@ -1937,6 +1951,7 @@ Capability *
 scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
 {
     Task *task;
+    StgThreadID id;

     // We already created/initialised the Task
     task = cap->running_task;
@@ -1952,14 +1967,15 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)

     appendToRunQueue(cap,tso);

-    debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
+    id = tso->id;
+    debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)id);

     cap = schedule(cap,task);

     ASSERT(task->stat != NoStatus);
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

-    debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id);
+    debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)id);
     return cap;
 }
@@ -2089,7 +2105,8 @@ exitScheduler(
     if (sched_state < SCHED_SHUTTING_DOWN) {
         sched_state = SCHED_INTERRUPTING;
         waitForReturnCapability(&task->cap,task);
-        scheduleDoGC(task->cap,task,rtsFalse);
+        scheduleDoGC(task->cap,task,rtsFalse);
+        ASSERT(task->tso == NULL);
         releaseCapability(task->cap);
     }
     sched_state = SCHED_SHUTTING_DOWN;
@@ -2099,6 +2116,7 @@ exitScheduler(

         nat i;
         for (i = 0; i < n_capabilities; i++) {
+            ASSERT(task->tso == NULL);
             shutdownCapability(&capabilities[i], task, wait_foreign);
         }
     }
@@ -2201,6 +2219,7 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
     //
     if (tso->flags & TSO_SQUEEZED) {
+        unlockTSO(tso);
         return tso;
     }
     // #3677: In a stack overflow situation, stack squeezing may
@@ -2342,7 +2361,7 @@ threadStackUnderflow (Capability *cap, Task *task, StgTSO *tso)
     // list. The new TSO is not yet on the mutable list, so we better
     // put it there.
     new_tso->dirty = 0;
-    new_tso->flags &= !TSO_LINK_DIRTY;
+    new_tso->flags &= ~TSO_LINK_DIRTY;
     dirty_TSO(cap, new_tso);

     debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
@@ -2545,7 +2564,8 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
             SET_HDR(raise_closure, &stg_raise_info, CCCS);
             raise_closure->payload[0] = exception;
         }
-        UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
+        UPD_IND(cap, ((StgUpdateFrame *)p)->updatee,
+                (StgClosure *)raise_closure);
         p = next;
         continue;
@@ -2655,14 +2675,14 @@ resurrectThreads (StgTSO *threads)
 {
     StgTSO *tso, *next;
     Capability *cap;
-    step *step;
+    generation *gen;

     for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
         next = tso->global_link;
-        step = Bdescr((P_)tso)->step;
-        tso->global_link = step->threads;
-        step->threads = tso;
+        gen = Bdescr((P_)tso)->gen;
+        tso->global_link = gen->threads;
+        gen->threads = tso;

         debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);
@@ -2719,7 +2739,7 @@ performPendingThrowTos (StgTSO *threads)
     StgTSO *tso, *next;
     Capability *cap;
     Task *task, *saved_task;;
-    step *step;
+    generation *gen;

     task = myTask();
     cap = task->cap;
@@ -2727,9 +2747,9 @@ performPendingThrowTos (StgTSO *threads)

     for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
         next = tso->global_link;
-        step = Bdescr((P_)tso)->step;
-        tso->global_link = step->threads;
-        step->threads = tso;
+        gen = Bdescr((P_)tso)->gen;
+        tso->global_link = gen->threads;
+        gen->threads = tso;

         debugTrace(DEBUG_sched, "performing blocked throwTo to thread %lu", (unsigned long)tso->id);
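
Note, not part of the patch: the recurring mechanical change in this diff is the removal of the storage manager's "step" layer, so the per-step thread lists (all_steps[s].threads, indexed up to total_steps) become per-generation lists (generations[g].threads, indexed up to RtsFlags.GcFlags.generations). Below is a minimal sketch of the traversal idiom the patch converges on in forkProcess and deleteAllThreads. It assumes the RTS-internal environment of rts/Schedule.c; every name except the hypothetical helper forEachLiveThread is taken from the diff itself.

/* Hypothetical helper, for illustration only: walk every TSO in the
 * system by iterating the per-generation thread lists that replace
 * the old all_steps[s].threads lists in this patch. */
static void
forEachLiveThread (Capability *cap, void (*visit)(Capability *, StgTSO *))
{
    nat g;
    StgTSO *t, *next;

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        // one threads list per generation, chained through global_link
        for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
            if (t->what_next == ThreadRelocated) {
                // a relocated TSO is just a forwarding pointer to its
                // new incarnation, reached via _link; skip, don't visit
                next = t->_link;
            } else {
                next = t->global_link;
                visit(cap, t);
            }
        }
    }
}

The ThreadRelocated case matters: following global_link on a forwarding TSO (or visiting it) would act on a stale object, which is why both rewritten loops in the patch branch on what_next before choosing the next pointer.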