X-Git-Url: http://git.megacz.com/?p=ghc-hetmet.git;a=blobdiff_plain;f=rts%2FSchedule.c;h=f67fcd98571f93292592dba50cea35af430db9a9;hp=11b9f87d592579dfc4ffda7f23d7f1874abb84e4;hb=f4b8374442162769ea87c24bf90a0f86a2fdd9b7;hpb=b1953bbb1ed3cb16497e5447db7487f0c2d9e41a diff --git a/rts/Schedule.c b/rts/Schedule.c index 11b9f87..f67fcd9 100644 --- a/rts/Schedule.c +++ b/rts/Schedule.c @@ -11,7 +11,6 @@ #include "SchedAPI.h" #include "RtsUtils.h" #include "RtsFlags.h" -#include "BlockAlloc.h" #include "OSThreads.h" #include "Storage.h" #include "StgRun.h" @@ -52,6 +51,7 @@ #include "Trace.h" #include "RaiseAsync.h" #include "Threads.h" +#include "ThrIOManager.h" #ifdef HAVE_SYS_TYPES_H #include @@ -218,11 +218,9 @@ static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task, StgTSO *t ); static rtsBool scheduleDoHeapProfile(rtsBool ready_to_gc); static Capability *scheduleDoGC(Capability *cap, Task *task, - rtsBool force_major, - void (*get_roots)(evac_fn)); + rtsBool force_major); static rtsBool checkBlackHoles(Capability *cap); -static void AllRoots(evac_fn evac); static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso); @@ -421,7 +419,7 @@ schedule (Capability *initialCapability, Task *task) discardSparksCap(cap); #endif /* scheduleDoGC() deletes all the threads */ - cap = scheduleDoGC(cap,task,rtsFalse,GetRoots); + cap = scheduleDoGC(cap,task,rtsFalse); break; case SCHED_SHUTTING_DOWN: debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN"); @@ -535,11 +533,11 @@ schedule (Capability *initialCapability, Task *task) if (bound) { if (bound == task) { debugTrace(DEBUG_sched, - "### Running thread %d in bound thread", t->id); + "### Running thread %lu in bound thread", (unsigned long)t->id); // yes, the Haskell thread is bound to the current native thread } else { debugTrace(DEBUG_sched, - "### thread %d bound to another OS thread", t->id); + "### thread %lu bound to another OS thread", (unsigned long)t->id); // no, bound to a different Haskell thread: pass to that thread pushOnRunQueue(cap,t); continue; @@ -548,7 +546,7 @@ schedule (Capability *initialCapability, Task *task) // The thread we want to run is unbound. if (task->tso) { debugTrace(DEBUG_sched, - "### this OS thread cannot run thread %d", t->id); + "### this OS thread cannot run thread %lu", (unsigned long)t->id); // no, the current native thread is bound to a different // Haskell thread, so pass it to any worker thread pushOnRunQueue(cap,t); @@ -649,8 +647,8 @@ run_thread: // immediately and return to normaility. 
if (ret == ThreadBlocked) { debugTrace(DEBUG_sched, - "--<< thread %d (%s) stopped: blocked", - t->id, whatNext_strs[t->what_next]); + "--<< thread %lu (%s) stopped: blocked", + (unsigned long)t->id, whatNext_strs[t->what_next]); continue; } #endif @@ -701,7 +699,7 @@ run_thread: if (scheduleDoHeapProfile(ready_to_gc)) { ready_to_gc = rtsFalse; } if (ready_to_gc) { - cap = scheduleDoGC(cap,task,rtsFalse,GetRoots); + cap = scheduleDoGC(cap,task,rtsFalse); } } /* end of while() */ @@ -812,7 +810,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS, prev->link = t; prev = t; } else { - debugTrace(DEBUG_sched, "pushing thread %d to capability %d", t->id, free_caps[i]->no); + debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no); appendToRunQueue(free_caps[i],t); if (t->bound) { t->bound->cap = free_caps[i]; } t->cap = free_caps[i]; @@ -852,7 +850,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS, * Start any pending signal handlers * ------------------------------------------------------------------------- */ -#if defined(RTS_USER_SIGNALS) && (!defined(THREADED_RTS) || defined(mingw32_HOST_OS)) +#if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS) static void scheduleStartSignalHandlers(Capability *cap) { @@ -968,13 +966,13 @@ scheduleDetectDeadlock (Capability *cap, Task *task) // they are unreachable and will therefore be sent an // exception. Any threads thus released will be immediately // runnable. - cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/, GetRoots); + cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/); recent_activity = ACTIVITY_DONE_GC; if ( !emptyRunQueue(cap) ) return; -#if defined(RTS_USER_SIGNALS) && (!defined(THREADED_RTS) || defined(mingw32_HOST_OS)) +#if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS) /* If we have user-installed signal handlers, then wait * for signals to arrive rather then bombing out with a * deadlock. @@ -1770,18 +1768,19 @@ scheduleHandleThreadBlocked( StgTSO *t // has tidied up its stack and placed itself on whatever queue // it needs to be on. -#if !defined(THREADED_RTS) - ASSERT(t->why_blocked != NotBlocked); - // This might not be true under THREADED_RTS: we don't have - // exclusive access to this TSO, so someone might have - // woken it up by now. This actually happens: try - // conc023 +RTS -N2. -#endif + // ASSERT(t->why_blocked != NotBlocked); + // Not true: for example, + // - in THREADED_RTS, the thread may already have been woken + // up by another Capability. This actually happens: try + // conc023 +RTS -N2. + // - the thread may have woken itself up already, because + // threadPaused() might have raised a blocked throwTo + // exception, see maybePerformBlockedException(). #ifdef DEBUG if (traceClass(DEBUG_sched)) { - debugTraceBegin("--<< thread %d (%s) stopped: ", - t->id, whatNext_strs[t->what_next]); + debugTraceBegin("--<< thread %lu (%s) stopped: ", + (unsigned long)t->id, whatNext_strs[t->what_next]); printThreadBlockage(t); debugTraceEnd(); } @@ -1807,8 +1806,11 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t) * We also end up here if the thread kills itself with an * uncaught exception, see Exception.cmm. 
*/ - debugTrace(DEBUG_sched, "--++ thread %d (%s) finished", - t->id, whatNext_strs[t->what_next]); + debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished", + (unsigned long)t->id, whatNext_strs[t->what_next]); + + /* Inform the Hpc that a thread has finished */ + hs_hpc_event("Thread Finished",t); #if defined(GRAN) endThread(t, CurrentProc); // clean-up the thread @@ -1929,7 +1931,7 @@ scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED ) scheduleCheckBlackHoles(&MainCapability); debugTrace(DEBUG_sched, "garbage collecting before heap census"); - GarbageCollect(GetRoots, rtsTrue); + GarbageCollect(rtsTrue); debugTrace(DEBUG_sched, "performing heap census"); heapCensus(); @@ -1946,8 +1948,7 @@ scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED ) * -------------------------------------------------------------------------- */ static Capability * -scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, - rtsBool force_major, void (*get_roots)(evac_fn)) +scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major) { StgTSO *t; #ifdef THREADED_RTS @@ -2066,7 +2067,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, #if defined(THREADED_RTS) debugTrace(DEBUG_sched, "doing GC"); #endif - GarbageCollect(get_roots, force_major); + GarbageCollect(force_major); #if defined(THREADED_RTS) // release our stash of capabilities. @@ -2294,8 +2295,8 @@ suspendThread (StgRegTable *reg) tso = cap->r.rCurrentTSO; debugTrace(DEBUG_sched, - "thread %d did a safe foreign call", - cap->r.rCurrentTSO->id); + "thread %lu did a safe foreign call", + (unsigned long)cap->r.rCurrentTSO->id); // XXX this might not be necessary --SDM tso->what_next = ThreadRunGHC; @@ -2325,7 +2326,7 @@ suspendThread (StgRegTable *reg) /* Preparing to leave the RTS, so ensure there's a native thread/task waiting to take over. */ - debugTrace(DEBUG_sched, "thread %d: leaving RTS", tso->id); + debugTrace(DEBUG_sched, "thread %lu: leaving RTS", (unsigned long)tso->id); #endif errno = saved_errno; @@ -2353,7 +2354,7 @@ resumeThread (void *task_) tso = task->suspended_tso; task->suspended_tso = NULL; tso->link = END_TSO_QUEUE; - debugTrace(DEBUG_sched, "thread %d: re-entering RTS", tso->id); + debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id); if (tso->why_blocked == BlockedOnCCall) { awakenBlockedExceptionQueue(cap,tso); @@ -2429,7 +2430,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap) appendToRunQueue(cap,tso); - debugTrace(DEBUG_sched, "new bound thread (%d)", tso->id); + debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id); #if defined(GRAN) /* GranSim specific init */ @@ -2443,7 +2444,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap) ASSERT(task->stat != NoStatus); ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); - debugTrace(DEBUG_sched, "bound thread (%d) finished", task->tso->id); + debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id); return cap; } @@ -2509,9 +2510,6 @@ initScheduler(void) context_switch = 0; sched_state = SCHED_RUNNING; - RtsFlags.ConcFlags.ctxtSwitchTicks = - RtsFlags.ConcFlags.ctxtSwitchTime / TICK_MILLISECS; - #if defined(THREADED_RTS) /* Initialise the mutex and condition variables used by * the scheduler. */ @@ -2570,7 +2568,7 @@ exitScheduler( void ) // If we haven't killed all the threads yet, do it now. 
if (sched_state < SCHED_SHUTTING_DOWN) { sched_state = SCHED_INTERRUPTING; - scheduleDoGC(NULL,task,rtsFalse,GetRoots); + scheduleDoGC(NULL,task,rtsFalse); } sched_state = SCHED_SHUTTING_DOWN; @@ -2584,6 +2582,20 @@ exitScheduler( void ) boundTaskExiting(task); stopTaskManager(); } +#else + freeCapability(&MainCapability); +#endif +} + +void +freeScheduler( void ) +{ + freeTaskManager(); + if (n_capabilities != 1) { + stgFree(capabilities); + } +#if defined(THREADED_RTS) + closeMutex(&sched_mutex); #endif } @@ -2640,7 +2652,7 @@ GetRoots( evac_fn evac ) for (task = cap->suspended_ccalling_tasks; task != NULL; task=task->next) { debugTrace(DEBUG_sched, - "evac'ing suspended TSO %d", task->suspended_tso->id); + "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id); evac((StgClosure **)(void *)&task->suspended_tso); } @@ -2672,17 +2684,10 @@ GetRoots( evac_fn evac ) This is the interface to the garbage collector from Haskell land. We provide this so that external C code can allocate and garbage collect when called from Haskell via _ccall_GC. - - It might be useful to provide an interface whereby the programmer - can specify more roots (ToDo). - - This needs to be protected by the GC condition variable above. KH. -------------------------------------------------------------------------- */ -static void (*extra_roots)(evac_fn); - static void -performGC_(rtsBool force_major, void (*get_roots)(evac_fn)) +performGC_(rtsBool force_major) { Task *task; // We must grab a new Task here, because the existing Task may be @@ -2691,34 +2696,20 @@ performGC_(rtsBool force_major, void (*get_roots)(evac_fn)) ACQUIRE_LOCK(&sched_mutex); task = newBoundTask(); RELEASE_LOCK(&sched_mutex); - scheduleDoGC(NULL,task,force_major, get_roots); + scheduleDoGC(NULL,task,force_major); boundTaskExiting(task); } void performGC(void) { - performGC_(rtsFalse, GetRoots); + performGC_(rtsFalse); } void performMajorGC(void) { - performGC_(rtsTrue, GetRoots); -} - -static void -AllRoots(evac_fn evac) -{ - GetRoots(evac); // the scheduler's roots - extra_roots(evac); // the user's roots -} - -void -performGCWithRoots(void (*get_roots)(evac_fn)) -{ - extra_roots = get_roots; - performGC_(rtsFalse, AllRoots); + performGC_(rtsTrue); } /* ----------------------------------------------------------------------------- @@ -2847,17 +2838,10 @@ void wakeUpRts(void) { #if defined(THREADED_RTS) -#if !defined(mingw32_HOST_OS) // This forces the IO Manager thread to wakeup, which will // in turn ensure that some OS thread wakes up and runs the // scheduler loop, which will cause a GC and deadlock check. ioManagerWakeup(); -#else - // On Windows this might be safe enough, because we aren't - // in a signal handler. Later we should use the IO Manager, - // though. - prodOneCapability(); -#endif #endif } @@ -3041,8 +3025,9 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception) This should either be a CATCH_RETRY_FRAME (if the retry# is within an orElse#) or should be a ATOMICALLY_FRAME (if the retry# reaches the top level). - We skip CATCH_STM_FRAMEs because retries are not considered to be exceptions, - despite the similar implementation. + We skip CATCH_STM_FRAMEs (aborting and rolling back the nested tx that they + create) because retries are not considered to be exceptions, despite the + similar implementation. We should not expect to see CATCH_FRAME or STOP_FRAME because those should not be created within memory transactions. 
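As a Haskell-level illustration of the frames this search can meet (not part of the patch; a minimal sketch assuming the Control.Concurrent.STM API, with invented function names): orElse# pushes a CATCH_RETRY_FRAME, atomically# an ATOMICALLY_FRAME, and catchSTM# a CATCH_STM_FRAME. A retry raised in the left branch below stops at the orElse frame; a retry raised inside the action protected by a catchSTM would instead hit a CATCH_STM_FRAME, which the new case in the hunk that follows aborts and rolls back before continuing the search outwards.

    import Control.Concurrent.STM

    -- Blocks until the TVar holds True; the retry# behind 'retry' is
    -- what starts the stack walk in findRetryFrameHelper.
    waitForFlag :: TVar Bool -> STM ()
    waitForFlag tv = do
        ok <- readTVar tv
        if ok then return () else retry

    -- The retry from the left branch is caught by the CATCH_RETRY_FRAME
    -- pushed by orElse, so the right branch runs; if both branches
    -- retried, the search would reach the ATOMICALLY_FRAME and the
    -- whole transaction would block.
    example :: TVar Bool -> IO ()
    example tv = atomically (waitForFlag tv `orElse` return ())
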
@@ -3062,7 +3047,7 @@ findRetryFrameHelper (StgTSO *tso) case ATOMICALLY_FRAME: debugTrace(DEBUG_stm, - "found ATOMICALLY_FRAME at %p during retrry", p); + "found ATOMICALLY_FRAME at %p during retry", p); tso->sp = p; return ATOMICALLY_FRAME; @@ -3072,7 +3057,20 @@ findRetryFrameHelper (StgTSO *tso) tso->sp = p; return CATCH_RETRY_FRAME; - case CATCH_STM_FRAME: + case CATCH_STM_FRAME: { + debugTrace(DEBUG_stm, + "found CATCH_STM_FRAME at %p during retry", p); + StgTRecHeader *trec = tso -> trec; + StgTRecHeader *outer = stmGetEnclosingTRec(trec); + debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer); + stmAbortTransaction(tso -> cap, trec); + stmFreeAbortedTRec(tso -> cap, trec); + tso -> trec = outer; + p = next; + continue; + } + + default: ASSERT(info->i.type != CATCH_FRAME); ASSERT(info->i.type != STOP_FRAME); @@ -3102,7 +3100,7 @@ resurrectThreads (StgTSO *threads) next = tso->global_link; tso->global_link = all_threads; all_threads = tso; - debugTrace(DEBUG_sched, "resurrecting thread %d", tso->id); + debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id); // Wake up the thread on the Capability it was last on cap = tso->cap;
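A consequence of dropping GetRoots from the GC entry points earlier in this patch is that performGCWithRoots is gone, so user code can no longer hand extra roots to the collector; the remaining "Haskell land" interface is a parameterless collection request. Below is a minimal sketch (not part of the patch; assuming System.Mem and Foreign.StablePtr from base, with an invented helper name) of one substitute: keeping a value alive via a StablePtr, which the collector scans as a root until it is freed.

    import Foreign.StablePtr (newStablePtr, freeStablePtr)
    import System.Mem (performGC)

    -- Keep 'v' reachable for the duration of 'act' without registering
    -- an extra root with the RTS, then request a collection once the
    -- StablePtr (and hence the root) is gone.
    withPinnedRoot :: a -> IO b -> IO b
    withPinnedRoot v act = do
        sp <- newStablePtr v
        r  <- act
        freeStablePtr sp
        performGC
        return r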