X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSchedule.c;h=8ab964dcb37134677fbd5607f1e7867de694f144;hb=b7ea7671c442a0223f34593dc8a1182b15dde0bf;hp=49e25be3292088327c4ecd1eed0629cb29ffe01f;hpb=88b35c172f9434fd98b700f706074d142914a8bb;p=ghc-hetmet.git diff --git a/rts/Schedule.c b/rts/Schedule.c index 49e25be..8ab964d 100644 --- a/rts/Schedule.c +++ b/rts/Schedule.c @@ -7,11 +7,11 @@ * --------------------------------------------------------------------------*/ #include "PosixSource.h" +#define KEEP_LOCKCLOSURE #include "Rts.h" #include "SchedAPI.h" #include "RtsUtils.h" #include "RtsFlags.h" -#include "BlockAlloc.h" #include "OSThreads.h" #include "Storage.h" #include "StgRun.h" @@ -29,19 +29,11 @@ #include "ThreadLabels.h" #include "LdvProfile.h" #include "Updates.h" -#ifdef PROFILING #include "Proftimer.h" #include "ProfHeap.h" -#endif -#if defined(GRAN) || defined(PARALLEL_HASKELL) -# include "GranSimRts.h" -# include "GranSim.h" -# include "ParallelRts.h" -# include "Parallel.h" -# include "ParallelDebug.h" -# include "FetchMe.h" -# include "HLC.h" -#endif + +/* PARALLEL_HASKELL includes go here */ + #include "Sparks.h" #include "Capability.h" #include "Task.h" @@ -52,6 +44,7 @@ #include "Trace.h" #include "RaiseAsync.h" #include "Threads.h" +#include "ThrIOManager.h" #ifdef HAVE_SYS_TYPES_H #include @@ -78,28 +71,6 @@ * Global variables * -------------------------------------------------------------------------- */ -#if defined(GRAN) - -StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */ -/* rtsTime TimeOfNextEvent, EndOfTimeSlice; now in GranSim.c */ - -/* - In GranSim we have a runnable and a blocked queue for each processor. - In order to minimise code changes new arrays run_queue_hds/tls - are created. run_queue_hd is then a short cut (macro) for - run_queue_hds[CurrentProc] (see GranSim.h). - -- HWL -*/ -StgTSO *run_queue_hds[MAX_PROC], *run_queue_tls[MAX_PROC]; -StgTSO *blocked_queue_hds[MAX_PROC], *blocked_queue_tls[MAX_PROC]; -StgTSO *ccalling_threadss[MAX_PROC]; -/* We use the same global list of threads (all_threads) in GranSim as in - the std RTS (i.e. we are cheating). However, we don't use this list in - the GranSim specific code at the moment (so we are only potentially - cheating). */ - -#else /* !GRAN */ - #if !defined(THREADED_RTS) // Blocked/sleeping thrads StgTSO *blocked_queue_hd = NULL; @@ -111,7 +82,6 @@ StgTSO *sleeping_queue = NULL; // perhaps replace with a hash table? * LOCK: sched_mutex+capability, or all capabilities */ StgTSO *blackhole_queue = NULL; -#endif /* The blackhole_queue should be checked for threads to wake up. See * Schedule.h for more thorough comment. @@ -119,17 +89,6 @@ StgTSO *blackhole_queue = NULL; */ rtsBool blackholes_need_checking = rtsFalse; -/* Linked list of all threads. - * Used for detecting garbage collected threads. - * LOCK: sched_mutex+capability, or all capabilities - */ -StgTSO *all_threads = NULL; - -/* flag set by signal handler to precipitate a context switch - * LOCK: none (just an advisory flag) - */ -int context_switch = 0; - /* flag that tracks whether we have done any execution in this time slice. * LOCK: currently none, perhaps we should lock (but needs to be * updated in the fast path of the scheduler). @@ -141,10 +100,6 @@ nat recent_activity = ACTIVITY_YES; */ rtsBool sched_state = SCHED_RUNNING; -#if defined(GRAN) -StgTSO *CurrentTSO; -#endif - /* This is used in `TSO.h' and gcc 2.96 insists that this variable actually * exists - earlier gccs apparently didn't. 
* -= chak @@ -166,12 +121,6 @@ rtsBool shutting_down_scheduler = rtsFalse; Mutex sched_mutex; #endif -#if defined(PARALLEL_HASKELL) -StgTSO *LastTSO; -rtsTime TimeOfLastYield; -rtsBool emitSchedule = rtsTrue; -#endif - #if !defined(mingw32_HOST_OS) #define FORKPROCESS_PRIMOP_SUPPORTED #endif @@ -188,26 +137,24 @@ static Capability *schedule (Capability *initialCapability, Task *task); // scheduler clearer. // static void schedulePreLoop (void); +static void scheduleFindWork (Capability *cap); #if defined(THREADED_RTS) -static void schedulePushWork(Capability *cap, Task *task); +static void scheduleYield (Capability **pcap, Task *task); #endif static void scheduleStartSignalHandlers (Capability *cap); static void scheduleCheckBlockedThreads (Capability *cap); static void scheduleCheckWakeupThreads(Capability *cap USED_IF_NOT_THREADS); static void scheduleCheckBlackHoles (Capability *cap); static void scheduleDetectDeadlock (Capability *cap, Task *task); -#if defined(GRAN) -static StgTSO *scheduleProcessEvent(rtsEvent *event); -#endif +static void schedulePushWork(Capability *cap, Task *task); #if defined(PARALLEL_HASKELL) -static StgTSO *scheduleSendPendingMessages(void); -static void scheduleActivateSpark(void); -static rtsBool scheduleGetRemoteWork(rtsBool *receivedFinish); +static rtsBool scheduleGetRemoteWork(Capability *cap); +static void scheduleSendPendingMessages(void); #endif -#if defined(PAR) || defined(GRAN) -static void scheduleGranParReport(void); +#if defined(PARALLEL_HASKELL) || defined(THREADED_RTS) +static void scheduleActivateSpark(Capability *cap); #endif -static void schedulePostRunThread(void); +static void schedulePostRunThread(Capability *cap, StgTSO *t); static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t ); static void scheduleHandleStackOverflow( Capability *cap, Task *task, StgTSO *t); @@ -216,15 +163,14 @@ static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t, static void scheduleHandleThreadBlocked( StgTSO *t ); static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task, StgTSO *t ); -static rtsBool scheduleDoHeapProfile(rtsBool ready_to_gc); +static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc); static Capability *scheduleDoGC(Capability *cap, Task *task, - rtsBool force_major, - void (*get_roots)(evac_fn)); + rtsBool force_major); static rtsBool checkBlackHoles(Capability *cap); -static void AllRoots(evac_fn evac); static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso); +static StgTSO *threadStackUnderflow(Task *task, StgTSO *tso); static void deleteThread (Capability *cap, StgTSO *tso); static void deleteAllThreads (Capability *cap); @@ -233,11 +179,6 @@ static void deleteAllThreads (Capability *cap); static void deleteThread_(Capability *cap, StgTSO *tso); #endif -#if defined(PARALLEL_HASKELL) -StgTSO * createSparkThread(rtsSpark spark); -StgTSO * activateSpark (rtsSpark spark); -#endif - #ifdef DEBUG static char *whatNext_strs[] = { "(unknown)", @@ -287,6 +228,7 @@ addToRunQueue( Capability *cap, StgTSO *t ) This revolves around the global event queue, which determines what to do next. Therefore, it's more complicated than either the concurrent or the parallel (GUM) setup. + This version has been entirely removed (JB 2008/08). GUM version: GUM iterates over incoming messages. @@ -297,6 +239,12 @@ addToRunQueue( Capability *cap, StgTSO *t ) (see PendingFetches). This is not the ugliest code you could imagine, but it's bloody close. 
+ (JB 2008/08) This version was formerly indicated by a PP-Flag PAR, + now by PP-flag PARALLEL_HASKELL. The Eden RTS (in GHC-6.x) uses it, + as well as future GUM versions. This file has been refurbished to + only contain valid code, which is however incomplete, refers to + invalid includes etc. + ------------------------------------------------------------------------ */ static Capability * @@ -305,15 +253,8 @@ schedule (Capability *initialCapability, Task *task) StgTSO *t; Capability *cap; StgThreadReturnCode ret; -#if defined(GRAN) - rtsEvent *event; -#elif defined(PARALLEL_HASKELL) - StgTSO *tso; - GlobalTaskId pe; +#if defined(PARALLEL_HASKELL) rtsBool receivedFinish = rtsFalse; -# if defined(DEBUG) - nat tp_size, sp_size; // stats only -# endif #endif nat prev_what_next; rtsBool ready_to_gc; @@ -338,37 +279,12 @@ schedule (Capability *initialCapability, Task *task) #if defined(PARALLEL_HASKELL) #define TERMINATION_CONDITION (!receivedFinish) -#elif defined(GRAN) -#define TERMINATION_CONDITION ((event = get_next_event()) != (rtsEvent*)NULL) #else #define TERMINATION_CONDITION rtsTrue #endif while (TERMINATION_CONDITION) { -#if defined(GRAN) - /* Choose the processor with the next event */ - CurrentProc = event->proc; - CurrentTSO = event->tso; -#endif - -#if defined(THREADED_RTS) - if (first) { - // don't yield the first time, we want a chance to run this - // thread for a bit, even if there are others banging at the - // door. - first = rtsFalse; - ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); - } else { - // Yield the capability to higher-priority tasks if necessary. - yieldCapability(&cap, task); - } -#endif - -#if defined(THREADED_RTS) - schedulePushWork(cap,task); -#endif - // Check whether we have re-entered the RTS from Haskell without // going via suspendThread()/resumeThread (i.e. a 'safe' foreign // call). @@ -421,7 +337,7 @@ schedule (Capability *initialCapability, Task *task) discardSparksCap(cap); #endif /* scheduleDoGC() deletes all the threads */ - cap = scheduleDoGC(cap,task,rtsFalse,GetRoots); + cap = scheduleDoGC(cap,task,rtsFalse); break; case SCHED_SHUTTING_DOWN: debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN"); @@ -436,35 +352,29 @@ schedule (Capability *initialCapability, Task *task) barf("sched_state: %d", sched_state); } -#if defined(THREADED_RTS) - // If the run queue is empty, take a spark and turn it into a thread. - { - if (emptyRunQueue(cap)) { - StgClosure *spark; - spark = findSpark(cap); - if (spark != NULL) { - debugTrace(DEBUG_sched, - "turning spark of closure %p into a thread", - (StgClosure *)spark); - createSparkThread(cap,spark); - } - } - } -#endif // THREADED_RTS + scheduleFindWork(cap); - scheduleStartSignalHandlers(cap); + /* work pushing, currently relevant only for THREADED_RTS: + (pushes threads, wakes up idle capabilities for stealing) */ + schedulePushWork(cap,task); - // Only check the black holes here if we've nothing else to do. - // During normal execution, the black hole list only gets checked - // at GC time, to avoid repeatedly traversing this possibly long - // list each time around the scheduler. - if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); } +#if defined(PARALLEL_HASKELL) + /* since we perform a blocking receive and continue otherwise, + either we never reach here or we definitely have work! */ + // from here: non-empty run queue + ASSERT(!emptyRunQueue(cap)); - scheduleCheckWakeupThreads(cap); + if (PacketsWaiting()) { /* now process incoming messages, if any + pending... 
- scheduleCheckBlockedThreads(cap); + CAUTION: scheduleGetRemoteWork called + above, waits for messages as well! */ + processMessages(cap, &receivedFinish); + } +#endif // PARALLEL_HASKELL: non-empty run queue! scheduleDetectDeadlock(cap,task); + #if defined(THREADED_RTS) cap = task->cap; // reload cap, it might have changed #endif @@ -477,54 +387,37 @@ schedule (Capability *initialCapability, Task *task) // // win32: might be here due to awaitEvent() being abandoned // as a result of a console event having been delivered. - if ( emptyRunQueue(cap) ) { -#if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS) - ASSERT(sched_state >= SCHED_INTERRUPTING); -#endif - continue; // nothing to do + +#if defined(THREADED_RTS) + if (first) + { + // XXX: ToDo + // // don't yield the first time, we want a chance to run this + // // thread for a bit, even if there are others banging at the + // // door. + // first = rtsFalse; + // ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); } -#if defined(PARALLEL_HASKELL) - scheduleSendPendingMessages(); - if (emptyRunQueue(cap) && scheduleActivateSpark()) - continue; - -#if defined(SPARKS) - ASSERT(next_fish_to_send_at==0); // i.e. no delayed fishes left! + yield: + scheduleYield(&cap,task); + if (emptyRunQueue(cap)) continue; // look for work again #endif - /* If we still have no work we need to send a FISH to get a spark - from another PE */ - if (emptyRunQueue(cap)) { - if (!scheduleGetRemoteWork(&receivedFinish)) continue; - ASSERT(rtsFalse); // should not happen at the moment - } - // from here: non-empty run queue. - // TODO: merge above case with this, only one call processMessages() ! - if (PacketsWaiting()) { /* process incoming messages, if - any pending... only in else - because getRemoteWork waits for - messages as well */ - receivedFinish = processMessages(); +#if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS) + if ( emptyRunQueue(cap) ) { + ASSERT(sched_state >= SCHED_INTERRUPTING); } #endif -#if defined(GRAN) - scheduleProcessEvent(event); -#endif - // // Get a thread to run // t = popRunQueue(cap); -#if defined(GRAN) || defined(PAR) - scheduleGranParReport(); // some kind of debuging output -#else // Sanity check the thread we're about to run. This can be // expensive if there is lots of thread switching going on... IF_DEBUG(sanity,checkTSO(t)); -#endif #if defined(THREADED_RTS) // Check whether we can run this thread in the current task. @@ -558,25 +451,26 @@ schedule (Capability *initialCapability, Task *task) } #endif - cap->r.rCurrentTSO = t; - /* context switches are initiated by the timer signal, unless * the user specified "context switch as often as possible", with * +RTS -C0 */ if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0 && !emptyThreadQueues(cap)) { - context_switch = 1; + cap->context_switch = 1; } run_thread: + // CurrentTSO is the thread to run. t might be different if we + // loop back to run_thread, so make sure to set CurrentTSO after + // that. + cap->r.rCurrentTSO = t; + debugTrace(DEBUG_sched, "-->> running thread %ld %s ...", (long)t->id, whatNext_strs[t->what_next]); -#if defined(PROFILING) startHeapProfTimer(); -#endif // Check for exceptions blocked on this thread maybePerformBlockedException (cap, t); @@ -586,15 +480,32 @@ run_thread: ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); ASSERT(t->cap == cap); + ASSERT(t->bound ? 
t->bound->cap == cap : 1); prev_what_next = t->what_next; errno = t->saved_errno; +#if mingw32_HOST_OS + SetLastError(t->saved_winerror); +#endif + cap->in_haskell = rtsTrue; - dirtyTSO(t); + dirty_TSO(cap,t); - recent_activity = ACTIVITY_YES; +#if defined(THREADED_RTS) + if (recent_activity == ACTIVITY_DONE_GC) { + // ACTIVITY_DONE_GC means we turned off the timer signal to + // conserve power (see #1623). Re-enable it here. + nat prev; + prev = xchg((P_)&recent_activity, ACTIVITY_YES); + if (prev == ACTIVITY_DONE_GC) { + startTimer(); + } + } else { + recent_activity = ACTIVITY_YES; + } +#endif switch (prev_what_next) { @@ -639,6 +550,10 @@ run_thread: // XXX: possibly bogus for SMP because this thread might already // be running again, see code below. t->saved_errno = errno; +#if mingw32_HOST_OS + // Similarly for Windows error code + t->saved_winerror = GetLastError(); +#endif #if defined(THREADED_RTS) // If ret is ThreadBlocked, and this Task is bound to the TSO that @@ -651,7 +566,7 @@ run_thread: debugTrace(DEBUG_sched, "--<< thread %lu (%s) stopped: blocked", (unsigned long)t->id, whatNext_strs[t->what_next]); - continue; + goto yield; } #endif @@ -661,12 +576,14 @@ run_thread: // ---------------------------------------------------------------------- // Costs for the scheduler are assigned to CCS_SYSTEM -#if defined(PROFILING) stopHeapProfTimer(); +#if defined(PROFILING) CCCS = CCS_SYSTEM; #endif - schedulePostRunThread(); + schedulePostRunThread(cap,t); + + t = threadStackUnderflow(task,t); ready_to_gc = rtsFalse; @@ -699,14 +616,10 @@ run_thread: barf("schedule: invalid thread return code %d", (int)ret); } - if (scheduleDoHeapProfile(ready_to_gc)) { ready_to_gc = rtsFalse; } - if (ready_to_gc) { - cap = scheduleDoGC(cap,task,rtsFalse,GetRoots); + if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) { + cap = scheduleDoGC(cap,task,rtsFalse); } } /* end of while() */ - - debugTrace(PAR_DEBUG_verbose, - "== Leaving schedule() after having received Finish"); } /* ---------------------------------------------------------------------------- @@ -716,36 +629,122 @@ run_thread: static void schedulePreLoop(void) { -#if defined(GRAN) - /* set up first event to get things going */ - /* ToDo: assign costs for system setup and init MainTSO ! */ - new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc], - ContinueThread, - CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL); - - debugTrace (DEBUG_gran, - "GRAN: Init CurrentTSO (in schedule) = %p", - CurrentTSO); - IF_DEBUG(gran, G_TSO(CurrentTSO, 5)); + // initialisation for scheduler - what cannot go into initScheduler() +} + +/* ----------------------------------------------------------------------------- + * scheduleFindWork() + * + * Search for work to do, and handle messages from elsewhere. + * -------------------------------------------------------------------------- */ + +static void +scheduleFindWork (Capability *cap) +{ + scheduleStartSignalHandlers(cap); + + // Only check the black holes here if we've nothing else to do. + // During normal execution, the black hole list only gets checked + // at GC time, to avoid repeatedly traversing this possibly long + // list each time around the scheduler. 
+ if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); } + + scheduleCheckWakeupThreads(cap); + + scheduleCheckBlockedThreads(cap); + +#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL) + // Try to activate one of our own sparks + if (emptyRunQueue(cap)) { scheduleActivateSpark(cap); } +#endif + +#if defined(THREADED_RTS) + // Try to steak work if we don't have any + if (emptyRunQueue(cap)) { stealWork(cap); } +#endif - if (RtsFlags.GranFlags.Light) { - /* Save current time; GranSim Light only */ - CurrentTSO->gran.clock = CurrentTime[CurrentProc]; - } +#if defined(PARALLEL_HASKELL) + // if messages have been buffered... + scheduleSendPendingMessages(); +#endif + +#if defined(PARALLEL_HASKELL) + if (emptyRunQueue(cap)) { + receivedFinish = scheduleGetRemoteWork(cap); + continue; // a new round, (hopefully) with new work + /* + in GUM, this a) sends out a FISH and returns IF no fish is + out already + b) (blocking) awaits and receives messages + + in Eden, this is only the blocking receive, as b) in GUM. + */ + } #endif } +#if defined(THREADED_RTS) +STATIC_INLINE rtsBool +shouldYieldCapability (Capability *cap, Task *task) +{ + // we need to yield this capability to someone else if.. + // - another thread is initiating a GC + // - another Task is returning from a foreign call + // - the thread at the head of the run queue cannot be run + // by this Task (it is bound to another Task, or it is unbound + // and this task it bound). + return (waiting_for_gc || + cap->returning_tasks_hd != NULL || + (!emptyRunQueue(cap) && (task->tso == NULL + ? cap->run_queue_hd->bound != NULL + : cap->run_queue_hd->bound != task))); +} + +// This is the single place where a Task goes to sleep. There are +// two reasons it might need to sleep: +// - there are no threads to run +// - we need to yield this Capability to someone else +// (see shouldYieldCapability()) +// +// The return value indicates whether + +static void +scheduleYield (Capability **pcap, Task *task) +{ + Capability *cap = *pcap; + + // if we have work, and we don't need to give up the Capability, continue. + if (!emptyRunQueue(cap) && !shouldYieldCapability(cap,task)) + return; + + // otherwise yield (sleep), and keep yielding if necessary. + do { + yieldCapability(&cap,task); + } + while (shouldYieldCapability(cap,task)); + + // note there may still be no threads on the run queue at this + // point, the caller has to check. + + *pcap = cap; + return; +} +#endif + /* ----------------------------------------------------------------------------- * schedulePushWork() * * Push work to other Capabilities if we have some. * -------------------------------------------------------------------------- */ -#if defined(THREADED_RTS) static void schedulePushWork(Capability *cap USED_IF_THREADS, Task *task USED_IF_THREADS) { + /* following code not for PARALLEL_HASKELL. I kept the call general, + future GUM versions might use pushing in a distributed setup */ +#if defined(THREADED_RTS) + Capability *free_caps[n_capabilities], *cap0; nat i, n_free_caps; @@ -754,7 +753,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS, // Check whether we have more threads on our run queue, or sparks // in our pool, that we could hand to another Capability. 
- if ((emptyRunQueue(cap) || cap->run_queue_hd->link == END_TSO_QUEUE) + if ((emptyRunQueue(cap) || cap->run_queue_hd->_link == END_TSO_QUEUE) && sparkPoolSizeCap(cap) < 2) { return; } @@ -788,28 +787,33 @@ schedulePushWork(Capability *cap USED_IF_THREADS, StgTSO *prev, *t, *next; rtsBool pushed_to_all; - debugTrace(DEBUG_sched, "excess threads on run queue and %d free capabilities, sharing...", n_free_caps); + debugTrace(DEBUG_sched, + "cap %d: %s and %d free capabilities, sharing...", + cap->no, + (!emptyRunQueue(cap) && cap->run_queue_hd->_link != END_TSO_QUEUE)? + "excess threads on run queue":"sparks to share (>=2)", + n_free_caps); i = 0; pushed_to_all = rtsFalse; if (cap->run_queue_hd != END_TSO_QUEUE) { prev = cap->run_queue_hd; - t = prev->link; - prev->link = END_TSO_QUEUE; + t = prev->_link; + prev->_link = END_TSO_QUEUE; for (; t != END_TSO_QUEUE; t = next) { - next = t->link; - t->link = END_TSO_QUEUE; + next = t->_link; + t->_link = END_TSO_QUEUE; if (t->what_next == ThreadRelocated || t->bound == task // don't move my bound thread || tsoLocked(t)) { // don't move a locked thread - prev->link = t; + setTSOLink(cap, prev, t); prev = t; } else if (i == n_free_caps) { pushed_to_all = rtsTrue; i = 0; // keep one for us - prev->link = t; + setTSOLink(cap, prev, t); prev = t; } else { debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no); @@ -822,6 +826,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS, cap->run_queue_tl = prev; } +#ifdef SPARK_PUSHING + /* JB I left this code in place, it would work but is not necessary */ + // If there are some free capabilities that we didn't push any // threads to, then try to push a spark to each one. if (!pushed_to_all) { @@ -829,7 +836,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS, // i is the next free capability to push to for (; i < n_free_caps; i++) { if (emptySparkPoolCap(free_caps[i])) { - spark = findSpark(cap); + spark = tryStealSpark(cap->sparks); if (spark != NULL) { debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no); newSpark(&(free_caps[i]->r), spark); @@ -837,26 +844,30 @@ schedulePushWork(Capability *cap USED_IF_THREADS, } } } +#endif /* SPARK_PUSHING */ // release the capabilities for (i = 0; i < n_free_caps; i++) { task->cap = free_caps[i]; - releaseCapability(free_caps[i]); + releaseAndWakeupCapability(free_caps[i]); } } task->cap = cap; // reset to point to our Capability. 
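
/* [Editor's note] The s/link/_link/ renaming in the hunk above is what
   forces every mutation of a TSO's link field through setTSOLink(), a
   write barrier for the generational GC.  A minimal sketch of such a
   barrier, assuming a per-TSO dirty flag and a helper that puts the TSO
   on the capability's remembered set -- recordDirtyTSO() is a
   hypothetical name here, not necessarily the real GHC definition:

       STATIC_INLINE void
       setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
       {
           if (tso->dirty == 0) {
               tso->dirty = 1;            // only the first mutation per GC pays
               recordDirtyTSO(cap, tso);  // hypothetical remembered-set helper
           }
           tso->_link = target;
       }

   Without the barrier, an old-generation TSO re-pointed at a
   young-generation one could be missed by a minor collection, which is
   why the bare "prev->link = t" stores above had to go. */
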
+ +#endif /* THREADED_RTS */ + } -#endif /* ---------------------------------------------------------------------------- * Start any pending signal handlers * ------------------------------------------------------------------------- */ -#if defined(RTS_USER_SIGNALS) && (!defined(THREADED_RTS) || defined(mingw32_HOST_OS)) +#if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS) static void scheduleStartSignalHandlers(Capability *cap) { - if (signals_pending()) { // safe outside the lock + if (RtsFlags.MiscFlags.install_signal_handlers && signals_pending()) { + // safe outside the lock startSignalHandlers(cap); } } @@ -904,7 +915,7 @@ scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS) cap->run_queue_hd = cap->wakeup_queue_hd; cap->run_queue_tl = cap->wakeup_queue_tl; } else { - cap->run_queue_tl->link = cap->wakeup_queue_hd; + setTSOLink(cap, cap->run_queue_tl, cap->wakeup_queue_hd); cap->run_queue_tl = cap->wakeup_queue_tl; } cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE; @@ -968,18 +979,20 @@ scheduleDetectDeadlock (Capability *cap, Task *task) // they are unreachable and will therefore be sent an // exception. Any threads thus released will be immediately // runnable. - cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/, GetRoots); + cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/); recent_activity = ACTIVITY_DONE_GC; + // disable timer signals (see #1623) + stopTimer(); if ( !emptyRunQueue(cap) ) return; -#if defined(RTS_USER_SIGNALS) && (!defined(THREADED_RTS) || defined(mingw32_HOST_OS)) +#if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS) /* If we have user-installed signal handlers, then wait * for signals to arrive rather then bombing out with a * deadlock. */ - if ( anyUserHandlers() ) { + if ( RtsFlags.MiscFlags.install_signal_handlers && anyUserHandlers() ) { debugTrace(DEBUG_sched, "still deadlocked, waiting for signals..."); @@ -991,6 +1004,8 @@ scheduleDetectDeadlock (Capability *cap, Task *task) // either we have threads to run, or we were interrupted: ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING); + + return; } #endif @@ -1005,7 +1020,7 @@ scheduleDetectDeadlock (Capability *cap, Task *task) case BlockedOnException: case BlockedOnMVar: throwToSingleThreaded(cap, task->tso, - (StgClosure *)NonTermination_closure); + (StgClosure *)nonTermination_closure); return; default: barf("deadlock: main thread blocked in a strange way"); @@ -1016,164 +1031,15 @@ scheduleDetectDeadlock (Capability *cap, Task *task) } } -/* ---------------------------------------------------------------------------- - * Process an event (GRAN only) - * ------------------------------------------------------------------------- */ - -#if defined(GRAN) -static StgTSO * -scheduleProcessEvent(rtsEvent *event) -{ - StgTSO *t; - - if (RtsFlags.GranFlags.Light) - GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc - - /* adjust time based on time-stamp */ - if (event->time > CurrentTime[CurrentProc] && - event->evttype != ContinueThread) - CurrentTime[CurrentProc] = event->time; - - /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */ - if (!RtsFlags.GranFlags.Light) - handleIdlePEs(); - - IF_DEBUG(gran, debugBelch("GRAN: switch by event-type\n")); - - /* main event dispatcher in GranSim */ - switch (event->evttype) { - /* Should just be continuing execution */ - case ContinueThread: - IF_DEBUG(gran, debugBelch("GRAN: doing ContinueThread\n")); - /* ToDo: check assertion - ASSERT(run_queue_hd != (StgTSO*)NULL && - 
run_queue_hd != END_TSO_QUEUE); - */ - /* Ignore ContinueThreads for fetching threads (if synchr comm) */ - if (!RtsFlags.GranFlags.DoAsyncFetch && - procStatus[CurrentProc]==Fetching) { - debugBelch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]\n", - CurrentTSO->id, CurrentTSO, CurrentProc); - goto next_thread; - } - /* Ignore ContinueThreads for completed threads */ - if (CurrentTSO->what_next == ThreadComplete) { - debugBelch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)\n", - CurrentTSO->id, CurrentTSO, CurrentProc); - goto next_thread; - } - /* Ignore ContinueThreads for threads that are being migrated */ - if (PROCS(CurrentTSO)==Nowhere) { - debugBelch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)\n", - CurrentTSO->id, CurrentTSO, CurrentProc); - goto next_thread; - } - /* The thread should be at the beginning of the run queue */ - if (CurrentTSO!=run_queue_hds[CurrentProc]) { - debugBelch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread\n", - CurrentTSO->id, CurrentTSO, CurrentProc); - break; // run the thread anyway - } - /* - new_event(proc, proc, CurrentTime[proc], - FindWork, - (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL); - goto next_thread; - */ /* Catches superfluous CONTINUEs -- should be unnecessary */ - break; // now actually run the thread; DaH Qu'vam yImuHbej - - case FetchNode: - do_the_fetchnode(event); - goto next_thread; /* handle next event in event queue */ - - case GlobalBlock: - do_the_globalblock(event); - goto next_thread; /* handle next event in event queue */ - - case FetchReply: - do_the_fetchreply(event); - goto next_thread; /* handle next event in event queue */ - - case UnblockThread: /* Move from the blocked queue to the tail of */ - do_the_unblock(event); - goto next_thread; /* handle next event in event queue */ - - case ResumeThread: /* Move from the blocked queue to the tail of */ - /* the runnable queue ( i.e. Qu' SImqa'lu') */ - event->tso->gran.blocktime += - CurrentTime[CurrentProc] - event->tso->gran.blockedat; - do_the_startthread(event); - goto next_thread; /* handle next event in event queue */ - - case StartThread: - do_the_startthread(event); - goto next_thread; /* handle next event in event queue */ - - case MoveThread: - do_the_movethread(event); - goto next_thread; /* handle next event in event queue */ - - case MoveSpark: - do_the_movespark(event); - goto next_thread; /* handle next event in event queue */ - - case FindWork: - do_the_findwork(event); - goto next_thread; /* handle next event in event queue */ - - default: - barf("Illegal event type %u\n", event->evttype); - } /* switch */ - - /* This point was scheduler_loop in the old RTS */ - - IF_DEBUG(gran, debugBelch("GRAN: after main switch\n")); - - TimeOfLastEvent = CurrentTime[CurrentProc]; - TimeOfNextEvent = get_time_of_next_event(); - IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK - // CurrentTSO = ThreadQueueHd; - - IF_DEBUG(gran, debugBelch("GRAN: time of next event is: %ld\n", - TimeOfNextEvent)); - - if (RtsFlags.GranFlags.Light) - GranSimLight_leave_system(event, &ActiveTSO); - - EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice; - - IF_DEBUG(gran, - debugBelch("GRAN: end of time-slice is %#lx\n", EndOfTimeSlice)); - - /* in a GranSim setup the TSO stays on the run queue */ - t = CurrentTSO; - /* Take a thread from the run queue. 
*/ - POP_RUN_QUEUE(t); // take_off_run_queue(t); - - IF_DEBUG(gran, - debugBelch("GRAN: About to run current thread, which is\n"); - G_TSO(t,5)); - - context_switch = 0; // turned on via GranYield, checking events and time slice - - IF_DEBUG(gran, - DumpGranEvent(GR_SCHEDULE, t)); - - procStatus[CurrentProc] = Busy; -} -#endif // GRAN /* ---------------------------------------------------------------------------- * Send pending messages (PARALLEL_HASKELL only) * ------------------------------------------------------------------------- */ #if defined(PARALLEL_HASKELL) -static StgTSO * +static void scheduleSendPendingMessages(void) { - StgSparkPool *pool; - rtsSpark spark; - StgTSO *t; # if defined(PAR) // global Mem.Mgmt., omit for now if (PendingFetches != END_BF_QUEUE) { @@ -1190,75 +1056,53 @@ scheduleSendPendingMessages(void) #endif /* ---------------------------------------------------------------------------- - * Activate spark threads (PARALLEL_HASKELL only) + * Activate spark threads (PARALLEL_HASKELL and THREADED_RTS) * ------------------------------------------------------------------------- */ -#if defined(PARALLEL_HASKELL) +#if defined(PARALLEL_HASKELL) || defined(THREADED_RTS) static void -scheduleActivateSpark(void) +scheduleActivateSpark(Capability *cap) { -#if defined(SPARKS) - ASSERT(emptyRunQueue()); -/* We get here if the run queue is empty and want some work. - We try to turn a spark into a thread, and add it to the run queue, - from where it will be picked up in the next iteration of the scheduler - loop. -*/ - - /* :-[ no local threads => look out for local sparks */ - /* the spark pool for the current PE */ - pool = &(cap.r.rSparks); // JB: cap = (old) MainCap - if (advisory_thread_count < RtsFlags.ParFlags.maxThreads && - pool->hd < pool->tl) { - /* - * ToDo: add GC code check that we really have enough heap afterwards!! - * Old comment: - * If we're here (no runnable threads) and we have pending - * sparks, we must have a space problem. Get enough space - * to turn one of those pending sparks into a - * thread... - */ + StgClosure *spark; - spark = findSpark(rtsFalse); /* get a spark */ - if (spark != (rtsSpark) NULL) { - tso = createThreadFromSpark(spark); /* turn the spark into a thread */ - IF_PAR_DEBUG(fish, // schedule, - debugBelch("==== schedule: Created TSO %d (%p); %d threads active\n", - tso->id, tso, advisory_thread_count)); - - if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */ - IF_PAR_DEBUG(fish, // schedule, - debugBelch("==^^ failed to create thread from spark @ %lx\n", - spark)); - return rtsFalse; /* failed to generate a thread */ - } /* otherwise fall through & pick-up new tso */ - } else { - IF_PAR_DEBUG(fish, // schedule, - debugBelch("==^^ no local sparks (spark pool contains only NFs: %d)\n", - spark_queue_len(pool))); - return rtsFalse; /* failed to generate a thread */ - } - return rtsTrue; /* success in generating a thread */ - } else { /* no more threads permitted or pool empty */ - return rtsFalse; /* failed to generateThread */ - } -#else - tso = NULL; // avoid compiler warning only - return rtsFalse; /* dummy in non-PAR setup */ -#endif // SPARKS +/* We only want to stay here if the run queue is empty and we want some + work. We try to turn a spark into a thread, and add it to the run + queue, from where it will be picked up in the next iteration of the + scheduler loop. +*/ + if (!emptyRunQueue(cap)) + /* In the threaded RTS, another task might have pushed a thread + on our run queue in the meantime ? 
But would need a lock.. */ + return; + + + // Really we should be using reclaimSpark() here, but + // experimentally it doesn't seem to perform as well as just + // stealing from our own spark pool: + // spark = reclaimSpark(cap->sparks); + spark = tryStealSpark(cap->sparks); // defined in Sparks.c + + if (spark != NULL) { + debugTrace(DEBUG_sched, + "turning spark of closure %p into a thread", + (StgClosure *)spark); + createSparkThread(cap,spark); // defined in Sparks.c + } } -#endif // PARALLEL_HASKELL +#endif // PARALLEL_HASKELL || THREADED_RTS /* ---------------------------------------------------------------------------- * Get work from a remote node (PARALLEL_HASKELL only) * ------------------------------------------------------------------------- */ #if defined(PARALLEL_HASKELL) -static rtsBool -scheduleGetRemoteWork(rtsBool *receivedFinish) +static rtsBool /* return value used in PARALLEL_HASKELL only */ +scheduleGetRemoteWork (Capability *cap STG_UNUSED) { - ASSERT(emptyRunQueue()); +#if defined(PARALLEL_HASKELL) + rtsBool receivedFinish = rtsFalse; + // idle() , i.e. send all buffers, wait for work if (RtsFlags.ParFlags.BufferTime) { IF_PAR_DEBUG(verbose, debugBelch("...send all pending data,")); @@ -1268,261 +1112,63 @@ scheduleGetRemoteWork(rtsBool *receivedFinish) sendImmediately(i); // send all messages away immediately } } -# ifndef SPARKS - //++EDEN++ idle() , i.e. send all buffers, wait for work - // suppress fishing in EDEN... just look for incoming messages - // (blocking receive) - IF_PAR_DEBUG(verbose, - debugBelch("...wait for incoming messages...\n")); - *receivedFinish = processMessages(); // blocking receive... - - // and reenter scheduling loop after having received something - // (return rtsFalse below) - -# else /* activate SPARKS machinery */ -/* We get here, if we have no work, tried to activate a local spark, but still - have no work. We try to get a remote spark, by sending a FISH message. - Thread migration should be added here, and triggered when a sequence of - fishes returns without work. */ - delay = (RtsFlags.ParFlags.fishDelay!=0ll ? RtsFlags.ParFlags.fishDelay : 0ll); - - /* =8-[ no local sparks => look for work on other PEs */ - /* - * We really have absolutely no work. Send out a fish - * (there may be some out there already), and wait for - * something to arrive. We clearly can't run any threads - * until a SCHEDULE or RESUME arrives, and so that's what - * we're hoping to see. (Of course, we still have to - * respond to other types of messages.) - */ - rtsTime now = msTime() /*CURRENT_TIME*/; - IF_PAR_DEBUG(verbose, - debugBelch("-- now=%ld\n", now)); - IF_PAR_DEBUG(fish, // verbose, - if (outstandingFishes < RtsFlags.ParFlags.maxFishes && - (last_fish_arrived_at!=0 && - last_fish_arrived_at+delay > now)) { - debugBelch("--$$ <%llu> delaying FISH until %llu (last fish %llu, delay %llu)\n", - now, last_fish_arrived_at+delay, - last_fish_arrived_at, - delay); - }); - - if (outstandingFishes < RtsFlags.ParFlags.maxFishes && - advisory_thread_count < RtsFlags.ParFlags.maxThreads) { // send a FISH, but when? - if (last_fish_arrived_at==0 || - (last_fish_arrived_at+delay <= now)) { // send FISH now! 
- /* outstandingFishes is set in sendFish, processFish; - avoid flooding system with fishes via delay */ - next_fish_to_send_at = 0; - } else { - /* ToDo: this should be done in the main scheduling loop to avoid the - busy wait here; not so bad if fish delay is very small */ - int iq = 0; // DEBUGGING -- HWL - next_fish_to_send_at = last_fish_arrived_at+delay; // remember when to send - /* send a fish when ready, but process messages that arrive in the meantime */ - do { - if (PacketsWaiting()) { - iq++; // DEBUGGING - *receivedFinish = processMessages(); - } - now = msTime(); - } while (!*receivedFinish || now sent delayed fish (%d processMessages); active/total threads=%d/%d\n",now,iq,run_queue_len(),advisory_thread_count)); - } - - // JB: IMHO, this should all be hidden inside sendFish(...) - /* pe = choosePE(); - sendFish(pe, thisPE, NEW_FISH_AGE, NEW_FISH_HISTORY, - NEW_FISH_HUNGER); + /* this would be the place for fishing in GUM... - // Global statistics: count no. of fishes - if (RtsFlags.ParFlags.ParStats.Global && - RtsFlags.GcFlags.giveStats > NO_GC_STATS) { - globalParStats.tot_fish_mess++; - } - */ + if (no-earlier-fish-around) + sendFish(choosePe()); + */ - /* delayed fishes must have been sent by now! */ - next_fish_to_send_at = 0; - } - - *receivedFinish = processMessages(); -# endif /* SPARKS */ - - return rtsFalse; - /* NB: this function always returns rtsFalse, meaning the scheduler - loop continues with the next iteration; - rationale: - return code means success in finding work; we enter this function - if there is no local work, thus have to send a fish which takes - time until it arrives with work; in the meantime we should process - messages in the main loop; - */ -} -#endif // PARALLEL_HASKELL + // Eden:just look for incoming messages (blocking receive) + IF_PAR_DEBUG(verbose, + debugBelch("...wait for incoming messages...\n")); + processMessages(cap, &receivedFinish); // blocking receive... -/* ---------------------------------------------------------------------------- - * PAR/GRAN: Report stats & debugging info(?) - * ------------------------------------------------------------------------- */ -#if defined(PAR) || defined(GRAN) -static void -scheduleGranParReport(void) -{ - ASSERT(run_queue_hd != END_TSO_QUEUE); + return receivedFinish; + // reenter scheduling look after having received something - /* Take a thread from the run queue, if we have work */ - POP_RUN_QUEUE(t); // take_off_run_queue(END_TSO_QUEUE); +#else /* !PARALLEL_HASKELL, i.e. THREADED_RTS */ - /* If this TSO has got its outport closed in the meantime, - * it mustn't be run. Instead, we have to clean it up as if it was finished. - * It has to be marked as TH_DEAD for this purpose. - * If it is TH_TERM instead, it is supposed to have finished in the normal way. + return rtsFalse; /* return value unused in THREADED_RTS */ -JB: TODO: investigate wether state change field could be nuked - entirely and replaced by the normal tso state (whatnext - field). All we want to do is to kill tsos from outside. 
- */ - - /* ToDo: write something to the log-file - if (RTSflags.ParFlags.granSimStats && !sameThread) - DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd); - - CurrentTSO = t; - */ - /* the spark pool for the current PE */ - pool = &(cap.r.rSparks); // cap = (old) MainCap - - IF_DEBUG(scheduler, - debugBelch("--=^ %d threads, %d sparks on [%#x]\n", - run_queue_len(), spark_queue_len(pool), CURRENT_PROC)); - - IF_PAR_DEBUG(fish, - debugBelch("--=^ %d threads, %d sparks on [%#x]\n", - run_queue_len(), spark_queue_len(pool), CURRENT_PROC)); - - if (RtsFlags.ParFlags.ParStats.Full && - (t->par.sparkname != (StgInt)0) && // only log spark generated threads - (emitSchedule || // forced emit - (t && LastTSO && t->id != LastTSO->id))) { - /* - we are running a different TSO, so write a schedule event to log file - NB: If we use fair scheduling we also have to write a deschedule - event for LastTSO; with unfair scheduling we know that the - previous tso has blocked whenever we switch to another tso, so - we don't need it in GUM for now - */ - IF_PAR_DEBUG(fish, // schedule, - debugBelch("____ scheduling spark generated thread %d (%lx) (%lx) via a forced emit\n",t->id,t,t->par.sparkname)); - - DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC, - GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0); - emitSchedule = rtsFalse; - } -} -#endif +#endif /* PARALLEL_HASKELL */ +} +#endif // PARALLEL_HASKELL || THREADED_RTS /* ---------------------------------------------------------------------------- * After running a thread... * ------------------------------------------------------------------------- */ static void -schedulePostRunThread(void) +schedulePostRunThread (Capability *cap, StgTSO *t) { -#if defined(PAR) - /* HACK 675: if the last thread didn't yield, make sure to print a - SCHEDULE event to the log file when StgRunning the next thread, even - if it is the same one as before */ - LastTSO = t; - TimeOfLastYield = CURRENT_TIME; -#endif + // We have to be able to catch transactions that are in an + // infinite loop as a result of seeing an inconsistent view of + // memory, e.g. + // + // atomically $ do + // [a,b] <- mapM readTVar [ta,tb] + // when (a == b) loop + // + // and a is never equal to b given a consistent view of memory. + // + if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) { + if (!stmValidateNestOfTransactions (t -> trec)) { + debugTrace(DEBUG_sched | DEBUG_stm, + "trec %p found wasting its time", t); + + // strip the stack back to the + // ATOMICALLY_FRAME, aborting the (nested) + // transaction, and saving the stack of any + // partially-evaluated thunks on the heap. 
+ throwToSingleThreaded_(cap, t, NULL, rtsTrue, NULL); + + ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME); + } + } /* some statistics gathering in the parallel case */ - -#if defined(GRAN) || defined(PAR) || defined(EDEN) - switch (ret) { - case HeapOverflow: -# if defined(GRAN) - IF_DEBUG(gran, DumpGranEvent(GR_DESCHEDULE, t)); - globalGranStats.tot_heapover++; -# elif defined(PAR) - globalParStats.tot_heapover++; -# endif - break; - - case StackOverflow: -# if defined(GRAN) - IF_DEBUG(gran, - DumpGranEvent(GR_DESCHEDULE, t)); - globalGranStats.tot_stackover++; -# elif defined(PAR) - // IF_DEBUG(par, - // DumpGranEvent(GR_DESCHEDULE, t); - globalParStats.tot_stackover++; -# endif - break; - - case ThreadYielding: -# if defined(GRAN) - IF_DEBUG(gran, - DumpGranEvent(GR_DESCHEDULE, t)); - globalGranStats.tot_yields++; -# elif defined(PAR) - // IF_DEBUG(par, - // DumpGranEvent(GR_DESCHEDULE, t); - globalParStats.tot_yields++; -# endif - break; - - case ThreadBlocked: -# if defined(GRAN) - debugTrace(DEBUG_sched, - "--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ", - t->id, t, whatNext_strs[t->what_next], t->block_info.closure, - (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure))); - if (t->block_info.closure!=(StgClosure*)NULL) - print_bq(t->block_info.closure); - debugBelch("\n")); - - // ??? needed; should emit block before - IF_DEBUG(gran, - DumpGranEvent(GR_DESCHEDULE, t)); - prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t - /* - ngoq Dogh! - ASSERT(procStatus[CurrentProc]==Busy || - ((procStatus[CurrentProc]==Fetching) && - (t->block_info.closure!=(StgClosure*)NULL))); - if (run_queue_hds[CurrentProc] == END_TSO_QUEUE && - !(!RtsFlags.GranFlags.DoAsyncFetch && - procStatus[CurrentProc]==Fetching)) - procStatus[CurrentProc] = Idle; - */ -# elif defined(PAR) -//++PAR++ blockThread() writes the event (change?) -# endif - break; - - case ThreadFinished: - break; - - default: - barf("parGlobalStats: unknown return code"); - break; - } -#endif } /* ----------------------------------------------------------------------------- @@ -1601,23 +1247,19 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t ) } debugTrace(DEBUG_sched, - "--<< thread %ld (%s) stopped: HeapOverflow\n", + "--<< thread %ld (%s) stopped: HeapOverflow", (long)t->id, whatNext_strs[t->what_next]); -#if defined(GRAN) - ASSERT(!is_on_queue(t,CurrentProc)); -#elif defined(PARALLEL_HASKELL) - /* Currently we emit a DESCHEDULE event before GC in GUM. - ToDo: either add separate event to distinguish SYSTEM time from rest - or just nuke this DESCHEDULE (and the following SCHEDULE) */ - if (0 && RtsFlags.ParFlags.ParStats.Full) { - DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC, - GR_DESCHEDULE, t, (StgClosure *)NULL, 0, 0); - emitSchedule = rtsTrue; + if (cap->context_switch) { + // Sometimes we miss a context switch, e.g. when calling + // primitives in a tight loop, MAYBE_GC() doesn't check the + // context switch flag, and we end up waiting for a GC. + // See #1984, and concurrent/should_run/1984 + cap->context_switch = 0; + addToRunQueue(cap,t); + } else { + pushOnRunQueue(cap,t); } -#endif - - pushOnRunQueue(cap,t); return rtsTrue; /* actual GC is done at the end of the while loop in schedule() */ } @@ -1663,7 +1305,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next ) // the CPU because the tick always arrives during GC). 
This way // penalises threads that do a lot of allocation, but that seems // better than the alternative. - context_switch = 0; + cap->context_switch = 0; /* put the thread back on the run queue. Then, if we're ready to * GC, check whether this is the last task to stop. If so, wake @@ -1685,7 +1327,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next ) IF_DEBUG(sanity, //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id); checkTSO(t)); - ASSERT(t->link == END_TSO_QUEUE); + ASSERT(t->_link == END_TSO_QUEUE); // Shortcut if we're just switching evaluators: don't bother // doing stack squeezing (which can be expensive), just run the @@ -1693,28 +1335,9 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next ) if (t->what_next != prev_what_next) { return rtsTrue; } - -#if defined(GRAN) - ASSERT(!is_on_queue(t,CurrentProc)); - - IF_DEBUG(sanity, - //debugBelch("&& Doing sanity check on all ThreadQueues (and their TSOs)."); - checkThreadQsSanity(rtsTrue)); - -#endif addToRunQueue(cap,t); -#if defined(GRAN) - /* add a ContinueThread event to actually process the thread */ - new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc], - ContinueThread, - t, (StgClosure*)NULL, (rtsSpark*)NULL); - IF_GRAN_DEBUG(bq, - debugBelch("GRAN: eventq and runnableq after adding yielded thread to queue again:\n"); - G_EVENTQ(0); - G_CURR_THREADQ(0)); -#endif return rtsFalse; } @@ -1729,54 +1352,19 @@ scheduleHandleThreadBlocked( StgTSO *t #endif ) { -#if defined(GRAN) - IF_DEBUG(scheduler, - debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: \n", - t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure))); - if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure)); - - // ??? needed; should emit block before - IF_DEBUG(gran, - DumpGranEvent(GR_DESCHEDULE, t)); - prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t - /* - ngoq Dogh! - ASSERT(procStatus[CurrentProc]==Busy || - ((procStatus[CurrentProc]==Fetching) && - (t->block_info.closure!=(StgClosure*)NULL))); - if (run_queue_hds[CurrentProc] == END_TSO_QUEUE && - !(!RtsFlags.GranFlags.DoAsyncFetch && - procStatus[CurrentProc]==Fetching)) - procStatus[CurrentProc] = Idle; - */ -#elif defined(PAR) - IF_DEBUG(scheduler, - debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: \n", - t->id, t, whatNext_strs[t->what_next], t->block_info.closure)); - IF_PAR_DEBUG(bq, - - if (t->block_info.closure!=(StgClosure*)NULL) - print_bq(t->block_info.closure)); - - /* Send a fetch (if BlockedOnGA) and dump event to log file */ - blockThread(t); - - /* whatever we schedule next, we must log that schedule */ - emitSchedule = rtsTrue; - -#else /* !GRAN */ // We don't need to do anything. The thread is blocked, and it // has tidied up its stack and placed itself on whatever queue // it needs to be on. -#if !defined(THREADED_RTS) - ASSERT(t->why_blocked != NotBlocked); - // This might not be true under THREADED_RTS: we don't have - // exclusive access to this TSO, so someone might have - // woken it up by now. This actually happens: try - // conc023 +RTS -N2. -#endif + // ASSERT(t->why_blocked != NotBlocked); + // Not true: for example, + // - in THREADED_RTS, the thread may already have been woken + // up by another Capability. This actually happens: try + // conc023 +RTS -N2. 
+ // - the thread may have woken itself up already, because + // threadPaused() might have raised a blocked throwTo + // exception, see maybePerformBlockedException(). #ifdef DEBUG if (traceClass(DEBUG_sched)) { @@ -1786,12 +1374,6 @@ scheduleHandleThreadBlocked( StgTSO *t debugTraceEnd(); } #endif - - /* Only for dumping event to log file - ToDo: do I need this in GranSim, too? - blockThread(t); - */ -#endif } /* ----------------------------------------------------------------------------- @@ -1810,48 +1392,6 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t) debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished", (unsigned long)t->id, whatNext_strs[t->what_next]); -#if defined(GRAN) - endThread(t, CurrentProc); // clean-up the thread -#elif defined(PARALLEL_HASKELL) - /* For now all are advisory -- HWL */ - //if(t->priority==AdvisoryPriority) ?? - advisory_thread_count--; // JB: Caution with this counter, buggy! - -# if defined(DIST) - if(t->dist.priority==RevalPriority) - FinishReval(t); -# endif - -# if defined(EDENOLD) - // the thread could still have an outport... (BUG) - if (t->eden.outport != -1) { - // delete the outport for the tso which has finished... - IF_PAR_DEBUG(eden_ports, - debugBelch("WARNING: Scheduler removes outport %d for TSO %d.\n", - t->eden.outport, t->id)); - deleteOPT(t); - } - // thread still in the process (HEAVY BUG! since outport has just been closed...) - if (t->eden.epid != -1) { - IF_PAR_DEBUG(eden_ports, - debugBelch("WARNING: Scheduler removes TSO %d from process %d .\n", - t->id, t->eden.epid)); - removeTSOfromProcess(t); - } -# endif - -# if defined(PAR) - if (RtsFlags.ParFlags.ParStats.Full && - !RtsFlags.ParFlags.ParStats.Suppressed) - DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */); - - // t->par only contains statistics: left out for now... - IF_PAR_DEBUG(fish, - debugBelch("**** end thread: ended sparked thread %d (%lx); sparkname: %lx\n", - t->id,t,t->par.sparkname)); -# endif -#endif // PARALLEL_HASKELL - // // Check whether the thread that just completed was a bound // thread, and if so return with the result. @@ -1871,7 +1411,7 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t) // point where we can deal with this. Leaving it on the run // queue also ensures that the garbage collector knows about // this thread and its return value (it gets dropped from the - // all_threads list so there's no other way to find it). + // step->threads list so there's no other way to find it). appendToRunQueue(cap,t); return rtsFalse; #else @@ -1909,36 +1449,21 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t) } /* ----------------------------------------------------------------------------- - * Perform a heap census, if PROFILING + * Perform a heap census * -------------------------------------------------------------------------- */ static rtsBool -scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED ) +scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED ) { -#if defined(PROFILING) // When we have +RTS -i0 and we're heap profiling, do a census at // every GC. This lets us get repeatable runs for debugging. if (performHeapProfile || (RtsFlags.ProfFlags.profileInterval==0 && RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) { - - // checking black holes is necessary before GC, otherwise - // there may be threads that are unreachable except by the - // blackhole queue, which the GC will consider to be - // deadlocked. 
- scheduleCheckBlackHoles(&MainCapability); - - debugTrace(DEBUG_sched, "garbage collecting before heap census"); - GarbageCollect(GetRoots, rtsTrue); - - debugTrace(DEBUG_sched, "performing heap census"); - heapCensus(); - - performHeapProfile = rtsFalse; - return rtsTrue; // true <=> we already GC'd + return rtsTrue; + } else { + return rtsFalse; } -#endif - return rtsFalse; } /* ----------------------------------------------------------------------------- @@ -1946,12 +1471,12 @@ scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED ) * -------------------------------------------------------------------------- */ static Capability * -scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, - rtsBool force_major, void (*get_roots)(evac_fn)) +scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major) { - StgTSO *t; + rtsBool heap_census; #ifdef THREADED_RTS - static volatile StgWord waiting_for_gc; + /* extern static volatile StgWord waiting_for_gc; + lives inside capability.c */ rtsBool was_waiting; nat i; #endif @@ -1968,6 +1493,10 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, // the other tasks to sleep and stay asleep. // + /* Other capabilities are prevented from running yet more Haskell + threads if waiting_for_gc is set. Tested inside + yieldCapability() and releaseCapability() in Capability.c */ + was_waiting = cas(&waiting_for_gc, 0, 1); if (was_waiting) { do { @@ -1977,6 +1506,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, return cap; // NOTE: task->cap might have changed here } + setContextSwitches(); for (i=0; i < n_capabilities; i++) { debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities); if (cap != &capabilities[i]) { @@ -1987,7 +1517,6 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, // all the Capabilities, but even so it's a slightly // unsavoury invariant. task->cap = pcap; - context_switch = 1; waitForReturnCapability(&pcap, task); if (pcap != &capabilities[i]) { barf("scheduleDoGC: got the wrong capability"); @@ -1998,51 +1527,6 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, waiting_for_gc = rtsFalse; #endif - /* Kick any transactions which are invalid back to their - * atomically frames. When next scheduled they will try to - * commit, this commit will fail and they will retry. - */ - { - StgTSO *next; - - for (t = all_threads; t != END_TSO_QUEUE; t = next) { - if (t->what_next == ThreadRelocated) { - next = t->link; - } else { - next = t->global_link; - - // This is a good place to check for blocked - // exceptions. It might be the case that a thread is - // blocked on delivering an exception to a thread that - // is also blocked - we try to ensure that this - // doesn't happen in throwTo(), but it's too hard (or - // impossible) to close all the race holes, so we - // accept that some might get through and deal with - // them here. A GC will always happen at some point, - // even if the system is otherwise deadlocked. - maybePerformBlockedException (&capabilities[0], t); - - if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) { - if (!stmValidateNestOfTransactions (t -> trec)) { - debugTrace(DEBUG_sched | DEBUG_stm, - "trec %p found wasting its time", t); - - // strip the stack back to the - // ATOMICALLY_FRAME, aborting the (nested) - // transaction, and saving the stack of any - // partially-evaluated thunks on the heap. 
- throwToSingleThreaded_(&capabilities[0], t, - NULL, rtsTrue, NULL); - -#ifdef REG_R1 - ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME); -#endif - } - } - } - } - } - // so this happens periodically: if (cap) scheduleCheckBlackHoles(cap); @@ -2057,6 +1541,8 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, deleteAllThreads(&capabilities[0]); sched_state = SCHED_SHUTTING_DOWN; } + + heap_census = scheduleNeedHeapProfile(rtsTrue); /* everybody back, start the GC. * Could do it in this thread, or signal a condition var @@ -2066,8 +1552,22 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, #if defined(THREADED_RTS) debugTrace(DEBUG_sched, "doing GC"); #endif - GarbageCollect(get_roots, force_major); + GarbageCollect(force_major || heap_census); + if (heap_census) { + debugTrace(DEBUG_sched, "performing heap census"); + heapCensus(); + performHeapProfile = rtsFalse; + } + +#ifdef SPARKBALANCE + /* JB + Once we are all together... this would be the place to balance all + spark pools. No concurrent stealing or adding of new sparks can + occur. Should be defined in Sparks.c. */ + balanceSparkPoolsCaps(n_capabilities, capabilities); +#endif + #if defined(THREADED_RTS) // release our stash of capabilities. for (i = 0; i < n_capabilities; i++) { @@ -2083,17 +1583,6 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, } #endif -#if defined(GRAN) - /* add a ContinueThread event to continue execution of current thread */ - new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc], - ContinueThread, - t, (StgClosure*)NULL, (rtsSpark*)NULL); - IF_GRAN_DEBUG(bq, - debugBelch("GRAN: eventq and runnableq after Garbage collection:\n\n"); - G_EVENTQ(0); - G_CURR_THREADQ(0)); -#endif /* GRAN */ - return cap; } @@ -2101,7 +1590,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, * Singleton fork(). Do not copy any running threads. * ------------------------------------------------------------------------- */ -StgInt +pid_t forkProcess(HsStablePtr *entry #ifndef FORKPROCESS_PRIMOP_SUPPORTED STG_UNUSED @@ -2113,6 +1602,7 @@ forkProcess(HsStablePtr *entry pid_t pid; StgTSO* t,*next; Capability *cap; + nat s; #if defined(THREADED_RTS) if (RtsFlags.ParFlags.nNodes > 1) { @@ -2126,25 +1616,44 @@ forkProcess(HsStablePtr *entry // ToDo: for SMP, we should probably acquire *all* the capabilities cap = rts_lock(); + // no funny business: hold locks while we fork, otherwise if some + // other thread is holding a lock when the fork happens, the data + // structure protected by the lock will forever be in an + // inconsistent state in the child. See also #1391. + ACQUIRE_LOCK(&sched_mutex); + ACQUIRE_LOCK(&cap->lock); + ACQUIRE_LOCK(&cap->running_task->lock); + pid = fork(); if (pid) { // parent + RELEASE_LOCK(&sched_mutex); + RELEASE_LOCK(&cap->lock); + RELEASE_LOCK(&cap->running_task->lock); + // just return the pid rts_unlock(cap); return pid; } else { // child +#if defined(THREADED_RTS) + initMutex(&sched_mutex); + initMutex(&cap->lock); + initMutex(&cap->running_task->lock); +#endif + // Now, all OS threads except the thread that forked are // stopped. We need to stop all Haskell threads, including // those involved in foreign calls. Also we need to delete // all Tasks, because they correspond to OS threads that are // now gone. 
- for (t = all_threads; t != END_TSO_QUEUE; t = next) { + for (s = 0; s < total_steps; s++) { + for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) { if (t->what_next == ThreadRelocated) { - next = t->link; + next = t->_link; } else { next = t->global_link; // don't allow threads to catch the ThreadKilled @@ -2152,6 +1661,7 @@ forkProcess(HsStablePtr *entry // threads may be evaluating thunks that we need later. deleteThread_(cap,t); } + } } // Empty the run queue. It seems tempting to let all the @@ -2165,14 +1675,19 @@ forkProcess(HsStablePtr *entry // don't exist now: cap->suspended_ccalling_tasks = NULL; - // Empty the all_threads list. Otherwise, the garbage + // Empty the threads lists. Otherwise, the garbage // collector may attempt to resurrect some of these threads. - all_threads = END_TSO_QUEUE; + for (s = 0; s < total_steps; s++) { + all_steps[s].threads = END_TSO_QUEUE; + } // Wipe the task list, except the current Task. ACQUIRE_LOCK(&sched_mutex); for (task = all_tasks; task != NULL; task=task->all_link) { if (task != cap->running_task) { +#if defined(THREADED_RTS) + initMutex(&task->lock); // see #1391 +#endif discardTask(task); } } @@ -2186,6 +1701,11 @@ forkProcess(HsStablePtr *entry cap->returning_tasks_tl = NULL; #endif + // On Unix, all timers are reset in the child, so we need to start + // the timer again. + initTimer(); + startTimer(); + cap = rts_evalStableIO(cap, entry, NULL); // run the action rts_checkSchedStatus("forkProcess",cap); @@ -2209,14 +1729,18 @@ deleteAllThreads ( Capability *cap ) // NOTE: only safe to call if we own all capabilities. StgTSO* t, *next; + nat s; + debugTrace(DEBUG_sched,"deleting all threads"); - for (t = all_threads; t != END_TSO_QUEUE; t = next) { + for (s = 0; s < total_steps; s++) { + for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) { if (t->what_next == ThreadRelocated) { - next = t->link; + next = t->_link; } else { next = t->global_link; deleteThread(cap,t); } + } } // The run queue now contains a bunch of ThreadKilled threads. We @@ -2282,9 +1806,17 @@ void * suspendThread (StgRegTable *reg) { Capability *cap; - int saved_errno = errno; + int saved_errno; StgTSO *tso; Task *task; +#if mingw32_HOST_OS + StgWord32 saved_winerror; +#endif + + saved_errno = errno; +#if mingw32_HOST_OS + saved_winerror = GetLastError(); +#endif /* assume that *reg is a pointer to the StgRegTable part of a Capability. */ @@ -2317,7 +1849,7 @@ suspendThread (StgRegTable *reg) suspendTask(cap,task); cap->in_haskell = rtsFalse; - releaseCapability_(cap); + releaseCapability_(cap,rtsFalse); RELEASE_LOCK(&cap->lock); @@ -2329,6 +1861,9 @@ suspendThread (StgRegTable *reg) #endif errno = saved_errno; +#if mingw32_HOST_OS + SetLastError(saved_winerror); +#endif return task; } @@ -2337,8 +1872,16 @@ resumeThread (void *task_) { StgTSO *tso; Capability *cap; - int saved_errno = errno; Task *task = task_; + int saved_errno; +#if mingw32_HOST_OS + StgWord32 saved_winerror; +#endif + + saved_errno = errno; +#if mingw32_HOST_OS + saved_winerror = GetLastError(); +#endif cap = task->cap; // Wait for permission to re-enter the RTS with the result. 
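The suspendThread and resumeThread hunks here both add the same discipline: capture the thread-local C error state (errno, plus the Win32 last-error value on mingw32) before crossing the RTS boundary, and restore it on the way back, so that nothing the RTS does in between can clobber an error code the foreign caller still needs. A minimal standalone sketch of the pattern; do_rts_work and with_saved_error_state are hypothetical stand-ins, not RTS functions:

    #include <errno.h>
    #if defined(_WIN32)
    #include <windows.h>
    #endif

    /* Stand-in for the RTS transition; clobbers errno, as the real one may. */
    static void do_rts_work(void) { errno = 0; }

    static void
    with_saved_error_state (void)
    {
        int saved_errno = errno;                /* POSIX error state */
    #if defined(_WIN32)
        DWORD saved_winerror = GetLastError();  /* Win32 analogue of errno */
    #endif

        do_rts_work();                          /* may GC, switch OS threads, ... */

        errno = saved_errno;                    /* restore on the way out */
    #if defined(_WIN32)
        SetLastError(saved_winerror);
    #endif
    }

The same shape appears on both sides of the patch because suspension and resumption may run on different OS threads, so each side saves into and restores from its own locals.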
@@ -2352,7 +1895,7 @@ resumeThread (void *task_) tso = task->suspended_tso; task->suspended_tso = NULL; - tso->link = END_TSO_QUEUE; + tso->_link = END_TSO_QUEUE; // no write barrier reqd debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id); if (tso->why_blocked == BlockedOnCCall) { @@ -2366,9 +1909,12 @@ resumeThread (void *task_) cap->r.rCurrentTSO = tso; cap->in_haskell = rtsTrue; errno = saved_errno; +#if mingw32_HOST_OS + SetLastError(saved_winerror); +#endif /* We might have GC'd, mark the TSO dirty again */ - dirtyTSO(tso); + dirty_TSO(cap,tso); IF_DEBUG(sanity, checkTSO(tso)); @@ -2403,7 +1949,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso) if (cpu == cap->no) { appendToRunQueue(cap,tso); } else { - migrateThreadToCapability_lock(&capabilities[cpu],tso); + wakeupThreadOnCapability(cap, &capabilities[cpu], tso); } #else appendToRunQueue(cap,tso); @@ -2431,13 +1977,6 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap) debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id); -#if defined(GRAN) - /* GranSim specific init */ - CurrentTSO = m->tso; // the TSO to run - procStatus[MainProc] = Busy; // status of main PE - CurrentProc = MainProc; // PE to run it on -#endif - cap = schedule(cap,task); ASSERT(task->stat != NoStatus); @@ -2452,7 +1991,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap) * ------------------------------------------------------------------------- */ #if defined(THREADED_RTS) -void +void OSThreadProcAttr workerStart(Task *task) { Capability *cap; @@ -2486,32 +2025,17 @@ workerStart(Task *task) void initScheduler(void) { -#if defined(GRAN) - nat i; - for (i=0; i<=MAX_PROC; i++) { - run_queue_hds[i] = END_TSO_QUEUE; - run_queue_tls[i] = END_TSO_QUEUE; - blocked_queue_hds[i] = END_TSO_QUEUE; - blocked_queue_tls[i] = END_TSO_QUEUE; - ccalling_threadss[i] = END_TSO_QUEUE; - blackhole_queue[i] = END_TSO_QUEUE; - sleeping_queue = END_TSO_QUEUE; - } -#elif !defined(THREADED_RTS) +#if !defined(THREADED_RTS) blocked_queue_hd = END_TSO_QUEUE; blocked_queue_tl = END_TSO_QUEUE; sleeping_queue = END_TSO_QUEUE; #endif blackhole_queue = END_TSO_QUEUE; - all_threads = END_TSO_QUEUE; - context_switch = 0; sched_state = SCHED_RUNNING; + recent_activity = ACTIVITY_YES; - RtsFlags.ConcFlags.ctxtSwitchTicks = - RtsFlags.ConcFlags.ctxtSwitchTime / TICK_MILLISECS; - #if defined(THREADED_RTS) /* Initialise the mutex and condition variables used by * the scheduler. */ @@ -2557,7 +2081,13 @@ initScheduler(void) } void -exitScheduler( void ) +exitScheduler( + rtsBool wait_foreign +#if !defined(THREADED_RTS) + __attribute__((unused)) +#endif +) + /* see Capability.c, shutdownCapability() */ { Task *task = NULL; @@ -2570,7 +2100,7 @@ exitScheduler( void ) // If we haven't killed all the threads yet, do it now. if (sched_state < SCHED_SHUTTING_DOWN) { sched_state = SCHED_INTERRUPTING; - scheduleDoGC(NULL,task,rtsFalse,GetRoots); + scheduleDoGC(NULL,task,rtsFalse); } sched_state = SCHED_SHUTTING_DOWN; @@ -2579,91 +2109,24 @@ exitScheduler( void ) nat i; for (i = 0; i < n_capabilities; i++) { - shutdownCapability(&capabilities[i], task); + shutdownCapability(&capabilities[i], task, wait_foreign); } boundTaskExiting(task); stopTaskManager(); } - closeMutex(&sched_mutex); #endif } -/* --------------------------------------------------------------------------- - Where are the roots that we know about? 
- - - all the threads on the runnable queue - - all the threads on the blocked queue - - all the threads on the sleeping queue - - all the thread currently executing a _ccall_GC - - all the "main threads" - - ------------------------------------------------------------------------ */ - -/* This has to be protected either by the scheduler monitor, or by the - garbage collection monitor (probably the latter). - KH @ 25/10/99 -*/ - void -GetRoots( evac_fn evac ) +freeScheduler( void ) { - nat i; - Capability *cap; - Task *task; - -#if defined(GRAN) - for (i=0; i<=RtsFlags.GranFlags.proc; i++) { - if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL))) - evac((StgClosure **)&run_queue_hds[i]); - if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL))) - evac((StgClosure **)&run_queue_tls[i]); - - if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL))) - evac((StgClosure **)&blocked_queue_hds[i]); - if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL))) - evac((StgClosure **)&blocked_queue_tls[i]); - if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL))) - evac((StgClosure **)&ccalling_threads[i]); + freeCapabilities(); + freeTaskManager(); + if (n_capabilities != 1) { + stgFree(capabilities); } - - markEventQueue(); - -#else /* !GRAN */ - - for (i = 0; i < n_capabilities; i++) { - cap = &capabilities[i]; - evac((StgClosure **)(void *)&cap->run_queue_hd); - evac((StgClosure **)(void *)&cap->run_queue_tl); #if defined(THREADED_RTS) - evac((StgClosure **)(void *)&cap->wakeup_queue_hd); - evac((StgClosure **)(void *)&cap->wakeup_queue_tl); -#endif - for (task = cap->suspended_ccalling_tasks; task != NULL; - task=task->next) { - debugTrace(DEBUG_sched, - "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id); - evac((StgClosure **)(void *)&task->suspended_tso); - } - - } - - -#if !defined(THREADED_RTS) - evac((StgClosure **)(void *)&blocked_queue_hd); - evac((StgClosure **)(void *)&blocked_queue_tl); - evac((StgClosure **)(void *)&sleeping_queue); -#endif -#endif - - // evac((StgClosure **)&blackhole_queue); - -#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL) || defined(GRAN) - markSparkQueue(evac); -#endif - -#if defined(RTS_USER_SIGNALS) - // mark the signal handlers (signals should be already blocked) - markSignalHandlers(evac); + closeMutex(&sched_mutex); #endif } @@ -2673,17 +2136,10 @@ GetRoots( evac_fn evac ) This is the interface to the garbage collector from Haskell land. We provide this so that external C code can allocate and garbage collect when called from Haskell via _ccall_GC. - - It might be useful to provide an interface whereby the programmer - can specify more roots (ToDo). - - This needs to be protected by the GC condition variable above. KH. 
   -------------------------------------------------------------------------- */
 
-static void (*extra_roots)(evac_fn);
-
 static void
-performGC_(rtsBool force_major, void (*get_roots)(evac_fn))
+performGC_(rtsBool force_major)
 {
     Task *task;
     // We must grab a new Task here, because the existing Task may be
@@ -2692,34 +2148,20 @@ performGC_(rtsBool force_major, void (*get_roots)(evac_fn))
     ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
     RELEASE_LOCK(&sched_mutex);
-    scheduleDoGC(NULL,task,force_major, get_roots);
+    scheduleDoGC(NULL,task,force_major);
     boundTaskExiting(task);
 }
 
 void
 performGC(void)
 {
-    performGC_(rtsFalse, GetRoots);
+    performGC_(rtsFalse);
 }
 
 void
 performMajorGC(void)
 {
-    performGC_(rtsTrue, GetRoots);
-}
-
-static void
-AllRoots(evac_fn evac)
-{
-    GetRoots(evac);		// the scheduler's roots
-    extra_roots(evac);		// the user's roots
-}
-
-void
-performGCWithRoots(void (*get_roots)(evac_fn))
-{
-    extra_roots = get_roots;
-    performGC_(rtsFalse, AllRoots);
+    performGC_(rtsTrue);
 }
 
 /* -----------------------------------------------------------------------------
@@ -2745,7 +2187,12 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
     // while we are moving the TSO:
     lockClosure((StgClosure *)tso);
 
-    if (tso->stack_size >= tso->max_stack_size) {
+    if (tso->stack_size >= tso->max_stack_size && !(tso->flags & TSO_BLOCKEX)) {
+	// NB. never raise a StackOverflow exception if the thread is
+	// inside Control.Exception.block.  It is impractical to protect
+	// against stack overflow exceptions, since virtually anything
+	// can raise one (even 'catch'), so this is the only sensible
+	// thing to do here.  See bug #767.
 
 	debugTrace(DEBUG_gc,
 		   "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
@@ -2762,10 +2209,17 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
     }
 
     /* Try to double the current stack size.  If that takes us over the
-     * maximum stack size for this thread, then use the maximum instead.
-     * Finally round up so the TSO ends up as a whole number of blocks.
+     * maximum stack size for this thread, then use the maximum instead
+     * (that is, unless we're already at or over the max size and we
+     * can't raise the StackOverflow exception (see above), in which
+     * case just double the size).  Finally round up so the TSO ends up as
+     * a whole number of blocks.
      */
-    new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+    if (tso->stack_size >= tso->max_stack_size) {
+	new_stack_size = tso->stack_size * 2;
+    } else {
+	new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+    }
     new_tso_size = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) + TSO_STRUCT_SIZE)/sizeof(W_);
     new_tso_size = round_to_mblocks(new_tso_size);  /* Be MBLOCK-friendly */
@@ -2775,7 +2229,7 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
 	       "increasing stack size from %ld words to %d.",
 	       (long)tso->stack_size, new_stack_size);
 
-    dest = (StgTSO *)allocate(new_tso_size);
+    dest = (StgTSO *)allocateLocal(cap,new_tso_size);
     TICK_ALLOC_TSO(new_stack_size,0);
 
     /* copy the TSO block and the old stack into the new area */
@@ -2796,7 +2250,7 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
      * dead TSO's stack.
*/ tso->what_next = ThreadRelocated; - tso->link = dest; + setTSOLink(cap,tso,dest); tso->sp = (P_)&(tso->stack[tso->stack_size]); tso->why_blocked = NotBlocked; @@ -2818,6 +2272,56 @@ threadStackOverflow(Capability *cap, StgTSO *tso) return dest; } +static StgTSO * +threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso) +{ + bdescr *bd, *new_bd; + lnat free_w, tso_size_w; + StgTSO *new_tso; + + tso_size_w = tso_sizeW(tso); + + if (tso_size_w < MBLOCK_SIZE_W || + (nat)(tso->stack + tso->stack_size - tso->sp) > tso->stack_size / 4) + { + return tso; + } + + // don't allow throwTo() to modify the blocked_exceptions queue + // while we are moving the TSO: + lockClosure((StgClosure *)tso); + + // this is the number of words we'll free + free_w = round_to_mblocks(tso_size_w/2); + + bd = Bdescr((StgPtr)tso); + new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W); + bd->free = bd->start + TSO_STRUCT_SIZEW; + + new_tso = (StgTSO *)new_bd->start; + memcpy(new_tso,tso,TSO_STRUCT_SIZE); + new_tso->stack_size = new_bd->free - new_tso->stack; + + debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu", + (long)tso->id, tso_size_w, tso_sizeW(new_tso)); + + tso->what_next = ThreadRelocated; + tso->_link = new_tso; // no write barrier reqd: same generation + + // The TSO attached to this Task may have moved, so update the + // pointer to it. + if (task->tso == tso) { + task->tso = new_tso; + } + + unlockTSO(new_tso); + unlockTSO(tso); + + IF_DEBUG(sanity,checkTSO(new_tso)); + + return new_tso; +} + /* --------------------------------------------------------------------------- Interrupt execution - usually called inside a signal handler so it mustn't do anything fancy. @@ -2827,7 +2331,7 @@ void interruptStgRts(void) { sched_state = SCHED_INTERRUPTING; - context_switch = 1; + setContextSwitches(); wakeUpRts(); } @@ -2848,17 +2352,10 @@ void wakeUpRts(void) { #if defined(THREADED_RTS) -#if !defined(mingw32_HOST_OS) // This forces the IO Manager thread to wakeup, which will // in turn ensure that some OS thread wakes up and runs the // scheduler loop, which will cause a GC and deadlock check. ioManagerWakeup(); -#else - // On Windows this might be safe enough, because we aren't - // in a signal handler. Later we should use the IO Manager, - // though. - prodOneCapability(); -#endif #endif } @@ -2894,17 +2391,15 @@ checkBlackHoles (Capability *cap) t = blackhole_queue; while (t != END_TSO_QUEUE) { ASSERT(t->why_blocked == BlockedOnBlackHole); - type = get_itbl(t->block_info.closure)->type; + type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type; if (type != BLACKHOLE && type != CAF_BLACKHOLE) { IF_DEBUG(sanity,checkTSO(t)); t = unblockOne(cap, t); - // urk, the threads migrate to the current capability - // here, but we'd like to keep them on the original one. *prev = t; any_woke_up = rtsTrue; } else { - prev = &t->link; - t = t->link; + prev = &t->_link; + t = t->_link; } } @@ -3042,8 +2537,9 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception) This should either be a CATCH_RETRY_FRAME (if the retry# is within an orElse#) or should be a ATOMICALLY_FRAME (if the retry# reaches the top level). - We skip CATCH_STM_FRAMEs because retries are not considered to be exceptions, - despite the similar implementation. + We skip CATCH_STM_FRAMEs (aborting and rolling back the nested tx that they + create) because retries are not considered to be exceptions, despite the + similar implementation. 
We should not expect to see CATCH_FRAME or STOP_FRAME because those should not be created within memory transactions. @@ -3063,7 +2559,7 @@ findRetryFrameHelper (StgTSO *tso) case ATOMICALLY_FRAME: debugTrace(DEBUG_stm, - "found ATOMICALLY_FRAME at %p during retrry", p); + "found ATOMICALLY_FRAME at %p during retry", p); tso->sp = p; return ATOMICALLY_FRAME; @@ -3073,7 +2569,20 @@ findRetryFrameHelper (StgTSO *tso) tso->sp = p; return CATCH_RETRY_FRAME; - case CATCH_STM_FRAME: + case CATCH_STM_FRAME: { + StgTRecHeader *trec = tso -> trec; + StgTRecHeader *outer = stmGetEnclosingTRec(trec); + debugTrace(DEBUG_stm, + "found CATCH_STM_FRAME at %p during retry", p); + debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer); + stmAbortTransaction(tso -> cap, trec); + stmFreeAbortedTRec(tso -> cap, trec); + tso -> trec = outer; + p = next; + continue; + } + + default: ASSERT(info->i.type != CATCH_FRAME); ASSERT(info->i.type != STOP_FRAME); @@ -3098,11 +2607,15 @@ resurrectThreads (StgTSO *threads) { StgTSO *tso, *next; Capability *cap; + step *step; for (tso = threads; tso != END_TSO_QUEUE; tso = next) { next = tso->global_link; - tso->global_link = all_threads; - all_threads = tso; + + step = Bdescr((P_)tso)->step; + tso->global_link = step->threads; + step->threads = tso; + debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id); // Wake up the thread on the Capability it was last on @@ -3113,15 +2626,15 @@ resurrectThreads (StgTSO *threads) case BlockedOnException: /* Called by GC - sched_mutex lock is currently held. */ throwToSingleThreaded(cap, tso, - (StgClosure *)BlockedOnDeadMVar_closure); + (StgClosure *)blockedOnDeadMVar_closure); break; case BlockedOnBlackHole: throwToSingleThreaded(cap, tso, - (StgClosure *)NonTermination_closure); + (StgClosure *)nonTermination_closure); break; case BlockedOnSTM: throwToSingleThreaded(cap, tso, - (StgClosure *)BlockedIndefinitely_closure); + (StgClosure *)blockedIndefinitely_closure); break; case NotBlocked: /* This might happen if the thread was blocked on a black hole @@ -3134,3 +2647,37 @@ resurrectThreads (StgTSO *threads) } } } + +/* ----------------------------------------------------------------------------- + performPendingThrowTos is called after garbage collection, and + passed a list of threads that were found to have pending throwTos + (tso->blocked_exceptions was not empty), and were blocked. + Normally this doesn't happen, because we would deliver the + exception directly if the target thread is blocked, but there are + small windows where it might occur on a multiprocessor (see + throwTo()). + + NB. we must be holding all the capabilities at this point, just + like resurrectThreads(). + -------------------------------------------------------------------------- */ + +void +performPendingThrowTos (StgTSO *threads) +{ + StgTSO *tso, *next; + Capability *cap; + step *step; + + for (tso = threads; tso != END_TSO_QUEUE; tso = next) { + next = tso->global_link; + + step = Bdescr((P_)tso)->step; + tso->global_link = step->threads; + step->threads = tso; + + debugTrace(DEBUG_sched, "performing blocked throwTo to thread %lu", (unsigned long)tso->id); + + cap = tso->cap; + maybePerformBlockedException(cap, tso); + } +}
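
resurrectThreads and performPendingThrowTos above share the same list surgery: each TSO arrives on a detached global_link chain and is pushed back onto the threads list of the step that owns its memory (found via Bdescr((P_)tso)->step) before being woken or sent its pending exception. A minimal standalone sketch of that relinking, using simplified stand-in types (TSO, Step) rather than the real StgTSO and step structures:

    #include <stdio.h>

    /* Simplified stand-ins: the real RTS finds the step through the
       block descriptor rather than storing a pointer in the TSO. */
    typedef struct Step Step;
    typedef struct TSO {
        struct TSO *global_link;   /* intrusive list link */
        Step       *step;          /* the step owning this TSO's memory */
        int         id;
    } TSO;

    struct Step {
        TSO *threads;              /* head of this step's thread list */
    };

    #define END_TSO_QUEUE NULL     /* the RTS uses a distinguished closure */

    /* Push every TSO on a detached chain back onto its step's list. */
    static void
    relink_onto_steps (TSO *threads)
    {
        TSO *tso, *next;
        for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
            next = tso->global_link;          /* save before overwriting */
            tso->global_link = tso->step->threads;
            tso->step->threads = tso;         /* prepend: O(1) per thread */
        }
    }

    int main(void)
    {
        Step s = { END_TSO_QUEUE };
        TSO a = { END_TSO_QUEUE, &s, 1 };
        TSO b = { &a, &s, 2 };                /* detached chain: b -> a */
        relink_onto_steps(&b);
        for (TSO *t = s.threads; t != END_TSO_QUEUE; t = t->global_link)
            printf("tso %d\n", t->id);        /* prints tso 1, then tso 2 */
        return 0;
    }

Prepending reverses the traversal order, which is harmless here: the per-step thread lists are bookkeeping sets walked by the GC, not scheduling queues.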