/* ---------------------------------------------------------------------------
 * (c) The GHC Team, 1998-2006
 * The scheduler and thread-related functionality
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#define KEEP_LOCKCLOSURE
#include "OSThreads.h"
#include "StgMiscClosures.h"
#include "Interpreter.h"
#include "RtsSignals.h"
#include "ThreadLabels.h"
#include "LdvProfile.h"
#include "Proftimer.h"
#if defined(GRAN) || defined(PARALLEL_HASKELL)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "Parallel.h"
# include "ParallelDebug.h"
#include "Capability.h"
#include "AwaitEvent.h"
#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#include "RaiseAsync.h"
#include "ThrIOManager.h"
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>

// Turn off inlining when debugging - it obfuscates things
# define STATIC_INLINE static

/* -----------------------------------------------------------------------------
 * -------------------------------------------------------------------------- */

StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */
/* rtsTime TimeOfNextEvent, EndOfTimeSlice;            now in GranSim.c */

In GranSim we have a runnable and a blocked queue for each processor.
In order to minimise code changes, new arrays run_queue_hds/tls
are created. run_queue_hd is then a shortcut (macro) for
run_queue_hds[CurrentProc] (see GranSim.h).

StgTSO *run_queue_hds[MAX_PROC], *run_queue_tls[MAX_PROC];
StgTSO *blocked_queue_hds[MAX_PROC], *blocked_queue_tls[MAX_PROC];
StgTSO *ccalling_threadss[MAX_PROC];
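
/* A minimal sketch of the shortcut described above (illustrative only;
 * the real macros live in GranSim.h and may differ in detail):
 *
 *   #define run_queue_hd  (run_queue_hds[CurrentProc])
 *   #define run_queue_tl  (run_queue_tls[CurrentProc])
 */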
/* We use the same global list of threads (all_threads) in GranSim as in
   the std RTS (i.e. we are cheating). However, we don't use this list in
   the GranSim specific code at the moment (so we are only potentially
   cheating). */

#if !defined(THREADED_RTS)
// Blocked/sleeping threads
StgTSO *blocked_queue_hd = NULL;
StgTSO *blocked_queue_tl = NULL;
StgTSO *sleeping_queue = NULL;    // perhaps replace with a hash table?

/* Threads blocked on blackholes.
 * LOCK: sched_mutex+capability, or all capabilities
 */
StgTSO *blackhole_queue = NULL;

/* The blackhole_queue should be checked for threads to wake up.  See
 * Schedule.h for more thorough comment.
 * LOCK: none (doesn't matter if we miss an update)
 */
rtsBool blackholes_need_checking = rtsFalse;

/* Linked list of all threads.
 * Used for detecting garbage collected threads.
 * LOCK: sched_mutex+capability, or all capabilities
 */
StgTSO *all_threads = NULL;
/* flag set by signal handler to precipitate a context switch
 * LOCK: none (just an advisory flag)
 */
int context_switch = 0;

/* flag that tracks whether we have done any execution in this time slice.
 * LOCK: currently none, perhaps we should lock (but needs to be
 * updated in the fast path of the scheduler).
 */
nat recent_activity = ACTIVITY_YES;

/* if this flag is set as well, give up execution
 * LOCK: none (changes once, from false->true)
 */
rtsBool sched_state = SCHED_RUNNING;

/* This is used in `TSO.h' and gcc 2.96 insists that this variable actually
 * exists - earlier gccs apparently didn't.
 * Set to TRUE when entering a shutdown state (via shutdownHaskellAndExit()) --
 * in an MT setting, needed to signal that a worker thread shouldn't hang around
 * in the scheduler when it is out of work.
 */
rtsBool shutting_down_scheduler = rtsFalse;

 * This mutex protects most of the global scheduler data in
 * the THREADED_RTS runtime.
#if defined(THREADED_RTS)

#if defined(PARALLEL_HASKELL)
rtsTime TimeOfLastYield;
rtsBool emitSchedule = rtsTrue;

#if !defined(mingw32_HOST_OS)
#define FORKPROCESS_PRIMOP_SUPPORTED

/* -----------------------------------------------------------------------------
 * static function prototypes
 * -------------------------------------------------------------------------- */

static Capability *schedule (Capability *initialCapability, Task *task);

// These functions all encapsulate parts of the scheduler loop, and are
// abstracted only to make the structure and control flow of the
// scheduler clearer.
static void schedulePreLoop (void);
#if defined(THREADED_RTS)
static void schedulePushWork(Capability *cap, Task *task);
static void scheduleStartSignalHandlers (Capability *cap);
static void scheduleCheckBlockedThreads (Capability *cap);
static void scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS);
static void scheduleCheckBlackHoles (Capability *cap);
static void scheduleDetectDeadlock (Capability *cap, Task *task);
static StgTSO *scheduleProcessEvent(rtsEvent *event);
#if defined(PARALLEL_HASKELL)
static StgTSO *scheduleSendPendingMessages(void);
static void scheduleActivateSpark(void);
static rtsBool scheduleGetRemoteWork(rtsBool *receivedFinish);
#if defined(PAR) || defined(GRAN)
static void scheduleGranParReport(void);
static void schedulePostRunThread(void);
static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
static void scheduleHandleStackOverflow( Capability *cap, Task *task,
static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t,
                                    nat prev_what_next );
static void scheduleHandleThreadBlocked( StgTSO *t );
static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc);
static Capability *scheduleDoGC(Capability *cap, Task *task,
                                rtsBool force_major);
static rtsBool checkBlackHoles(Capability *cap);
static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
static void deleteThread (Capability *cap, StgTSO *tso);
static void deleteAllThreads (Capability *cap);
#ifdef FORKPROCESS_PRIMOP_SUPPORTED
static void deleteThread_(Capability *cap, StgTSO *tso);
#if defined(PARALLEL_HASKELL)
StgTSO * createSparkThread(rtsSpark spark);
StgTSO * activateSpark (rtsSpark spark);

static char *whatNext_strs[] = {

/* -----------------------------------------------------------------------------
 * Putting a thread on the run queue: different scheduling policies
 * -------------------------------------------------------------------------- */

addToRunQueue( Capability *cap, StgTSO *t )
#if defined(PARALLEL_HASKELL)
    if (RtsFlags.ParFlags.doFairScheduling) {
        // this does round-robin scheduling; good for concurrency
        appendToRunQueue(cap,t);
        // this does unfair scheduling; good for parallelism
        pushOnRunQueue(cap,t);
    // this does round-robin scheduling; good for concurrency
    appendToRunQueue(cap,t);
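    /* Illustrative note (not in the original file): appendToRunQueue
     * adds t at the tail of cap's run queue, so runnable threads take
     * turns (the fair, round-robin choice above); pushOnRunQueue adds
     * t at the head, so t runs next (the unfair choice, which tends to
     * help parallelism by finishing work already started before
     * picking up new work).
     */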
/* ---------------------------------------------------------------------------
   Main scheduling loop.

   We use round-robin scheduling, each thread returning to the
   scheduler loop when one of these conditions is detected:

      * timer expires (thread yields)

   In a GranSim setup this loop iterates over the global event queue,
   which determines what to do next. Therefore, it's more complicated
   than either the concurrent or the parallel (GUM) setup.

   GUM iterates over incoming messages.
   It starts with nothing to do (thus CurrentTSO == END_TSO_QUEUE),
   and sends out a fish whenever it has nothing to do; in-between
   doing the actual reductions (shared code below) it processes the
   incoming messages and deals with delayed operations
   (see PendingFetches).
   This is not the ugliest code you could imagine, but it's bloody close.

   ------------------------------------------------------------------------ */
schedule (Capability *initialCapability, Task *task)
  StgThreadReturnCode ret;
#elif defined(PARALLEL_HASKELL)
  rtsBool receivedFinish = rtsFalse;
  nat tp_size, sp_size; // stats only
#if defined(THREADED_RTS)
  rtsBool first = rtsTrue;

  cap = initialCapability;

  // Pre-condition: this task owns initialCapability.
  // The sched_mutex is *NOT* held
  // NB. on return, we still hold a capability.

  debugTrace (DEBUG_sched,
              "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
              task, initialCapability);

  // -----------------------------------------------------------
  // Scheduler loop starts here:

#if defined(PARALLEL_HASKELL)
#define TERMINATION_CONDITION        (!receivedFinish)
#define TERMINATION_CONDITION        ((event = get_next_event()) != (rtsEvent*)NULL)
#define TERMINATION_CONDITION        rtsTrue

  while (TERMINATION_CONDITION) {
      /* Choose the processor with the next event */
      CurrentProc = event->proc;
      CurrentTSO  = event->tso;

#if defined(THREADED_RTS)
          // don't yield the first time, we want a chance to run this
          // thread for a bit, even if there are others banging at the
          // door.
      ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

      // Yield the capability to higher-priority tasks if necessary.
      yieldCapability(&cap, task);

#if defined(THREADED_RTS)
      schedulePushWork(cap,task);

    // Check whether we have re-entered the RTS from Haskell without
    // going via suspendThread()/resumeThread (i.e. a 'safe' foreign
    // call).
    if (cap->in_haskell) {
          errorBelch("schedule: re-entered unsafely.\n"
                     "   Perhaps a 'foreign import unsafe' should be 'safe'?");
          stg_exit(EXIT_FAILURE);

    // The interruption / shutdown sequence.
    //
    // In order to cleanly shut down the runtime, we want to:
    //   * make sure that all main threads return to their callers
    //     with the state 'Interrupted'.
    //   * clean up all OS threads associated with the runtime
    //   * free all memory etc.
    //
    // So the sequence for ^C goes like this:
    //
    //  * ^C handler sets sched_state := SCHED_INTERRUPTING and
    //    arranges for some Capability to wake up
    //
    //  * all threads in the system are halted, and the zombies are
    //    placed on the run queue for cleaning up.  We acquire all
    //    the capabilities in order to delete the threads; this is
    //    done by scheduleDoGC() for convenience (because GC already
    //    needs to acquire all the capabilities).  We can't kill
    //    threads involved in foreign calls.
    //
    //  * somebody calls shutdownHaskell(), which calls exitScheduler()
    //
    //  * sched_state := SCHED_SHUTTING_DOWN
    //
    //  * all workers exit when the run queue on their capability
    //    drains.  All main threads will also exit when their TSO
    //    reaches the head of the run queue and they can return.
    //
    //  * eventually all Capabilities will shut down, and the RTS can
    //    exit.
    //
    //  * We might be left with threads blocked in foreign calls,
    //    we should really attempt to kill these somehow (TODO);
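    //
    // Illustrative summary (not in the original file): sched_state
    // only ever moves forward through
    //
    //    SCHED_RUNNING -> SCHED_INTERRUPTING -> SCHED_SHUTTING_DOWN
    //
    // which is why the tests elsewhere in this file check it with >=
    // rather than ==.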
    switch (sched_state) {
    case SCHED_INTERRUPTING:
        debugTrace(DEBUG_sched, "SCHED_INTERRUPTING");
#if defined(THREADED_RTS)
        discardSparksCap(cap);
        /* scheduleDoGC() deletes all the threads */
        cap = scheduleDoGC(cap,task,rtsFalse);
    case SCHED_SHUTTING_DOWN:
        debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN");
        // If we are a worker, just exit.  If we're a bound thread
        // then we will exit below when we've removed our TSO from
        // the run queue.
        if (task->tso == NULL && emptyRunQueue(cap)) {
        barf("sched_state: %d", sched_state);

#if defined(THREADED_RTS)
    // If the run queue is empty, take a spark and turn it into a thread.
        if (emptyRunQueue(cap)) {
            spark = findSpark(cap);
                debugTrace(DEBUG_sched,
                           "turning spark of closure %p into a thread",
                           (StgClosure *)spark);
                createSparkThread(cap,spark);
#endif // THREADED_RTS

    scheduleStartSignalHandlers(cap);

    // Only check the black holes here if we've nothing else to do.
    // During normal execution, the black hole list only gets checked
    // at GC time, to avoid repeatedly traversing this possibly long
    // list each time around the scheduler.
    if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }

    scheduleCheckWakeupThreads(cap);

    scheduleCheckBlockedThreads(cap);

    scheduleDetectDeadlock(cap,task);
#if defined(THREADED_RTS)
    cap = task->cap;    // reload cap, it might have changed

    // Normally, the only way we can get here with no threads to
    // run is if a keyboard interrupt was received during
    // scheduleCheckBlockedThreads() or scheduleDetectDeadlock().
    // Additionally, it is not fatal for the
    // threaded RTS to reach here with no threads to run.
    //
    // win32: might be here due to awaitEvent() being abandoned
    // as a result of a console event having been delivered.
    if ( emptyRunQueue(cap) ) {
#if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
        ASSERT(sched_state >= SCHED_INTERRUPTING);
        continue; // nothing to do

#if defined(PARALLEL_HASKELL)
    scheduleSendPendingMessages();
    if (emptyRunQueue(cap) && scheduleActivateSpark())
        ASSERT(next_fish_to_send_at==0);  // i.e. no delayed fishes left!

    /* If we still have no work we need to send a FISH to get a spark
    if (emptyRunQueue(cap)) {
        if (!scheduleGetRemoteWork(&receivedFinish)) continue;
        ASSERT(rtsFalse); // should not happen at the moment
    // from here: non-empty run queue.
    //  TODO: merge above case with this, only one call processMessages() !
    if (PacketsWaiting()) {  /* process incoming messages, if
                                any pending...  only in else
                                because getRemoteWork waits for
        receivedFinish = processMessages();
    scheduleProcessEvent(event);

    // Get a thread to run
    t = popRunQueue(cap);

#if defined(GRAN) || defined(PAR)
    scheduleGranParReport(); // some kind of debugging output
    // Sanity check the thread we're about to run.  This can be
    // expensive if there is lots of thread switching going on...
    IF_DEBUG(sanity,checkTSO(t));

#if defined(THREADED_RTS)
    // Check whether we can run this thread in the current task.
    // If not, we have to pass our capability to the right task.
        Task *bound = t->bound;
                debugTrace(DEBUG_sched,
                           "### Running thread %lu in bound thread", (unsigned long)t->id);
                // yes, the Haskell thread is bound to the current native thread
                debugTrace(DEBUG_sched,
                           "### thread %lu bound to another OS thread", (unsigned long)t->id);
                // no, bound to a different Haskell thread: pass to that thread
                pushOnRunQueue(cap,t);
            // The thread we want to run is unbound.
                debugTrace(DEBUG_sched,
                           "### this OS thread cannot run thread %lu", (unsigned long)t->id);
                // no, the current native thread is bound to a different
                // Haskell thread, so pass it to any worker thread
                pushOnRunQueue(cap,t);

    cap->r.rCurrentTSO = t;

    /* context switches are initiated by the timer signal, unless
     * the user specified "context switch as often as possible", with
     * +RTS -C0 */
    if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
        && !emptyThreadQueues(cap)) {
    debugTrace(DEBUG_sched, "-->> running thread %ld %s ...",
               (long)t->id, whatNext_strs[t->what_next]);

    startHeapProfTimer();

    // Check for exceptions blocked on this thread
    maybePerformBlockedException (cap, t);

    // ----------------------------------------------------------------------
    // Run the current thread

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
    ASSERT(t->cap == cap);

    prev_what_next = t->what_next;

    errno = t->saved_errno;
    SetLastError(t->saved_winerror);

    cap->in_haskell = rtsTrue;

    recent_activity = ACTIVITY_YES;

    switch (prev_what_next) {
        /* Thread already finished, return to scheduler. */
        ret = ThreadFinished;
            r = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
            cap = regTableToCapability(r);
    case ThreadInterpret:
        cap = interpretBCO(cap);
        barf("schedule: invalid what_next field");

    cap->in_haskell = rtsFalse;

    // The TSO might have moved, eg. if it re-entered the RTS and a GC
    // happened. So find the new location:
    t = cap->r.rCurrentTSO;

    // We have run some Haskell code: there might be blackhole-blocked
    // threads to wake up now.
    // Lock-free test here should be ok, we're just setting a flag.
    if ( blackhole_queue != END_TSO_QUEUE ) {
        blackholes_need_checking = rtsTrue;

    // And save the current errno in this thread.
    // XXX: possibly bogus for SMP because this thread might already
    // be running again, see code below.
    t->saved_errno = errno;
    // Similarly for Windows error code
    t->saved_winerror = GetLastError();

#if defined(THREADED_RTS)
    // If ret is ThreadBlocked, and this Task is bound to the TSO that
    // blocked, we are in limbo - the TSO is now owned by whatever it
    // is blocked on, and may in fact already have been woken up,
    // perhaps even on a different Capability.  It may be the case
    // that task->cap != cap.  We better yield this Capability
    // immediately and return to normality.
    if (ret == ThreadBlocked) {
        debugTrace(DEBUG_sched,
                   "--<< thread %lu (%s) stopped: blocked",
                   (unsigned long)t->id, whatNext_strs[t->what_next]);

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
    ASSERT(t->cap == cap);

    // ----------------------------------------------------------------------

    // Costs for the scheduler are assigned to CCS_SYSTEM
#if defined(PROFILING)

    schedulePostRunThread();

    ready_to_gc = rtsFalse;
        ready_to_gc = scheduleHandleHeapOverflow(cap,t);
        scheduleHandleStackOverflow(cap,task,t);
        if (scheduleHandleYield(cap, t, prev_what_next)) {
            // shortcut for switching between compiler/interpreter:
        scheduleHandleThreadBlocked(t);
        if (scheduleHandleThreadFinished(cap, task, t)) return cap;
        ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
        barf("schedule: invalid thread return code %d", (int)ret);

    if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
        cap = scheduleDoGC(cap,task,rtsFalse);
  } /* end of while() */

  debugTrace(PAR_DEBUG_verbose,
             "== Leaving schedule() after having received Finish");
/* ----------------------------------------------------------------------------
 * Setting up the scheduler loop
 * ------------------------------------------------------------------------- */
schedulePreLoop(void)
    /* set up first event to get things going */
    /* ToDo: assign costs for system setup and init MainTSO ! */
    new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
              CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);

    debugTrace (DEBUG_gran,
                "GRAN: Init CurrentTSO (in schedule) = %p",
    IF_DEBUG(gran, G_TSO(CurrentTSO, 5));

    if (RtsFlags.GranFlags.Light) {
        /* Save current time; GranSim Light only */
        CurrentTSO->gran.clock = CurrentTime[CurrentProc];

/* -----------------------------------------------------------------------------
 * Push work to other Capabilities if we have some.
 * -------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
schedulePushWork(Capability *cap USED_IF_THREADS,
                 Task *task      USED_IF_THREADS)
    Capability *free_caps[n_capabilities], *cap0;

    // migration can be turned off with +RTS -qg
    if (!RtsFlags.ParFlags.migrate) return;

    // Check whether we have more threads on our run queue, or sparks
    // in our pool, that we could hand to another Capability.
    if ((emptyRunQueue(cap) || cap->run_queue_hd->link == END_TSO_QUEUE)
        && sparkPoolSizeCap(cap) < 2) {

    // First grab as many free Capabilities as we can.
    for (i=0, n_free_caps=0; i < n_capabilities; i++) {
        cap0 = &capabilities[i];
        if (cap != cap0 && tryGrabCapability(cap0,task)) {
            if (!emptyRunQueue(cap0) || cap0->returning_tasks_hd != NULL) {
                // it already has some work, we just grabbed it at
                // the wrong moment.  Or maybe it's deadlocked!
                releaseCapability(cap0);
                free_caps[n_free_caps++] = cap0;

    // we now have n_free_caps free capabilities stashed in
    // free_caps[].  Share our run queue equally with them.  This is
    // probably the simplest thing we could do; improvements we might
    // want to do include:
    //
    //   - giving high priority to moving relatively new threads, on
    //     the grounds that they haven't had time to build up a
    //     working set in the cache on this CPU/Capability.
    //
    //   - giving low priority to moving long-lived threads

    if (n_free_caps > 0) {
        StgTSO *prev, *t, *next;
        rtsBool pushed_to_all;

        debugTrace(DEBUG_sched, "excess threads on run queue and %d free capabilities, sharing...", n_free_caps);

        pushed_to_all = rtsFalse;

        if (cap->run_queue_hd != END_TSO_QUEUE) {
            prev = cap->run_queue_hd;
            prev->link = END_TSO_QUEUE;
            for (; t != END_TSO_QUEUE; t = next) {
                t->link = END_TSO_QUEUE;
                if (t->what_next == ThreadRelocated
                    || t->bound == task // don't move my bound thread
                    || tsoLocked(t)) {  // don't move a locked thread
                } else if (i == n_free_caps) {
                    pushed_to_all = rtsTrue;
                    debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
                    appendToRunQueue(free_caps[i],t);
                    if (t->bound) { t->bound->cap = free_caps[i]; }
                    t->cap = free_caps[i];
            cap->run_queue_tl = prev;

        // If there are some free capabilities that we didn't push any
        // threads to, then try to push a spark to each one.
        if (!pushed_to_all) {
            // i is the next free capability to push to
            for (; i < n_free_caps; i++) {
                if (emptySparkPoolCap(free_caps[i])) {
                    spark = findSpark(cap);
                        debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
                        newSpark(&(free_caps[i]->r), spark);

        // release the capabilities
        for (i = 0; i < n_free_caps; i++) {
            task->cap = free_caps[i];
            releaseCapability(free_caps[i]);
    task->cap = cap; // reset to point to our Capability.
/* ----------------------------------------------------------------------------
 * Start any pending signal handlers
 * ------------------------------------------------------------------------- */

#if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
scheduleStartSignalHandlers(Capability *cap)
    if (RtsFlags.MiscFlags.install_signal_handlers && signals_pending()) {
        // safe outside the lock
        startSignalHandlers(cap);
scheduleStartSignalHandlers(Capability *cap STG_UNUSED)

/* ----------------------------------------------------------------------------
 * Check for blocked threads that can be woken up.
 * ------------------------------------------------------------------------- */
scheduleCheckBlockedThreads(Capability *cap USED_IF_NOT_THREADS)
#if !defined(THREADED_RTS)
    // Check whether any waiting threads need to be woken up.  If the
    // run queue is empty, and there are no other tasks running, we
    // can wait indefinitely for something to happen.
    if ( !emptyQueue(blocked_queue_hd) || !emptyQueue(sleeping_queue) )
        awaitEvent( emptyRunQueue(cap) && !blackholes_need_checking );

/* ----------------------------------------------------------------------------
 * Check for threads woken up by other Capabilities
 * ------------------------------------------------------------------------- */
scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS)
#if defined(THREADED_RTS)
    // Any threads that were woken up by other Capabilities get
    // appended to our run queue.
    if (!emptyWakeupQueue(cap)) {
        ACQUIRE_LOCK(&cap->lock);
        if (emptyRunQueue(cap)) {
            cap->run_queue_hd = cap->wakeup_queue_hd;
            cap->run_queue_tl = cap->wakeup_queue_tl;
            cap->run_queue_tl->link = cap->wakeup_queue_hd;
            cap->run_queue_tl = cap->wakeup_queue_tl;
        cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE;
        RELEASE_LOCK(&cap->lock);
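        /* Illustrative note (not in the original file): the code above
         * splices the wakeup queue onto the run queue -- if the run
         * queue is empty it simply takes over the wakeup queue's head
         * and tail, otherwise the wakeup queue is linked on at the run
         * queue's tail -- and then resets the wakeup queue to empty,
         * all under cap->lock.
         */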
/* ----------------------------------------------------------------------------
 * Check for threads blocked on BLACKHOLEs that can be woken up
 * ------------------------------------------------------------------------- */
scheduleCheckBlackHoles (Capability *cap)
    if ( blackholes_need_checking ) // check without the lock first
        ACQUIRE_LOCK(&sched_mutex);
        if ( blackholes_need_checking ) {
            checkBlackHoles(cap);
            blackholes_need_checking = rtsFalse;
        RELEASE_LOCK(&sched_mutex);
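        /* Illustrative note (not in the original file): this is the
         * double-checked locking idiom -- an unlocked read of
         * blackholes_need_checking followed by a re-test under
         * sched_mutex.  The unlocked read is safe here because the
         * flag is advisory: a missed update just means the check
         * happens on a later pass around the scheduler.
         */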
/* ----------------------------------------------------------------------------
 * Detect deadlock conditions and attempt to resolve them.
 * ------------------------------------------------------------------------- */
scheduleDetectDeadlock (Capability *cap, Task *task)
#if defined(PARALLEL_HASKELL)
    // ToDo: add deadlock detection in GUM (similar to THREADED_RTS) -- HWL
    /*
     * Detect deadlock: when we have no threads to run, there are no
     * threads blocked, waiting for I/O, or sleeping, and all the
     * other tasks are waiting for work, we must have a deadlock of
     * some description. */
    if ( emptyThreadQueues(cap) )
#if defined(THREADED_RTS)
        /*
         * In the threaded RTS, we only check for deadlock if there
         * has been no activity in a complete timeslice.  This means
         * we won't eagerly start a full GC just because we don't have
         * any threads to run currently. */
        if (recent_activity != ACTIVITY_INACTIVE) return;
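        /* Illustrative note (not in the original file): recent_activity
         * is driven from the timer interrupt; roughly, it steps
         *
         *   ACTIVITY_YES -> ACTIVITY_MAYBE_NO -> ACTIVITY_INACTIVE
         *
         * when consecutive ticks see no Haskell execution.  The
         * scheduler resets it to ACTIVITY_YES whenever it runs a
         * thread, and to ACTIVITY_DONE_GC after the deadlock GC below.
         */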
    debugTrace(DEBUG_sched, "deadlocked, forcing major GC...");

    // Garbage collection can release some new threads due to
    // either (a) finalizers or (b) threads resurrected because
    // they are unreachable and will therefore be sent an
    // exception.  Any threads thus released will be immediately
    // runnable.
    cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/);

    recent_activity = ACTIVITY_DONE_GC;

    if ( !emptyRunQueue(cap) ) return;

#if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
    /* If we have user-installed signal handlers, then wait
     * for signals to arrive rather than bombing out with a
     * deadlock. */
    if ( RtsFlags.MiscFlags.install_signal_handlers && anyUserHandlers() ) {
        debugTrace(DEBUG_sched,
                   "still deadlocked, waiting for signals...");

        if (signals_pending()) {
            startSignalHandlers(cap);

        // either we have threads to run, or we were interrupted:
        ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING);

#if !defined(THREADED_RTS)
        /* Probably a real deadlock.  Send the current main thread the
         * Deadlock exception. */
        switch (task->tso->why_blocked) {
        case BlockedOnBlackHole:
        case BlockedOnException:
            throwToSingleThreaded(cap, task->tso,
                                  (StgClosure *)NonTermination_closure);
            barf("deadlock: main thread blocked in a strange way");
/* ----------------------------------------------------------------------------
 * Process an event (GRAN only)
 * ------------------------------------------------------------------------- */
scheduleProcessEvent(rtsEvent *event)
    if (RtsFlags.GranFlags.Light)
      GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc

    /* adjust time based on time-stamp */
    if (event->time > CurrentTime[CurrentProc] &&
        event->evttype != ContinueThread)
      CurrentTime[CurrentProc] = event->time;

    /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */
    if (!RtsFlags.GranFlags.Light)

    IF_DEBUG(gran, debugBelch("GRAN: switch by event-type\n"));

    /* main event dispatcher in GranSim */
    switch (event->evttype) {
      /* Should just be continuing execution */
    case ContinueThread:
      IF_DEBUG(gran, debugBelch("GRAN: doing ContinueThread\n"));
      /* ToDo: check assertion
      ASSERT(run_queue_hd != (StgTSO*)NULL &&
             run_queue_hd != END_TSO_QUEUE);
      */
      /* Ignore ContinueThreads for fetching threads (if synchr comm) */
      if (!RtsFlags.GranFlags.DoAsyncFetch &&
          procStatus[CurrentProc]==Fetching) {
        debugBelch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]\n",
              CurrentTSO->id, CurrentTSO, CurrentProc);
      /* Ignore ContinueThreads for completed threads */
      if (CurrentTSO->what_next == ThreadComplete) {
        debugBelch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)\n",
              CurrentTSO->id, CurrentTSO, CurrentProc);
      /* Ignore ContinueThreads for threads that are being migrated */
      if (PROCS(CurrentTSO)==Nowhere) {
        debugBelch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)\n",
              CurrentTSO->id, CurrentTSO, CurrentProc);
      /* The thread should be at the beginning of the run queue */
      if (CurrentTSO!=run_queue_hds[CurrentProc]) {
        debugBelch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread\n",
              CurrentTSO->id, CurrentTSO, CurrentProc);
        break; // run the thread anyway
      new_event(proc, proc, CurrentTime[proc],
                (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
      */ /* Catches superfluous CONTINUEs -- should be unnecessary */
      break; // now actually run the thread; DaH Qu'vam yImuHbej

      do_the_fetchnode(event);
      goto next_thread; /* handle next event in event queue  */

      do_the_globalblock(event);
      goto next_thread; /* handle next event in event queue  */

      do_the_fetchreply(event);
      goto next_thread; /* handle next event in event queue  */

    case UnblockThread:   /* Move from the blocked queue to the tail of */
      do_the_unblock(event);
      goto next_thread; /* handle next event in event queue  */

    case ResumeThread:    /* Move from the blocked queue to the tail of */
      /* the runnable queue ( i.e. Qu' SImqa'lu') */
      event->tso->gran.blocktime +=
        CurrentTime[CurrentProc] - event->tso->gran.blockedat;
      do_the_startthread(event);
      goto next_thread; /* handle next event in event queue  */

      do_the_startthread(event);
      goto next_thread; /* handle next event in event queue  */

      do_the_movethread(event);
      goto next_thread; /* handle next event in event queue  */

      do_the_movespark(event);
      goto next_thread; /* handle next event in event queue  */

      do_the_findwork(event);
      goto next_thread; /* handle next event in event queue  */

      barf("Illegal event type %u\n", event->evttype);

    /* This point was scheduler_loop in the old RTS */
    IF_DEBUG(gran, debugBelch("GRAN: after main switch\n"));

    TimeOfLastEvent = CurrentTime[CurrentProc];
    TimeOfNextEvent = get_time_of_next_event();
    IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK
    // CurrentTSO = ThreadQueueHd;

    IF_DEBUG(gran, debugBelch("GRAN: time of next event is: %ld\n",

    if (RtsFlags.GranFlags.Light)
      GranSimLight_leave_system(event, &ActiveTSO);

    EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice;
              debugBelch("GRAN: end of time-slice is %#lx\n", EndOfTimeSlice));

    /* in a GranSim setup the TSO stays on the run queue */
    /* Take a thread from the run queue. */
    POP_RUN_QUEUE(t); // take_off_run_queue(t);
              debugBelch("GRAN: About to run current thread, which is\n");

    context_switch = 0; // turned on via GranYield, checking events and time slice
              DumpGranEvent(GR_SCHEDULE, t));

    procStatus[CurrentProc] = Busy;
/* ----------------------------------------------------------------------------
 * Send pending messages (PARALLEL_HASKELL only)
 * ------------------------------------------------------------------------- */

#if defined(PARALLEL_HASKELL)
scheduleSendPendingMessages(void)
# if defined(PAR) // global Mem.Mgmt., omit for now
    if (PendingFetches != END_BF_QUEUE) {

    if (RtsFlags.ParFlags.BufferTime) {
        // if we use message buffering, we must send away all message
        // packets which have become too old...

/* ----------------------------------------------------------------------------
 * Activate spark threads (PARALLEL_HASKELL only)
 * ------------------------------------------------------------------------- */

#if defined(PARALLEL_HASKELL)
scheduleActivateSpark(void)
    ASSERT(emptyRunQueue());
    /* We get here if the run queue is empty and we want some work.
       We try to turn a spark into a thread, and add it to the run queue,
       from where it will be picked up in the next iteration of the
       scheduler loop. */

    /* :-[  no local threads => look out for local sparks */
    /* the spark pool for the current PE */
    pool = &(cap.r.rSparks); // JB: cap = (old) MainCap
    if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
        pool->hd < pool->tl) {
     * ToDo: add GC code check that we really have enough heap afterwards!!
     * If we're here (no runnable threads) and we have pending
     * sparks, we must have a space problem.  Get enough space
     * to turn one of those pending sparks into a thread...
      spark = findSpark(rtsFalse);            /* get a spark */
      if (spark != (rtsSpark) NULL) {
        tso = createThreadFromSpark(spark);   /* turn the spark into a thread */
        IF_PAR_DEBUG(fish, // schedule,
                     debugBelch("==== schedule: Created TSO %d (%p); %d threads active\n",
                                tso->id, tso, advisory_thread_count));

        if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
          IF_PAR_DEBUG(fish, // schedule,
                       debugBelch("==^^ failed to create thread from spark @ %lx\n",
          return rtsFalse; /* failed to generate a thread */
        }               /* otherwise fall through & pick-up new tso */
        IF_PAR_DEBUG(fish, // schedule,
                     debugBelch("==^^ no local sparks (spark pool contains only NFs: %d)\n",
                                spark_queue_len(pool)));
        return rtsFalse; /* failed to generate a thread */
      return rtsTrue;  /* success in generating a thread */
    } else { /* no more threads permitted or pool empty */
      return rtsFalse; /* failed to generateThread */
    tso = NULL; // avoid compiler warning only
    return rtsFalse; /* dummy in non-PAR setup */
#endif // PARALLEL_HASKELL
/* ----------------------------------------------------------------------------
 * Get work from a remote node (PARALLEL_HASKELL only)
 * ------------------------------------------------------------------------- */

#if defined(PARALLEL_HASKELL)
scheduleGetRemoteWork(rtsBool *receivedFinish)
    ASSERT(emptyRunQueue());

    if (RtsFlags.ParFlags.BufferTime) {
        IF_PAR_DEBUG(verbose,
                debugBelch("...send all pending data,"));
            for (i=1; i<=nPEs; i++)
              sendImmediately(i); // send all messages away immediately

    //++EDEN++ idle() , i.e. send all buffers, wait for work
    // suppress fishing in EDEN... just look for incoming messages
    // (blocking receive)
    IF_PAR_DEBUG(verbose,
               debugBelch("...wait for incoming messages...\n"));
    *receivedFinish = processMessages(); // blocking receive...

    // and reenter scheduling loop after having received something
    // (return rtsFalse below)

# else /* activate SPARKS machinery */
/* We get here if we have no work, tried to activate a local spark, but still
   have no work. We try to get a remote spark, by sending a FISH message.
   Thread migration should be added here, and triggered when a sequence of
   fishes returns without work. */
        delay = (RtsFlags.ParFlags.fishDelay!=0ll ? RtsFlags.ParFlags.fishDelay : 0ll);

        /* =8-[  no local sparks => look for work on other PEs */
        /*
         * We really have absolutely no work.  Send out a fish
         * (there may be some out there already), and wait for
         * something to arrive.  We clearly can't run any threads
         * until a SCHEDULE or RESUME arrives, and so that's what
         * we're hoping to see.  (Of course, we still have to
         * respond to other types of messages.) */
        rtsTime now = msTime() /*CURRENT_TIME*/;
        IF_PAR_DEBUG(verbose,
                     debugBelch("--  now=%ld\n", now));
        IF_PAR_DEBUG(fish, // verbose,
             if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
                 (last_fish_arrived_at!=0 &&
                  last_fish_arrived_at+delay > now)) {
               debugBelch("--$$ <%llu> delaying FISH until %llu (last fish %llu, delay %llu)\n",
                     now, last_fish_arrived_at+delay,
                     last_fish_arrived_at,

        if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
            advisory_thread_count < RtsFlags.ParFlags.maxThreads) { // send a FISH, but when?
          if (last_fish_arrived_at==0 ||
              (last_fish_arrived_at+delay <= now)) {           // send FISH now!
            /* outstandingFishes is set in sendFish, processFish;
               avoid flooding system with fishes via delay */
            next_fish_to_send_at = 0;
            /* ToDo: this should be done in the main scheduling loop to avoid the
                     busy wait here; not so bad if fish delay is very small  */
            int iq = 0; // DEBUGGING -- HWL
            next_fish_to_send_at = last_fish_arrived_at+delay; // remember when to send
            /* send a fish when ready, but process messages that arrive in the meantime */
              if (PacketsWaiting()) {
                *receivedFinish = processMessages();
            } while (!*receivedFinish || now<next_fish_to_send_at);
            // JB: This means the fish could become obsolete, if we receive
            // work. Better check for work again?
            // last line: while (!receivedFinish || !haveWork || now<...)
            // next line: if (receivedFinish || haveWork )

          if (*receivedFinish) // no need to send a FISH if we are finishing anyway
            return rtsFalse;  // NB: this will leave scheduler loop
                              // immediately after return!

          IF_PAR_DEBUG(fish, // verbose,
               debugBelch("--$$ <%llu> sent delayed fish (%d processMessages); active/total threads=%d/%d\n",now,iq,run_queue_len(),advisory_thread_count));

          // JB: IMHO, this should all be hidden inside sendFish(...)
          sendFish(pe, thisPE, NEW_FISH_AGE, NEW_FISH_HISTORY,

          // Global statistics: count no. of fishes
          if (RtsFlags.ParFlags.ParStats.Global &&
              RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
            globalParStats.tot_fish_mess++;

        /* delayed fishes must have been sent by now! */
        next_fish_to_send_at = 0;

        *receivedFinish = processMessages();
# endif /* SPARKS */

    /* NB: this function always returns rtsFalse, meaning the scheduler
       loop continues with the next iteration; a rtsTrue return code
       would mean success in finding work.  We enter this function
       if there is no local work, thus we have to send a fish, which takes
       time until it arrives with work; in the meantime we should process
       messages in the main loop;
    */
#endif // PARALLEL_HASKELL
/* ----------------------------------------------------------------------------
 * PAR/GRAN: Report stats & debugging info(?)
 * ------------------------------------------------------------------------- */

#if defined(PAR) || defined(GRAN)
scheduleGranParReport(void)
    ASSERT(run_queue_hd != END_TSO_QUEUE);

    /* Take a thread from the run queue, if we have work */
    POP_RUN_QUEUE(t);  // take_off_run_queue(END_TSO_QUEUE);

    /* If this TSO has got its outport closed in the meantime,
     *   it mustn't be run. Instead, we have to clean it up as if it was finished.
     * It has to be marked as TH_DEAD for this purpose.
     * If it is TH_TERM instead, it is supposed to have finished in the normal way.

       JB: TODO: investigate whether state change field could be nuked
             entirely and replaced by the normal tso state (whatnext
             field). All we want to do is to kill tsos from outside.
     */

    /* ToDo: write something to the log-file
    if (RTSflags.ParFlags.granSimStats && !sameThread)
        DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
    */

    /* the spark pool for the current PE */
    pool = &(cap.r.rSparks); //  cap = (old) MainCap

               debugBelch("--=^ %d threads, %d sparks on [%#x]\n",
                     run_queue_len(), spark_queue_len(pool), CURRENT_PROC));

               debugBelch("--=^ %d threads, %d sparks on [%#x]\n",
                     run_queue_len(), spark_queue_len(pool), CURRENT_PROC));

    if (RtsFlags.ParFlags.ParStats.Full &&
        (t->par.sparkname != (StgInt)0) && // only log spark generated threads
        (emitSchedule || // forced emit
         (t && LastTSO && t->id != LastTSO->id))) {
      /*
        we are running a different TSO, so write a schedule event to log file
        NB: If we use fair scheduling we also have to write a deschedule
            event for LastTSO; with unfair scheduling we know that the
            previous tso has blocked whenever we switch to another tso, so
            we don't need it in GUM for now
      */
      IF_PAR_DEBUG(fish, // schedule,
                   debugBelch("____ scheduling spark generated thread %d (%lx) (%lx) via a forced emit\n",t->id,t,t->par.sparkname));

      DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
                       GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
      emitSchedule = rtsFalse;
/* ----------------------------------------------------------------------------
 * After running a thread...
 * ------------------------------------------------------------------------- */
schedulePostRunThread(void)
    /* HACK 675: if the last thread didn't yield, make sure to print a
       SCHEDULE event to the log file when StgRunning the next thread, even
       if it is the same one as before */
        TimeOfLastYield = CURRENT_TIME;

    /* some statistics gathering in the parallel case */
#if defined(GRAN) || defined(PAR) || defined(EDEN)
        IF_DEBUG(gran, DumpGranEvent(GR_DESCHEDULE, t));
        globalGranStats.tot_heapover++;
        globalParStats.tot_heapover++;

                 DumpGranEvent(GR_DESCHEDULE, t));
        globalGranStats.tot_stackover++;
        // DumpGranEvent(GR_DESCHEDULE, t);
        globalParStats.tot_stackover++;

    case ThreadYielding:
                 DumpGranEvent(GR_DESCHEDULE, t));
        globalGranStats.tot_yields++;
        // DumpGranEvent(GR_DESCHEDULE, t);
        globalParStats.tot_yields++;

        debugTrace(DEBUG_sched,
                   "--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
                   t->id, t, whatNext_strs[t->what_next], t->block_info.closure,
                   (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
        if (t->block_info.closure!=(StgClosure*)NULL)
            print_bq(t->block_info.closure);

        // ??? needed; should emit block before
                 DumpGranEvent(GR_DESCHEDULE, t));
        prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
        ASSERT(procStatus[CurrentProc]==Busy ||
               ((procStatus[CurrentProc]==Fetching) &&
                (t->block_info.closure!=(StgClosure*)NULL)));
        if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
            !(!RtsFlags.GranFlags.DoAsyncFetch &&
              procStatus[CurrentProc]==Fetching))
            procStatus[CurrentProc] = Idle;

        //++PAR++  blockThread() writes the event (change?)

    case ThreadFinished:
        barf("parGlobalStats: unknown return code");
/* -----------------------------------------------------------------------------
 * Handle a thread that returned to the scheduler with ThreadHeapOverflow
 * -------------------------------------------------------------------------- */
scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
    // did the task ask for a large block?
    if (cap->r.rHpAlloc > BLOCK_SIZE) {
        // if so, get one and push it on the front of the nursery.
        blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;

        debugTrace(DEBUG_sched,
                   "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
                   (long)t->id, whatNext_strs[t->what_next], blocks);

        // don't do this if the nursery is (nearly) full, we'll GC first.
        if (cap->r.rCurrentNursery->link != NULL ||
            cap->r.rNursery->n_blocks == 1) {  // paranoia to prevent infinite loop
                                               // if the nursery has only one block.
            bd = allocGroup( blocks );
            cap->r.rNursery->n_blocks += blocks;

            // link the new group into the list
            bd->link = cap->r.rCurrentNursery;
            bd->u.back = cap->r.rCurrentNursery->u.back;
            if (cap->r.rCurrentNursery->u.back != NULL) {
                cap->r.rCurrentNursery->u.back->link = bd;
#if !defined(THREADED_RTS)
                ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
                       g0s0 == cap->r.rNursery);
                cap->r.rNursery->blocks = bd;
            cap->r.rCurrentNursery->u.back = bd;

            // initialise it as a nursery block.  We initialise the
            // step, gen_no, and flags field of *every* sub-block in
            // this large block, because this is easier than making
            // sure that we always find the block head of a large
            // block whenever we call Bdescr() (eg. evacuate() and
            // isAlive() in the GC would both have to do this, at
            // least).
                for (x = bd; x < bd + blocks; x++) {
                    x->step = cap->r.rNursery;

            // This assert can be a killer if the app is doing lots
            // of large block allocations.
            IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));

            // now update the nursery to point to the new block
            cap->r.rCurrentNursery = bd;

            // we might be unlucky and have another thread get on the
            // run queue before us and steal the large block, but in that
            // case the thread will just end up requesting another large
            // block.
            pushOnRunQueue(cap,t);
            return rtsFalse;  /* not actually GC'ing */

    debugTrace(DEBUG_sched,
               "--<< thread %ld (%s) stopped: HeapOverflow\n",
               (long)t->id, whatNext_strs[t->what_next]);
    ASSERT(!is_on_queue(t,CurrentProc));
#elif defined(PARALLEL_HASKELL)
    /* Currently we emit a DESCHEDULE event before GC in GUM.
       ToDo: either add separate event to distinguish SYSTEM time from rest
       or just nuke this DESCHEDULE (and the following SCHEDULE) */
    if (0 && RtsFlags.ParFlags.ParStats.Full) {
        DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
                         GR_DESCHEDULE, t, (StgClosure *)NULL, 0, 0);
        emitSchedule = rtsTrue;

    pushOnRunQueue(cap,t);
    /* actual GC is done at the end of the while loop in schedule() */
/* -----------------------------------------------------------------------------
 * Handle a thread that returned to the scheduler with ThreadStackOverflow
 * -------------------------------------------------------------------------- */
scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
    debugTrace (DEBUG_sched,
                "--<< thread %ld (%s) stopped, StackOverflow",
                (long)t->id, whatNext_strs[t->what_next]);

    /* just adjust the stack for this thread, then pop it back
     * on the run queue. */
        /* enlarge the stack */
        StgTSO *new_t = threadStackOverflow(cap, t);

        /* The TSO attached to this Task may have moved, so update the
         * pointer to it. */
        if (task->tso == t) {
        pushOnRunQueue(cap,new_t);

/* -----------------------------------------------------------------------------
 * Handle a thread that returned to the scheduler with ThreadYielding
 * -------------------------------------------------------------------------- */
scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
    // Reset the context switch flag.  We don't do this just before
    // running the thread, because that would mean we would lose ticks
    // during GC, which can lead to unfair scheduling (a thread hogs
    // the CPU because the tick always arrives during GC).  This way
    // penalises threads that do a lot of allocation, but that seems
    // better than the alternative.

    /* put the thread back on the run queue.  Then, if we're ready to
     * GC, check whether this is the last task to stop.  If so, wake
     * up the GC thread.  getThread will block during a GC until the
     * GC is finished. */
    if (t->what_next != prev_what_next) {
        debugTrace(DEBUG_sched,
                   "--<< thread %ld (%s) stopped to switch evaluators",
                   (long)t->id, whatNext_strs[t->what_next]);
        debugTrace(DEBUG_sched,
                   "--<< thread %ld (%s) stopped, yielding",
                   (long)t->id, whatNext_strs[t->what_next]);

    //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
    ASSERT(t->link == END_TSO_QUEUE);

    // Shortcut if we're just switching evaluators: don't bother
    // doing stack squeezing (which can be expensive), just run the
    // thread.
    if (t->what_next != prev_what_next) {

    ASSERT(!is_on_queue(t,CurrentProc));
    //debugBelch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
             checkThreadQsSanity(rtsTrue));

    addToRunQueue(cap,t);

    /* add a ContinueThread event to actually process the thread */
    new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
              t, (StgClosure*)NULL, (rtsSpark*)NULL);
        debugBelch("GRAN: eventq and runnableq after adding yielded thread to queue again:\n");
/* -----------------------------------------------------------------------------
 * Handle a thread that returned to the scheduler with ThreadBlocked
 * -------------------------------------------------------------------------- */
scheduleHandleThreadBlocked( StgTSO *t
#if !defined(GRAN) && !defined(DEBUG)
      debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: \n",
              t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
      if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));

    // ??? needed; should emit block before
             DumpGranEvent(GR_DESCHEDULE, t));
    prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
    ASSERT(procStatus[CurrentProc]==Busy ||
           ((procStatus[CurrentProc]==Fetching) &&
            (t->block_info.closure!=(StgClosure*)NULL)));
    if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
        !(!RtsFlags.GranFlags.DoAsyncFetch &&
          procStatus[CurrentProc]==Fetching))
        procStatus[CurrentProc] = Idle;

               debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: \n",
                     t->id, t, whatNext_strs[t->what_next], t->block_info.closure));
        if (t->block_info.closure!=(StgClosure*)NULL)
            print_bq(t->block_info.closure));

    /* Send a fetch (if BlockedOnGA) and dump event to log file */

    /* whatever we schedule next, we must log that schedule */
    emitSchedule = rtsTrue;

    // We don't need to do anything.  The thread is blocked, and it
    // has tidied up its stack and placed itself on whatever queue
    // it needs to be on.

    // ASSERT(t->why_blocked != NotBlocked);
    // Not true: for example,
    //    - in THREADED_RTS, the thread may already have been woken
    //      up by another Capability.  This actually happens: try
    //      conc023 +RTS -N2.
    //    - the thread may have woken itself up already, because
    //      threadPaused() might have raised a blocked throwTo
    //      exception, see maybePerformBlockedException().

    if (traceClass(DEBUG_sched)) {
        debugTraceBegin("--<< thread %lu (%s) stopped: ",
                        (unsigned long)t->id, whatNext_strs[t->what_next]);
        printThreadBlockage(t);

    /* Only for dumping event to log file
       ToDo: do I need this in GranSim, too?
/* -----------------------------------------------------------------------------
 * Handle a thread that returned to the scheduler with ThreadFinished
 * -------------------------------------------------------------------------- */
scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
    /* Need to check whether this was a main thread, and if so,
     * return with the return value.
     *
     * We also end up here if the thread kills itself with an
     * uncaught exception, see Exception.cmm.
     */
    debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished",
               (unsigned long)t->id, whatNext_strs[t->what_next]);

      endThread(t, CurrentProc); // clean-up the thread
#elif defined(PARALLEL_HASKELL)
      /* For now all are advisory -- HWL */
      //if(t->priority==AdvisoryPriority) ??
      advisory_thread_count--; // JB: Caution with this counter, buggy!

      if(t->dist.priority==RevalPriority)

# if defined(EDENOLD)
      // the thread could still have an outport... (BUG)
      if (t->eden.outport != -1) {
        // delete the outport for the tso which has finished...
        IF_PAR_DEBUG(eden_ports,
                     debugBelch("WARNING: Scheduler removes outport %d for TSO %d.\n",
                                t->eden.outport, t->id));
      // thread still in the process (HEAVY BUG! since outport has just been closed...)
      if (t->eden.epid != -1) {
        IF_PAR_DEBUG(eden_ports,
                     debugBelch("WARNING: Scheduler removes TSO %d from process %d .\n",
                                t->id, t->eden.epid));
        removeTSOfromProcess(t);

      if (RtsFlags.ParFlags.ParStats.Full &&
          !RtsFlags.ParFlags.ParStats.Suppressed)
        DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);

      //  t->par only contains statistics: left out for now...
           debugBelch("**** end thread: ended sparked thread %d (%lx); sparkname: %lx\n",
                      t->id,t,t->par.sparkname));
#endif // PARALLEL_HASKELL

      // Check whether the thread that just completed was a bound
      // thread, and if so return with the result.
      //
      // There is an assumption here that all thread completion goes
      // through this point; we need to make sure that if a thread
      // ends up in the ThreadKilled state, that it stays on the run
      // queue so it can be dealt with here.

          if (t->bound != task) {
#if !defined(THREADED_RTS)
              // Must be a bound thread that is not the topmost one.  Leave
              // it on the run queue until the stack has unwound to the
              // point where we can deal with this.  Leaving it on the run
              // queue also ensures that the garbage collector knows about
              // this thread and its return value (it gets dropped from the
              // all_threads list so there's no other way to find it).
              appendToRunQueue(cap,t);
              // this cannot happen in the threaded RTS, because a
              // bound thread can only be run by the appropriate Task.
              barf("finished bound thread that isn't mine");

          ASSERT(task->tso == t);

          if (t->what_next == ThreadComplete) {
              // NOTE: return val is tso->sp[1] (see StgStartup.hc)
              *(task->ret) = (StgClosure *)task->tso->sp[1];
              task->stat = Success;
              *(task->ret) = NULL;
              if (sched_state >= SCHED_INTERRUPTING) {
                  task->stat = Interrupted;
                  task->stat = Killed;

          removeThreadLabel((StgWord)task->tso->id);

          return rtsTrue; // tells schedule() to return
/* -----------------------------------------------------------------------------
 * Perform a heap census
 * -------------------------------------------------------------------------- */
scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
    // When we have +RTS -i0 and we're heap profiling, do a census at
    // every GC.  This lets us get repeatable runs for debugging.
    if (performHeapProfile ||
        (RtsFlags.ProfFlags.profileInterval==0 &&
         RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {

/* -----------------------------------------------------------------------------
 * Perform a garbage collection if necessary
 * -------------------------------------------------------------------------- */
scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
    rtsBool heap_census;
    static volatile StgWord waiting_for_gc;
    rtsBool was_waiting;

    // In order to GC, there must be no threads running Haskell code.
    // Therefore, the GC thread needs to hold *all* the capabilities,
    // and release them after the GC has completed.
    //
    // This seems to be the simplest way: previous attempts involved
    // making all the threads with capabilities give up their
    // capabilities and sleep except for the *last* one, which
    // actually did the GC.  But it's quite hard to arrange for all
    // the other tasks to sleep and stay asleep.

    was_waiting = cas(&waiting_for_gc, 0, 1);
            debugTrace(DEBUG_sched, "someone else is trying to GC...");
            if (cap) yieldCapability(&cap,task);
        } while (waiting_for_gc);
        return cap;  // NOTE: task->cap might have changed here

    for (i=0; i < n_capabilities; i++) {
        debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilities (%d/%d)", i, n_capabilities);
        if (cap != &capabilities[i]) {
            Capability *pcap = &capabilities[i];
            // we better hope this task doesn't get migrated to
            // another Capability while we're waiting for this one.
            // It won't, because load balancing happens while we have
            // all the Capabilities, but even so it's a slightly
            // unsavoury invariant.
            waitForReturnCapability(&pcap, task);
            if (pcap != &capabilities[i]) {
                barf("scheduleDoGC: got the wrong capability");

    waiting_for_gc = rtsFalse;
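
    // Illustrative note (not in the original file): waiting_for_gc
    // acts as a one-shot lock around the "grab all capabilities"
    // phase.  The cas() above lets exactly one task become the GC
    // leader (it swaps 0 -> 1 and sees the old value 0); every other
    // task sees a nonzero old value, yields its capability, and spins
    // until the leader clears the flag here.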
1990 /* Kick any transactions which are invalid back to their
1991 * atomically frames. When next scheduled they will try to
1992 * commit, this commit will fail and they will retry.
1997 for (t = all_threads; t != END_TSO_QUEUE; t = next) {
1998 if (t->what_next == ThreadRelocated) {
2001 next = t->global_link;
2003 // This is a good place to check for blocked
2004 // exceptions. It might be the case that a thread is
2005 // blocked on delivering an exception to a thread that
2006 // is also blocked - we try to ensure that this
2007 // doesn't happen in throwTo(), but it's too hard (or
2008 // impossible) to close all the race holes, so we
2009 // accept that some might get through and deal with
2010 // them here. A GC will always happen at some point,
2011 // even if the system is otherwise deadlocked.
2012 maybePerformBlockedException (&capabilities[0], t);
2014 if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
2015 if (!stmValidateNestOfTransactions (t -> trec)) {
2016 debugTrace(DEBUG_sched | DEBUG_stm,
2017 "trec %p found wasting its time", t);
2019 // strip the stack back to the
2020 // ATOMICALLY_FRAME, aborting the (nested)
2021 // transaction, and saving the stack of any
2022 // partially-evaluated thunks on the heap.
2023 throwToSingleThreaded_(&capabilities[0], t,
2024 NULL, rtsTrue, NULL);
2027 ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
2035 // so this happens periodically:
2036 if (cap) scheduleCheckBlackHoles(cap);
2038 IF_DEBUG(scheduler, printAllThreads());
2041 * We now have all the capabilities; if we're in an interrupting
2042 * state, then we should take the opportunity to delete all the
2043  * threads in the system.
2044  */
2045 if (sched_state >= SCHED_INTERRUPTING) {
2046 deleteAllThreads(&capabilities[0]);
2047 sched_state = SCHED_SHUTTING_DOWN;
2050 heap_census = scheduleNeedHeapProfile(rtsTrue);
2052 /* everybody back, start the GC.
2053 * Could do it in this thread, or signal a condition var
2054 * to do it in another thread. Either way, we need to
2055      * broadcast on gc_pending_cond afterward.
2056      */
2057 #if defined(THREADED_RTS)
2058     debugTrace(DEBUG_sched, "doing GC");
2059 #endif
2060     GarbageCollect(force_major || heap_census);
2061 
2062     if (heap_census) {
2063 	debugTrace(DEBUG_sched, "performing heap census");
2064 	heapCensus();
2065 	performHeapProfile = rtsFalse;
2068 #if defined(THREADED_RTS)
2069 // release our stash of capabilities.
2070 for (i = 0; i < n_capabilities; i++) {
2071 if (cap != &capabilities[i]) {
2072 task->cap = &capabilities[i];
2073 releaseCapability(&capabilities[i]);
2084 /* add a ContinueThread event to continue execution of current thread */
2085 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
2087 t, (StgClosure*)NULL, (rtsSpark*)NULL);
2089 debugBelch("GRAN: eventq and runnableq after Garbage collection:\n\n");
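/* A minimal standalone sketch (not RTS code; every name below is
 * invented) of the cas() gate that scheduleDoGC() uses above: the task
 * that flips the flag from 0 to 1 goes on to grab all the capabilities,
 * and everyone else yields until the flag drops back to 0.  Assumes
 * cas(p,o,n) from SMP.h, which returns the value previously stored at p.
 */
static volatile StgWord example_gc_gate = 0;

static rtsBool
example_try_enter_gate (void)
{
    if (cas(&example_gc_gate, 0, 1) != 0) {
	return rtsFalse;	// lost the race; wait for the winner
    }
    return rtsTrue;		// we own the gate; store 0 to release it
}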
2097 /* ---------------------------------------------------------------------------
2098 * Singleton fork(). Do not copy any running threads.
2099 * ------------------------------------------------------------------------- */
2101 pid_t
2102 forkProcess(HsStablePtr *entry
2103 #ifndef FORKPROCESS_PRIMOP_SUPPORTED
2104 	    STG_UNUSED
2105 #endif
2106 	   )
2107 {
2108 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
2114 #if defined(THREADED_RTS)
2115 if (RtsFlags.ParFlags.nNodes > 1) {
2116 errorBelch("forking not supported with +RTS -N<n> greater than 1");
2117 stg_exit(EXIT_FAILURE);
2121 debugTrace(DEBUG_sched, "forking!");
2123 // ToDo: for SMP, we should probably acquire *all* the capabilities
2128 if (pid) { // parent
2130 // just return the pid
2136 // Now, all OS threads except the thread that forked are
2137 // stopped. We need to stop all Haskell threads, including
2138 // those involved in foreign calls. Also we need to delete
2139 	// all Tasks, because they correspond to OS threads that are now gone.
2142 for (t = all_threads; t != END_TSO_QUEUE; t = next) {
2143 	    if (t->what_next == ThreadRelocated) {
2144 		next = t->link;
2145 	    } else {
2146 		next = t->global_link;
2147 // don't allow threads to catch the ThreadKilled
2148 // exception, but we do want to raiseAsync() because these
2149 // threads may be evaluating thunks that we need later.
2150 deleteThread_(cap,t);
2154 // Empty the run queue. It seems tempting to let all the
2155 // killed threads stay on the run queue as zombies to be
2156 // cleaned up later, but some of them correspond to bound
2157 // threads for which the corresponding Task does not exist.
2158 cap->run_queue_hd = END_TSO_QUEUE;
2159 cap->run_queue_tl = END_TSO_QUEUE;
2161     // Any suspended C-calling Tasks are no more, their OS threads don't exist now:
2163 cap->suspended_ccalling_tasks = NULL;
2165 // Empty the all_threads list. Otherwise, the garbage
2166 // collector may attempt to resurrect some of these threads.
2167 all_threads = END_TSO_QUEUE;
2169 // Wipe the task list, except the current Task.
2170 ACQUIRE_LOCK(&sched_mutex);
2171 for (task = all_tasks; task != NULL; task=task->all_link) {
2172 if (task != cap->running_task) {
2176 RELEASE_LOCK(&sched_mutex);
2178 #if defined(THREADED_RTS)
2179 // Wipe our spare workers list, they no longer exist. New
2180 // workers will be created if necessary.
2181 cap->spare_workers = NULL;
2182 cap->returning_tasks_hd = NULL;
2183 cap->returning_tasks_tl = NULL;
2186     // On Unix, all timers are reset in the child, so we need to start the timer again.
2190 cap = rts_evalStableIO(cap, entry, NULL); // run the action
2191 rts_checkSchedStatus("forkProcess",cap);
2194 hs_exit(); // clean up and exit
2195 stg_exit(EXIT_SUCCESS);
2197 #else /* !FORKPROCESS_PRIMOP_SUPPORTED */
2198 barf("forkProcess#: primop not supported on this platform, sorry!\n");
2203 /* ---------------------------------------------------------------------------
2204 * Delete all the threads in the system
2205 * ------------------------------------------------------------------------- */
2208 deleteAllThreads ( Capability *cap )
2210 // NOTE: only safe to call if we own all capabilities.
2213 debugTrace(DEBUG_sched,"deleting all threads");
2214 for (t = all_threads; t != END_TSO_QUEUE; t = next) {
2215 	if (t->what_next == ThreadRelocated) {
2216 	    next = t->link;
2217 	} else {
2218 	    next = t->global_link;
2219 	    deleteThread(cap,t);
2223 // The run queue now contains a bunch of ThreadKilled threads. We
2224 // must not throw these away: the main thread(s) will be in there
2225 // somewhere, and the main scheduler loop has to deal with it.
2226 // Also, the run queue is the only thing keeping these threads from
2227 // being GC'd, and we don't want the "main thread has been GC'd" panic.
2229 #if !defined(THREADED_RTS)
2230 ASSERT(blocked_queue_hd == END_TSO_QUEUE);
2231 ASSERT(sleeping_queue == END_TSO_QUEUE);
2235 /* -----------------------------------------------------------------------------
2236 Managing the suspended_ccalling_tasks list.
2237 Locks required: sched_mutex
2238 -------------------------------------------------------------------------- */
2241 suspendTask (Capability *cap, Task *task)
2243 ASSERT(task->next == NULL && task->prev == NULL);
2244 task->next = cap->suspended_ccalling_tasks;
2246 if (cap->suspended_ccalling_tasks) {
2247 cap->suspended_ccalling_tasks->prev = task;
2249 cap->suspended_ccalling_tasks = task;
2253 recoverSuspendedTask (Capability *cap, Task *task)
2255     if (task->prev) {
2256 	task->prev->next = task->next;
2257     } else {
2258 	ASSERT(cap->suspended_ccalling_tasks == task);
2259 	cap->suspended_ccalling_tasks = task->next;
2260     }
2261     if (task->next) {
2262 	task->next->prev = task->prev;
2263     }
2264     task->next = task->prev = NULL;
2267 /* ---------------------------------------------------------------------------
2268 * Suspending & resuming Haskell threads.
2270 * When making a "safe" call to C (aka _ccall_GC), the task gives back
2271 * its capability before calling the C function. This allows another
2272 * task to pick up the capability and carry on running Haskell
2273 * threads. It also means that if the C call blocks, it won't lock
2276 * The Haskell thread making the C call is put to sleep for the
2277  * duration of the call, on the suspended_ccalling_tasks queue. We
2278 * give out a token to the task, which it can use to resume the thread
2279 * on return from the C function.
2280 * ------------------------------------------------------------------------- */
2283 suspendThread (StgRegTable *reg)
2290 StgWord32 saved_winerror;
2293 saved_errno = errno;
2295 saved_winerror = GetLastError();
2298   /* assume that *reg is a pointer to the StgRegTable part of a Capability.
2299    */
2300 cap = regTableToCapability(reg);
2302 task = cap->running_task;
2303 tso = cap->r.rCurrentTSO;
2305 debugTrace(DEBUG_sched,
2306 "thread %lu did a safe foreign call",
2307 (unsigned long)cap->r.rCurrentTSO->id);
2309 // XXX this might not be necessary --SDM
2310 tso->what_next = ThreadRunGHC;
2312 threadPaused(cap,tso);
2314 if ((tso->flags & TSO_BLOCKEX) == 0) {
2315 tso->why_blocked = BlockedOnCCall;
2316 tso->flags |= TSO_BLOCKEX;
2317 	tso->flags &= ~TSO_INTERRUPTIBLE;
2318     } else {
2319 	tso->why_blocked = BlockedOnCCall_NoUnblockExc;
2322 // Hand back capability
2323 task->suspended_tso = tso;
2325 ACQUIRE_LOCK(&cap->lock);
2327 suspendTask(cap,task);
2328 cap->in_haskell = rtsFalse;
2329 releaseCapability_(cap);
2331 RELEASE_LOCK(&cap->lock);
2333 #if defined(THREADED_RTS)
2334 /* Preparing to leave the RTS, so ensure there's a native thread/task
2335        waiting to take over.
2336     */
2337 debugTrace(DEBUG_sched, "thread %lu: leaving RTS", (unsigned long)tso->id);
2340 errno = saved_errno;
2342 SetLastError(saved_winerror);
2348 resumeThread (void *task_)
2355 StgWord32 saved_winerror;
2358 saved_errno = errno;
2360 saved_winerror = GetLastError();
2364 // Wait for permission to re-enter the RTS with the result.
2365 waitForReturnCapability(&cap,task);
2366 // we might be on a different capability now... but if so, our
2367     // entry on the suspended_ccalling_tasks list will also have been migrated.
2370 // Remove the thread from the suspended list
2371 recoverSuspendedTask(cap,task);
2373 tso = task->suspended_tso;
2374 task->suspended_tso = NULL;
2375 tso->link = END_TSO_QUEUE;
2376 debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
2378 if (tso->why_blocked == BlockedOnCCall) {
2379 awakenBlockedExceptionQueue(cap,tso);
2380 tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
2383 /* Reset blocking status */
2384 tso->why_blocked = NotBlocked;
2386 cap->r.rCurrentTSO = tso;
2387 cap->in_haskell = rtsTrue;
2388 errno = saved_errno;
2390 SetLastError(saved_winerror);
2393 /* We might have GC'd, mark the TSO dirty again */
2396 IF_DEBUG(sanity, checkTSO(tso));
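/* A hedged sketch, not RTS code, of how the compiler-generated stub for
 * a "safe" foreign call is expected to use the pair above.  'slow_read'
 * is an invented blocking C function, used purely for illustration. */
extern int slow_read (int fd);		/* hypothetical blocking call */

static int
example_safe_ccall (StgRegTable *reg, int fd)
{
    void *token;
    int r;
    token = suspendThread(reg);	  // give the Capability away
    r = slow_read(fd);		  // may block; other Haskell threads run
    reg = resumeThread(token);	  // block until we hold a Capability again
    return r;			  // 'reg' now refers to our (possibly new) cap
}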
2401 /* ---------------------------------------------------------------------------
2404 * scheduleThread puts a thread on the end of the runnable queue.
2405 * This will usually be done immediately after a thread is created.
2406 * The caller of scheduleThread must create the thread using e.g.
2407 * createThread and push an appropriate closure
2408 * on this thread's stack before the scheduler is invoked.
2409 * ------------------------------------------------------------------------ */
2412 scheduleThread(Capability *cap, StgTSO *tso)
2414 // The thread goes at the *end* of the run-queue, to avoid possible
2415 // starvation of any threads already on the queue.
2416 appendToRunQueue(cap,tso);
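/* A hedged usage sketch -- createIOThread() is from RtsAPI.c, and
 * 'my_action' stands for the closure of some IO () value:
 *
 *	StgTSO *t = createIOThread(cap, RtsFlags.GcFlags.initialStkSize,
 *				   (StgClosure *)my_action);
 *	scheduleThread(cap, t);
 *
 * The new thread is then picked up the next time this Capability's
 * scheduler loop consults its run queue. */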
2420 scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
2422 #if defined(THREADED_RTS)
2423 tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
2424 // move this thread from now on.
2425 cpu %= RtsFlags.ParFlags.nNodes;
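    /* Worked example: with +RTS -N2 (nNodes == 2), a forkOn request for
     * cpu 5 arrives here and wraps to capability 1 (5 % 2 == 1); unless
     * that is already the current capability, the TSO is handed over
     * below with migrateThreadToCapability_lock(). */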
2426 if (cpu == cap->no) {
2427 	appendToRunQueue(cap,tso);
2428     } else {
2429 	migrateThreadToCapability_lock(&capabilities[cpu],tso);
2430     }
2431 #else
2432     appendToRunQueue(cap,tso);
2437 scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
2441 // We already created/initialised the Task
2442 task = cap->running_task;
2444 // This TSO is now a bound thread; make the Task and TSO
2445 // point to each other.
2451 task->stat = NoStatus;
2453 appendToRunQueue(cap,tso);
2455 debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
2458 /* GranSim specific init */
2459 CurrentTSO = m->tso; // the TSO to run
2460 procStatus[MainProc] = Busy; // status of main PE
2461 CurrentProc = MainProc; // PE to run it on
2464 cap = schedule(cap,task);
2466 ASSERT(task->stat != NoStatus);
2467 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
2469 debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id);
2473 /* ----------------------------------------------------------------------------
2475 * ------------------------------------------------------------------------- */
2477 #if defined(THREADED_RTS)
2479 workerStart(Task *task)
2483 // See startWorkerTask().
2484     ACQUIRE_LOCK(&task->lock);
2485     cap = task->cap;
2486     RELEASE_LOCK(&task->lock);
2488     // set the thread-local pointer to the Task:
2489     taskEnter(task);
2491 // schedule() runs without a lock.
2492 cap = schedule(cap,task);
2494 // On exit from schedule(), we have a Capability.
2495 releaseCapability(cap);
2496 workerTaskStop(task);
2500 /* ---------------------------------------------------------------------------
2503 * Initialise the scheduler. This resets all the queues - if the
2504  * queues contained any threads, they'll be garbage collected at the next GC.
2507 * ------------------------------------------------------------------------ */
2514   for (i=0; i < MAX_PROC; i++) {
2515 run_queue_hds[i] = END_TSO_QUEUE;
2516 run_queue_tls[i] = END_TSO_QUEUE;
2517 blocked_queue_hds[i] = END_TSO_QUEUE;
2518 blocked_queue_tls[i] = END_TSO_QUEUE;
2519 ccalling_threadss[i] = END_TSO_QUEUE;
2520 blackhole_queue[i] = END_TSO_QUEUE;
2521 sleeping_queue = END_TSO_QUEUE;
2523 #elif !defined(THREADED_RTS)
2524 blocked_queue_hd = END_TSO_QUEUE;
2525 blocked_queue_tl = END_TSO_QUEUE;
2526 sleeping_queue = END_TSO_QUEUE;
2529 blackhole_queue = END_TSO_QUEUE;
2530 all_threads = END_TSO_QUEUE;
2533 sched_state = SCHED_RUNNING;
2535 #if defined(THREADED_RTS)
2536   /* Initialise the mutex and condition variables used by
2537    * the scheduler. */
2538 initMutex(&sched_mutex);
2541 ACQUIRE_LOCK(&sched_mutex);
2543 /* A capability holds the state a native thread needs in
2544 * order to execute STG code. At least one capability is
2545    * floating around (only THREADED_RTS builds have more than one).
2546    */
2551 #if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
2555 #if defined(THREADED_RTS)
2557 * Eagerly start one worker to run each Capability, except for
2558 * Capability 0. The idea is that we're probably going to start a
2559 * bound thread on Capability 0 pretty soon, so we don't want a
2560    * worker task hogging it.
2561    */
2565 for (i = 1; i < n_capabilities; i++) {
2566 cap = &capabilities[i];
2567 ACQUIRE_LOCK(&cap->lock);
2568 startWorkerTask(cap, workerStart);
2569 RELEASE_LOCK(&cap->lock);
2574 trace(TRACE_sched, "start: %d capabilities", n_capabilities);
2576 RELEASE_LOCK(&sched_mutex);
2580 exitScheduler( rtsBool wait_foreign )
2581 /* see Capability.c, shutdownCapability() */
2585 #if defined(THREADED_RTS)
2586 ACQUIRE_LOCK(&sched_mutex);
2587 task = newBoundTask();
2588 RELEASE_LOCK(&sched_mutex);
2591 // If we haven't killed all the threads yet, do it now.
2592 if (sched_state < SCHED_SHUTTING_DOWN) {
2593 sched_state = SCHED_INTERRUPTING;
2594 scheduleDoGC(NULL,task,rtsFalse);
2596 sched_state = SCHED_SHUTTING_DOWN;
2598 #if defined(THREADED_RTS)
2602 for (i = 0; i < n_capabilities; i++) {
2603 shutdownCapability(&capabilities[i], task, wait_foreign);
2605 boundTaskExiting(task);
2609 freeCapability(&MainCapability);
2614 freeScheduler( void )
2617 if (n_capabilities != 1) {
2618 stgFree(capabilities);
2620 #if defined(THREADED_RTS)
2621 closeMutex(&sched_mutex);
2625 /* ---------------------------------------------------------------------------
2626 Where are the roots that we know about?
2628 - all the threads on the runnable queue
2629 - all the threads on the blocked queue
2630 - all the threads on the sleeping queue
2631       - all the threads currently executing a _ccall_GC
2632 - all the "main threads"
2634 ------------------------------------------------------------------------ */
2636 /* This has to be protected either by the scheduler monitor, or by the
2637    garbage collection monitor (probably the latter).
2638  */
2642 GetRoots( evac_fn evac )
2649 for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
2650 if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL)))
2651 evac((StgClosure **)&run_queue_hds[i]);
2652 if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL)))
2653 evac((StgClosure **)&run_queue_tls[i]);
2655 if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL)))
2656 evac((StgClosure **)&blocked_queue_hds[i]);
2657 if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL)))
2658 evac((StgClosure **)&blocked_queue_tls[i]);
2659 if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL)))
2660       evac((StgClosure **)&ccalling_threadss[i]);
2667 for (i = 0; i < n_capabilities; i++) {
2668 cap = &capabilities[i];
2669 evac((StgClosure **)(void *)&cap->run_queue_hd);
2670 evac((StgClosure **)(void *)&cap->run_queue_tl);
2671 #if defined(THREADED_RTS)
2672 evac((StgClosure **)(void *)&cap->wakeup_queue_hd);
2673 evac((StgClosure **)(void *)&cap->wakeup_queue_tl);
2675 	for (task = cap->suspended_ccalling_tasks; task != NULL;
2676 	     task=task->next) {
2677 debugTrace(DEBUG_sched,
2678 "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
2679 evac((StgClosure **)(void *)&task->suspended_tso);
2685 #if !defined(THREADED_RTS)
2686 evac((StgClosure **)(void *)&blocked_queue_hd);
2687 evac((StgClosure **)(void *)&blocked_queue_tl);
2688 evac((StgClosure **)(void *)&sleeping_queue);
2692 // evac((StgClosure **)&blackhole_queue);
2694 #if defined(THREADED_RTS) || defined(PARALLEL_HASKELL) || defined(GRAN)
2695 markSparkQueue(evac);
2698 #if defined(RTS_USER_SIGNALS)
2699 // mark the signal handlers (signals should be already blocked)
2700 if (RtsFlags.MiscFlags.install_signal_handlers) {
2701 markSignalHandlers(evac);
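/* Hedged illustration (not used by the RTS): an evac_fn is just a
 * callback applied to each root slot, so a trivial instance that merely
 * counts the non-empty roots could be handed straight to GetRoots(): */
static nat example_root_count = 0;

static void
example_count_root (StgClosure **root)
{
    if (*root != NULL) example_root_count++;
}
/* ... GetRoots(example_count_root); ... */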
2706 /* -----------------------------------------------------------------------------
2709 This is the interface to the garbage collector from Haskell land.
2710 We provide this so that external C code can allocate and garbage
2711 collect when called from Haskell via _ccall_GC.
2712 -------------------------------------------------------------------------- */
2715 performGC_(rtsBool force_major)
2718 // We must grab a new Task here, because the existing Task may be
2719 // associated with a particular Capability, and chained onto the
2720 // suspended_ccalling_tasks queue.
2721 ACQUIRE_LOCK(&sched_mutex);
2722 task = newBoundTask();
2723 RELEASE_LOCK(&sched_mutex);
2724 scheduleDoGC(NULL,task,force_major);
2725 boundTaskExiting(task);
2731 performGC_(rtsFalse);
2735 performMajorGC(void)
2737 performGC_(rtsTrue);
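/* Hedged sketch: external C code reached from Haskell through a safe
 * foreign import can simply call the wrappers above; the function name
 * below is invented for illustration. */
void
example_force_major_gc (void)
{
    performMajorGC();	// grabs a fresh bound Task and runs scheduleDoGC()
}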
2740 /* -----------------------------------------------------------------------------
2743 If the thread has reached its maximum stack size, then raise the
2744 StackOverflow exception in the offending thread. Otherwise
2745 relocate the TSO into a larger chunk of memory and adjust its stack
2747 -------------------------------------------------------------------------- */
2750 threadStackOverflow(Capability *cap, StgTSO *tso)
2752   nat new_stack_size, stack_words;
2753   lnat new_tso_size;
2754   StgPtr new_sp;
2755   StgTSO *dest;
2757 IF_DEBUG(sanity,checkTSO(tso));
2759 // don't allow throwTo() to modify the blocked_exceptions queue
2760 // while we are moving the TSO:
2761 lockClosure((StgClosure *)tso);
2763 if (tso->stack_size >= tso->max_stack_size && !(tso->flags & TSO_BLOCKEX)) {
2764 // NB. never raise a StackOverflow exception if the thread is
2765     // inside Control.Exception.block. It is impractical to protect
2766 // against stack overflow exceptions, since virtually anything
2767 // can raise one (even 'catch'), so this is the only sensible
2768 // thing to do here. See bug #767.
2770 debugTrace(DEBUG_gc,
2771 "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
2772 (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
2774 /* If we're debugging, just print out the top of the stack */
2775 	printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2776 					 tso->sp+64));
2778 // Send this thread the StackOverflow exception
2780 throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
2784 /* Try to double the current stack size. If that takes us over the
2785 * maximum stack size for this thread, then use the maximum instead.
2786    * Finally round up so the TSO ends up as a whole number of blocks.
2787    */
2788 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
2789 new_tso_size = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
2790 TSO_STRUCT_SIZE)/sizeof(W_);
2791 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
2792 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
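  /* Worked example (illustrative numbers only): a 4096-word stack that
   * has not hit max_stack_size doubles to 8192 words; TSO_STRUCT_SIZE is
   * added and the total rounded up to whole blocks (and to whole
   * megablocks, above), and new_stack_size is then recomputed from the
   * rounded new_tso_size so the rounding slop becomes usable stack. */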
2794 debugTrace(DEBUG_sched,
2795 "increasing stack size from %ld words to %d.",
2796 (long)tso->stack_size, new_stack_size);
2798 dest = (StgTSO *)allocate(new_tso_size);
2799 TICK_ALLOC_TSO(new_stack_size,0);
2801 /* copy the TSO block and the old stack into the new area */
2802 memcpy(dest,tso,TSO_STRUCT_SIZE);
2803 stack_words = tso->stack + tso->stack_size - tso->sp;
2804 new_sp = (P_)dest + new_tso_size - stack_words;
2805 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
2807   /* relocate the stack pointers... */
2808   dest->sp         = new_sp;
2809   dest->stack_size = new_stack_size;
2811 /* Mark the old TSO as relocated. We have to check for relocated
2812 * TSOs in the garbage collector and any primops that deal with TSOs.
2814 * It's important to set the sp value to just beyond the end
2815    * of the stack, so we don't attempt to scavenge any part of the
2816    * dead TSO's stack.
2817    */
2818   tso->what_next = ThreadRelocated;
2819   tso->link = dest;
2820   tso->sp = (P_)&(tso->stack[tso->stack_size]);
2821 tso->why_blocked = NotBlocked;
2823 IF_PAR_DEBUG(verbose,
2824 debugBelch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld\n",
2825 tso->id, tso, tso->stack_size);
2826 /* If we're debugging, just print out the top of the stack */
2827 	       printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2828 						tso->sp+64)));
2833 IF_DEBUG(sanity,checkTSO(dest));
2835 IF_DEBUG(scheduler,printTSO(dest));
2841 /* ---------------------------------------------------------------------------
2843 - usually called inside a signal handler so it mustn't do anything fancy.
2844 ------------------------------------------------------------------------ */
2847 interruptStgRts(void)
2849 sched_state = SCHED_INTERRUPTING;
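/* A minimal sketch of the intended use from a ^C handler (the handler
 * name is invented; the real handlers live in posix/Signals.c and
 * win32/ConsoleHandler.c): */
static void
example_sigint_handler (int sig STG_UNUSED)
{
    interruptStgRts();	 // only sets flags, so it is safe in a handler
}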
2854 /* -----------------------------------------------------------------------------
2857 This function causes at least one OS thread to wake up and run the
2858 scheduler loop. It is invoked when the RTS might be deadlocked, or
2859 an external event has arrived that may need servicing (eg. a
2860 keyboard interrupt).
2862 In the single-threaded RTS we don't do anything here; we only have
2863 one thread anyway, and the event that caused us to want to wake up
2864 will have interrupted any blocking system call in progress anyway.
2865 -------------------------------------------------------------------------- */
2870 #if defined(THREADED_RTS)
2871 // This forces the IO Manager thread to wakeup, which will
2872 // in turn ensure that some OS thread wakes up and runs the
2873     // scheduler loop, which will cause a GC and deadlock check.
2874     ioManagerWakeup();
2875 #endif
2878 /* -----------------------------------------------------------------------------
2881 * Check the blackhole_queue for threads that can be woken up. We do
2882 * this periodically: before every GC, and whenever the run queue is
2885 * An elegant solution might be to just wake up all the blocked
2886 * threads with awakenBlockedQueue occasionally: they'll go back to
2887 * sleep again if the object is still a BLACKHOLE. Unfortunately this
2888 * doesn't give us a way to tell whether we've actually managed to
2889 * wake up any threads, so we would be busy-waiting.
2891 * -------------------------------------------------------------------------- */
2894 checkBlackHoles (Capability *cap)
2896     StgTSO **prev, *t;
2897     rtsBool any_woke_up = rtsFalse;
2898     StgHalfWord type;
2900 // blackhole_queue is global:
2901 ASSERT_LOCK_HELD(&sched_mutex);
2903 debugTrace(DEBUG_sched, "checking threads blocked on black holes");
2905 // ASSUMES: sched_mutex
2906 prev = &blackhole_queue;
2907 t = blackhole_queue;
2908 while (t != END_TSO_QUEUE) {
2909 ASSERT(t->why_blocked == BlockedOnBlackHole);
2910 type = get_itbl(t->block_info.closure)->type;
2911 if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
2912 IF_DEBUG(sanity,checkTSO(t));
2913 t = unblockOne(cap, t);
2914 // urk, the threads migrate to the current capability
2915 // here, but we'd like to keep them on the original one.
2916 	    *prev = t;
2917 	    any_woke_up = rtsTrue;
2918 	} else {
2919 	    prev = &t->link;
2920 	    t = t->link;
2921 	}
2922     }
2923     return any_woke_up;
2927 /* -----------------------------------------------------------------------------
2930 This is used for interruption (^C) and forking, and corresponds to
2931    raising an exception but without letting the thread catch the
2932    exception.
2933 -------------------------------------------------------------------------- */
2936 deleteThread (Capability *cap, StgTSO *tso)
2938 // NOTE: must only be called on a TSO that we have exclusive
2939 // access to, because we will call throwToSingleThreaded() below.
2940 // The TSO must be on the run queue of the Capability we own, or
2941 // we must own all Capabilities.
2943 if (tso->why_blocked != BlockedOnCCall &&
2944 tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
2945 throwToSingleThreaded(cap,tso,NULL);
2949 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
2951 deleteThread_(Capability *cap, StgTSO *tso)
2952 { // for forkProcess only:
2953 // like deleteThread(), but we delete threads in foreign calls, too.
2955 if (tso->why_blocked == BlockedOnCCall ||
2956 tso->why_blocked == BlockedOnCCall_NoUnblockExc) {
2957 unblockOne(cap,tso);
2958 	tso->what_next = ThreadKilled;
2959     } else {
2960 	deleteThread(cap,tso);
2965 /* -----------------------------------------------------------------------------
2966 raiseExceptionHelper
2968   This function is called by the raise# primitive, just so that we can
2969   move some of the tricky bits of raising an exception from C-- into
2970   C. Who knows, it might be a useful re-usable thing here too.
2971 -------------------------------------------------------------------------- */
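/* For intuition (a sketch of the loop below, not extra machinery): the
 * walk starts at tso->sp; each UPDATE_FRAME's updatee is overwritten
 * with an indirection to a 'raise# exception' thunk, so a blackholed
 * thunk re-raises the exception if it is ever entered again; the walk
 * stops at the first frame that can handle the exception and returns
 * that frame's type to the C-- caller. */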
2974 raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
2976 Capability *cap = regTableToCapability(reg);
2977     StgThunk *raise_closure = NULL;
2978     StgPtr p, next;
2979     StgRetInfoTable *info;
2981 // This closure represents the expression 'raise# E' where E
2982     // is the exception raised. It is used to overwrite all the
2983     // thunks which are currently under evaluation.
2986 // OLD COMMENT (we don't have MIN_UPD_SIZE now):
2987 // LDV profiling: stg_raise_info has THUNK as its closure
2988 // type. Since a THUNK takes at least MIN_UPD_SIZE words in its
2989     // payload, MIN_UPD_SIZE is more appropriate than 1. It seems that
2990 // 1 does not cause any problem unless profiling is performed.
2991 // However, when LDV profiling goes on, we need to linearly scan
2992 // small object pool, where raise_closure is stored, so we should
2993 // use MIN_UPD_SIZE.
2995 // raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
2996 // sizeofW(StgClosure)+1);
3000 // Walk up the stack, looking for the catch frame. On the way,
3001 // we update any closures pointed to from update frames with the
3002     // raise closure that we just built.
3003     //
3004     p = tso->sp;
3005     while(1) {
3006 info = get_ret_itbl((StgClosure *)p);
3007 next = p + stack_frame_sizeW((StgClosure *)p);
3008 switch (info->i.type) {
3010 	case UPDATE_FRAME:
3011 	    // Only create raise_closure if we need to.
3012 	    if (raise_closure == NULL) {
3013 		raise_closure =
3014 		    (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
3015 		SET_HDR(raise_closure, &stg_raise_info, CCCS);
3016 		raise_closure->payload[0] = exception;
3017 	    }
3018 	    UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
3019 	    p = next;
3020 	    continue;
3022 case ATOMICALLY_FRAME:
3023 debugTrace(DEBUG_stm, "found ATOMICALLY_FRAME at %p", p);
3024 	    tso->sp = p;
3025 	    return ATOMICALLY_FRAME;
3031 case CATCH_STM_FRAME:
3032 debugTrace(DEBUG_stm, "found CATCH_STM_FRAME at %p", p);
3033 	    tso->sp = p;
3034 	    return CATCH_STM_FRAME;
3040 case CATCH_RETRY_FRAME:
3049 /* -----------------------------------------------------------------------------
3050 findRetryFrameHelper
3052 This function is called by the retry# primitive. It traverses the stack
3053 leaving tso->sp referring to the frame which should handle the retry.
3055 This should either be a CATCH_RETRY_FRAME (if the retry# is within an orElse#)
3056 or should be a ATOMICALLY_FRAME (if the retry# reaches the top level).
3058 We skip CATCH_STM_FRAMEs (aborting and rolling back the nested tx that they
3059 create) because retries are not considered to be exceptions, despite the
3060 similar implementation.
3062 We should not expect to see CATCH_FRAME or STOP_FRAME because those should
3063 not be created within memory transactions.
3064 -------------------------------------------------------------------------- */
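/* For intuition, the stack during 'atomically (e1 `orElse` e2)' while
 * running e1 looks roughly like (innermost frame first):
 *
 *	... frames of e1 ...
 *	CATCH_RETRY_FRAME	-- a retry# inside e1 lands here, runs e2
 *	ATOMICALLY_FRAME	-- a retry# that exhausts both lands here
 *	... enclosing frames ...
 */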
3067 findRetryFrameHelper (StgTSO *tso)
3069   StgPtr           p, next;
3070   StgRetInfoTable *info;
3071 
3072   p = tso->sp;
3073   while (1) {
3074 info = get_ret_itbl((StgClosure *)p);
3075 next = p + stack_frame_sizeW((StgClosure *)p);
3076 switch (info->i.type) {
3078 case ATOMICALLY_FRAME:
3079 debugTrace(DEBUG_stm,
3080 "found ATOMICALLY_FRAME at %p during retry", p);
3081 	    tso->sp = p;
3082 	    return ATOMICALLY_FRAME;
3084 case CATCH_RETRY_FRAME:
3085 debugTrace(DEBUG_stm,
3086 		   "found CATCH_RETRY_FRAME at %p during retry", p);
3087 	    tso->sp = p;
3088 	    return CATCH_RETRY_FRAME;
3090 case CATCH_STM_FRAME: {
3091 debugTrace(DEBUG_stm,
3092 "found CATCH_STM_FRAME at %p during retry", p);
3093 StgTRecHeader *trec = tso -> trec;
3094 StgTRecHeader *outer = stmGetEnclosingTRec(trec);
3095 debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer);
3096 stmAbortTransaction(tso -> cap, trec);
3097 stmFreeAbortedTRec(tso -> cap, trec);
3098 	tso -> trec = outer;
3099 	p = next;
3100 	continue;
3101     }
3102 
3103     default:
3105 ASSERT(info->i.type != CATCH_FRAME);
3106 ASSERT(info->i.type != STOP_FRAME);
3113 /* -----------------------------------------------------------------------------
3114 resurrectThreads is called after garbage collection on the list of
3115 threads found to be garbage. Each of these threads will be woken
3116 up and sent a signal: BlockedOnDeadMVar if the thread was blocked
3117    on an MVar, or NonTermination if the thread was blocked on a Black Hole.
3120 Locks: assumes we hold *all* the capabilities.
3121 -------------------------------------------------------------------------- */
3124 resurrectThreads (StgTSO *threads)
3125 {
3126     StgTSO *tso, *next;
3127     Capability *cap;
3128 
3129 for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
3130 next = tso->global_link;
3131 	tso->global_link = all_threads;
3132 	all_threads = tso;
3133 debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);
3135 	// Wake up the thread on the Capability it was last on
3136 	cap = tso->cap;
3137 
3138 	switch (tso->why_blocked) {
3139 	case BlockedOnMVar:
3140 	case BlockedOnException:
3141 	    /* Called by GC - sched_mutex lock is currently held. */
3142 	    throwToSingleThreaded(cap, tso,
3143 				  (StgClosure *)BlockedOnDeadMVar_closure);
3144 	    break;
3145 	case BlockedOnBlackHole:
3146 	    throwToSingleThreaded(cap, tso,
3147 				  (StgClosure *)NonTermination_closure);
3148 	    break;
3149 	case BlockedOnSTM:
3150 	    throwToSingleThreaded(cap, tso,
3151 				  (StgClosure *)BlockedIndefinitely_closure);
3152 	    break;
3153 	case NotBlocked:
3154 	    /* This might happen if the thread was blocked on a black hole
3155 	     * belonging to a thread that we've just woken up (raiseAsync
3156 	     * can wake up threads, remember...).
3157 	     */
3158 	    continue;
3159 	default:
3160 	    barf("resurrectThreads: thread blocked in a strange way");