1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * The scheduler and thread-related functionality
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
10 #define KEEP_LOCKCLOSURE
15 #include "OSThreads.h"
20 #include "StgMiscClosures.h"
21 #include "Interpreter.h"
23 #include "RtsSignals.h"
29 #include "ThreadLabels.h"
30 #include "LdvProfile.h"
32 #include "Proftimer.h"
34 #if defined(GRAN) || defined(PARALLEL_HASKELL)
35 # include "GranSimRts.h"
37 # include "ParallelRts.h"
38 # include "Parallel.h"
39 # include "ParallelDebug.h"
44 #include "Capability.h"
46 #include "AwaitEvent.h"
47 #if defined(mingw32_HOST_OS)
48 #include "win32/IOManager.h"
51 #include "RaiseAsync.h"
53 #include "ThrIOManager.h"
55 #ifdef HAVE_SYS_TYPES_H
56 #include <sys/types.h>
70 // Turn off inlining when debugging - it obfuscates things
73 # define STATIC_INLINE static
76 /* -----------------------------------------------------------------------------
78 * -------------------------------------------------------------------------- */
82 StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */
83 /* rtsTime TimeOfNextEvent, EndOfTimeSlice; now in GranSim.c */
86 In GranSim we have a runnable and a blocked queue for each processor.
87 In order to minimise code changes, new arrays run_queue_hds/tls
88 are created. run_queue_hd is then a shortcut (macro) for
89 run_queue_hds[CurrentProc] (see GranSim.h).
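   A minimal sketch of that shortcut, assuming the convention just
   described (the real definitions live in GranSim.h, not here):

     #define run_queue_hd  (run_queue_hds[CurrentProc])
     #define run_queue_tl  (run_queue_tls[CurrentProc])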
92 StgTSO *run_queue_hds[MAX_PROC], *run_queue_tls[MAX_PROC];
93 StgTSO *blocked_queue_hds[MAX_PROC], *blocked_queue_tls[MAX_PROC];
94 StgTSO *ccalling_threadss[MAX_PROC];
95 /* We use the same global list of threads (all_threads) in GranSim as in
96 the std RTS (i.e. we are cheating). However, we don't use this list in
97 the GranSim specific code at the moment (so we are only potentially
102 #if !defined(THREADED_RTS)
103 // Blocked/sleeping threads
104 StgTSO *blocked_queue_hd = NULL;
105 StgTSO *blocked_queue_tl = NULL;
106 StgTSO *sleeping_queue = NULL; // perhaps replace with a hash table?
109 /* Threads blocked on blackholes.
110 * LOCK: sched_mutex+capability, or all capabilities
112 StgTSO *blackhole_queue = NULL;
115 /* The blackhole_queue should be checked for threads to wake up. See
116 * Schedule.h for more thorough comment.
117 * LOCK: none (doesn't matter if we miss an update)
119 rtsBool blackholes_need_checking = rtsFalse;
121 /* Linked list of all threads.
122 * Used for detecting garbage collected threads.
123 * LOCK: sched_mutex+capability, or all capabilities
125 StgTSO *all_threads = NULL;
127 /* flag set by signal handler to precipitate a context switch
128 * LOCK: none (just an advisory flag)
130 int context_switch = 0;
132 /* flag that tracks whether we have done any execution in this time slice.
133 * LOCK: currently none, perhaps we should lock (but needs to be
134 * updated in the fast path of the scheduler).
136 nat recent_activity = ACTIVITY_YES;
138 /* if this flag is set as well, give up execution
139  * LOCK: none (changes monotonically: SCHED_RUNNING -> SCHED_INTERRUPTING -> SCHED_SHUTTING_DOWN)
141 rtsBool sched_state = SCHED_RUNNING;
147 /* This is used in `TSO.h' and gcc 2.96 insists that this variable actually
148 * exists - earlier gccs apparently didn't.
154 * Set to TRUE when entering a shutdown state (via shutdownHaskellAndExit()) --
155 * in an MT setting, needed to signal that a worker thread shouldn't hang around
156 * in the scheduler when it is out of work.
158 rtsBool shutting_down_scheduler = rtsFalse;
161 * This mutex protects most of the global scheduler data in
162 * the THREADED_RTS runtime.
164 #if defined(THREADED_RTS)
168 #if defined(PARALLEL_HASKELL)
170 rtsTime TimeOfLastYield;
171 rtsBool emitSchedule = rtsTrue;
174 #if !defined(mingw32_HOST_OS)
175 #define FORKPROCESS_PRIMOP_SUPPORTED
178 /* -----------------------------------------------------------------------------
179 * static function prototypes
180 * -------------------------------------------------------------------------- */
182 static Capability *schedule (Capability *initialCapability, Task *task);
185 // These functions all encapsulate parts of the scheduler loop, and are
186 // abstracted only to make the structure and control flow of the
187 // scheduler clearer.
189 static void schedulePreLoop (void);
190 #if defined(THREADED_RTS)
191 static void schedulePushWork(Capability *cap, Task *task);
193 static void scheduleStartSignalHandlers (Capability *cap);
194 static void scheduleCheckBlockedThreads (Capability *cap);
195 static void scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS);
196 static void scheduleCheckBlackHoles (Capability *cap);
197 static void scheduleDetectDeadlock (Capability *cap, Task *task);
199 static StgTSO *scheduleProcessEvent(rtsEvent *event);
201 #if defined(PARALLEL_HASKELL)
202 static StgTSO *scheduleSendPendingMessages(void);
203 static void scheduleActivateSpark(void);
204 static rtsBool scheduleGetRemoteWork(rtsBool *receivedFinish);
206 #if defined(PAR) || defined(GRAN)
207 static void scheduleGranParReport(void);
209 static void schedulePostRunThread(void);
210 static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
211 static void scheduleHandleStackOverflow( Capability *cap, Task *task,
213 static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t,
214 nat prev_what_next );
215 static void scheduleHandleThreadBlocked( StgTSO *t );
216 static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
218 static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc);
219 static Capability *scheduleDoGC(Capability *cap, Task *task,
220 rtsBool force_major);
222 static rtsBool checkBlackHoles(Capability *cap);
224 static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
226 static void deleteThread (Capability *cap, StgTSO *tso);
227 static void deleteAllThreads (Capability *cap);
229 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
230 static void deleteThread_(Capability *cap, StgTSO *tso);
233 #if defined(PARALLEL_HASKELL)
234 StgTSO * createSparkThread(rtsSpark spark);
235 StgTSO * activateSpark (rtsSpark spark);
239 static char *whatNext_strs[] = {
249 /* -----------------------------------------------------------------------------
250 * Putting a thread on the run queue: different scheduling policies
251 * -------------------------------------------------------------------------- */
254 addToRunQueue( Capability *cap, StgTSO *t )
256 #if defined(PARALLEL_HASKELL)
257 if (RtsFlags.ParFlags.doFairScheduling) {
258 // this does round-robin scheduling; good for concurrency
259 appendToRunQueue(cap,t);
261 // this does unfair scheduling; good for parallelism
262 pushOnRunQueue(cap,t);
265 // this does round-robin scheduling; good for concurrency
266 appendToRunQueue(cap,t);
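    // An illustrative sketch of the difference between the two queue
    // operations (this is not their exact code, which lives elsewhere
    // in the RTS):
    //
    //   appendToRunQueue: t goes on the *tail* of the run queue, so
    //                     every runnable thread gets a turn (round-robin).
    //   pushOnRunQueue:   t goes on the *head*, so t runs next and keeps
    //                     its working set hot (unfair, good for parallelism).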
270 /* ---------------------------------------------------------------------------
271 Main scheduling loop.
273 We use round-robin scheduling, each thread returning to the
274 scheduler loop when one of these conditions is detected:
277 * timer expires (thread yields)
283 In a GranSim setup this loop iterates over the global event queue,
284 which determines what to do next. It is therefore more complicated
285 than either the concurrent or the parallel (GUM) setup.
289 GUM iterates over incoming messages.
290 It starts with nothing to do (thus CurrentTSO == END_TSO_QUEUE),
291 and sends out a fish whenever it has nothing to do; in-between
292 doing the actual reductions (shared code below) it processes the
293 incoming messages and deals with delayed operations
294 (see PendingFetches).
295 This is not the ugliest code you could imagine, but it's bloody close.
297 ------------------------------------------------------------------------ */
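/* A rough sketch of the loop's shape in the standard setup; the names
   are the real ones used below, but control flow is simplified and the
   THREADED/GRAN/PAR variations are omitted:

     while (TERMINATION_CONDITION) {
         // shutdown checks, signal handlers, deadlock detection ...
         t = popRunQueue(cap);
         ret = StgRun(...);               // run Haskell code
         switch (ret) {
             // heap/stack overflow, yield, block, finish ...
         }
         if (ready_to_gc) cap = scheduleDoGC(cap, task, rtsFalse);
     }
*/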
300 schedule (Capability *initialCapability, Task *task)
304 StgThreadReturnCode ret;
307 #elif defined(PARALLEL_HASKELL)
310 rtsBool receivedFinish = rtsFalse;
312 nat tp_size, sp_size; // stats only
317 #if defined(THREADED_RTS)
318 rtsBool first = rtsTrue;
321 cap = initialCapability;
323 // Pre-condition: this task owns initialCapability.
324 // The sched_mutex is *NOT* held
325 // NB. on return, we still hold a capability.
327 debugTrace (DEBUG_sched,
328 "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
329 task, initialCapability);
333 // -----------------------------------------------------------
334 // Scheduler loop starts here:
336 #if defined(PARALLEL_HASKELL)
337 #define TERMINATION_CONDITION (!receivedFinish)
339 #define TERMINATION_CONDITION ((event = get_next_event()) != (rtsEvent*)NULL)
341 #define TERMINATION_CONDITION rtsTrue
344 while (TERMINATION_CONDITION) {
347 /* Choose the processor with the next event */
348 CurrentProc = event->proc;
349 CurrentTSO = event->tso;
352 #if defined(THREADED_RTS)
354 // don't yield the first time, we want a chance to run this
355 // thread for a bit, even if there are others banging at the
358 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
360 // Yield the capability to higher-priority tasks if necessary.
361 yieldCapability(&cap, task);
365 #if defined(THREADED_RTS)
366 schedulePushWork(cap,task);
369 // Check whether we have re-entered the RTS from Haskell without
370 // going via suspendThread()/resumeThread() (i.e. a 'safe' foreign
372 if (cap->in_haskell) {
373 errorBelch("schedule: re-entered unsafely.\n"
374 " Perhaps a 'foreign import unsafe' should be 'safe'?");
375 stg_exit(EXIT_FAILURE);
378 // The interruption / shutdown sequence.
380 // In order to cleanly shut down the runtime, we want to:
381 // * make sure that all main threads return to their callers
382 // with the state 'Interrupted'.
383 // * clean up all OS threads associated with the runtime
384 // * free all memory etc.
386 // So the sequence for ^C goes like this:
388 // * ^C handler sets sched_state := SCHED_INTERRUPTING and
389 // arranges for some Capability to wake up
391 // * all threads in the system are halted, and the zombies are
392 // placed on the run queue for cleaning up. We acquire all
393 // the capabilities in order to delete the threads; this is
394 // done by scheduleDoGC() for convenience (because GC already
395 // needs to acquire all the capabilities). We can't kill
396 // threads involved in foreign calls.
398 // * somebody calls shutdownHaskell(), which calls exitScheduler()
400 // * sched_state := SCHED_SHUTTING_DOWN
402 // * all workers exit when the run queue on their capability
403 // drains. All main threads will also exit when their TSO
404 // reaches the head of the run queue and they can return.
406 // * eventually all Capabilities will shut down, and the RTS can
409 // * We might be left with threads blocked in foreign calls,
410 // we should really attempt to kill these somehow (TODO);
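    //
    // Summarising the states involved (a sketch derived from the
    // sequence above):
    //
    //   SCHED_RUNNING --(^C / shutdownHaskell())--> SCHED_INTERRUPTING
    //     --(scheduleDoGC() deletes the threads)--> SCHED_SHUTTING_DOWN
    //     --(run queues drain, workers exit)-->     RTS shuts down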
412 switch (sched_state) {
415 case SCHED_INTERRUPTING:
416 debugTrace(DEBUG_sched, "SCHED_INTERRUPTING");
417 #if defined(THREADED_RTS)
418 discardSparksCap(cap);
420 /* scheduleDoGC() deletes all the threads */
421 cap = scheduleDoGC(cap,task,rtsFalse);
423 case SCHED_SHUTTING_DOWN:
424 debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN");
425 // If we are a worker, just exit. If we're a bound thread
426 // then we will exit below when we've removed our TSO from
428 if (task->tso == NULL && emptyRunQueue(cap)) {
433 barf("sched_state: %d", sched_state);
436 #if defined(THREADED_RTS)
437 // If the run queue is empty, take a spark and turn it into a thread.
439 if (emptyRunQueue(cap)) {
441 spark = findSpark(cap);
443 debugTrace(DEBUG_sched,
444 "turning spark of closure %p into a thread",
445 (StgClosure *)spark);
446 createSparkThread(cap,spark);
450 #endif // THREADED_RTS
452 scheduleStartSignalHandlers(cap);
454 // Only check the black holes here if we've nothing else to do.
455 // During normal execution, the black hole list only gets checked
456 // at GC time, to avoid repeatedly traversing this possibly long
457 // list each time around the scheduler.
458 if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
460 scheduleCheckWakeupThreads(cap);
462 scheduleCheckBlockedThreads(cap);
464 scheduleDetectDeadlock(cap,task);
465 #if defined(THREADED_RTS)
466 cap = task->cap; // reload cap, it might have changed
469 // Normally, the only way we can get here with no threads to
470 // run is if a keyboard interrupt was received during
471 // scheduleCheckBlockedThreads() or scheduleDetectDeadlock().
472 // Additionally, it is not fatal for the
473 // threaded RTS to reach here with no threads to run.
475 // win32: might be here due to awaitEvent() being abandoned
476 // as a result of a console event having been delivered.
477 if ( emptyRunQueue(cap) ) {
478 #if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
479 ASSERT(sched_state >= SCHED_INTERRUPTING);
481 continue; // nothing to do
484 #if defined(PARALLEL_HASKELL)
485 scheduleSendPendingMessages();
486 if (emptyRunQueue(cap) && scheduleActivateSpark())
490 ASSERT(next_fish_to_send_at==0); // i.e. no delayed fishes left!
493 /* If we still have no work we need to send a FISH to get a spark
495 if (emptyRunQueue(cap)) {
496 if (!scheduleGetRemoteWork(&receivedFinish)) continue;
497 ASSERT(rtsFalse); // should not happen at the moment
499 // from here: non-empty run queue.
500 // TODO: merge the above case with this one; only one call to processMessages()!
501 if (PacketsWaiting()) { /* process incoming messages, if
502 any pending... only in else
503 because getRemoteWork waits for
505 receivedFinish = processMessages();
510 scheduleProcessEvent(event);
514 // Get a thread to run
516 t = popRunQueue(cap);
518 #if defined(GRAN) || defined(PAR)
519 scheduleGranParReport(); // some kind of debugging output
521 // Sanity check the thread we're about to run. This can be
522 // expensive if there is lots of thread switching going on...
523 IF_DEBUG(sanity,checkTSO(t));
526 #if defined(THREADED_RTS)
527 // Check whether we can run this thread in the current task.
528 // If not, we have to pass our capability to the right task.
530 Task *bound = t->bound;
534 debugTrace(DEBUG_sched,
535 "### Running thread %lu in bound thread", (unsigned long)t->id);
536 // yes, the Haskell thread is bound to the current native thread
538 debugTrace(DEBUG_sched,
539 "### thread %lu bound to another OS thread", (unsigned long)t->id);
540 // no, bound to a different Haskell thread: pass to that thread
541 pushOnRunQueue(cap,t);
545 // The thread we want to run is unbound.
547 debugTrace(DEBUG_sched,
548 "### this OS thread cannot run thread %lu", (unsigned long)t->id);
549 // no, the current native thread is bound to a different
550 // Haskell thread, so pass it to any worker thread
551 pushOnRunQueue(cap,t);
558 cap->r.rCurrentTSO = t;
560 /* context switches are initiated by the timer signal, unless
561 * the user specified "context switch as often as possible", with
564 if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
565 && !emptyThreadQueues(cap)) {
571 debugTrace(DEBUG_sched, "-->> running thread %ld %s ...",
572 (long)t->id, whatNext_strs[t->what_next]);
574 startHeapProfTimer();
576 // Check for exceptions blocked on this thread
577 maybePerformBlockedException (cap, t);
579 // ----------------------------------------------------------------------
580 // Run the current thread
582 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
583 ASSERT(t->cap == cap);
585 prev_what_next = t->what_next;
587 errno = t->saved_errno;
589 SetLastError(t->saved_winerror);
592 cap->in_haskell = rtsTrue;
596 #if defined(THREADED_RTS)
597 if (recent_activity == ACTIVITY_DONE_GC) {
598 // ACTIVITY_DONE_GC means we turned off the timer signal to
599 // conserve power (see #1623). Re-enable it here.
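    // Using an atomic xchg (rather than a plain store) means that if
    // several tasks observe ACTIVITY_DONE_GC at once, only the one
    // whose swap returns ACTIVITY_DONE_GC re-enables the timer.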
601 prev = xchg(&recent_activity, ACTIVITY_YES);
602 if (prev == ACTIVITY_DONE_GC) {
606 recent_activity = ACTIVITY_YES;
610 switch (prev_what_next) {
614 /* Thread already finished, return to scheduler. */
615 ret = ThreadFinished;
621 r = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
622 cap = regTableToCapability(r);
627 case ThreadInterpret:
628 cap = interpretBCO(cap);
633 barf("schedule: invalid what_next field");
636 cap->in_haskell = rtsFalse;
638 // The TSO might have moved, eg. if it re-entered the RTS and a GC
639 // happened. So find the new location:
640 t = cap->r.rCurrentTSO;
642 // We have run some Haskell code: there might be blackhole-blocked
643 // threads to wake up now.
644 // Lock-free test here should be ok, we're just setting a flag.
645 if ( blackhole_queue != END_TSO_QUEUE ) {
646 blackholes_need_checking = rtsTrue;
649 // And save the current errno in this thread.
650 // XXX: possibly bogus for SMP because this thread might already
651 // be running again, see code below.
652 t->saved_errno = errno;
654 // Similarly for Windows error code
655 t->saved_winerror = GetLastError();
658 #if defined(THREADED_RTS)
659 // If ret is ThreadBlocked, and this Task is bound to the TSO that
660 // blocked, we are in limbo - the TSO is now owned by whatever it
661 // is blocked on, and may in fact already have been woken up,
662 // perhaps even on a different Capability. It may be the case
663 // that task->cap != cap. We better yield this Capability
664 // immediately and return to normality.
665 if (ret == ThreadBlocked) {
666 debugTrace(DEBUG_sched,
667 "--<< thread %lu (%s) stopped: blocked",
668 (unsigned long)t->id, whatNext_strs[t->what_next]);
673 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
674 ASSERT(t->cap == cap);
676 // ----------------------------------------------------------------------
678 // Costs for the scheduler are assigned to CCS_SYSTEM
680 #if defined(PROFILING)
684 schedulePostRunThread();
686 ready_to_gc = rtsFalse;
690 ready_to_gc = scheduleHandleHeapOverflow(cap,t);
694 scheduleHandleStackOverflow(cap,task,t);
698 if (scheduleHandleYield(cap, t, prev_what_next)) {
699 // shortcut for switching between compiler/interpreter:
705 scheduleHandleThreadBlocked(t);
709 if (scheduleHandleThreadFinished(cap, task, t)) return cap;
710 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
714 barf("schedule: invalid thread return code %d", (int)ret);
717 if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
718 cap = scheduleDoGC(cap,task,rtsFalse);
720 } /* end of while() */
722 debugTrace(PAR_DEBUG_verbose,
723 "== Leaving schedule() after having received Finish");
726 /* ----------------------------------------------------------------------------
727 * Setting up the scheduler loop
728 * ------------------------------------------------------------------------- */
731 schedulePreLoop(void)
734 /* set up first event to get things going */
735 /* ToDo: assign costs for system setup and init MainTSO ! */
736 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
738 CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
740 debugTrace (DEBUG_gran,
741 "GRAN: Init CurrentTSO (in schedule) = %p",
743 IF_DEBUG(gran, G_TSO(CurrentTSO, 5));
745 if (RtsFlags.GranFlags.Light) {
746 /* Save current time; GranSim Light only */
747 CurrentTSO->gran.clock = CurrentTime[CurrentProc];
752 /* -----------------------------------------------------------------------------
755 * Push work to other Capabilities if we have some.
756 * -------------------------------------------------------------------------- */
758 #if defined(THREADED_RTS)
760 schedulePushWork(Capability *cap USED_IF_THREADS,
761 Task *task USED_IF_THREADS)
763 Capability *free_caps[n_capabilities], *cap0;
766 // migration can be turned off with +RTS -qg
767 if (!RtsFlags.ParFlags.migrate) return;
769 // Check whether we have more threads on our run queue, or sparks
770 // in our pool, that we could hand to another Capability.
771 if ((emptyRunQueue(cap) || cap->run_queue_hd->link == END_TSO_QUEUE)
772 && sparkPoolSizeCap(cap) < 2) {
776 // First grab as many free Capabilities as we can.
777 for (i=0, n_free_caps=0; i < n_capabilities; i++) {
778 cap0 = &capabilities[i];
779 if (cap != cap0 && tryGrabCapability(cap0,task)) {
780 if (!emptyRunQueue(cap0) || cap0->returning_tasks_hd != NULL) {
781 // it already has some work, we just grabbed it at
782 // the wrong moment. Or maybe it's deadlocked!
783 releaseCapability(cap0);
785 free_caps[n_free_caps++] = cap0;
790 // we now have n_free_caps free capabilities stashed in
791 // free_caps[]. Share our run queue equally with them. This is
792 // probably the simplest thing we could do; improvements we might
793 // want to do include:
795 // - giving high priority to moving relatively new threads, on
796 // the grounds that they haven't had time to build up a
797 // working set in the cache on this CPU/Capability.
799 // - giving low priority to moving long-lived threads
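    //
    // A worked example of "share equally" (a sketch of the arithmetic,
    // not the exact code below): with 8 runnable threads and
    // n_free_caps == 3, threads are dealt out round-robin over the 3
    // free capabilities plus ourselves, so each capability ends up
    // with roughly 8 / (3 + 1) == 2 threads.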
801 if (n_free_caps > 0) {
802 StgTSO *prev, *t, *next;
803 rtsBool pushed_to_all;
805 debugTrace(DEBUG_sched, "excess threads on run queue and %d free capabilities, sharing...", n_free_caps);
808 pushed_to_all = rtsFalse;
810 if (cap->run_queue_hd != END_TSO_QUEUE) {
811 prev = cap->run_queue_hd;
813 prev->link = END_TSO_QUEUE;
814 for (; t != END_TSO_QUEUE; t = next) {
816 t->link = END_TSO_QUEUE;
817 if (t->what_next == ThreadRelocated
818 || t->bound == task // don't move my bound thread
819 || tsoLocked(t)) { // don't move a locked thread
822 } else if (i == n_free_caps) {
823 pushed_to_all = rtsTrue;
829 debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
830 appendToRunQueue(free_caps[i],t);
831 if (t->bound) { t->bound->cap = free_caps[i]; }
832 t->cap = free_caps[i];
836 cap->run_queue_tl = prev;
839 // If there are some free capabilities that we didn't push any
840 // threads to, then try to push a spark to each one.
841 if (!pushed_to_all) {
843 // i is the next free capability to push to
844 for (; i < n_free_caps; i++) {
845 if (emptySparkPoolCap(free_caps[i])) {
846 spark = findSpark(cap);
848 debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
849 newSpark(&(free_caps[i]->r), spark);
855 // release the capabilities
856 for (i = 0; i < n_free_caps; i++) {
857 task->cap = free_caps[i];
858 releaseCapability(free_caps[i]);
861 task->cap = cap; // reset to point to our Capability.
865 /* ----------------------------------------------------------------------------
866 * Start any pending signal handlers
867 * ------------------------------------------------------------------------- */
869 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
871 scheduleStartSignalHandlers(Capability *cap)
873 if (RtsFlags.MiscFlags.install_signal_handlers && signals_pending()) {
874 // safe outside the lock
875 startSignalHandlers(cap);
880 scheduleStartSignalHandlers(Capability *cap STG_UNUSED)
885 /* ----------------------------------------------------------------------------
886 * Check for blocked threads that can be woken up.
887 * ------------------------------------------------------------------------- */
890 scheduleCheckBlockedThreads(Capability *cap USED_IF_NOT_THREADS)
892 #if !defined(THREADED_RTS)
894 // Check whether any waiting threads need to be woken up. If the
895 // run queue is empty, and there are no other tasks running, we
896 // can wait indefinitely for something to happen.
898 if ( !emptyQueue(blocked_queue_hd) || !emptyQueue(sleeping_queue) )
900 awaitEvent( emptyRunQueue(cap) && !blackholes_need_checking );
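        // NB: awaitEvent()'s argument is a "wait" flag: rtsTrue here
        // (empty run queue, nothing else to do) lets it block
        // indefinitely for I/O or timer events; rtsFalse makes it poll.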
906 /* ----------------------------------------------------------------------------
907 * Check for threads woken up by other Capabilities
908 * ------------------------------------------------------------------------- */
911 scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS)
913 #if defined(THREADED_RTS)
914 // Any threads that were woken up by other Capabilities get
915 // appended to our run queue.
916 if (!emptyWakeupQueue(cap)) {
917 ACQUIRE_LOCK(&cap->lock);
918 if (emptyRunQueue(cap)) {
919 cap->run_queue_hd = cap->wakeup_queue_hd;
920 cap->run_queue_tl = cap->wakeup_queue_tl;
922 cap->run_queue_tl->link = cap->wakeup_queue_hd;
923 cap->run_queue_tl = cap->wakeup_queue_tl;
925 cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE;
926 RELEASE_LOCK(&cap->lock);
931 /* ----------------------------------------------------------------------------
932 * Check for threads blocked on BLACKHOLEs that can be woken up
933 * ------------------------------------------------------------------------- */
935 scheduleCheckBlackHoles (Capability *cap)
937 if ( blackholes_need_checking ) // check without the lock first
939 ACQUIRE_LOCK(&sched_mutex);
940 if ( blackholes_need_checking ) {
941 checkBlackHoles(cap);
942 blackholes_need_checking = rtsFalse;
944 RELEASE_LOCK(&sched_mutex);
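    // The flag is re-tested under sched_mutex because another
    // capability may have cleared it between our unlocked check and
    // taking the lock; the unlocked check is only an optimisation
    // (blackholes_need_checking is advisory: LOCK "none", see above).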
948 /* ----------------------------------------------------------------------------
949 * Detect deadlock conditions and attempt to resolve them.
950 * ------------------------------------------------------------------------- */
953 scheduleDetectDeadlock (Capability *cap, Task *task)
956 #if defined(PARALLEL_HASKELL)
957 // ToDo: add deadlock detection in GUM (similar to THREADED_RTS) -- HWL
962 * Detect deadlock: when we have no threads to run, there are no
963 * threads blocked, waiting for I/O, or sleeping, and all the
964 * other tasks are waiting for work, we must have a deadlock of
967 if ( emptyThreadQueues(cap) )
969 #if defined(THREADED_RTS)
971 * In the threaded RTS, we only check for deadlock if there
972 * has been no activity in a complete timeslice. This means
973 * we won't eagerly start a full GC just because we don't have
974 * any threads to run currently.
976 if (recent_activity != ACTIVITY_INACTIVE) return;
979 debugTrace(DEBUG_sched, "deadlocked, forcing major GC...");
981 // Garbage collection can release some new threads due to
982 // either (a) finalizers or (b) threads resurrected because
983 // they are unreachable and will therefore be sent an
984 // exception. Any threads thus released will be immediately
986 cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/);
988 recent_activity = ACTIVITY_DONE_GC;
989 // disable timer signals (see #1623)
992 if ( !emptyRunQueue(cap) ) return;
994 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
995 /* If we have user-installed signal handlers, then wait
996 * for signals to arrive rather than bombing out with a
999 if ( RtsFlags.MiscFlags.install_signal_handlers && anyUserHandlers() ) {
1000 debugTrace(DEBUG_sched,
1001 "still deadlocked, waiting for signals...");
1005 if (signals_pending()) {
1006 startSignalHandlers(cap);
1009 // either we have threads to run, or we were interrupted:
1010 ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING);
1014 #if !defined(THREADED_RTS)
1015 /* Probably a real deadlock. Send the current main thread the
1016 * Deadlock exception.
1019 switch (task->tso->why_blocked) {
1021 case BlockedOnBlackHole:
1022 case BlockedOnException:
1024 throwToSingleThreaded(cap, task->tso,
1025 (StgClosure *)NonTermination_closure);
1028 barf("deadlock: main thread blocked in a strange way");
1036 /* ----------------------------------------------------------------------------
1037 * Process an event (GRAN only)
1038 * ------------------------------------------------------------------------- */
1042 scheduleProcessEvent(rtsEvent *event)
1046 if (RtsFlags.GranFlags.Light)
1047 GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc
1049 /* adjust time based on time-stamp */
1050 if (event->time > CurrentTime[CurrentProc] &&
1051 event->evttype != ContinueThread)
1052 CurrentTime[CurrentProc] = event->time;
1054 /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */
1055 if (!RtsFlags.GranFlags.Light)
1058 IF_DEBUG(gran, debugBelch("GRAN: switch by event-type\n"));
1060 /* main event dispatcher in GranSim */
1061 switch (event->evttype) {
1062 /* Should just be continuing execution */
1063 case ContinueThread:
1064 IF_DEBUG(gran, debugBelch("GRAN: doing ContinueThread\n"));
1065 /* ToDo: check assertion
1066 ASSERT(run_queue_hd != (StgTSO*)NULL &&
1067 run_queue_hd != END_TSO_QUEUE);
1069 /* Ignore ContinueThreads for fetching threads (if synchronous communication) */
1070 if (!RtsFlags.GranFlags.DoAsyncFetch &&
1071 procStatus[CurrentProc]==Fetching) {
1072 debugBelch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]\n",
1073 CurrentTSO->id, CurrentTSO, CurrentProc);
1076 /* Ignore ContinueThreads for completed threads */
1077 if (CurrentTSO->what_next == ThreadComplete) {
1078 debugBelch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)\n",
1079 CurrentTSO->id, CurrentTSO, CurrentProc);
1082 /* Ignore ContinueThreads for threads that are being migrated */
1083 if (PROCS(CurrentTSO)==Nowhere) {
1084 debugBelch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)\n",
1085 CurrentTSO->id, CurrentTSO, CurrentProc);
1088 /* The thread should be at the beginning of the run queue */
1089 if (CurrentTSO!=run_queue_hds[CurrentProc]) {
1090 debugBelch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread\n",
1091 CurrentTSO->id, CurrentTSO, CurrentProc);
1092 break; // run the thread anyway
1095 new_event(proc, proc, CurrentTime[proc],
1097 (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
1099 */ /* Catches superfluous CONTINUEs -- should be unnecessary */
1100 break; // now actually run the thread; DaH Qu'vam yImuHbej
1103 do_the_fetchnode(event);
1104 goto next_thread; /* handle next event in event queue */
1107 do_the_globalblock(event);
1108 goto next_thread; /* handle next event in event queue */
1111 do_the_fetchreply(event);
1112 goto next_thread; /* handle next event in event queue */
1114 case UnblockThread: /* Move from the blocked queue to the tail of */
1115 do_the_unblock(event);
1116 goto next_thread; /* handle next event in event queue */
1118 case ResumeThread: /* Move from the blocked queue to the tail of */
1119 /* the runnable queue ( i.e. Qu' SImqa'lu') */
1120 event->tso->gran.blocktime +=
1121 CurrentTime[CurrentProc] - event->tso->gran.blockedat;
1122 do_the_startthread(event);
1123 goto next_thread; /* handle next event in event queue */
1126 do_the_startthread(event);
1127 goto next_thread; /* handle next event in event queue */
1130 do_the_movethread(event);
1131 goto next_thread; /* handle next event in event queue */
1134 do_the_movespark(event);
1135 goto next_thread; /* handle next event in event queue */
1138 do_the_findwork(event);
1139 goto next_thread; /* handle next event in event queue */
1142 barf("Illegal event type %u\n", event->evttype);
1145 /* This point was scheduler_loop in the old RTS */
1147 IF_DEBUG(gran, debugBelch("GRAN: after main switch\n"));
1149 TimeOfLastEvent = CurrentTime[CurrentProc];
1150 TimeOfNextEvent = get_time_of_next_event();
1151 IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK
1152 // CurrentTSO = ThreadQueueHd;
1154 IF_DEBUG(gran, debugBelch("GRAN: time of next event is: %ld\n",
1157 if (RtsFlags.GranFlags.Light)
1158 GranSimLight_leave_system(event, &ActiveTSO);
1160 EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice;
1163 debugBelch("GRAN: end of time-slice is %#lx\n", EndOfTimeSlice));
1165 /* in a GranSim setup the TSO stays on the run queue */
1167 /* Take a thread from the run queue. */
1168 POP_RUN_QUEUE(t); // take_off_run_queue(t);
1171 debugBelch("GRAN: About to run current thread, which is\n");
1174 context_switch = 0; // turned on via GranYield, checking events and time slice
1177 DumpGranEvent(GR_SCHEDULE, t));
1179 procStatus[CurrentProc] = Busy;
1183 /* ----------------------------------------------------------------------------
1184 * Send pending messages (PARALLEL_HASKELL only)
1185 * ------------------------------------------------------------------------- */
1187 #if defined(PARALLEL_HASKELL)
1189 scheduleSendPendingMessages(void)
1195 # if defined(PAR) // global Mem.Mgmt., omit for now
1196 if (PendingFetches != END_BF_QUEUE) {
1201 if (RtsFlags.ParFlags.BufferTime) {
1202 // if we use message buffering, we must send away all message
1203 // packets which have become too old...
1209 /* ----------------------------------------------------------------------------
1210 * Activate spark threads (PARALLEL_HASKELL only)
1211 * ------------------------------------------------------------------------- */
1213 #if defined(PARALLEL_HASKELL)
1215 scheduleActivateSpark(void)
1218 ASSERT(emptyRunQueue());
1219 /* We get here if the run queue is empty and want some work.
1220 We try to turn a spark into a thread, and add it to the run queue,
1221 from where it will be picked up in the next iteration of the scheduler
1225 /* :-[ no local threads => look out for local sparks */
1226 /* the spark pool for the current PE */
1227 pool = &(cap.r.rSparks); // JB: cap = (old) MainCap
1228 if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
1229 pool->hd < pool->tl) {
1231 * ToDo: add GC code check that we really have enough heap afterwards!!
1233 * If we're here (no runnable threads) and we have pending
1234 * sparks, we must have a space problem. Get enough space
1235 * to turn one of those pending sparks into a
1239 spark = findSpark(rtsFalse); /* get a spark */
1240 if (spark != (rtsSpark) NULL) {
1241 tso = createThreadFromSpark(spark); /* turn the spark into a thread */
1242 IF_PAR_DEBUG(fish, // schedule,
1243 debugBelch("==== schedule: Created TSO %d (%p); %d threads active\n",
1244 tso->id, tso, advisory_thread_count));
1246 if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
1247 IF_PAR_DEBUG(fish, // schedule,
1248 debugBelch("==^^ failed to create thread from spark @ %lx\n",
1250 return rtsFalse; /* failed to generate a thread */
1251 } /* otherwise fall through & pick-up new tso */
1253 IF_PAR_DEBUG(fish, // schedule,
1254 debugBelch("==^^ no local sparks (spark pool contains only NFs: %d)\n",
1255 spark_queue_len(pool)));
1256 return rtsFalse; /* failed to generate a thread */
1258 return rtsTrue; /* success in generating a thread */
1259 } else { /* no more threads permitted or pool empty */
1260 return rtsFalse; /* failed to generateThread */
1263 tso = NULL; // avoid compiler warning only
1264 return rtsFalse; /* dummy in non-PAR setup */
1267 #endif // PARALLEL_HASKELL
1269 /* ----------------------------------------------------------------------------
1270 * Get work from a remote node (PARALLEL_HASKELL only)
1271 * ------------------------------------------------------------------------- */
1273 #if defined(PARALLEL_HASKELL)
1275 scheduleGetRemoteWork(rtsBool *receivedFinish)
1277 ASSERT(emptyRunQueue());
1279 if (RtsFlags.ParFlags.BufferTime) {
1280 IF_PAR_DEBUG(verbose,
1281 debugBelch("...send all pending data,"));
1284 for (i=1; i<=nPEs; i++)
1285 sendImmediately(i); // send all messages away immediately
1289 //++EDEN++ idle() , i.e. send all buffers, wait for work
1290 // suppress fishing in EDEN... just look for incoming messages
1291 // (blocking receive)
1292 IF_PAR_DEBUG(verbose,
1293 debugBelch("...wait for incoming messages...\n"));
1294 *receivedFinish = processMessages(); // blocking receive...
1296 // and reenter scheduling loop after having received something
1297 // (return rtsFalse below)
1299 # else /* activate SPARKS machinery */
1300 /* We get here if we have no work and have tried (and failed) to
1301 activate a local spark. We try to get a remote spark by sending a FISH
1302 message. Thread migration should be added here, and triggered when a
1303 sequence of fishes returns without work. */
1304 delay = (RtsFlags.ParFlags.fishDelay!=0ll ? RtsFlags.ParFlags.fishDelay : 0ll);
1306 /* =8-[ no local sparks => look for work on other PEs */
1308 * We really have absolutely no work. Send out a fish
1309 * (there may be some out there already), and wait for
1310 * something to arrive. We clearly can't run any threads
1311 * until a SCHEDULE or RESUME arrives, and so that's what
1312 * we're hoping to see. (Of course, we still have to
1313 * respond to other types of messages.)
1315 rtsTime now = msTime() /*CURRENT_TIME*/;
1316 IF_PAR_DEBUG(verbose,
1317 debugBelch("-- now=%ld\n", now));
1318 IF_PAR_DEBUG(fish, // verbose,
1319 if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
1320 (last_fish_arrived_at!=0 &&
1321 last_fish_arrived_at+delay > now)) {
1322 debugBelch("--$$ <%llu> delaying FISH until %llu (last fish %llu, delay %llu)\n",
1323 now, last_fish_arrived_at+delay,
1324 last_fish_arrived_at,
1328 if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
1329 advisory_thread_count < RtsFlags.ParFlags.maxThreads) { // send a FISH, but when?
1330 if (last_fish_arrived_at==0 ||
1331 (last_fish_arrived_at+delay <= now)) { // send FISH now!
1332 /* outstandingFishes is set in sendFish, processFish;
1333 avoid flooding system with fishes via delay */
1334 next_fish_to_send_at = 0;
1336 /* ToDo: this should be done in the main scheduling loop to avoid the
1337 busy wait here; not so bad if fish delay is very small */
1338 int iq = 0; // DEBUGGING -- HWL
1339 next_fish_to_send_at = last_fish_arrived_at+delay; // remember when to send
1340 /* send a fish when ready, but process messages that arrive in the meantime */
1342 if (PacketsWaiting()) {
1344 *receivedFinish = processMessages();
1347 } while (!*receivedFinish || now<next_fish_to_send_at);
1348 // JB: This means the fish could become obsolete, if we receive
1349 // work. Better check for work again?
1350 // last line: while (!receivedFinish || !haveWork || now<...)
1351 // next line: if (receivedFinish || haveWork )
1353 if (*receivedFinish) // no need to send a FISH if we are finishing anyway
1354 return rtsFalse; // NB: this will leave scheduler loop
1355 // immediately after return!
1357 IF_PAR_DEBUG(fish, // verbose,
1358 debugBelch("--$$ <%llu> sent delayed fish (%d processMessages); active/total threads=%d/%d\n",now,iq,run_queue_len(),advisory_thread_count));
1362 // JB: IMHO, this should all be hidden inside sendFish(...)
1364 sendFish(pe, thisPE, NEW_FISH_AGE, NEW_FISH_HISTORY,
1367 // Global statistics: count no. of fishes
1368 if (RtsFlags.ParFlags.ParStats.Global &&
1369 RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
1370 globalParStats.tot_fish_mess++;
1374 /* delayed fishes must have been sent by now! */
1375 next_fish_to_send_at = 0;
1378 *receivedFinish = processMessages();
1379 # endif /* SPARKS */
1382 /* NB: this function always returns rtsFalse, meaning the scheduler
1383 loop continues with the next iteration;
1385 return code means success in finding work; we enter this function
1386 only if there is no local work, and thus have to send a fish, which
1387 takes time until it arrives back with work; in the meantime we should
1388 process messages in the main loop;
1391 #endif // PARALLEL_HASKELL
1393 /* ----------------------------------------------------------------------------
1394 * PAR/GRAN: Report stats & debugging info(?)
1395 * ------------------------------------------------------------------------- */
1397 #if defined(PAR) || defined(GRAN)
1399 scheduleGranParReport(void)
1401 ASSERT(run_queue_hd != END_TSO_QUEUE);
1403 /* Take a thread from the run queue, if we have work */
1404 POP_RUN_QUEUE(t); // take_off_run_queue(END_TSO_QUEUE);
1406 /* If this TSO has got its outport closed in the meantime,
1407 * it mustn't be run. Instead, we have to clean it up as if it was finished.
1408 * It has to be marked as TH_DEAD for this purpose.
1409 * If it is TH_TERM instead, it is supposed to have finished in the normal way.
1411 JB: TODO: investigate whether state change field could be nuked
1412 entirely and replaced by the normal tso state (whatnext
1413 field). All we want to do is to kill tsos from outside.
1416 /* ToDo: write something to the log-file
1417 if (RTSflags.ParFlags.granSimStats && !sameThread)
1418 DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
1422 /* the spark pool for the current PE */
1423 pool = &(cap.r.rSparks); // cap = (old) MainCap
1426 debugBelch("--=^ %d threads, %d sparks on [%#x]\n",
1427 run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
1430 debugBelch("--=^ %d threads, %d sparks on [%#x]\n",
1431 run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
1433 if (RtsFlags.ParFlags.ParStats.Full &&
1434 (t->par.sparkname != (StgInt)0) && // only log spark generated threads
1435 (emitSchedule || // forced emit
1436 (t && LastTSO && t->id != LastTSO->id))) {
1438 we are running a different TSO, so write a schedule event to log file
1439 NB: If we use fair scheduling we also have to write a deschedule
1440 event for LastTSO; with unfair scheduling we know that the
1441 previous tso has blocked whenever we switch to another tso, so
1442 we don't need it in GUM for now
1444 IF_PAR_DEBUG(fish, // schedule,
1445 debugBelch("____ scheduling spark generated thread %d (%lx) (%lx) via a forced emit\n",t->id,t,t->par.sparkname));
1447 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
1448 GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
1449 emitSchedule = rtsFalse;
1454 /* ----------------------------------------------------------------------------
1455 * After running a thread...
1456 * ------------------------------------------------------------------------- */
1459 schedulePostRunThread(void)
1462 /* HACK 675: if the last thread didn't yield, make sure to print a
1463 SCHEDULE event to the log file when StgRunning the next thread, even
1464 if it is the same one as before */
1466 TimeOfLastYield = CURRENT_TIME;
1469 /* some statistics gathering in the parallel case */
1471 #if defined(GRAN) || defined(PAR) || defined(EDEN)
1475 IF_DEBUG(gran, DumpGranEvent(GR_DESCHEDULE, t));
1476 globalGranStats.tot_heapover++;
1478 globalParStats.tot_heapover++;
1485 DumpGranEvent(GR_DESCHEDULE, t));
1486 globalGranStats.tot_stackover++;
1489 // DumpGranEvent(GR_DESCHEDULE, t);
1490 globalParStats.tot_stackover++;
1494 case ThreadYielding:
1497 DumpGranEvent(GR_DESCHEDULE, t));
1498 globalGranStats.tot_yields++;
1501 // DumpGranEvent(GR_DESCHEDULE, t);
1502 globalParStats.tot_yields++;
1508 debugTrace(DEBUG_sched,
1509 "--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
1510 t->id, t, whatNext_strs[t->what_next], t->block_info.closure,
1511 (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
1512 if (t->block_info.closure!=(StgClosure*)NULL)
1513 print_bq(t->block_info.closure);
1516 // ??? needed; should emit block before
1518 DumpGranEvent(GR_DESCHEDULE, t));
1519 prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
1522 ASSERT(procStatus[CurrentProc]==Busy ||
1523 ((procStatus[CurrentProc]==Fetching) &&
1524 (t->block_info.closure!=(StgClosure*)NULL)));
1525 if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
1526 !(!RtsFlags.GranFlags.DoAsyncFetch &&
1527 procStatus[CurrentProc]==Fetching))
1528 procStatus[CurrentProc] = Idle;
1531 //++PAR++ blockThread() writes the event (change?)
1535 case ThreadFinished:
1539 barf("parGlobalStats: unknown return code");
1545 /* -----------------------------------------------------------------------------
1546 * Handle a thread that returned to the scheduler with ThreadHeapOverflow
1547 * -------------------------------------------------------------------------- */
1550 scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
1552 // did the task ask for a large block?
1553 if (cap->r.rHpAlloc > BLOCK_SIZE) {
1554 // if so, get one and push it on the front of the nursery.
1558 blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
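        // e.g. (assuming the usual 4Kbyte BLOCK_SIZE): an rHpAlloc of
        // 10000 bytes rounds up to 12288, giving blocks == 3.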
1560 debugTrace(DEBUG_sched,
1561 "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
1562 (long)t->id, whatNext_strs[t->what_next], blocks);
1564 // don't do this if the nursery is (nearly) full; we'll GC first.
1565 if (cap->r.rCurrentNursery->link != NULL ||
1566 cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop
1567 // if the nursery has only one block.
1570 bd = allocGroup( blocks );
1572 cap->r.rNursery->n_blocks += blocks;
1574 // link the new group into the list
1575 bd->link = cap->r.rCurrentNursery;
1576 bd->u.back = cap->r.rCurrentNursery->u.back;
1577 if (cap->r.rCurrentNursery->u.back != NULL) {
1578 cap->r.rCurrentNursery->u.back->link = bd;
1580 #if !defined(THREADED_RTS)
1581 ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
1582 g0s0 == cap->r.rNursery);
1584 cap->r.rNursery->blocks = bd;
1586 cap->r.rCurrentNursery->u.back = bd;
1588 // initialise it as a nursery block. We initialise the
1589 // step, gen_no, and flags field of *every* sub-block in
1590 // this large block, because this is easier than making
1591 // sure that we always find the block head of a large
1592 // block whenever we call Bdescr() (eg. evacuate() and
1593 // isAlive() in the GC would both have to do this, at
1597 for (x = bd; x < bd + blocks; x++) {
1598 x->step = cap->r.rNursery;
1604 // This assert can be a killer if the app is doing lots
1605 // of large block allocations.
1606 IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
1608 // now update the nursery to point to the new block
1609 cap->r.rCurrentNursery = bd;
1611 // we might be unlucky and have another thread get on the
1612 // run queue before us and steal the large block, but in that
1613 // case the thread will just end up requesting another large
1615 pushOnRunQueue(cap,t);
1616 return rtsFalse; /* not actually GC'ing */
1620 debugTrace(DEBUG_sched,
1621 "--<< thread %ld (%s) stopped: HeapOverflow\n",
1622 (long)t->id, whatNext_strs[t->what_next]);
1625 ASSERT(!is_on_queue(t,CurrentProc));
1626 #elif defined(PARALLEL_HASKELL)
1627 /* Currently we emit a DESCHEDULE event before GC in GUM.
1628 ToDo: either add separate event to distinguish SYSTEM time from rest
1629 or just nuke this DESCHEDULE (and the following SCHEDULE) */
1630 if (0 && RtsFlags.ParFlags.ParStats.Full) {
1631 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
1632 GR_DESCHEDULE, t, (StgClosure *)NULL, 0, 0);
1633 emitSchedule = rtsTrue;
1637 pushOnRunQueue(cap,t);
1639 /* actual GC is done at the end of the while loop in schedule() */
1642 /* -----------------------------------------------------------------------------
1643 * Handle a thread that returned to the scheduler with ThreadStackOverflow
1644 * -------------------------------------------------------------------------- */
1647 scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
1649 debugTrace (DEBUG_sched,
1650 "--<< thread %ld (%s) stopped, StackOverflow",
1651 (long)t->id, whatNext_strs[t->what_next]);
1653 /* just adjust the stack for this thread, then pop it back
1657 /* enlarge the stack */
1658 StgTSO *new_t = threadStackOverflow(cap, t);
1660 /* The TSO attached to this Task may have moved, so update the
1663 if (task->tso == t) {
1666 pushOnRunQueue(cap,new_t);
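    // NB: threadStackOverflow() may have returned a *new* TSO (the old
    // one is relocated into a larger stack), which is why task->tso and
    // the run queue must now refer to new_t rather than t.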
1670 /* -----------------------------------------------------------------------------
1671 * Handle a thread that returned to the scheduler with ThreadYielding
1672 * -------------------------------------------------------------------------- */
1675 scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
1677 // Reset the context switch flag. We don't do this just before
1678 // running the thread, because that would mean we would lose ticks
1679 // during GC, which can lead to unfair scheduling (a thread hogs
1680 // the CPU because the tick always arrives during GC). This way
1681 // penalises threads that do a lot of allocation, but that seems
1682 // better than the alternative.
1685 /* put the thread back on the run queue. Then, if we're ready to
1686 * GC, check whether this is the last task to stop. If so, wake
1687 * up the GC thread. getThread will block during a GC until the
1691 if (t->what_next != prev_what_next) {
1692 debugTrace(DEBUG_sched,
1693 "--<< thread %ld (%s) stopped to switch evaluators",
1694 (long)t->id, whatNext_strs[t->what_next]);
1696 debugTrace(DEBUG_sched,
1697 "--<< thread %ld (%s) stopped, yielding",
1698 (long)t->id, whatNext_strs[t->what_next]);
1703 //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
1705 ASSERT(t->link == END_TSO_QUEUE);
1707 // Shortcut if we're just switching evaluators: don't bother
1708 // doing stack squeezing (which can be expensive), just run the
1710 if (t->what_next != prev_what_next) {
1715 ASSERT(!is_on_queue(t,CurrentProc));
1718 //debugBelch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
1719 checkThreadQsSanity(rtsTrue));
1723 addToRunQueue(cap,t);
1726 /* add a ContinueThread event to actually process the thread */
1727 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
1729 t, (StgClosure*)NULL, (rtsSpark*)NULL);
1731 debugBelch("GRAN: eventq and runnableq after adding yielded thread to queue again:\n");
1738 /* -----------------------------------------------------------------------------
1739 * Handle a thread that returned to the scheduler with ThreadBlocked
1740 * -------------------------------------------------------------------------- */
1743 scheduleHandleThreadBlocked( StgTSO *t
1744 #if !defined(GRAN) && !defined(DEBUG)
1751 debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: \n",
1752 t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
1753 if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
1755 // ??? needed; should emit block before
1757 DumpGranEvent(GR_DESCHEDULE, t));
1758 prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
1761 ASSERT(procStatus[CurrentProc]==Busy ||
1762 ((procStatus[CurrentProc]==Fetching) &&
1763 (t->block_info.closure!=(StgClosure*)NULL)));
1764 if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
1765 !(!RtsFlags.GranFlags.DoAsyncFetch &&
1766 procStatus[CurrentProc]==Fetching))
1767 procStatus[CurrentProc] = Idle;
1771 debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: \n",
1772 t->id, t, whatNext_strs[t->what_next], t->block_info.closure));
1775 if (t->block_info.closure!=(StgClosure*)NULL)
1776 print_bq(t->block_info.closure));
1778 /* Send a fetch (if BlockedOnGA) and dump event to log file */
1781 /* whatever we schedule next, we must log that schedule */
1782 emitSchedule = rtsTrue;
1786 // We don't need to do anything. The thread is blocked, and it
1787 // has tidied up its stack and placed itself on whatever queue
1788 // it needs to be on.
1790 // ASSERT(t->why_blocked != NotBlocked);
1791 // Not true: for example,
1792 // - in THREADED_RTS, the thread may already have been woken
1793 // up by another Capability. This actually happens: try
1794 // conc023 +RTS -N2.
1795 // - the thread may have woken itself up already, because
1796 // threadPaused() might have raised a blocked throwTo
1797 // exception, see maybePerformBlockedException().
1800 if (traceClass(DEBUG_sched)) {
1801 debugTraceBegin("--<< thread %lu (%s) stopped: ",
1802 (unsigned long)t->id, whatNext_strs[t->what_next]);
1803 printThreadBlockage(t);
1808 /* Only for dumping event to log file
1809 ToDo: do I need this in GranSim, too?
1815 /* -----------------------------------------------------------------------------
1816 * Handle a thread that returned to the scheduler with ThreadFinished
1817 * -------------------------------------------------------------------------- */
1820 scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
1822 /* Need to check whether this was a main thread, and if so,
1823 * return with the return value.
1825 * We also end up here if the thread kills itself with an
1826 * uncaught exception, see Exception.cmm.
1828 debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished",
1829 (unsigned long)t->id, whatNext_strs[t->what_next]);
1832 endThread(t, CurrentProc); // clean-up the thread
1833 #elif defined(PARALLEL_HASKELL)
1834 /* For now all are advisory -- HWL */
1835 //if(t->priority==AdvisoryPriority) ??
1836 advisory_thread_count--; // JB: Caution with this counter, buggy!
1839 if(t->dist.priority==RevalPriority)
1843 # if defined(EDENOLD)
1844 // the thread could still have an outport... (BUG)
1845 if (t->eden.outport != -1) {
1846 // delete the outport for the tso which has finished...
1847 IF_PAR_DEBUG(eden_ports,
1848 debugBelch("WARNING: Scheduler removes outport %d for TSO %d.\n",
1849 t->eden.outport, t->id));
1852 // thread still in the process (HEAVY BUG! since outport has just been closed...)
1853 if (t->eden.epid != -1) {
1854 IF_PAR_DEBUG(eden_ports,
1855 debugBelch("WARNING: Scheduler removes TSO %d from process %d .\n",
1856 t->id, t->eden.epid));
1857 removeTSOfromProcess(t);
1862 if (RtsFlags.ParFlags.ParStats.Full &&
1863 !RtsFlags.ParFlags.ParStats.Suppressed)
1864 DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);
1866 // t->par only contains statistics: left out for now...
1868 debugBelch("**** end thread: ended sparked thread %d (%lx); sparkname: %lx\n",
1869 t->id,t,t->par.sparkname));
1871 #endif // PARALLEL_HASKELL
1874 // Check whether the thread that just completed was a bound
1875 // thread, and if so return with the result.
1877 // There is an assumption here that all thread completion goes
1878 // through this point; we need to make sure that if a thread
1879 // ends up in the ThreadKilled state, that it stays on the run
1880 // queue so it can be dealt with here.
1885 if (t->bound != task) {
1886 #if !defined(THREADED_RTS)
1887 // Must be a bound thread that is not the topmost one. Leave
1888 // it on the run queue until the stack has unwound to the
1889 // point where we can deal with this. Leaving it on the run
1890 // queue also ensures that the garbage collector knows about
1891 // this thread and its return value (it gets dropped from the
1892 // all_threads list so there's no other way to find it).
1893 appendToRunQueue(cap,t);
1896 // this cannot happen in the threaded RTS, because a
1897 // bound thread can only be run by the appropriate Task.
1898 barf("finished bound thread that isn't mine");
1902 ASSERT(task->tso == t);
1904 if (t->what_next == ThreadComplete) {
1906 // NOTE: return val is tso->sp[1] (see StgStartup.hc)
1907 *(task->ret) = (StgClosure *)task->tso->sp[1];
1909 task->stat = Success;
1912 *(task->ret) = NULL;
1914 if (sched_state >= SCHED_INTERRUPTING) {
1915 task->stat = Interrupted;
1917 task->stat = Killed;
1921 removeThreadLabel((StgWord)task->tso->id);
1923 return rtsTrue; // tells schedule() to return
1929 /* -----------------------------------------------------------------------------
1930 * Perform a heap census
1931 * -------------------------------------------------------------------------- */
1934 scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
1936 // When we have +RTS -i0 and we're heap profiling, do a census at
1937 // every GC. This lets us get repeatable runs for debugging.
1938 if (performHeapProfile ||
1939 (RtsFlags.ProfFlags.profileInterval==0 &&
1940 RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
1947 /* -----------------------------------------------------------------------------
1948 * Perform a garbage collection if necessary
1949 * -------------------------------------------------------------------------- */
1952 scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
1955 rtsBool heap_census;
1957 static volatile StgWord waiting_for_gc;
1958 rtsBool was_waiting;
1963 // In order to GC, there must be no threads running Haskell code.
1964 // Therefore, the GC thread needs to hold *all* the capabilities,
1965 // and release them after the GC has completed.
1967 // This seems to be the simplest way: previous attempts involved
1968 // making all the threads with capabilities give up their
1969 // capabilities and sleep except for the *last* one, which
1970 // actually did the GC. But it's quite hard to arrange for all
1971 // the other tasks to sleep and stay asleep.
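    //
    // The election below is a single compare-and-swap: the task whose
    // cas() flips waiting_for_gc from 0 to 1 (i.e. was_waiting == 0)
    // becomes the GC leader; every other task yields its capability
    // and waits until waiting_for_gc is cleared again.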
1974 was_waiting = cas(&waiting_for_gc, 0, 1);
1977 debugTrace(DEBUG_sched, "someone else is trying to GC...");
1978 if (cap) yieldCapability(&cap,task);
1979 } while (waiting_for_gc);
1980 return cap; // NOTE: task->cap might have changed here
1983 for (i=0; i < n_capabilities; i++) {
1984 debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilities (%d/%d)", i, n_capabilities);
1985 if (cap != &capabilities[i]) {
1986 Capability *pcap = &capabilities[i];
1987 // we better hope this task doesn't get migrated to
1988 // another Capability while we're waiting for this one.
1989 // It won't, because load balancing happens while we have
1990 // all the Capabilities, but even so it's a slightly
1991 // unsavoury invariant.
1994 waitForReturnCapability(&pcap, task);
1995 if (pcap != &capabilities[i]) {
1996 barf("scheduleDoGC: got the wrong capability");
2001 waiting_for_gc = rtsFalse;
2004 /* Kick any transactions which are invalid back to their
2005 * atomically frames. When next scheduled they will try to
2006 * commit; this commit will fail and they will retry.
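 * (Added, illustrative note: a transaction is invalid when some TVar
 * it has read was committed to by another transaction in the
 * meantime, so its view of memory is no longer consistent.)
 */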
2011 for (t = all_threads; t != END_TSO_QUEUE; t = next) {
2012 if (t->what_next == ThreadRelocated) {
2015 next = t->global_link;
2017 // This is a good place to check for blocked
2018 // exceptions. It might be the case that a thread is
2019 // blocked on delivering an exception to a thread that
2020 // is also blocked - we try to ensure that this
2021 // doesn't happen in throwTo(), but it's too hard (or
2022 // impossible) to close all the race holes, so we
2023 // accept that some might get through and deal with
2024 // them here. A GC will always happen at some point,
2025 // even if the system is otherwise deadlocked.
2026 maybePerformBlockedException (&capabilities[0], t);
2028 if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
2029 if (!stmValidateNestOfTransactions (t -> trec)) {
2030 debugTrace(DEBUG_sched | DEBUG_stm,
2031 "trec %p found wasting its time", t);
2033 // strip the stack back to the
2034 // ATOMICALLY_FRAME, aborting the (nested)
2035 // transaction, and saving the stack of any
2036 // partially-evaluated thunks on the heap.
2037 throwToSingleThreaded_(&capabilities[0], t,
2038 NULL, rtsTrue, NULL);
2041 ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
2049 // threads blocked on black holes need to be re-checked from time to time, so this happens periodically:
2050 if (cap) scheduleCheckBlackHoles(cap);
2052 IF_DEBUG(scheduler, printAllThreads());
2055 * We now have all the capabilities; if we're in an interrupting
2056 * state, then we should take the opportunity to delete all the
2057 * threads in the system.
2059 if (sched_state >= SCHED_INTERRUPTING) {
2060 deleteAllThreads(&capabilities[0]);
2061 sched_state = SCHED_SHUTTING_DOWN;
2064 heap_census = scheduleNeedHeapProfile(rtsTrue);
2066 /* everybody back, start the GC.
2067 * Could do it in this thread, or signal a condition var
2068 * to do it in another thread. Either way, we need to
2069 * broadcast on gc_pending_cond afterward.
2071 #if defined(THREADED_RTS)
2072 debugTrace(DEBUG_sched, "doing GC");
2074 GarbageCollect(force_major || heap_census);
2077 debugTrace(DEBUG_sched, "performing heap census");
2079 performHeapProfile = rtsFalse;
2082 #if defined(THREADED_RTS)
2083 // release our stash of capabilities.
2084 for (i = 0; i < n_capabilities; i++) {
2085 if (cap != &capabilities[i]) {
2086 task->cap = &capabilities[i];
2087 releaseCapability(&capabilities[i]);
2098 /* add a ContinueThread event to continue execution of current thread */
2099 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
2101 t, (StgClosure*)NULL, (rtsSpark*)NULL);
2103 debugBelch("GRAN: eventq and runnableq after Garbage collection:\n\n");
2111 /* ---------------------------------------------------------------------------
2112 * Singleton fork(). Do not copy any running threads.
2113 * ------------------------------------------------------------------------- */
2116 forkProcess(HsStablePtr *entry
2117 #ifndef FORKPROCESS_PRIMOP_SUPPORTED
2122 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
2128 #if defined(THREADED_RTS)
2129 if (RtsFlags.ParFlags.nNodes > 1) {
2130 errorBelch("forking not supported with +RTS -N<n> greater than 1");
2131 stg_exit(EXIT_FAILURE);
2135 debugTrace(DEBUG_sched, "forking!");
2137 // ToDo: for SMP, we should probably acquire *all* the capabilities
2140 // no funny business: hold locks while we fork, otherwise if some
2141 // other thread is holding a lock when the fork happens, the data
2142 // structure protected by the lock will forever be in an
2143 // inconsistent state in the child. See also #1391.
2144 ACQUIRE_LOCK(&sched_mutex);
2145 ACQUIRE_LOCK(&cap->lock);
2146 ACQUIRE_LOCK(&cap->running_task->lock);
2150 if (pid) { // parent
2152 RELEASE_LOCK(&sched_mutex);
2153 RELEASE_LOCK(&cap->lock);
2154 RELEASE_LOCK(&cap->running_task->lock);
2156 // just return the pid
2162 #if defined(THREADED_RTS)
2163 initMutex(&sched_mutex);
2164 initMutex(&cap->lock);
2165 initMutex(&cap->running_task->lock);
2168 // Now, all OS threads except the thread that forked are
2169 // stopped. We need to stop all Haskell threads, including
2170 // those involved in foreign calls. Also we need to delete
2171 // all Tasks, because they correspond to OS threads that are now gone.
2174 for (t = all_threads; t != END_TSO_QUEUE; t = next) {
2175 if (t->what_next == ThreadRelocated) {
2178 next = t->global_link;
2179 // don't allow threads to catch the ThreadKilled
2180 // exception, but we do want to raiseAsync() because these
2181 // threads may be evaluating thunks that we need later.
2182 deleteThread_(cap,t);
2186 // Empty the run queue. It seems tempting to let all the
2187 // killed threads stay on the run queue as zombies to be
2188 // cleaned up later, but some of them correspond to bound
2189 // threads for which the corresponding Task does not exist.
2190 cap->run_queue_hd = END_TSO_QUEUE;
2191 cap->run_queue_tl = END_TSO_QUEUE;
2193 // Any suspended C-calling Tasks are no more; their OS threads no longer exist.
2195 cap->suspended_ccalling_tasks = NULL;
2197 // Empty the all_threads list. Otherwise, the garbage
2198 // collector may attempt to resurrect some of these threads.
2199 all_threads = END_TSO_QUEUE;
2201 // Wipe the task list, except the current Task.
2202 ACQUIRE_LOCK(&sched_mutex);
2203 for (task = all_tasks; task != NULL; task=task->all_link) {
2204 if (task != cap->running_task) {
2208 RELEASE_LOCK(&sched_mutex);
2210 #if defined(THREADED_RTS)
2211 // Wipe our spare workers list; they no longer exist. New
2212 // workers will be created if necessary.
2213 cap->spare_workers = NULL;
2214 cap->returning_tasks_hd = NULL;
2215 cap->returning_tasks_tl = NULL;
2218 // On Unix, all timers are reset in the child, so we need to start the timer again.
2223 cap = rts_evalStableIO(cap, entry, NULL); // run the action
2224 rts_checkSchedStatus("forkProcess",cap);
2227 hs_exit(); // clean up and exit
2228 stg_exit(EXIT_SUCCESS);
2230 #else /* !FORKPROCESS_PRIMOP_SUPPORTED */
2231 barf("forkProcess#: primop not supported on this platform, sorry!\n");
2236 /* ---------------------------------------------------------------------------
2237 * Delete all the threads in the system
2238 * ------------------------------------------------------------------------- */
2241 deleteAllThreads ( Capability *cap )
2243 // NOTE: only safe to call if we own all capabilities.
2246 debugTrace(DEBUG_sched,"deleting all threads");
2247 for (t = all_threads; t != END_TSO_QUEUE; t = next) {
2248 if (t->what_next == ThreadRelocated) {
2251 next = t->global_link;
2252 deleteThread(cap,t);
2256 // The run queue now contains a bunch of ThreadKilled threads. We
2257 // must not throw these away: the main thread(s) will be in there
2258 // somewhere, and the main scheduler loop has to deal with it.
2259 // Also, the run queue is the only thing keeping these threads from
2260 // being GC'd, and we don't want the "main thread has been GC'd" panic.
2262 #if !defined(THREADED_RTS)
2263 ASSERT(blocked_queue_hd == END_TSO_QUEUE);
2264 ASSERT(sleeping_queue == END_TSO_QUEUE);
2268 /* -----------------------------------------------------------------------------
2269 Managing the suspended_ccalling_tasks list.
2270 Locks required: sched_mutex
2271 -------------------------------------------------------------------------- */
2274 suspendTask (Capability *cap, Task *task)
2276 ASSERT(task->next == NULL && task->prev == NULL);
2277 task->next = cap->suspended_ccalling_tasks;
2279 if (cap->suspended_ccalling_tasks) {
2280 cap->suspended_ccalling_tasks->prev = task;
2282 cap->suspended_ccalling_tasks = task;
2286 recoverSuspendedTask (Capability *cap, Task *task)
2289 task->prev->next = task->next;
2291 ASSERT(cap->suspended_ccalling_tasks == task);
2292 cap->suspended_ccalling_tasks = task->next;
2295 task->next->prev = task->prev;
2297 task->next = task->prev = NULL;
2300 /* ---------------------------------------------------------------------------
2301 * Suspending & resuming Haskell threads.
2303 * When making a "safe" call to C (aka _ccall_GC), the task gives back
2304 * its capability before calling the C function. This allows another
2305 * task to pick up the capability and carry on running Haskell
2306 * threads. It also means that if the C call blocks, it won't lock the whole system.
2309 * The Haskell thread making the C call is put to sleep for the
2310 * duration of the call, on the suspended_ccalling_tasks queue. We
2311 * give out a token to the task, which it can use to resume the thread
2312 * on return from the C function.
2313 * ------------------------------------------------------------------------- */
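/* Illustrative sketch (not part of the original source): the code
 * generated for a safe foreign call `r = foo(a)' is conceptually
 * bracketed like this:
 *
 *     token = suspendThread(BaseReg);   // give up our Capability
 *     r = foo(a);                       // may block, or re-enter the RTS
 *     BaseReg = resumeThread(token);    // wait to get a Capability back
 *
 * suspendThread() and resumeThread() below are the two halves of that
 * bracket.
 */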
2316 suspendThread (StgRegTable *reg)
2323 StgWord32 saved_winerror;
2326 saved_errno = errno;
2328 saved_winerror = GetLastError();
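// (Added note: the scheduler code below may itself clobber errno and
// the Windows last-error value, so they are captured here and
// restored just before control returns to the foreign caller.)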
2331 /* assume that *reg is a pointer to the StgRegTable part of a Capability. */
2333 cap = regTableToCapability(reg);
2335 task = cap->running_task;
2336 tso = cap->r.rCurrentTSO;
2338 debugTrace(DEBUG_sched,
2339 "thread %lu did a safe foreign call",
2340 (unsigned long)cap->r.rCurrentTSO->id);
2342 // XXX this might not be necessary --SDM
2343 tso->what_next = ThreadRunGHC;
2345 threadPaused(cap,tso);
2347 if ((tso->flags & TSO_BLOCKEX) == 0) {
2348 tso->why_blocked = BlockedOnCCall;
2349 tso->flags |= TSO_BLOCKEX;
2350 tso->flags &= ~TSO_INTERRUPTIBLE;
2352 tso->why_blocked = BlockedOnCCall_NoUnblockExc;
2355 // Hand back capability
2356 task->suspended_tso = tso;
2358 ACQUIRE_LOCK(&cap->lock);
2360 suspendTask(cap,task);
2361 cap->in_haskell = rtsFalse;
2362 releaseCapability_(cap);
2364 RELEASE_LOCK(&cap->lock);
2366 #if defined(THREADED_RTS)
2367 /* Preparing to leave the RTS, so ensure there's a native thread/task
2368 waiting to take over. */
2370 debugTrace(DEBUG_sched, "thread %lu: leaving RTS", (unsigned long)tso->id);
2373 errno = saved_errno;
2375 SetLastError(saved_winerror);
2381 resumeThread (void *task_)
2388 StgWord32 saved_winerror;
2391 saved_errno = errno;
2393 saved_winerror = GetLastError();
2397 // Wait for permission to re-enter the RTS with the result.
2398 waitForReturnCapability(&cap,task);
2399 // we might be on a different capability now... but if so, our
2400 // entry on the suspended_ccalling_tasks list will also have been migrated.
2403 // Remove the thread from the suspended list
2404 recoverSuspendedTask(cap,task);
2406 tso = task->suspended_tso;
2407 task->suspended_tso = NULL;
2408 tso->link = END_TSO_QUEUE;
2409 debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
2411 if (tso->why_blocked == BlockedOnCCall) {
2412 awakenBlockedExceptionQueue(cap,tso);
2413 tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
2416 /* Reset blocking status */
2417 tso->why_blocked = NotBlocked;
2419 cap->r.rCurrentTSO = tso;
2420 cap->in_haskell = rtsTrue;
2421 errno = saved_errno;
2423 SetLastError(saved_winerror);
2426 /* We might have GC'd, so mark the TSO dirty again */
2429 IF_DEBUG(sanity, checkTSO(tso));
2434 /* ---------------------------------------------------------------------------
2437 * scheduleThread puts a thread on the end of the runnable queue.
2438 * This will usually be done immediately after a thread is created.
2439 * The caller of scheduleThread must create the thread using e.g.
2440 * createThread and push an appropriate closure
2441 * on this thread's stack before the scheduler is invoked.
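 * An illustrative caller (cf. createGenThread() in RtsAPI.c; details
 * may differ):
 *
 *     tso = createThread(cap, stack_size);
 *     pushClosure(tso, (W_)closure);
 *     pushClosure(tso, (W_)&stg_enter_info);
 *     scheduleThread(cap, tso);
 *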
2442 * ------------------------------------------------------------------------ */
2445 scheduleThread(Capability *cap, StgTSO *tso)
2447 // The thread goes at the *end* of the run-queue, to avoid possible
2448 // starvation of any threads already on the queue.
2449 appendToRunQueue(cap,tso);
2453 scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
2455 #if defined(THREADED_RTS)
2456 tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
2457 // move this thread from now on.
2458 cpu %= RtsFlags.ParFlags.nNodes;
2459 if (cpu == cap->no) {
2460 appendToRunQueue(cap,tso);
2462 migrateThreadToCapability_lock(&capabilities[cpu],tso);
2465 appendToRunQueue(cap,tso);
2470 scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
2474 // We already created/initialised the Task
2475 task = cap->running_task;
2477 // This TSO is now a bound thread; make the Task and TSO
2478 // point to each other.
2484 task->stat = NoStatus;
2486 appendToRunQueue(cap,tso);
2488 debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
2491 /* GranSim specific init */
2492 CurrentTSO = m->tso; // the TSO to run
2493 procStatus[MainProc] = Busy; // status of main PE
2494 CurrentProc = MainProc; // PE to run it on
2497 cap = schedule(cap,task);
2499 ASSERT(task->stat != NoStatus);
2500 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
2502 debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id);
2506 /* ----------------------------------------------------------------------------
2508 * ------------------------------------------------------------------------- */
2510 #if defined(THREADED_RTS)
2512 workerStart(Task *task)
2516 // See startWorkerTask().
2517 ACQUIRE_LOCK(&task->lock);
2519 RELEASE_LOCK(&task->lock);
2521 // set the thread-local pointer to the Task:
2524 // schedule() runs without a lock.
2525 cap = schedule(cap,task);
2527 // On exit from schedule(), we have a Capability.
2528 releaseCapability(cap);
2529 workerTaskStop(task);
2533 /* ---------------------------------------------------------------------------
2536 * Initialise the scheduler. This resets all the queues - if the
2537 * queues contained any threads, they'll be garbage collected at the next GC.
2540 * ------------------------------------------------------------------------ */
2547 for (i=0; i<MAX_PROC; i++) {
2548 run_queue_hds[i] = END_TSO_QUEUE;
2549 run_queue_tls[i] = END_TSO_QUEUE;
2550 blocked_queue_hds[i] = END_TSO_QUEUE;
2551 blocked_queue_tls[i] = END_TSO_QUEUE;
2552 ccalling_threadss[i] = END_TSO_QUEUE;
2553 blackhole_queue = END_TSO_QUEUE;
2554 sleeping_queue = END_TSO_QUEUE;
2556 #elif !defined(THREADED_RTS)
2557 blocked_queue_hd = END_TSO_QUEUE;
2558 blocked_queue_tl = END_TSO_QUEUE;
2559 sleeping_queue = END_TSO_QUEUE;
2562 blackhole_queue = END_TSO_QUEUE;
2563 all_threads = END_TSO_QUEUE;
2566 sched_state = SCHED_RUNNING;
2567 recent_activity = ACTIVITY_YES;
2569 #if defined(THREADED_RTS)
2570 /* Initialise the mutex and condition variables used by the scheduler. */
2572 initMutex(&sched_mutex);
2575 ACQUIRE_LOCK(&sched_mutex);
2577 /* A capability holds the state a native thread needs in
2578 * order to execute STG code. At least one capability is
2579 * floating around (only THREADED_RTS builds have more than one). */
2585 #if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
2589 #if defined(THREADED_RTS)
2591 * Eagerly start one worker to run each Capability, except for
2592 * Capability 0. The idea is that we're probably going to start a
2593 * bound thread on Capability 0 pretty soon, so we don't want a
2594 * worker task hogging it.
2599 for (i = 1; i < n_capabilities; i++) {
2600 cap = &capabilities[i];
2601 ACQUIRE_LOCK(&cap->lock);
2602 startWorkerTask(cap, workerStart);
2603 RELEASE_LOCK(&cap->lock);
2608 trace(TRACE_sched, "start: %d capabilities", n_capabilities);
2610 RELEASE_LOCK(&sched_mutex);
2615 rtsBool wait_foreign
2616 #if !defined(THREADED_RTS)
2617 __attribute__((unused))
2620 /* see Capability.c, shutdownCapability() */
2624 #if defined(THREADED_RTS)
2625 ACQUIRE_LOCK(&sched_mutex);
2626 task = newBoundTask();
2627 RELEASE_LOCK(&sched_mutex);
2630 // If we haven't killed all the threads yet, do it now.
2631 if (sched_state < SCHED_SHUTTING_DOWN) {
2632 sched_state = SCHED_INTERRUPTING;
2633 scheduleDoGC(NULL,task,rtsFalse);
2635 sched_state = SCHED_SHUTTING_DOWN;
2637 #if defined(THREADED_RTS)
2641 for (i = 0; i < n_capabilities; i++) {
2642 shutdownCapability(&capabilities[i], task, wait_foreign);
2644 boundTaskExiting(task);
2648 freeCapability(&MainCapability);
2653 freeScheduler( void )
2656 if (n_capabilities != 1) {
2657 stgFree(capabilities);
2659 #if defined(THREADED_RTS)
2660 closeMutex(&sched_mutex);
2664 /* ---------------------------------------------------------------------------
2665 Where are the roots that we know about?
2667 - all the threads on the runnable queue
2668 - all the threads on the blocked queue
2669 - all the threads on the sleeping queue
2670 - all the threads currently executing a _ccall_GC
2671 - all the "main threads"
2673 ------------------------------------------------------------------------ */
2675 /* This has to be protected either by the scheduler monitor, or by the
2676 garbage collection monitor (probably the latter). */
2681 GetRoots( evac_fn evac )
2688 for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
2689 if (run_queue_hds[i] != END_TSO_QUEUE && run_queue_hds[i] != NULL)
2690 evac((StgClosure **)&run_queue_hds[i]);
2691 if (run_queue_tls[i] != END_TSO_QUEUE && run_queue_tls[i] != NULL)
2692 evac((StgClosure **)&run_queue_tls[i]);
2694 if (blocked_queue_hds[i] != END_TSO_QUEUE && blocked_queue_hds[i] != NULL)
2695 evac((StgClosure **)&blocked_queue_hds[i]);
2696 if (blocked_queue_tls[i] != END_TSO_QUEUE && blocked_queue_tls[i] != NULL)
2697 evac((StgClosure **)&blocked_queue_tls[i]);
2698 if (ccalling_threadss[i] != END_TSO_QUEUE && ccalling_threadss[i] != NULL)
2699 evac((StgClosure **)&ccalling_threadss[i]);
2706 for (i = 0; i < n_capabilities; i++) {
2707 cap = &capabilities[i];
2708 evac((StgClosure **)(void *)&cap->run_queue_hd);
2709 evac((StgClosure **)(void *)&cap->run_queue_tl);
2710 #if defined(THREADED_RTS)
2711 evac((StgClosure **)(void *)&cap->wakeup_queue_hd);
2712 evac((StgClosure **)(void *)&cap->wakeup_queue_tl);
2714 for (task = cap->suspended_ccalling_tasks; task != NULL; task=task->next) {
2716 debugTrace(DEBUG_sched,
2717 "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
2718 evac((StgClosure **)(void *)&task->suspended_tso);
2724 #if !defined(THREADED_RTS)
2725 evac((StgClosure **)(void *)&blocked_queue_hd);
2726 evac((StgClosure **)(void *)&blocked_queue_tl);
2727 evac((StgClosure **)(void *)&sleeping_queue);
2731 // evac((StgClosure **)&blackhole_queue);
2733 #if defined(THREADED_RTS) || defined(PARALLEL_HASKELL) || defined(GRAN)
2734 markSparkQueue(evac);
2737 #if defined(RTS_USER_SIGNALS)
2738 // mark the signal handlers (signals should be already blocked)
2739 if (RtsFlags.MiscFlags.install_signal_handlers) {
2740 markSignalHandlers(evac);
2745 /* -----------------------------------------------------------------------------
2748 This is the interface to the garbage collector from Haskell land.
2749 We provide this so that external C code can allocate and garbage
2750 collect when called from Haskell via _ccall_GC.
2751 -------------------------------------------------------------------------- */
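/* Illustrative usage from foreign C code (performGC() and
 * performMajorGC() are the exported entry points defined below):
 *
 *     performGC();        // request a normal collection
 *     performMajorGC();   // force a major collection
 */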
2754 performGC_(rtsBool force_major)
2757 // We must grab a new Task here, because the existing Task may be
2758 // associated with a particular Capability, and chained onto the
2759 // suspended_ccalling_tasks queue.
2760 ACQUIRE_LOCK(&sched_mutex);
2761 task = newBoundTask();
2762 RELEASE_LOCK(&sched_mutex);
2763 scheduleDoGC(NULL,task,force_major);
2764 boundTaskExiting(task);
2770 performGC_(rtsFalse);
2774 performMajorGC(void)
2776 performGC_(rtsTrue);
2779 /* -----------------------------------------------------------------------------
2782 If the thread has reached its maximum stack size, then raise the
2783 StackOverflow exception in the offending thread. Otherwise
2784 relocate the TSO into a larger chunk of memory and adjust its stack pointers.
2786 -------------------------------------------------------------------------- */
2789 threadStackOverflow(Capability *cap, StgTSO *tso)
2791 nat new_stack_size, stack_words;
2796 IF_DEBUG(sanity,checkTSO(tso));
2798 // don't allow throwTo() to modify the blocked_exceptions queue
2799 // while we are moving the TSO:
2800 lockClosure((StgClosure *)tso);
2802 if (tso->stack_size >= tso->max_stack_size && !(tso->flags & TSO_BLOCKEX)) {
2803 // NB. never raise a StackOverflow exception if the thread is
2804 // inside Control.Exception.block. It is impractical to protect
2805 // against stack overflow exceptions, since virtually anything
2806 // can raise one (even 'catch'), so this is the only sensible
2807 // thing to do here. See bug #767.
2809 debugTrace(DEBUG_gc,
2810 "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
2811 (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
2813 /* If we're debugging, just print out the top of the stack */
2814 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size, tso->sp+64));
2817 // Send this thread the StackOverflow exception
2819 throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
2823 /* Try to double the current stack size. If that takes us over the
2824 * maximum stack size for this thread, then use the maximum instead.
2825 * Finally round up so the TSO ends up as a whole number of blocks.
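 * (Illustrative arithmetic, not in the original: with 4k blocks, a
 * 4-byte word, a 1000-word stack and a TSO header of roughly 100
 * bytes, doubling gives 2000 words; BLOCK_ROUND_UP(2000*4 +
 * TSO_STRUCT_SIZE) is then 8192 bytes, i.e. new_tso_size = 2048
 * words, of which TSO_STRUCT_SIZEW go to the TSO header and the rest
 * to the new stack.)
 */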
2827 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
2828 new_tso_size = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
2829 TSO_STRUCT_SIZE)/sizeof(W_);
2830 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
2831 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
2833 debugTrace(DEBUG_sched,
2834 "increasing stack size from %ld words to %d.",
2835 (long)tso->stack_size, new_stack_size);
2837 dest = (StgTSO *)allocate(new_tso_size);
2838 TICK_ALLOC_TSO(new_stack_size,0);
2840 /* copy the TSO block and the old stack into the new area */
2841 memcpy(dest,tso,TSO_STRUCT_SIZE);
2842 stack_words = tso->stack + tso->stack_size - tso->sp;
2843 new_sp = (P_)dest + new_tso_size - stack_words;
2844 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
2846 /* relocate the stack pointers... */
2848 dest->stack_size = new_stack_size;
2850 /* Mark the old TSO as relocated. We have to check for relocated
2851 * TSOs in the garbage collector and any primops that deal with TSOs.
2853 * It's important to set the sp value to just beyond the end
2854 * of the stack, so we don't attempt to scavenge any part of the dead TSO's stack. */
2857 tso->what_next = ThreadRelocated;
2859 tso->sp = (P_)&(tso->stack[tso->stack_size]);
2860 tso->why_blocked = NotBlocked;
2862 IF_PAR_DEBUG(verbose,
2863 debugBelch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld\n",
2864 tso->id, tso, tso->stack_size);
2865 /* If we're debugging, just print out the top of the stack */
2866 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size, tso->sp+64)));
2872 IF_DEBUG(sanity,checkTSO(dest));
2874 IF_DEBUG(scheduler,printTSO(dest));
2880 /* ---------------------------------------------------------------------------
2882 - usually called inside a signal handler so it mustn't do anything fancy.
2883 ------------------------------------------------------------------------ */
2886 interruptStgRts(void)
2888 sched_state = SCHED_INTERRUPTING;
2893 /* -----------------------------------------------------------------------------
2896 This function causes at least one OS thread to wake up and run the
2897 scheduler loop. It is invoked when the RTS might be deadlocked, or
2898 an external event has arrived that may need servicing (e.g. a
2899 keyboard interrupt).
2901 In the single-threaded RTS we don't do anything here; we only have
2902 one thread anyway, and the event that caused us to want to wake up
2903 will have interrupted any blocking system call in progress anyway.
2904 -------------------------------------------------------------------------- */
2909 #if defined(THREADED_RTS)
2910 // This forces the IO Manager thread to wake up, which will
2911 // in turn ensure that some OS thread wakes up and runs the
2912 // scheduler loop, which will cause a GC and deadlock check.
2917 /* -----------------------------------------------------------------------------
2920 * Check the blackhole_queue for threads that can be woken up. We do
2921 * this periodically: before every GC, and whenever the run queue is empty.
2924 * An elegant solution might be to just wake up all the blocked
2925 * threads with awakenBlockedQueue occasionally: they'll go back to
2926 * sleep again if the object is still a BLACKHOLE. Unfortunately this
2927 * doesn't give us a way to tell whether we've actually managed to
2928 * wake up any threads, so we would be busy-waiting.
2930 * -------------------------------------------------------------------------- */
2933 checkBlackHoles (Capability *cap)
2936 rtsBool any_woke_up = rtsFalse;
2939 // blackhole_queue is global:
2940 ASSERT_LOCK_HELD(&sched_mutex);
2942 debugTrace(DEBUG_sched, "checking threads blocked on black holes");
2944 // ASSUMES: sched_mutex
2945 prev = &blackhole_queue;
2946 t = blackhole_queue;
2947 while (t != END_TSO_QUEUE) {
2948 ASSERT(t->why_blocked == BlockedOnBlackHole);
2949 type = get_itbl(t->block_info.closure)->type;
2950 if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
2951 IF_DEBUG(sanity,checkTSO(t));
2952 t = unblockOne(cap, t);
2953 // urk, the threads migrate to the current capability
2954 // here, but we'd like to keep them on the original one.
2956 any_woke_up = rtsTrue;
2966 /* -----------------------------------------------------------------------------
2969 This is used for interruption (^C) and forking, and corresponds to
2970 raising an exception but without letting the thread catch the exception.
2972 -------------------------------------------------------------------------- */
2975 deleteThread (Capability *cap, StgTSO *tso)
2977 // NOTE: must only be called on a TSO that we have exclusive
2978 // access to, because we will call throwToSingleThreaded() below.
2979 // The TSO must be on the run queue of the Capability we own, or
2980 // we must own all Capabilities.
2982 if (tso->why_blocked != BlockedOnCCall &&
2983 tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
2984 throwToSingleThreaded(cap,tso,NULL);
2988 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
2990 deleteThread_(Capability *cap, StgTSO *tso)
2991 { // for forkProcess only:
2992 // like deleteThread(), but we delete threads in foreign calls, too.
2994 if (tso->why_blocked == BlockedOnCCall ||
2995 tso->why_blocked == BlockedOnCCall_NoUnblockExc) {
2996 unblockOne(cap,tso);
2997 tso->what_next = ThreadKilled;
2999 deleteThread(cap,tso);
3004 /* -----------------------------------------------------------------------------
3005 raiseExceptionHelper
3007 This function is called by the raise# primitive, just so that we can
3008 move some of the tricky bits of raising an exception from C-- into
3009 C. Who knows, it might be a useful reusable thing here too.
3010 -------------------------------------------------------------------------- */
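/* Illustrative (an assumption, based on the Cmm code of this era):
 * raise# calls in from Exception.cmm roughly as
 *
 *     frame_type = foreign "C" raiseExceptionHelper(BaseReg "ptr",
 *                                                   CurrentTSO "ptr",
 *                                                   exception "ptr");
 *
 * and then dispatches on the frame type returned (see the cases
 * below).
 */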
3013 raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
3015 Capability *cap = regTableToCapability(reg);
3016 StgThunk *raise_closure = NULL;
3018 StgRetInfoTable *info;
3020 // This closure represents the expression 'raise# E' where E
3021 // is the exception raised. It is used to overwrite all the
3022 // thunks which are currently under evaluation.
3025 // OLD COMMENT (we don't have MIN_UPD_SIZE now):
3026 // LDV profiling: stg_raise_info has THUNK as its closure
3027 // type. Since a THUNK takes at least MIN_UPD_SIZE words in its
3028 // payload, MIN_UPD_SIZE is more appropriate than 1. It seems that
3029 // 1 does not cause any problem unless profiling is performed.
3030 // However, when LDV profiling goes on, we need to linearly scan
3031 // small object pool, where raise_closure is stored, so we should
3032 // use MIN_UPD_SIZE.
3034 // raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
3035 // sizeofW(StgClosure)+1);
3039 // Walk up the stack, looking for the catch frame. On the way,
3040 // we update any closures pointed to from update frames with the
3041 // raise closure that we just built.
3045 info = get_ret_itbl((StgClosure *)p);
3046 next = p + stack_frame_sizeW((StgClosure *)p);
3047 switch (info->i.type) {
3050 // Only create raise_closure if we need to.
3051 if (raise_closure == NULL) {
3053 (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
3054 SET_HDR(raise_closure, &stg_raise_info, CCCS);
3055 raise_closure->payload[0] = exception;
3057 UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
3061 case ATOMICALLY_FRAME:
3062 debugTrace(DEBUG_stm, "found ATOMICALLY_FRAME at %p", p);
3064 return ATOMICALLY_FRAME;
3070 case CATCH_STM_FRAME:
3071 debugTrace(DEBUG_stm, "found CATCH_STM_FRAME at %p", p);
3073 return CATCH_STM_FRAME;
3079 case CATCH_RETRY_FRAME:
3088 /* -----------------------------------------------------------------------------
3089 findRetryFrameHelper
3091 This function is called by the retry# primitive. It traverses the stack
3092 leaving tso->sp referring to the frame which should handle the retry.
3094 This should either be a CATCH_RETRY_FRAME (if the retry# is within an orElse#)
3095 or should be an ATOMICALLY_FRAME (if the retry# reaches the top level).
3097 We skip CATCH_STM_FRAMEs (aborting and rolling back the nested tx that they
3098 create) because retries are not considered to be exceptions, despite the
3099 similar implementation.
3101 We should not expect to see CATCH_FRAME or STOP_FRAME because those should
3102 not be created within memory transactions.
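
   An illustrative example (not in the original): for

       atomically (e1 `orElse` e2)

   the stack while e1 runs looks like (bottom to top)

       ..., ATOMICALLY_FRAME, CATCH_RETRY_FRAME, <frames of e1>

   so a retry# inside e1 stops at the CATCH_RETRY_FRAME and e2 gets
   run, whereas a retry# with no enclosing orElse# unwinds to the
   ATOMICALLY_FRAME and the whole transaction blocks until a TVar it
   read has changed.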
3103 -------------------------------------------------------------------------- */
3106 findRetryFrameHelper (StgTSO *tso)
3109 StgRetInfoTable *info;
3113 info = get_ret_itbl((StgClosure *)p);
3114 next = p + stack_frame_sizeW((StgClosure *)p);
3115 switch (info->i.type) {
3117 case ATOMICALLY_FRAME:
3118 debugTrace(DEBUG_stm,
3119 "found ATOMICALLY_FRAME at %p during retry", p);
3121 return ATOMICALLY_FRAME;
3123 case CATCH_RETRY_FRAME:
3124 debugTrace(DEBUG_stm,
3125 "found CATCH_RETRY_FRAME at %p during retrry", p);
3127 return CATCH_RETRY_FRAME;
3129 case CATCH_STM_FRAME: {
3130 StgTRecHeader *trec = tso -> trec;
3131 StgTRecHeader *outer = stmGetEnclosingTRec(trec);
3132 debugTrace(DEBUG_stm,
3133 "found CATCH_STM_FRAME at %p during retry", p);
3134 debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer);
3135 stmAbortTransaction(tso -> cap, trec);
3136 stmFreeAbortedTRec(tso -> cap, trec);
3137 tso -> trec = outer;
3144 ASSERT(info->i.type != CATCH_FRAME);
3145 ASSERT(info->i.type != STOP_FRAME);
3152 /* -----------------------------------------------------------------------------
3153 resurrectThreads is called after garbage collection on the list of
3154 threads found to be garbage. Each of these threads will be woken
3155 up and sent a signal: BlockedOnDeadMVar if the thread was blocked
3156 on an MVar, or NonTermination if the thread was blocked on a Black Hole.
3159 Locks: assumes we hold *all* the capabilities.
3160 -------------------------------------------------------------------------- */
3163 resurrectThreads (StgTSO *threads)
3168 for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
3169 next = tso->global_link;
3170 tso->global_link = all_threads;
3172 debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);
3174 // Wake up the thread on the Capability it was last on
3177 switch (tso->why_blocked) {
3179 case BlockedOnException:
3180 /* Called by GC - sched_mutex lock is currently held. */
3181 throwToSingleThreaded(cap, tso,
3182 (StgClosure *)BlockedOnDeadMVar_closure);
3184 case BlockedOnBlackHole:
3185 throwToSingleThreaded(cap, tso,
3186 (StgClosure *)NonTermination_closure);
3189 throwToSingleThreaded(cap, tso,
3190 (StgClosure *)BlockedIndefinitely_closure);
3193 /* This might happen if the thread was blocked on a black hole
3194 * belonging to a thread that we've just woken up (raiseAsync
3195 * can wake up threads, remember...).
3199 barf("resurrectThreads: thread blocked in a strange way");