1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2006
5 * The scheduler and thread-related functionality
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
10 #define KEEP_LOCKCLOSURE
13 #include "sm/Storage.h"
17 #include "Interpreter.h"
19 #include "RtsSignals.h"
24 #include "ThreadLabels.h"
26 #include "Proftimer.h"
29 #include "eventlog/EventLog.h"
30 #include "sm/GC.h" // waitForGcThreads, releaseGCThreads, N
32 #include "Capability.h"
34 #include "AwaitEvent.h"
35 #if defined(mingw32_HOST_OS)
36 #include "win32/IOManager.h"
39 #include "RaiseAsync.h"
42 #include "ThreadPaused.h"
44 #ifdef HAVE_SYS_TYPES_H
45 #include <sys/types.h>
59 /* -----------------------------------------------------------------------------
61 * -------------------------------------------------------------------------- */
63 #if !defined(THREADED_RTS)
64 // Blocked/sleeping threads
65 StgTSO *blocked_queue_hd = NULL;
66 StgTSO *blocked_queue_tl = NULL;
67 StgTSO *sleeping_queue = NULL; // perhaps replace with a hash table?
70 /* Threads blocked on blackholes.
71 * LOCK: sched_mutex+capability, or all capabilities
73 StgTSO *blackhole_queue = NULL;
75 /* The blackhole_queue should be checked for threads to wake up. See
76 * Schedule.h for more thorough comment.
77 * LOCK: none (doesn't matter if we miss an update)
79 rtsBool blackholes_need_checking = rtsFalse;
81 /* Set to true when the latest garbage collection failed to reclaim
82 * enough space, and the runtime should proceed to shut itself down in
83 * an orderly fashion (emitting profiling info etc.)
85 rtsBool heap_overflow = rtsFalse;
87 /* flag that tracks whether we have done any execution in this time slice.
88 * LOCK: currently none, perhaps we should lock (but needs to be
89 * updated in the fast path of the scheduler).
91 * NB. must be StgWord, we do xchg() on it.
93 volatile StgWord recent_activity = ACTIVITY_YES;
95 /* if this flag is set as well, give up execution
96 * LOCK: none (changes monotonically)
98 volatile StgWord sched_state = SCHED_RUNNING;
100 /* This is used in `TSO.h' and gcc 2.96 insists that this variable actually
101 * exists - earlier gccs apparently didn't.
107 * Set to TRUE when entering a shutdown state (via shutdownHaskellAndExit()) --
108 * in an MT setting, needed to signal that a worker thread shouldn't hang around
109 * in the scheduler when it is out of work.
111 rtsBool shutting_down_scheduler = rtsFalse;
114 * This mutex protects most of the global scheduler data in
115 * the THREADED_RTS runtime.
117 #if defined(THREADED_RTS)
121 #if !defined(mingw32_HOST_OS)
122 #define FORKPROCESS_PRIMOP_SUPPORTED
125 /* -----------------------------------------------------------------------------
126 * static function prototypes
127 * -------------------------------------------------------------------------- */
129 static Capability *schedule (Capability *initialCapability, Task *task);
132 // These functions all encapsulate parts of the scheduler loop, and are
133 // abstracted only to make the structure and control flow of the
134 // scheduler clearer.
136 static void schedulePreLoop (void);
137 static void scheduleFindWork (Capability *cap);
138 #if defined(THREADED_RTS)
139 static void scheduleYield (Capability **pcap, Task *task, rtsBool);
141 static void scheduleStartSignalHandlers (Capability *cap);
142 static void scheduleCheckBlockedThreads (Capability *cap);
143 static void scheduleCheckWakeupThreads(Capability *cap USED_IF_NOT_THREADS);
144 static void scheduleCheckBlackHoles (Capability *cap);
145 static void scheduleDetectDeadlock (Capability *cap, Task *task);
146 static void schedulePushWork(Capability *cap, Task *task);
147 #if defined(THREADED_RTS)
148 static void scheduleActivateSpark(Capability *cap);
150 static void schedulePostRunThread(Capability *cap, StgTSO *t);
151 static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
152 static void scheduleHandleStackOverflow( Capability *cap, Task *task,
154 static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t,
155 nat prev_what_next );
156 static void scheduleHandleThreadBlocked( StgTSO *t );
157 static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
159 static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc);
160 static Capability *scheduleDoGC(Capability *cap, Task *task,
161 rtsBool force_major);
163 static rtsBool checkBlackHoles(Capability *cap);
165 static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
166 static StgTSO *threadStackUnderflow(Task *task, StgTSO *tso);
168 static void deleteThread (Capability *cap, StgTSO *tso);
169 static void deleteAllThreads (Capability *cap);
171 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
172 static void deleteThread_(Capability *cap, StgTSO *tso);
176 static char *whatNext_strs[] = {
178 [ThreadRunGHC] = "ThreadRunGHC",
179 [ThreadInterpret] = "ThreadInterpret",
180 [ThreadKilled] = "ThreadKilled",
181 [ThreadRelocated] = "ThreadRelocated",
182 [ThreadComplete] = "ThreadComplete"
186 /* -----------------------------------------------------------------------------
187 * Putting a thread on the run queue: different scheduling policies
188 * -------------------------------------------------------------------------- */
191 addToRunQueue( Capability *cap, StgTSO *t )
193 // this does round-robin scheduling; good for concurrency
194 appendToRunQueue(cap,t);
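/* Illustrative sketch (not part of the original source): why appending at
 * the tail of a FIFO run queue gives round-robin behaviour. A thread that
 * yields goes to the back of the queue, so every other runnable thread gets
 * a turn before it runs again. The ExampleTSO type and helper names below
 * are assumptions made for this example only; they are a self-contained
 * analogue of appendToRunQueue()/popRunQueue().
 */
#if 0
typedef struct ExampleTSO_ {
    struct ExampleTSO_ *link;
} ExampleTSO;

static void examplePush (ExampleTSO **hd, ExampleTSO **tl, ExampleTSO *t)
{
    t->link = NULL;
    if (*hd == NULL) { *hd = t; } else { (*tl)->link = t; }
    *tl = t;
}

static ExampleTSO *examplePop (ExampleTSO **hd, ExampleTSO **tl)
{
    ExampleTSO *t = *hd;
    if (t == NULL) { return NULL; }
    *hd = t->link;
    if (*hd == NULL) { *tl = NULL; }
    return t;
}
#endif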
197 /* ---------------------------------------------------------------------------
198 Main scheduling loop.
200 We use round-robin scheduling, each thread returning to the
201 scheduler loop when one of these conditions is detected:
204 * timer expires (thread yields)
210 In a GranSim setup this loop iterates over the global event queue,
211 which determines what to do next. Therefore, it's more complicated
212 than either the concurrent or the parallel (GUM) setup.
214 This version has been entirely removed (JB 2008/08).
217 GUM iterates over incoming messages.
218 It starts with nothing to do (thus CurrentTSO == END_TSO_QUEUE),
219 and sends out a fish whenever it has nothing to do; in-between
220 doing the actual reductions (shared code below) it processes the
221 incoming messages and deals with delayed operations
222 (see PendingFetches).
223 This is not the ugliest code you could imagine, but it's bloody close.
225 (JB 2008/08) This version was formerly indicated by a PP-Flag PAR,
226 now by PP-flag PARALLEL_HASKELL. The Eden RTS (in GHC-6.x) uses it,
227 as well as future GUM versions. This file has been refurbished to
228 only contain valid code, which is, however, incomplete and refers to
229 invalid includes etc.
231 ------------------------------------------------------------------------ */
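/* Illustrative sketch (not part of the original source): the overall shape
 * of schedule() below, with almost all of the detail stripped out. The
 * helper functions are the ones declared above; the StgRun()/interpretBCO()
 * step is replaced by a constant return code, so this is a reading aid
 * rather than a drop-in replacement.
 */
#if 0
static Capability *
exampleScheduleSkeleton (Capability *cap, Task *task)
{
    StgTSO *t;
    StgThreadReturnCode ret;
    rtsBool ready_to_gc = rtsFalse;

    while (1) {
        scheduleFindWork(cap);             // signals, black holes, wakeups, sparks
        schedulePushWork(cap,task);        // share surplus threads/sparks
        scheduleDetectDeadlock(cap,task);
#if defined(THREADED_RTS)
        scheduleYield(&cap,task,rtsFalse); // sleep until there is work we may run
        if (emptyRunQueue(cap)) continue;
#endif
        t = popRunQueue(cap);
        ret = ThreadFinished;              // stands in for StgRun()/interpretBCO()
        switch (ret) {
        case HeapOverflow:
            ready_to_gc = scheduleHandleHeapOverflow(cap,t);
            break;
        case ThreadFinished:
            if (scheduleHandleThreadFinished(cap,task,t)) { return cap; }
            break;
        default:
            break;
        }
        if (ready_to_gc) { cap = scheduleDoGC(cap,task,rtsFalse); }
    }
}
#endif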
234 schedule (Capability *initialCapability, Task *task)
238 StgThreadReturnCode ret;
241 #if defined(THREADED_RTS)
242 rtsBool first = rtsTrue;
244 rtsBool force_yield = rtsFalse;
246 cap = initialCapability;
248 // Pre-condition: this task owns initialCapability.
249 // The sched_mutex is *NOT* held
250 // NB. on return, we still hold a capability.
252 debugTrace (DEBUG_sched,
253 "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
254 task, initialCapability);
258 // -----------------------------------------------------------
259 // Scheduler loop starts here:
263 // Check whether we have re-entered the RTS from Haskell without
264 // going via suspendThread()/resumeThread (i.e. a 'safe' foreign
266 if (cap->in_haskell) {
267 errorBelch("schedule: re-entered unsafely.\n"
268 " Perhaps a 'foreign import unsafe' should be 'safe'?");
269 stg_exit(EXIT_FAILURE);
272 // The interruption / shutdown sequence.
274 // In order to cleanly shut down the runtime, we want to:
275 // * make sure that all main threads return to their callers
276 // with the state 'Interrupted'.
277 // * clean up all OS threads associated with the runtime
278 // * free all memory etc.
280 // So the sequence for ^C goes like this:
282 // * ^C handler sets sched_state := SCHED_INTERRUPTING and
283 // arranges for some Capability to wake up
285 // * all threads in the system are halted, and the zombies are
286 // placed on the run queue for cleaning up. We acquire all
287 // the capabilities in order to delete the threads, this is
288 // done by scheduleDoGC() for convenience (because GC already
289 // needs to acquire all the capabilities). We can't kill
290 // threads involved in foreign calls.
292 // * somebody calls shutdownHaskell(), which calls exitScheduler()
294 // * sched_state := SCHED_SHUTTING_DOWN
296 // * all workers exit when the run queue on their capability
297 // drains. All main threads will also exit when their TSO
298 // reaches the head of the run queue and they can return.
300 // * eventually all Capabilities will shut down, and the RTS can
303 // * We might be left with threads blocked in foreign calls,
304 // we should really attempt to kill these somehow (TODO);
306 switch (sched_state) {
309 case SCHED_INTERRUPTING:
310 debugTrace(DEBUG_sched, "SCHED_INTERRUPTING");
311 #if defined(THREADED_RTS)
312 discardSparksCap(cap);
314 /* scheduleDoGC() deletes all the threads */
315 cap = scheduleDoGC(cap,task,rtsFalse);
317 // after scheduleDoGC(), we must be shutting down. Either some
318 // other Capability did the final GC, or we did it above,
319 // either way we can fall through to the SCHED_SHUTTING_DOWN
321 ASSERT(sched_state == SCHED_SHUTTING_DOWN);
324 case SCHED_SHUTTING_DOWN:
325 debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN");
326 // If we are a worker, just exit. If we're a bound thread
327 // then we will exit below when we've removed our TSO from
329 if (task->tso == NULL && emptyRunQueue(cap)) {
334 barf("sched_state: %d", sched_state);
337 scheduleFindWork(cap);
339 /* work pushing, currently relevant only for THREADED_RTS:
340 (pushes threads, wakes up idle capabilities for stealing) */
341 schedulePushWork(cap,task);
343 scheduleDetectDeadlock(cap,task);
345 #if defined(THREADED_RTS)
346 cap = task->cap; // reload cap, it might have changed
349 // Normally, the only way we can get here with no threads to
350 // run is if a keyboard interrupt was received during
351 // scheduleCheckBlockedThreads() or scheduleDetectDeadlock().
352 // Additionally, it is not fatal for the
353 // threaded RTS to reach here with no threads to run.
355 // win32: might be here due to awaitEvent() being abandoned
356 // as a result of a console event having been delivered.
358 #if defined(THREADED_RTS)
362 // // don't yield the first time, we want a chance to run this
363 // // thread for a bit, even if there are others banging at the
366 // ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
370 scheduleYield(&cap,task,force_yield);
371 force_yield = rtsFalse;
373 if (emptyRunQueue(cap)) continue; // look for work again
376 #if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
377 if ( emptyRunQueue(cap) ) {
378 ASSERT(sched_state >= SCHED_INTERRUPTING);
383 // Get a thread to run
385 t = popRunQueue(cap);
387 // Sanity check the thread we're about to run. This can be
388 // expensive if there is lots of thread switching going on...
389 IF_DEBUG(sanity,checkTSO(t));
391 #if defined(THREADED_RTS)
392 // Check whether we can run this thread in the current task.
393 // If not, we have to pass our capability to the right task.
395 Task *bound = t->bound;
399 debugTrace(DEBUG_sched,
400 "### Running thread %lu in bound thread", (unsigned long)t->id);
401 // yes, the Haskell thread is bound to the current native thread
403 debugTrace(DEBUG_sched,
404 "### thread %lu bound to another OS thread", (unsigned long)t->id);
405 // no, bound to a different Haskell thread: pass to that thread
406 pushOnRunQueue(cap,t);
410 // The thread we want to run is unbound.
412 debugTrace(DEBUG_sched,
413 "### this OS thread cannot run thread %lu", (unsigned long)t->id);
414 // no, the current native thread is bound to a different
415 // Haskell thread, so pass it to any worker thread
416 pushOnRunQueue(cap,t);
423 // If we're shutting down, and this thread has not yet been
424 // killed, kill it now. This sometimes happens when a finalizer
425 // thread is created by the final GC, or a thread previously
426 // in a foreign call returns.
427 if (sched_state >= SCHED_INTERRUPTING &&
428 !(t->what_next == ThreadComplete || t->what_next == ThreadKilled)) {
432 /* context switches are initiated by the timer signal, unless
433 * the user specified "context switch as often as possible", with
436 if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
437 && !emptyThreadQueues(cap)) {
438 cap->context_switch = 1;
443 // CurrentTSO is the thread to run. t might be different if we
444 // loop back to run_thread, so make sure to set CurrentTSO after
446 cap->r.rCurrentTSO = t;
448 debugTrace(DEBUG_sched, "-->> running thread %ld %s ...",
449 (long)t->id, whatNext_strs[t->what_next]);
451 startHeapProfTimer();
453 // Check for exceptions blocked on this thread
454 maybePerformBlockedException (cap, t);
456 // ----------------------------------------------------------------------
457 // Run the current thread
459 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
460 ASSERT(t->cap == cap);
461 ASSERT(t->bound ? t->bound->cap == cap : 1);
463 prev_what_next = t->what_next;
465 errno = t->saved_errno;
467 SetLastError(t->saved_winerror);
470 cap->in_haskell = rtsTrue;
474 #if defined(THREADED_RTS)
475 if (recent_activity == ACTIVITY_DONE_GC) {
476 // ACTIVITY_DONE_GC means we turned off the timer signal to
477 // conserve power (see #1623). Re-enable it here.
479 prev = xchg((P_)&recent_activity, ACTIVITY_YES);
480 if (prev == ACTIVITY_DONE_GC) {
484 recent_activity = ACTIVITY_YES;
488 postEvent(cap, EVENT_RUN_THREAD, t->id, 0);
490 switch (prev_what_next) {
494 /* Thread already finished, return to scheduler. */
495 ret = ThreadFinished;
501 r = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
502 cap = regTableToCapability(r);
507 case ThreadInterpret:
508 cap = interpretBCO(cap);
513 barf("schedule: invalid what_next field");
516 cap->in_haskell = rtsFalse;
518 // The TSO might have moved, eg. if it re-entered the RTS and a GC
519 // happened. So find the new location:
520 t = cap->r.rCurrentTSO;
522 // We have run some Haskell code: there might be blackhole-blocked
523 // threads to wake up now.
524 // Lock-free test here should be ok, we're just setting a flag.
525 if ( blackhole_queue != END_TSO_QUEUE ) {
526 blackholes_need_checking = rtsTrue;
529 // And save the current errno in this thread.
530 // XXX: possibly bogus for SMP because this thread might already
531 // be running again, see code below.
532 t->saved_errno = errno;
534 // Similarly for Windows error code
535 t->saved_winerror = GetLastError();
538 postEvent (cap, EVENT_STOP_THREAD, t->id, ret);
540 #if defined(THREADED_RTS)
541 // If ret is ThreadBlocked, and this Task is bound to the TSO that
542 // blocked, we are in limbo - the TSO is now owned by whatever it
543 // is blocked on, and may in fact already have been woken up,
544 // perhaps even on a different Capability. It may be the case
545 // that task->cap != cap. We better yield this Capability
546 // immediately and return to normality.
547 if (ret == ThreadBlocked) {
548 debugTrace(DEBUG_sched,
549 "--<< thread %lu (%s) stopped: blocked",
550 (unsigned long)t->id, whatNext_strs[t->what_next]);
551 force_yield = rtsTrue;
556 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
557 ASSERT(t->cap == cap);
559 // ----------------------------------------------------------------------
561 // Costs for the scheduler are assigned to CCS_SYSTEM
563 #if defined(PROFILING)
567 schedulePostRunThread(cap,t);
569 if (ret != StackOverflow) {
570 t = threadStackUnderflow(task,t);
573 ready_to_gc = rtsFalse;
577 ready_to_gc = scheduleHandleHeapOverflow(cap,t);
581 scheduleHandleStackOverflow(cap,task,t);
585 if (scheduleHandleYield(cap, t, prev_what_next)) {
586 // shortcut for switching between compiler/interpreter:
592 scheduleHandleThreadBlocked(t);
596 if (scheduleHandleThreadFinished(cap, task, t)) return cap;
597 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
601 barf("schedule: invalid thread return code %d", (int)ret);
604 if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
605 cap = scheduleDoGC(cap,task,rtsFalse);
607 } /* end of while() */
610 /* ----------------------------------------------------------------------------
611 * Setting up the scheduler loop
612 * ------------------------------------------------------------------------- */
615 schedulePreLoop(void)
617 // initialisation for scheduler - what cannot go into initScheduler()
620 /* -----------------------------------------------------------------------------
623 * Search for work to do, and handle messages from elsewhere.
624 * -------------------------------------------------------------------------- */
627 scheduleFindWork (Capability *cap)
629 scheduleStartSignalHandlers(cap);
631 // Only check the black holes here if we've nothing else to do.
632 // During normal execution, the black hole list only gets checked
633 // at GC time, to avoid repeatedly traversing this possibly long
634 // list each time around the scheduler.
635 if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
637 scheduleCheckWakeupThreads(cap);
639 scheduleCheckBlockedThreads(cap);
641 #if defined(THREADED_RTS)
642 if (emptyRunQueue(cap)) { scheduleActivateSpark(cap); }
646 #if defined(THREADED_RTS)
647 STATIC_INLINE rtsBool
648 shouldYieldCapability (Capability *cap, Task *task)
650 // we need to yield this capability to someone else if..
651 // - another thread is initiating a GC
652 // - another Task is returning from a foreign call
653 // - the thread at the head of the run queue cannot be run
654 // by this Task (it is bound to another Task, or it is unbound
655 // and this task is bound).
656 return (waiting_for_gc ||
657 cap->returning_tasks_hd != NULL ||
658 (!emptyRunQueue(cap) && (task->tso == NULL
659 ? cap->run_queue_hd->bound != NULL
660 : cap->run_queue_hd->bound != task)));
663 // This is the single place where a Task goes to sleep. There are
664 // two reasons it might need to sleep:
665 // - there are no threads to run
666 // - we need to yield this Capability to someone else
667 // (see shouldYieldCapability())
669 // Careful: the scheduler loop is quite delicate. Make sure you run
670 // the tests in testsuite/concurrent (all ways) after modifying this,
671 // and also check the benchmarks in nofib/parallel for regressions.
674 scheduleYield (Capability **pcap, Task *task, rtsBool force_yield)
676 Capability *cap = *pcap;
678 // if we have work, and we don't need to give up the Capability, continue.
680 // The force_yield flag is used when a bound thread blocks. This
681 // is a particularly tricky situation: the current Task does not
682 // own the TSO any more, since it is on some queue somewhere, and
683 // might be woken up or manipulated by another thread at any time.
684 // The TSO and Task might be migrated to another Capability.
685 // Certain invariants might be in doubt, such as task->bound->cap
686 // == cap. We have to yield the current Capability immediately,
687 // no messing around.
690 !shouldYieldCapability(cap,task) &&
691 (!emptyRunQueue(cap) ||
692 !emptyWakeupQueue(cap) ||
693 blackholes_need_checking ||
694 sched_state >= SCHED_INTERRUPTING))
697 // otherwise yield (sleep), and keep yielding if necessary.
699 yieldCapability(&cap,task);
701 while (shouldYieldCapability(cap,task));
703 // note there may still be no threads on the run queue at this
704 // point, the caller has to check.
711 /* -----------------------------------------------------------------------------
714 * Push work to other Capabilities if we have some.
715 * -------------------------------------------------------------------------- */
718 schedulePushWork(Capability *cap USED_IF_THREADS,
719 Task *task USED_IF_THREADS)
721 /* following code not for PARALLEL_HASKELL. I kept the call general,
722 future GUM versions might use pushing in a distributed setup */
723 #if defined(THREADED_RTS)
725 Capability *free_caps[n_capabilities], *cap0;
728 // migration can be turned off with +RTS -qg
729 if (!RtsFlags.ParFlags.migrate) return;
731 // Check whether we have more threads on our run queue, or sparks
732 // in our pool, that we could hand to another Capability.
733 if (cap->run_queue_hd == END_TSO_QUEUE) {
734 if (sparkPoolSizeCap(cap) < 2) return;
736 if (cap->run_queue_hd->_link == END_TSO_QUEUE &&
737 sparkPoolSizeCap(cap) < 1) return;
740 // First grab as many free Capabilities as we can.
741 for (i=0, n_free_caps=0; i < n_capabilities; i++) {
742 cap0 = &capabilities[i];
743 if (cap != cap0 && tryGrabCapability(cap0,task)) {
744 if (!emptyRunQueue(cap0) || cap->returning_tasks_hd != NULL) {
745 // it already has some work, we just grabbed it at
746 // the wrong moment. Or maybe it's deadlocked!
747 releaseCapability(cap0);
749 free_caps[n_free_caps++] = cap0;
754 // we now have n_free_caps free capabilities stashed in
755 // free_caps[]. Share our run queue equally with them. This is
756 // probably the simplest thing we could do; improvements we might
757 // want to do include:
759 // - giving high priority to moving relatively new threads, on
760 // the grounds that they haven't had time to build up a
761 // working set in the cache on this CPU/Capability.
763 // - giving low priority to moving long-lived threads
765 if (n_free_caps > 0) {
766 StgTSO *prev, *t, *next;
767 rtsBool pushed_to_all;
769 debugTrace(DEBUG_sched,
770 "cap %d: %s and %d free capabilities, sharing...",
772 (!emptyRunQueue(cap) && cap->run_queue_hd->_link != END_TSO_QUEUE)?
773 "excess threads on run queue":"sparks to share (>=2)",
777 pushed_to_all = rtsFalse;
779 if (cap->run_queue_hd != END_TSO_QUEUE) {
780 prev = cap->run_queue_hd;
782 prev->_link = END_TSO_QUEUE;
783 for (; t != END_TSO_QUEUE; t = next) {
785 t->_link = END_TSO_QUEUE;
786 if (t->what_next == ThreadRelocated
787 || t->bound == task // don't move my bound thread
788 || tsoLocked(t)) { // don't move a locked thread
789 setTSOLink(cap, prev, t);
791 } else if (i == n_free_caps) {
792 pushed_to_all = rtsTrue;
795 setTSOLink(cap, prev, t);
798 debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
799 appendToRunQueue(free_caps[i],t);
801 postEvent (cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
803 if (t->bound) { t->bound->cap = free_caps[i]; }
804 t->cap = free_caps[i];
808 cap->run_queue_tl = prev;
812 /* JB I left this code in place, it would work but is not necessary */
814 // If there are some free capabilities that we didn't push any
815 // threads to, then try to push a spark to each one.
816 if (!pushed_to_all) {
818 // i is the next free capability to push to
819 for (; i < n_free_caps; i++) {
820 if (emptySparkPoolCap(free_caps[i])) {
821 spark = tryStealSpark(cap->sparks);
823 debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
825 postEvent(free_caps[i], EVENT_STEAL_SPARK, t->id, cap->no);
827 newSpark(&(free_caps[i]->r), spark);
832 #endif /* SPARK_PUSHING */
834 // release the capabilities
835 for (i = 0; i < n_free_caps; i++) {
836 task->cap = free_caps[i];
837 releaseAndWakeupCapability(free_caps[i]);
840 task->cap = cap; // reset to point to our Capability.
842 #endif /* THREADED_RTS */
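/* Illustrative sketch (not part of the original source): the heart of the
 * run-queue sharing above. We keep the first thread for ourselves, then deal
 * threads out one per free capability, keeping one back each time we have
 * gone round all of them, so the queue ends up shared roughly equally. The
 * bound/locked/relocated tests (those threads must stay put) are omitted,
 * and the ExampleThread type and all names are assumptions made for this
 * example only.
 */
#if 0
typedef struct ExampleThread_ {
    struct ExampleThread_ *link;
} ExampleThread;

static void
exampleShareRunQueue (ExampleThread **my_hd, ExampleThread **my_tl,
                      ExampleThread *free_hd[], ExampleThread *free_tl[],
                      int n_free)
{
    ExampleThread *prev = *my_hd;        // always keep the first thread
    ExampleThread *t = prev->link;
    int i = 0;

    prev->link = NULL;
    while (t != NULL) {
        ExampleThread *next = t->link;
        t->link = NULL;
        if (i == n_free) {
            // we have pushed to every free capability: keep one for
            // ourselves and start again at capability 0
            prev->link = t;
            prev = t;
            i = 0;
        } else {
            // hand this thread to free capability i
            if (free_hd[i] == NULL) { free_hd[i] = t; }
            else { free_tl[i]->link = t; }
            free_tl[i] = t;
            i++;
        }
        t = next;
    }
    *my_tl = prev;
}
#endif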
846 /* ----------------------------------------------------------------------------
847 * Start any pending signal handlers
848 * ------------------------------------------------------------------------- */
850 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
852 scheduleStartSignalHandlers(Capability *cap)
854 if (RtsFlags.MiscFlags.install_signal_handlers && signals_pending()) {
855 // safe outside the lock
856 startSignalHandlers(cap);
861 scheduleStartSignalHandlers(Capability *cap STG_UNUSED)
866 /* ----------------------------------------------------------------------------
867 * Check for blocked threads that can be woken up.
868 * ------------------------------------------------------------------------- */
871 scheduleCheckBlockedThreads(Capability *cap USED_IF_NOT_THREADS)
873 #if !defined(THREADED_RTS)
875 // Check whether any waiting threads need to be woken up. If the
876 // run queue is empty, and there are no other tasks running, we
877 // can wait indefinitely for something to happen.
879 if ( !emptyQueue(blocked_queue_hd) || !emptyQueue(sleeping_queue) )
881 awaitEvent( emptyRunQueue(cap) && !blackholes_need_checking );
887 /* ----------------------------------------------------------------------------
888 * Check for threads woken up by other Capabilities
889 * ------------------------------------------------------------------------- */
892 scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS)
894 #if defined(THREADED_RTS)
895 // Any threads that were woken up by other Capabilities get
896 // appended to our run queue.
897 if (!emptyWakeupQueue(cap)) {
898 ACQUIRE_LOCK(&cap->lock);
899 if (emptyRunQueue(cap)) {
900 cap->run_queue_hd = cap->wakeup_queue_hd;
901 cap->run_queue_tl = cap->wakeup_queue_tl;
903 setTSOLink(cap, cap->run_queue_tl, cap->wakeup_queue_hd);
904 cap->run_queue_tl = cap->wakeup_queue_tl;
906 cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE;
907 RELEASE_LOCK(&cap->lock);
912 /* ----------------------------------------------------------------------------
913 * Check for threads blocked on BLACKHOLEs that can be woken up
914 * ------------------------------------------------------------------------- */
916 scheduleCheckBlackHoles (Capability *cap)
918 if ( blackholes_need_checking ) // check without the lock first
920 ACQUIRE_LOCK(&sched_mutex);
921 if ( blackholes_need_checking ) {
922 blackholes_need_checking = rtsFalse;
923 // important that we reset the flag *before* checking the
924 // blackhole queue, otherwise we could get deadlock: if we
925 // scanned the queue first, we might wake a thread that
926 // immediately runs on another Capability and blocks on a
927 // blackhole, and our subsequent flag reset would lose that wakeup.
928 checkBlackHoles(cap);
930 RELEASE_LOCK(&sched_mutex);
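/* Illustrative sketch (not part of the original source): the double-checked
 * "dirty flag" pattern used above, written with plain pthreads types so it
 * stands alone. The crucial detail, as the comment above explains, is that
 * the flag is cleared *before* the queue is scanned: anything that sets the
 * flag after we clear it is picked up on the next pass, whereas clearing it
 * after the scan could throw away a notification that arrived mid-scan.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool example_needs_checking = false;

static void exampleCheck (void (*scan_queue)(void))
{
    if (example_needs_checking) {          // cheap test without the lock
        pthread_mutex_lock(&example_lock);
        if (example_needs_checking) {      // re-test under the lock
            example_needs_checking = false;    // clear first ...
            scan_queue();                      // ... then scan the queue
        }
        pthread_mutex_unlock(&example_lock);
    }
}
#endif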
934 /* ----------------------------------------------------------------------------
935 * Detect deadlock conditions and attempt to resolve them.
936 * ------------------------------------------------------------------------- */
939 scheduleDetectDeadlock (Capability *cap, Task *task)
942 * Detect deadlock: when we have no threads to run, there are no
943 * threads blocked, waiting for I/O, or sleeping, and all the
944 * other tasks are waiting for work, we must have a deadlock of
947 if ( emptyThreadQueues(cap) )
949 #if defined(THREADED_RTS)
951 * In the threaded RTS, we only check for deadlock if there
952 * has been no activity in a complete timeslice. This means
953 * we won't eagerly start a full GC just because we don't have
954 * any threads to run currently.
956 if (recent_activity != ACTIVITY_INACTIVE) return;
959 debugTrace(DEBUG_sched, "deadlocked, forcing major GC...");
961 // Garbage collection can release some new threads due to
962 // either (a) finalizers or (b) threads resurrected because
963 // they are unreachable and will therefore be sent an
964 // exception. Any threads thus released will be immediately
966 cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/);
967 // when force_major == rtsTrue. scheduleDoGC sets
968 // recent_activity to ACTIVITY_DONE_GC and turns off the timer
971 if ( !emptyRunQueue(cap) ) return;
973 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
974 /* If we have user-installed signal handlers, then wait
975 * for signals to arrive rather than bombing out with a
978 if ( RtsFlags.MiscFlags.install_signal_handlers && anyUserHandlers() ) {
979 debugTrace(DEBUG_sched,
980 "still deadlocked, waiting for signals...");
984 if (signals_pending()) {
985 startSignalHandlers(cap);
988 // either we have threads to run, or we were interrupted:
989 ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING);
995 #if !defined(THREADED_RTS)
996 /* Probably a real deadlock. Send the current main thread the
997 * Deadlock exception.
1000 switch (task->tso->why_blocked) {
1002 case BlockedOnBlackHole:
1003 case BlockedOnException:
1005 throwToSingleThreaded(cap, task->tso,
1006 (StgClosure *)nonTermination_closure);
1009 barf("deadlock: main thread blocked in a strange way");
1018 /* ----------------------------------------------------------------------------
1019 * Send pending messages (PARALLEL_HASKELL only)
1020 * ------------------------------------------------------------------------- */
1022 #if defined(PARALLEL_HASKELL)
1024 scheduleSendPendingMessages(void)
1027 # if defined(PAR) // global Mem.Mgmt., omit for now
1028 if (PendingFetches != END_BF_QUEUE) {
1033 if (RtsFlags.ParFlags.BufferTime) {
1034 // if we use message buffering, we must send away all message
1035 // packets which have become too old...
1041 /* ----------------------------------------------------------------------------
1042 * Activate spark threads (PARALLEL_HASKELL and THREADED_RTS)
1043 * ------------------------------------------------------------------------- */
1045 #if defined(THREADED_RTS)
1047 scheduleActivateSpark(Capability *cap)
1051 createSparkThread(cap);
1052 debugTrace(DEBUG_sched, "creating a spark thread");
1055 #endif // PARALLEL_HASKELL || THREADED_RTS
1057 /* ----------------------------------------------------------------------------
1058 * After running a thread...
1059 * ------------------------------------------------------------------------- */
1062 schedulePostRunThread (Capability *cap, StgTSO *t)
1064 // We have to be able to catch transactions that are in an
1065 // infinite loop as a result of seeing an inconsistent view of
1069 // [a,b] <- mapM readTVar [ta,tb]
1070 // when (a == b) loop
1072 // and a is never equal to b given a consistent view of memory.
1074 if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
1075 if (!stmValidateNestOfTransactions (t -> trec)) {
1076 debugTrace(DEBUG_sched | DEBUG_stm,
1077 "trec %p found wasting its time", t);
1079 // strip the stack back to the
1080 // ATOMICALLY_FRAME, aborting the (nested)
1081 // transaction, and saving the stack of any
1082 // partially-evaluated thunks on the heap.
1083 throwToSingleThreaded_(cap, t, NULL, rtsTrue);
1085 ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
1089 /* some statistics gathering in the parallel case */
1092 /* -----------------------------------------------------------------------------
1093 * Handle a thread that returned to the scheduler with ThreadHeapOverflow
1094 * -------------------------------------------------------------------------- */
1097 scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
1099 // did the task ask for a large block?
1100 if (cap->r.rHpAlloc > BLOCK_SIZE) {
1101 // if so, get one and push it on the front of the nursery.
1105 blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
1107 debugTrace(DEBUG_sched,
1108 "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
1109 (long)t->id, whatNext_strs[t->what_next], blocks);
1111 // don't do this if the nursery is (nearly) full, we'll GC first.
1112 if (cap->r.rCurrentNursery->link != NULL ||
1113 cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop
1114 // if the nursery has only one block.
1117 bd = allocGroup( blocks );
1119 cap->r.rNursery->n_blocks += blocks;
1121 // link the new group into the list
1122 bd->link = cap->r.rCurrentNursery;
1123 bd->u.back = cap->r.rCurrentNursery->u.back;
1124 if (cap->r.rCurrentNursery->u.back != NULL) {
1125 cap->r.rCurrentNursery->u.back->link = bd;
1127 #if !defined(THREADED_RTS)
1128 ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
1129 g0s0 == cap->r.rNursery);
1131 cap->r.rNursery->blocks = bd;
1133 cap->r.rCurrentNursery->u.back = bd;
1135 // initialise it as a nursery block. We initialise the
1136 // step, gen_no, and flags field of *every* sub-block in
1137 // this large block, because this is easier than making
1138 // sure that we always find the block head of a large
1139 // block whenever we call Bdescr() (eg. evacuate() and
1140 // isAlive() in the GC would both have to do this, at
1144 for (x = bd; x < bd + blocks; x++) {
1145 x->step = cap->r.rNursery;
1151 // This assert can be a killer if the app is doing lots
1152 // of large block allocations.
1153 IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
1155 // now update the nursery to point to the new block
1156 cap->r.rCurrentNursery = bd;
1158 // we might be unlucky and have another thread get on the
1159 // run queue before us and steal the large block, but in that
1160 // case the thread will just end up requesting another large
1162 pushOnRunQueue(cap,t);
1163 return rtsFalse; /* not actually GC'ing */
1167 debugTrace(DEBUG_sched,
1168 "--<< thread %ld (%s) stopped: HeapOverflow",
1169 (long)t->id, whatNext_strs[t->what_next]);
1171 if (cap->r.rHpLim == NULL || cap->context_switch) {
1172 // Sometimes we miss a context switch, e.g. when calling
1173 // primitives in a tight loop, MAYBE_GC() doesn't check the
1174 // context switch flag, and we end up waiting for a GC.
1175 // See #1984, and concurrent/should_run/1984
1176 cap->context_switch = 0;
1177 addToRunQueue(cap,t);
1179 pushOnRunQueue(cap,t);
1182 /* actual GC is done at the end of the while loop in schedule() */
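/* Illustrative sketch (not part of the original source): how a large
 * allocation request is turned into a whole number of blocks, as done above
 * with BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE. The constant 4096 is a
 * stand-in for BLOCK_SIZE and an assumption for the example only.
 */
#if 0
#include <stddef.h>

static size_t exampleBlocksForRequest (size_t bytes_requested)
{
    const size_t example_block_size = 4096;   /* stand-in for BLOCK_SIZE */
    /* round up to a whole number of blocks */
    return (bytes_requested + example_block_size - 1) / example_block_size;
}
/* e.g. exampleBlocksForRequest(10000) == 3: provided the nursery is not
 * nearly full, a 3-block group is linked onto the front of the nursery and
 * the thread is resumed without triggering a GC. */
#endif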
1185 /* -----------------------------------------------------------------------------
1186 * Handle a thread that returned to the scheduler with ThreadStackOverflow
1187 * -------------------------------------------------------------------------- */
1190 scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
1192 debugTrace (DEBUG_sched,
1193 "--<< thread %ld (%s) stopped, StackOverflow",
1194 (long)t->id, whatNext_strs[t->what_next]);
1196 /* just adjust the stack for this thread, then pop it back
1200 /* enlarge the stack */
1201 StgTSO *new_t = threadStackOverflow(cap, t);
1203 /* The TSO attached to this Task may have moved, so update the
1206 if (task->tso == t) {
1209 pushOnRunQueue(cap,new_t);
1213 /* -----------------------------------------------------------------------------
1214 * Handle a thread that returned to the scheduler with ThreadYielding
1215 * -------------------------------------------------------------------------- */
1218 scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
1220 // Reset the context switch flag. We don't do this just before
1221 // running the thread, because that would mean we would lose ticks
1222 // during GC, which can lead to unfair scheduling (a thread hogs
1223 // the CPU because the tick always arrives during GC). This way
1224 // penalises threads that do a lot of allocation, but that seems
1225 // better than the alternative.
1226 cap->context_switch = 0;
1228 /* put the thread back on the run queue. Then, if we're ready to
1229 * GC, check whether this is the last task to stop. If so, wake
1230 * up the GC thread. getThread will block during a GC until the
1234 if (t->what_next != prev_what_next) {
1235 debugTrace(DEBUG_sched,
1236 "--<< thread %ld (%s) stopped to switch evaluators",
1237 (long)t->id, whatNext_strs[t->what_next]);
1239 debugTrace(DEBUG_sched,
1240 "--<< thread %ld (%s) stopped, yielding",
1241 (long)t->id, whatNext_strs[t->what_next]);
1246 //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
1248 ASSERT(t->_link == END_TSO_QUEUE);
1250 // Shortcut if we're just switching evaluators: don't bother
1251 // doing stack squeezing (which can be expensive), just run the
1253 if (t->what_next != prev_what_next) {
1257 addToRunQueue(cap,t);
1262 /* -----------------------------------------------------------------------------
1263 * Handle a thread that returned to the scheduler with ThreadBlocked
1264 * -------------------------------------------------------------------------- */
1267 scheduleHandleThreadBlocked( StgTSO *t
1274 // We don't need to do anything. The thread is blocked, and it
1275 // has tidied up its stack and placed itself on whatever queue
1276 // it needs to be on.
1278 // ASSERT(t->why_blocked != NotBlocked);
1279 // Not true: for example,
1280 // - in THREADED_RTS, the thread may already have been woken
1281 // up by another Capability. This actually happens: try
1282 // conc023 +RTS -N2.
1283 // - the thread may have woken itself up already, because
1284 // threadPaused() might have raised a blocked throwTo
1285 // exception, see maybePerformBlockedException().
1288 if (traceClass(DEBUG_sched)) {
1289 debugTraceBegin("--<< thread %lu (%s) stopped: ",
1290 (unsigned long)t->id, whatNext_strs[t->what_next]);
1291 printThreadBlockage(t);
1297 /* -----------------------------------------------------------------------------
1298 * Handle a thread that returned to the scheduler with ThreadFinished
1299 * -------------------------------------------------------------------------- */
1302 scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
1304 /* Need to check whether this was a main thread, and if so,
1305 * return with the return value.
1307 * We also end up here if the thread kills itself with an
1308 * uncaught exception, see Exception.cmm.
1310 debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished",
1311 (unsigned long)t->id, whatNext_strs[t->what_next]);
1313 // blocked exceptions can now complete, even if the thread was in
1314 // blocked mode (see #2910). This unconditionally calls
1315 // lockTSO(), which ensures that we don't miss any threads that
1316 // are engaged in throwTo() with this thread as a target.
1317 awakenBlockedExceptionQueue (cap, t);
1320 // Check whether the thread that just completed was a bound
1321 // thread, and if so return with the result.
1323 // There is an assumption here that all thread completion goes
1324 // through this point; we need to make sure that if a thread
1325 // ends up in the ThreadKilled state, that it stays on the run
1326 // queue so it can be dealt with here.
1331 if (t->bound != task) {
1332 #if !defined(THREADED_RTS)
1333 // Must be a bound thread that is not the topmost one. Leave
1334 // it on the run queue until the stack has unwound to the
1335 // point where we can deal with this. Leaving it on the run
1336 // queue also ensures that the garbage collector knows about
1337 // this thread and its return value (it gets dropped from the
1338 // step->threads list so there's no other way to find it).
1339 appendToRunQueue(cap,t);
1342 // this cannot happen in the threaded RTS, because a
1343 // bound thread can only be run by the appropriate Task.
1344 barf("finished bound thread that isn't mine");
1348 ASSERT(task->tso == t);
1350 if (t->what_next == ThreadComplete) {
1352 // NOTE: return val is tso->sp[1] (see StgStartup.hc)
1353 *(task->ret) = (StgClosure *)task->tso->sp[1];
1355 task->stat = Success;
1358 *(task->ret) = NULL;
1360 if (sched_state >= SCHED_INTERRUPTING) {
1361 if (heap_overflow) {
1362 task->stat = HeapExhausted;
1364 task->stat = Interrupted;
1367 task->stat = Killed;
1371 removeThreadLabel((StgWord)task->tso->id);
1373 return rtsTrue; // tells schedule() to return
1379 /* -----------------------------------------------------------------------------
1380 * Perform a heap census
1381 * -------------------------------------------------------------------------- */
1384 scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
1386 // When we have +RTS -i0 and we're heap profiling, do a census at
1387 // every GC. This lets us get repeatable runs for debugging.
1388 if (performHeapProfile ||
1389 (RtsFlags.ProfFlags.profileInterval==0 &&
1390 RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
1397 /* -----------------------------------------------------------------------------
1398 * Perform a garbage collection if necessary
1399 * -------------------------------------------------------------------------- */
1402 scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
1404 rtsBool heap_census;
1406 /* extern static volatile StgWord waiting_for_gc;
1407 lives inside Capability.c */
1408 rtsBool gc_type, prev_pending_gc;
1412 if (sched_state == SCHED_SHUTTING_DOWN) {
1413 // The final GC has already been done, and the system is
1414 // shutting down. We'll probably deadlock if we try to GC
1420 if (sched_state < SCHED_INTERRUPTING
1421 && RtsFlags.ParFlags.parGcEnabled
1422 && N >= RtsFlags.ParFlags.parGcGen
1423 && ! oldest_gen->steps[0].mark)
1425 gc_type = PENDING_GC_PAR;
1427 gc_type = PENDING_GC_SEQ;
1430 // In order to GC, there must be no threads running Haskell code.
1431 // Therefore, the GC thread needs to hold *all* the capabilities,
1432 // and release them after the GC has completed.
1434 // This seems to be the simplest way: previous attempts involved
1435 // making all the threads with capabilities give up their
1436 // capabilities and sleep except for the *last* one, which
1437 // actually did the GC. But it's quite hard to arrange for all
1438 // the other tasks to sleep and stay asleep.
1441 /* Other capabilities are prevented from running yet more Haskell
1442 threads if waiting_for_gc is set. Tested inside
1443 yieldCapability() and releaseCapability() in Capability.c */
1445 prev_pending_gc = cas(&waiting_for_gc, 0, gc_type);
1446 if (prev_pending_gc) {
1448 debugTrace(DEBUG_sched, "someone else is trying to GC (%d)...",
1451 yieldCapability(&cap,task);
1452 } while (waiting_for_gc);
1453 return cap; // NOTE: task->cap might have changed here
1456 setContextSwitches();
1458 // The final shutdown GC is always single-threaded, because it's
1459 // possible that some of the Capabilities have no worker threads.
1461 if (gc_type == PENDING_GC_SEQ)
1463 postEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
1464 // single-threaded GC: grab all the capabilities
1465 for (i=0; i < n_capabilities; i++) {
1466 debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilities (%d/%d)", i, n_capabilities);
1467 if (cap != &capabilities[i]) {
1468 Capability *pcap = &capabilities[i];
1469 // we better hope this task doesn't get migrated to
1470 // another Capability while we're waiting for this one.
1471 // It won't, because load balancing happens while we have
1472 // all the Capabilities, but even so it's a slightly
1473 // unsavoury invariant.
1475 waitForReturnCapability(&pcap, task);
1476 if (pcap != &capabilities[i]) {
1477 barf("scheduleDoGC: got the wrong capability");
1484 // multi-threaded GC: make sure all the Capabilities donate one
1486 postEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
1487 debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
1489 waitForGcThreads(cap);
1493 // so this happens periodically:
1494 if (cap) scheduleCheckBlackHoles(cap);
1496 IF_DEBUG(scheduler, printAllThreads());
1498 delete_threads_and_gc:
1500 * We now have all the capabilities; if we're in an interrupting
1501 * state, then we should take the opportunity to delete all the
1502 * threads in the system.
1504 if (sched_state == SCHED_INTERRUPTING) {
1505 deleteAllThreads(cap);
1506 sched_state = SCHED_SHUTTING_DOWN;
1509 heap_census = scheduleNeedHeapProfile(rtsTrue);
1511 #if defined(THREADED_RTS)
1512 postEvent(cap, EVENT_GC_START, 0, 0);
1513 debugTrace(DEBUG_sched, "doing GC");
1514 // reset waiting_for_gc *before* GC, so that when the GC threads
1515 // emerge they don't immediately re-enter the GC.
1517 GarbageCollect(force_major || heap_census, gc_type, cap);
1519 GarbageCollect(force_major || heap_census, 0, cap);
1521 postEvent(cap, EVENT_GC_END, 0, 0);
1523 if (recent_activity == ACTIVITY_INACTIVE && force_major)
1525 // We are doing a GC because the system has been idle for a
1526 // timeslice and we need to check for deadlock. Record the
1527 // fact that we've done a GC and turn off the timer signal;
1528 // it will get re-enabled if we run any threads after the GC.
1529 recent_activity = ACTIVITY_DONE_GC;
1534 // the GC might have taken long enough for the timer to set
1535 // recent_activity = ACTIVITY_INACTIVE, but we aren't
1536 // necessarily deadlocked:
1537 recent_activity = ACTIVITY_YES;
1540 #if defined(THREADED_RTS)
1541 if (gc_type == PENDING_GC_PAR)
1543 releaseGCThreads(cap);
1548 debugTrace(DEBUG_sched, "performing heap census");
1550 performHeapProfile = rtsFalse;
1553 if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
1554 // GC set the heap_overflow flag, so we should proceed with
1555 // an orderly shutdown now. Ultimately we want the main
1556 // thread to return to its caller with HeapExhausted, at which
1557 // point the caller should call hs_exit(). The first step is
1558 // to delete all the threads.
1560 // Another way to do this would be to raise an exception in
1561 // the main thread, which we really should do because it gives
1562 // the program a chance to clean up. But how do we find the
1563 // main thread? It should presumably be the same one that
1564 // gets ^C exceptions, but that's all done on the Haskell side
1565 // (GHC.TopHandler).
1566 sched_state = SCHED_INTERRUPTING;
1567 goto delete_threads_and_gc;
1572 Once we are all together... this would be the place to balance all
1573 spark pools. No concurrent stealing or adding of new sparks can
1574 occur. Should be defined in Sparks.c. */
1575 balanceSparkPoolsCaps(n_capabilities, capabilities);
1578 #if defined(THREADED_RTS)
1579 if (gc_type == PENDING_GC_SEQ) {
1580 // release our stash of capabilities.
1581 for (i = 0; i < n_capabilities; i++) {
1582 if (cap != &capabilities[i]) {
1583 task->cap = &capabilities[i];
1584 releaseCapability(&capabilities[i]);
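/* Illustrative sketch (not part of the original source): the "exactly one
 * GC leader" protocol used above, reduced to its essentials. A capability
 * that wants to GC does an atomic compare-and-swap on a shared word; if the
 * word was already non-zero, somebody else won the race and we simply yield
 * until the GC is over. The real code uses the RTS's own cas() on
 * waiting_for_gc; the GCC/Clang builtin below is used here only to keep the
 * example self-contained.
 */
#if 0
static volatile unsigned long example_pending_gc = 0;

static int exampleTryToLeadGC (unsigned long my_gc_type)
{
    unsigned long prev =
        __sync_val_compare_and_swap(&example_pending_gc, 0, my_gc_type);
    if (prev != 0) {
        // another capability is already organising a GC: back off, give
        // up the capability, and wait for example_pending_gc to return
        // to zero before continuing.
        return 0;
    }
    return 1;   // we won: stop the other capabilities and collect
}
#endif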
1598 /* ---------------------------------------------------------------------------
1599 * Singleton fork(). Do not copy any running threads.
1600 * ------------------------------------------------------------------------- */
1603 forkProcess(HsStablePtr *entry
1604 #ifndef FORKPROCESS_PRIMOP_SUPPORTED
1609 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
1616 #if defined(THREADED_RTS)
1617 if (RtsFlags.ParFlags.nNodes > 1) {
1618 errorBelch("forking not supported with +RTS -N<n> greater than 1");
1619 stg_exit(EXIT_FAILURE);
1623 debugTrace(DEBUG_sched, "forking!");
1625 // ToDo: for SMP, we should probably acquire *all* the capabilities
1628 // no funny business: hold locks while we fork, otherwise if some
1629 // other thread is holding a lock when the fork happens, the data
1630 // structure protected by the lock will forever be in an
1631 // inconsistent state in the child. See also #1391.
1632 ACQUIRE_LOCK(&sched_mutex);
1633 ACQUIRE_LOCK(&cap->lock);
1634 ACQUIRE_LOCK(&cap->running_task->lock);
1638 if (pid) { // parent
1640 RELEASE_LOCK(&sched_mutex);
1641 RELEASE_LOCK(&cap->lock);
1642 RELEASE_LOCK(&cap->running_task->lock);
1644 // just return the pid
1650 #if defined(THREADED_RTS)
1651 initMutex(&sched_mutex);
1652 initMutex(&cap->lock);
1653 initMutex(&cap->running_task->lock);
1656 // Now, all OS threads except the thread that forked are
1657 // stopped. We need to stop all Haskell threads, including
1658 // those involved in foreign calls. Also we need to delete
1659 // all Tasks, because they correspond to OS threads that are
1662 for (s = 0; s < total_steps; s++) {
1663 for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
1664 if (t->what_next == ThreadRelocated) {
1667 next = t->global_link;
1668 // don't allow threads to catch the ThreadKilled
1669 // exception, but we do want to raiseAsync() because these
1670 // threads may be evaluating thunks that we need later.
1671 deleteThread_(cap,t);
1676 // Empty the run queue. It seems tempting to let all the
1677 // killed threads stay on the run queue as zombies to be
1678 // cleaned up later, but some of them correspond to bound
1679 // threads for which the corresponding Task does not exist.
1680 cap->run_queue_hd = END_TSO_QUEUE;
1681 cap->run_queue_tl = END_TSO_QUEUE;
1683 // Any suspended C-calling Tasks are no more, their OS threads
1685 cap->suspended_ccalling_tasks = NULL;
1687 // Empty the threads lists. Otherwise, the garbage
1688 // collector may attempt to resurrect some of these threads.
1689 for (s = 0; s < total_steps; s++) {
1690 all_steps[s].threads = END_TSO_QUEUE;
1693 // Wipe the task list, except the current Task.
1694 ACQUIRE_LOCK(&sched_mutex);
1695 for (task = all_tasks; task != NULL; task=task->all_link) {
1696 if (task != cap->running_task) {
1697 #if defined(THREADED_RTS)
1698 initMutex(&task->lock); // see #1391
1703 RELEASE_LOCK(&sched_mutex);
1705 #if defined(THREADED_RTS)
1706 // Wipe our spare workers list, they no longer exist. New
1707 // workers will be created if necessary.
1708 cap->spare_workers = NULL;
1709 cap->returning_tasks_hd = NULL;
1710 cap->returning_tasks_tl = NULL;
1713 // On Unix, all timers are reset in the child, so we need to start
1718 cap = rts_evalStableIO(cap, entry, NULL); // run the action
1719 rts_checkSchedStatus("forkProcess",cap);
1722 hs_exit(); // clean up and exit
1723 stg_exit(EXIT_SUCCESS);
1725 #else /* !FORKPROCESS_PRIMOP_SUPPORTED */
1726 barf("forkProcess#: primop not supported on this platform, sorry!\n");
1730 /* ---------------------------------------------------------------------------
1731 * Delete all the threads in the system
1732 * ------------------------------------------------------------------------- */
1735 deleteAllThreads ( Capability *cap )
1737 // NOTE: only safe to call if we own all capabilities.
1742 debugTrace(DEBUG_sched,"deleting all threads");
1743 for (s = 0; s < total_steps; s++) {
1744 for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
1745 if (t->what_next == ThreadRelocated) {
1748 next = t->global_link;
1749 deleteThread(cap,t);
1754 // The run queue now contains a bunch of ThreadKilled threads. We
1755 // must not throw these away: the main thread(s) will be in there
1756 // somewhere, and the main scheduler loop has to deal with it.
1757 // Also, the run queue is the only thing keeping these threads from
1758 // being GC'd, and we don't want the "main thread has been GC'd" panic.
1760 #if !defined(THREADED_RTS)
1761 ASSERT(blocked_queue_hd == END_TSO_QUEUE);
1762 ASSERT(sleeping_queue == END_TSO_QUEUE);
1766 /* -----------------------------------------------------------------------------
1767 Managing the suspended_ccalling_tasks list.
1768 Locks required: sched_mutex
1769 -------------------------------------------------------------------------- */
1772 suspendTask (Capability *cap, Task *task)
1774 ASSERT(task->next == NULL && task->prev == NULL);
1775 task->next = cap->suspended_ccalling_tasks;
1777 if (cap->suspended_ccalling_tasks) {
1778 cap->suspended_ccalling_tasks->prev = task;
1780 cap->suspended_ccalling_tasks = task;
1784 recoverSuspendedTask (Capability *cap, Task *task)
1787 task->prev->next = task->next;
1789 ASSERT(cap->suspended_ccalling_tasks == task);
1790 cap->suspended_ccalling_tasks = task->next;
1793 task->next->prev = task->prev;
1795 task->next = task->prev = NULL;
1798 /* ---------------------------------------------------------------------------
1799 * Suspending & resuming Haskell threads.
1801 * When making a "safe" call to C (aka _ccall_GC), the task gives back
1802 * its capability before calling the C function. This allows another
1803 * task to pick up the capability and carry on running Haskell
1804 * threads. It also means that if the C call blocks, it won't lock
1807 * The Haskell thread making the C call is put to sleep for the
1808 * duration of the call, on the suspended_ccalling_tasks queue. We
1809 * give out a token to the task, which it can use to resume the thread
1810 * on return from the C function.
1811 * ------------------------------------------------------------------------- */
1814 suspendThread (StgRegTable *reg)
1821 StgWord32 saved_winerror;
1824 saved_errno = errno;
1826 saved_winerror = GetLastError();
1829 /* assume that *reg is a pointer to the StgRegTable part of a Capability.
1831 cap = regTableToCapability(reg);
1833 task = cap->running_task;
1834 tso = cap->r.rCurrentTSO;
1836 postEvent(cap, EVENT_STOP_THREAD, tso->id, THREAD_SUSPENDED_FOREIGN_CALL);
1837 debugTrace(DEBUG_sched,
1838 "thread %lu did a safe foreign call",
1839 (unsigned long)cap->r.rCurrentTSO->id);
1841 // XXX this might not be necessary --SDM
1842 tso->what_next = ThreadRunGHC;
1844 threadPaused(cap,tso);
1846 if ((tso->flags & TSO_BLOCKEX) == 0) {
1847 tso->why_blocked = BlockedOnCCall;
1848 tso->flags |= TSO_BLOCKEX;
1849 tso->flags &= ~TSO_INTERRUPTIBLE;
1851 tso->why_blocked = BlockedOnCCall_NoUnblockExc;
1854 // Hand back capability
1855 task->suspended_tso = tso;
1857 ACQUIRE_LOCK(&cap->lock);
1859 suspendTask(cap,task);
1860 cap->in_haskell = rtsFalse;
1861 releaseCapability_(cap,rtsFalse);
1863 RELEASE_LOCK(&cap->lock);
1865 #if defined(THREADED_RTS)
1866 /* Preparing to leave the RTS, so ensure there's a native thread/task
1867 waiting to take over.
1869 debugTrace(DEBUG_sched, "thread %lu: leaving RTS", (unsigned long)tso->id);
1872 errno = saved_errno;
1874 SetLastError(saved_winerror);
1880 resumeThread (void *task_)
1887 StgWord32 saved_winerror;
1890 saved_errno = errno;
1892 saved_winerror = GetLastError();
1896 // Wait for permission to re-enter the RTS with the result.
1897 waitForReturnCapability(&cap,task);
1898 // we might be on a different capability now... but if so, our
1899 // entry on the suspended_ccalling_tasks list will also have been
1902 // Remove the thread from the suspended list
1903 recoverSuspendedTask(cap,task);
1905 tso = task->suspended_tso;
1906 task->suspended_tso = NULL;
1907 tso->_link = END_TSO_QUEUE; // no write barrier reqd
1909 postEvent(cap, EVENT_RUN_THREAD, tso->id, 0);
1910 debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
1912 if (tso->why_blocked == BlockedOnCCall) {
1913 // avoid locking the TSO if we don't have to
1914 if (tso->blocked_exceptions != END_TSO_QUEUE) {
1915 awakenBlockedExceptionQueue(cap,tso);
1917 tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
1920 /* Reset blocking status */
1921 tso->why_blocked = NotBlocked;
1923 cap->r.rCurrentTSO = tso;
1924 cap->in_haskell = rtsTrue;
1925 errno = saved_errno;
1927 SetLastError(saved_winerror);
1930 /* We might have GC'd, mark the TSO dirty again */
1933 IF_DEBUG(sanity, checkTSO(tso));
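/* Illustrative sketch (not part of the original source): the calling
 * convention that suspendThread()/resumeThread() above are designed for.
 * The code generated for a "safe" foreign import brackets the C call
 * roughly as below: the token returned by suspendThread() identifies the
 * suspended Task, and resumeThread() hands back a register table once the
 * Task has re-acquired a Capability. exampleSafeCCall and foreign_fn are
 * illustrative names only.
 */
#if 0
static int exampleSafeCCall (StgRegTable *reg, int (*foreign_fn)(int), int arg)
{
    void *token;
    int result;

    token = suspendThread(reg);    // give up the Capability
    result = foreign_fn(arg);      // may block for a long time; other
                                   // Haskell threads keep running meanwhile
    reg = resumeThread(token);     // wait for a Capability and re-enter
    (void)reg;
    return result;
}
#endif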
1938 /* ---------------------------------------------------------------------------
1941 * scheduleThread puts a thread on the end of the runnable queue.
1942 * This will usually be done immediately after a thread is created.
1943 * The caller of scheduleThread must create the thread using e.g.
1944 * createThread and push an appropriate closure
1945 * on this thread's stack before the scheduler is invoked.
1946 * ------------------------------------------------------------------------ */
1949 scheduleThread(Capability *cap, StgTSO *tso)
1951 // The thread goes at the *end* of the run-queue, to avoid possible
1952 // starvation of any threads already on the queue.
1953 appendToRunQueue(cap,tso);
1957 scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
1959 #if defined(THREADED_RTS)
1960 tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
1961 // move this thread from now on.
1962 cpu %= RtsFlags.ParFlags.nNodes;
1963 if (cpu == cap->no) {
1964 appendToRunQueue(cap,tso);
1966 postEvent (cap, EVENT_MIGRATE_THREAD, tso->id, capabilities[cpu].no);
1967 wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
1970 appendToRunQueue(cap,tso);
1975 scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
1979 // We already created/initialised the Task
1980 task = cap->running_task;
1982 // This TSO is now a bound thread; make the Task and TSO
1983 // point to each other.
1989 task->stat = NoStatus;
1991 appendToRunQueue(cap,tso);
1993 debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
1995 cap = schedule(cap,task);
1997 ASSERT(task->stat != NoStatus);
1998 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
2000 debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id);
2004 /* ----------------------------------------------------------------------------
2006 * ------------------------------------------------------------------------- */
2008 #if defined(THREADED_RTS)
2009 void OSThreadProcAttr
2010 workerStart(Task *task)
2014 // See startWorkerTask().
2015 ACQUIRE_LOCK(&task->lock);
2017 RELEASE_LOCK(&task->lock);
2019 if (RtsFlags.ParFlags.setAffinity) {
2020 setThreadAffinity(cap->no, n_capabilities);
2023 // set the thread-local pointer to the Task:
2026 // schedule() runs without a lock.
2027 cap = schedule(cap,task);
2029 // On exit from schedule(), we have a Capability, but possibly not
2030 // the same one we started with.
2032 // During shutdown, the requirement is that after all the
2033 // Capabilities are shut down, all workers that are shutting down
2034 // have finished workerTaskStop(). This is why we hold on to
2035 // cap->lock until we've finished workerTaskStop() below.
2037 // There may be workers still involved in foreign calls; those
2038 // will just block in waitForReturnCapability() because the
2039 // Capability has been shut down.
2041 ACQUIRE_LOCK(&cap->lock);
2042 releaseCapability_(cap,rtsFalse);
2043 workerTaskStop(task);
2044 RELEASE_LOCK(&cap->lock);
2048 /* ---------------------------------------------------------------------------
2051 * Initialise the scheduler. This resets all the queues - if the
2052 * queues contained any threads, they'll be garbage collected at the next GC.
2055 * ------------------------------------------------------------------------ */
2060 #if !defined(THREADED_RTS)
2061 blocked_queue_hd = END_TSO_QUEUE;
2062 blocked_queue_tl = END_TSO_QUEUE;
2063 sleeping_queue = END_TSO_QUEUE;
2066 blackhole_queue = END_TSO_QUEUE;
2068 sched_state = SCHED_RUNNING;
2069 recent_activity = ACTIVITY_YES;
2071 #if defined(THREADED_RTS)
2072 /* Initialise the mutex and condition variables used by the scheduler. */
2074 initMutex(&sched_mutex);
2077 ACQUIRE_LOCK(&sched_mutex);
2079 /* A capability holds the state a native thread needs in
2080 * order to execute STG code. At least one capability is
2081 * floating around (only THREADED_RTS builds have more than one).
2087 #if defined(THREADED_RTS)
2091 #if defined(THREADED_RTS)
2093 * Eagerly start one worker to run each Capability, except for
2094 * Capability 0. The idea is that we're probably going to start a
2095 * bound thread on Capability 0 pretty soon, so we don't want a
2096 * worker task hogging it.
2101 for (i = 1; i < n_capabilities; i++) {
2102 cap = &capabilities[i];
2103 ACQUIRE_LOCK(&cap->lock);
2104 startWorkerTask(cap, workerStart);
2105 RELEASE_LOCK(&cap->lock);
2110 RELEASE_LOCK(&sched_mutex);
2115 rtsBool wait_foreign
2116 #if !defined(THREADED_RTS)
2117 __attribute__((unused))
2120 /* see Capability.c, shutdownCapability() */
2124 task = newBoundTask();
2126 // If we haven't killed all the threads yet, do it now.
2127 if (sched_state < SCHED_SHUTTING_DOWN) {
2128 sched_state = SCHED_INTERRUPTING;
2129 waitForReturnCapability(&task->cap,task);
2130 scheduleDoGC(task->cap,task,rtsFalse);
2131 releaseCapability(task->cap);
2133 sched_state = SCHED_SHUTTING_DOWN;
2135 #if defined(THREADED_RTS)
2139 for (i = 0; i < n_capabilities; i++) {
2140 shutdownCapability(&capabilities[i], task, wait_foreign);
2142 boundTaskExiting(task);
2148 freeScheduler( void )
2152 ACQUIRE_LOCK(&sched_mutex);
2153 still_running = freeTaskManager();
2154 // We can only free the Capabilities if there are no Tasks still
2155 // running. We might have a Task about to return from a foreign
2156 // call into waitForReturnCapability(), for example (actually,
2157 // this should be the *only* thing that a still-running Task can
2158 // do at this point, and it will block waiting for the Capability).
2160 if (still_running == 0) {
2162 if (n_capabilities != 1) {
2163 stgFree(capabilities);
2166 RELEASE_LOCK(&sched_mutex);
2167 #if defined(THREADED_RTS)
2168 closeMutex(&sched_mutex);
2172 /* -----------------------------------------------------------------------------
2175 This is the interface to the garbage collector from Haskell land.
2176 We provide this so that external C code can allocate and garbage
2177 collect when called from Haskell via _ccall_GC.
2178 -------------------------------------------------------------------------- */
2181 performGC_(rtsBool force_major)
2185 // We must grab a new Task here, because the existing Task may be
2186 // associated with a particular Capability, and chained onto the
2187 // suspended_ccalling_tasks queue.
2188 task = newBoundTask();
2190 waitForReturnCapability(&task->cap,task);
2191 scheduleDoGC(task->cap,task,force_major);
2192 releaseCapability(task->cap);
2193 boundTaskExiting(task);
2199 performGC_(rtsFalse);
2203 performMajorGC(void)
2205 performGC_(rtsTrue);
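/* Usage sketch (hedged): foreign C code called from Haskell can force a
 * collection directly; myReleaseCaches is a hypothetical caller:
 *
 *   void myReleaseCaches(void)
 *   {
 *       // ... drop C-side references to heap objects ...
 *       performMajorGC();   // or performGC() if a major GC isn't required
 *   }
 */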
2208 /* -----------------------------------------------------------------------------
2211 If the thread has reached its maximum stack size, then raise the
2212 StackOverflow exception in the offending thread. Otherwise
2213 relocate the TSO into a larger chunk of memory and adjust its stack
2215 -------------------------------------------------------------------------- */
2218 threadStackOverflow(Capability *cap, StgTSO *tso)
2220 nat new_stack_size, stack_words;
2225 IF_DEBUG(sanity,checkTSO(tso));
2227 // don't allow throwTo() to modify the blocked_exceptions queue
2228 // while we are moving the TSO:
2229 lockClosure((StgClosure *)tso);
2231 if (tso->stack_size >= tso->max_stack_size && !(tso->flags & TSO_BLOCKEX)) {
2232 // NB. never raise a StackOverflow exception if the thread is
2233 // inside Control.Exception.block. It is impractical to protect
2234 // against stack overflow exceptions, since virtually anything
2235 // can raise one (even 'catch'), so this is the only sensible
2236 // thing to do here. See bug #767.
2238 debugTrace(DEBUG_gc,
2239 "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
2240 (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
2242 /* If we're debugging, just print out the top of the stack */
2243 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2246 // Send this thread the StackOverflow exception
2248 throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
2252 /* Try to double the current stack size. If that takes us over the
2253 * maximum stack size for this thread, then use the maximum instead
2254 * (that is, unless we're already at or over the max size and we
2255 * can't raise the StackOverflow exception (see above), in which
2256 * case just double the size). Finally round up so the TSO ends up as
2257 * a whole number of blocks.
2259 if (tso->stack_size >= tso->max_stack_size) {
2260 new_stack_size = tso->stack_size * 2;
2262 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
2264 new_tso_size = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
2265 TSO_STRUCT_SIZE)/sizeof(W_);
2266 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
2267 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
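/* Worked example (illustrative figures only): with stack_size = 10,000
 * words and max_stack_size = 65,536 words, doubling gives 20,000 words;
 * BLOCK_ROUND_UP then pads 20,000*sizeof(W_) + TSO_STRUCT_SIZE up to a
 * whole number of blocks (round_to_mblocks keeps large TSOs
 * megablock-friendly), after which new_stack_size is whatever is left
 * once TSO_STRUCT_SIZEW is subtracted again. */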
2269 debugTrace(DEBUG_sched,
2270 "increasing stack size from %ld words to %d.",
2271 (long)tso->stack_size, new_stack_size);
2273 dest = (StgTSO *)allocateLocal(cap,new_tso_size);
2274 TICK_ALLOC_TSO(new_stack_size,0);
2276 /* copy the TSO block and the old stack into the new area */
2277 memcpy(dest,tso,TSO_STRUCT_SIZE);
2278 stack_words = tso->stack + tso->stack_size - tso->sp;
2279 new_sp = (P_)dest + new_tso_size - stack_words;
2280 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
2282 /* relocate the stack pointers... */
2284 dest->stack_size = new_stack_size;
2286 /* Mark the old TSO as relocated. We have to check for relocated
2287 * TSOs in the garbage collector and any primops that deal with TSOs.
2289 * It's important to set the sp value to just beyond the end
2290 * of the stack, so we don't attempt to scavenge any part of the old stack.
2293 tso->what_next = ThreadRelocated;
2294 setTSOLink(cap,tso,dest);
2295 tso->sp = (P_)&(tso->stack[tso->stack_size]);
2296 tso->why_blocked = NotBlocked;
2301 IF_DEBUG(sanity,checkTSO(dest));
2303 IF_DEBUG(scheduler,printTSO(dest));
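/* Note: callers must continue with the TSO returned from this function
 * (presumably 'dest'); the original TSO has been marked ThreadRelocated
 * above and now merely forwards to the new one via its link field. */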
2310 threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
2312 bdescr *bd, *new_bd;
2313 lnat free_w, tso_size_w;
2316 tso_size_w = tso_sizeW(tso);
2318 if (tso_size_w < MBLOCK_SIZE_W ||
2319 // TSO is less than 2 mblocks (since the first mblock is
2320 // shorter than MBLOCK_SIZE_W)
2321 (tso_size_w - BLOCKS_PER_MBLOCK*BLOCK_SIZE_W) % MBLOCK_SIZE_W != 0 ||
2322 // or TSO is not a whole number of megablocks (ensuring
2323 // precondition of splitLargeBlock() below)
2324 (tso_size_w <= round_up_to_mblocks(RtsFlags.GcFlags.initialStkSize)) ||
2325 // or TSO is smaller than the minimum stack size (rounded up)
2326 (nat)(tso->stack + tso->stack_size - tso->sp) > tso->stack_size / 4)
2327 // or stack is using more than 1/4 of the available space
2333 // don't allow throwTo() to modify the blocked_exceptions queue
2334 // while we are moving the TSO:
2335 lockClosure((StgClosure *)tso);
2337 // this is the number of words we'll free
2338 free_w = round_to_mblocks(tso_size_w/2);
2340 bd = Bdescr((StgPtr)tso);
2341 new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
2342 bd->free = bd->start + TSO_STRUCT_SIZEW;
2344 new_tso = (StgTSO *)new_bd->start;
2345 memcpy(new_tso,tso,TSO_STRUCT_SIZE);
2346 new_tso->stack_size = new_bd->free - new_tso->stack;
2348 debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
2349 (long)tso->id, tso_size_w, tso_sizeW(new_tso));
2351 tso->what_next = ThreadRelocated;
2352 tso->_link = new_tso; // no write barrier reqd: same generation
2354 // The TSO attached to this Task may have moved, so update the
2356 if (task->tso == tso) {
2357 task->tso = new_tso;
2363 IF_DEBUG(sanity,checkTSO(new_tso));
2368 /* ---------------------------------------------------------------------------
2370 - usually called inside a signal handler so it mustn't do anything fancy.
2371 ------------------------------------------------------------------------ */
2374 interruptStgRts(void)
2376 sched_state = SCHED_INTERRUPTING;
2377 setContextSwitches();
2378 #if defined(THREADED_RTS)
2383 /* -----------------------------------------------------------------------------
2386 This function causes at least one OS thread to wake up and run the
2387 scheduler loop. It is invoked when the RTS might be deadlocked, or
2388 an external event has arrived that may need servicing (eg. a
2389 keyboard interrupt).
2391 In the single-threaded RTS we don't do anything here; there is only
2392 one thread, and the event that caused us to want to wake up will
2393 already have interrupted any blocking system call in progress.
2394 -------------------------------------------------------------------------- */
2396 #if defined(THREADED_RTS)
2397 void wakeUpRts(void)
2399 // This forces the IO Manager thread to wake up, which will
2400 // in turn ensure that some OS thread wakes up and runs the
2401 // scheduler loop, which will cause a GC and deadlock check.
2406 /* -----------------------------------------------------------------------------
2409 * Check the blackhole_queue for threads that can be woken up. We do
2410 * this periodically: before every GC, and whenever the run queue is empty.
2413 * An elegant solution might be to just wake up all the blocked
2414 * threads with awakenBlockedQueue occasionally: they'll go back to
2415 * sleep again if the object is still a BLACKHOLE. Unfortunately this
2416 * doesn't give us a way to tell whether we've actually managed to
2417 * wake up any threads, so we would be busy-waiting.
2419 * -------------------------------------------------------------------------- */
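/* A rough sketch of the expected call pattern (hedged; the real call site
 * lives earlier in this file and may differ in detail): the scheduler
 * tests blackholes_need_checking, clears the flag, and then calls
 * checkBlackHoles() while holding sched_mutex, e.g.:
 *
 *   if (blackholes_need_checking) {
 *       ACQUIRE_LOCK(&sched_mutex);
 *       blackholes_need_checking = rtsFalse;  // clear before re-checking
 *       checkBlackHoles(cap);
 *       RELEASE_LOCK(&sched_mutex);
 *   }
 */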
2422 checkBlackHoles (Capability *cap)
2425 rtsBool any_woke_up = rtsFalse;
2428 // blackhole_queue is global:
2429 ASSERT_LOCK_HELD(&sched_mutex);
2431 debugTrace(DEBUG_sched, "checking threads blocked on black holes");
2433 // ASSUMES: sched_mutex
2434 prev = &blackhole_queue;
2435 t = blackhole_queue;
2436 while (t != END_TSO_QUEUE) {
2437 if (t->what_next == ThreadRelocated) {
2441 ASSERT(t->why_blocked == BlockedOnBlackHole);
2442 type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
2443 if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
2444 IF_DEBUG(sanity,checkTSO(t));
2445 t = unblockOne(cap, t);
2447 any_woke_up = rtsTrue;
2457 /* -----------------------------------------------------------------------------
2460 This is used for interruption (^C) and forking, and corresponds to
2461 raising an exception but without letting the thread catch the
2463 -------------------------------------------------------------------------- */
2466 deleteThread (Capability *cap, StgTSO *tso)
2468 // NOTE: must only be called on a TSO that we have exclusive
2469 // access to, because we will call throwToSingleThreaded() below.
2470 // The TSO must be on the run queue of the Capability we own, or
2471 // we must own all Capabilities.
2473 if (tso->why_blocked != BlockedOnCCall &&
2474 tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
2475 throwToSingleThreaded(cap,tso,NULL);
2479 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
2481 deleteThread_(Capability *cap, StgTSO *tso)
2482 { // for forkProcess only:
2483 // like deleteThread(), but we delete threads in foreign calls, too.
2485 if (tso->why_blocked == BlockedOnCCall ||
2486 tso->why_blocked == BlockedOnCCall_NoUnblockExc) {
2487 unblockOne(cap,tso);
2488 tso->what_next = ThreadKilled;
2490 deleteThread(cap,tso);
2495 /* -----------------------------------------------------------------------------
2496 raiseExceptionHelper
2498 This function is called by the raise# primitive, just so that we can
2499 move some of the tricky bits of raising an exception from C-- into
2500 C. Who knows, it might be a useful, reusable thing here too.
2501 -------------------------------------------------------------------------- */
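/* In outline: walk the stack from the current stack pointer, overwriting
 * the updatee of every update frame passed with a 'raise# exception'
 * thunk, and return the type of the frame we stop at (e.g. CATCH_FRAME,
 * ATOMICALLY_FRAME, CATCH_STM_FRAME), presumably leaving tso->sp pointing
 * at that frame so the C-- caller can decide how to continue. */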
2504 raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
2506 Capability *cap = regTableToCapability(reg);
2507 StgThunk *raise_closure = NULL;
2509 StgRetInfoTable *info;
2511 // This closure represents the expression 'raise# E' where E
2512 // is the exception being raised. It is used to overwrite all the
2513 // thunks which are currently under evaluation.
2516 // OLD COMMENT (we don't have MIN_UPD_SIZE now):
2517 // LDV profiling: stg_raise_info has THUNK as its closure
2518 // type. Since a THUNK takes at least MIN_UPD_SIZE words in its
2519 // payload, MIN_UPD_SIZE is more appropriate than 1. It seems that
2520 // 1 does not cause any problem unless profiling is performed.
2521 // However, when LDV profiling goes on, we need to linearly scan
2522 // the small object pool, where raise_closure is stored, so we should
2523 // use MIN_UPD_SIZE.
2525 // raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
2526 // sizeofW(StgClosure)+1);
2530 // Walk up the stack, looking for the catch frame. On the way,
2531 // we update any closures pointed to from update frames with the
2532 // raise closure that we just built.
2536 info = get_ret_itbl((StgClosure *)p);
2537 next = p + stack_frame_sizeW((StgClosure *)p);
2538 switch (info->i.type) {
2541 // Only create raise_closure if we need to.
2542 if (raise_closure == NULL) {
2544 (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
2545 SET_HDR(raise_closure, &stg_raise_info, CCCS);
2546 raise_closure->payload[0] = exception;
2548 UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
2552 case ATOMICALLY_FRAME:
2553 debugTrace(DEBUG_stm, "found ATOMICALLY_FRAME at %p", p);
2555 return ATOMICALLY_FRAME;
2561 case CATCH_STM_FRAME:
2562 debugTrace(DEBUG_stm, "found CATCH_STM_FRAME at %p", p);
2564 return CATCH_STM_FRAME;
2570 case CATCH_RETRY_FRAME:
2579 /* -----------------------------------------------------------------------------
2580 findRetryFrameHelper
2582 This function is called by the retry# primitive. It traverses the stack
2583 leaving tso->sp referring to the frame which should handle the retry.
2585 This should either be a CATCH_RETRY_FRAME (if the retry# is within an orElse#)
2586 or should be an ATOMICALLY_FRAME (if the retry# reaches the top level).
2588 We skip CATCH_STM_FRAMEs (aborting and rolling back the nested tx that they
2589 create) because retries are not considered to be exceptions, despite the
2590 similar implementation.
2592 We should not expect to see CATCH_FRAME or STOP_FRAME because those should
2593 not be created within memory transactions.
2594 -------------------------------------------------------------------------- */
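/* For example (illustrative): in 'atomically (m1 `orElse` m2)', a retry#
 * raised inside m1 should stop at the CATCH_RETRY_FRAME pushed by orElse#,
 * whereas a retry# with no enclosing orElse# should walk all the way out
 * to the ATOMICALLY_FRAME of the transaction. */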
2597 findRetryFrameHelper (StgTSO *tso)
2600 StgRetInfoTable *info;
2604 info = get_ret_itbl((StgClosure *)p);
2605 next = p + stack_frame_sizeW((StgClosure *)p);
2606 switch (info->i.type) {
2608 case ATOMICALLY_FRAME:
2609 debugTrace(DEBUG_stm,
2610 "found ATOMICALLY_FRAME at %p during retry", p);
2612 return ATOMICALLY_FRAME;
2614 case CATCH_RETRY_FRAME:
2615 debugTrace(DEBUG_stm,
2616 "found CATCH_RETRY_FRAME at %p during retrry", p);
2618 return CATCH_RETRY_FRAME;
2620 case CATCH_STM_FRAME: {
2621 StgTRecHeader *trec = tso -> trec;
2622 StgTRecHeader *outer = stmGetEnclosingTRec(trec);
2623 debugTrace(DEBUG_stm,
2624 "found CATCH_STM_FRAME at %p during retry", p);
2625 debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer);
2626 stmAbortTransaction(tso -> cap, trec);
2627 stmFreeAbortedTRec(tso -> cap, trec);
2628 tso -> trec = outer;
2635 ASSERT(info->i.type != CATCH_FRAME);
2636 ASSERT(info->i.type != STOP_FRAME);
2643 /* -----------------------------------------------------------------------------
2644 resurrectThreads is called after garbage collection on the list of
2645 threads found to be garbage. Each of these threads will be woken
2646 up and sent an exception: BlockedOnDeadMVar if the thread was blocked
2647 on an MVar, or NonTermination if the thread was blocked on a Black Hole.
2650 Locks: assumes we hold *all* the capabilities.
2651 -------------------------------------------------------------------------- */
2654 resurrectThreads (StgTSO *threads)
2660 for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
2661 next = tso->global_link;
2663 step = Bdescr((P_)tso)->step;
2664 tso->global_link = step->threads;
2665 step->threads = tso;
2667 debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);
2669 // Wake up the thread on the Capability it was last on
2672 switch (tso->why_blocked) {
2674 case BlockedOnException:
2675 /* Called by GC - sched_mutex lock is currently held. */
2676 throwToSingleThreaded(cap, tso,
2677 (StgClosure *)blockedOnDeadMVar_closure);
2679 case BlockedOnBlackHole:
2680 throwToSingleThreaded(cap, tso,
2681 (StgClosure *)nonTermination_closure);
2684 throwToSingleThreaded(cap, tso,
2685 (StgClosure *)blockedIndefinitely_closure);
2688 /* This might happen if the thread was blocked on a black hole
2689 * belonging to a thread that we've just woken up (raiseAsync
2690 * can wake up threads, remember...).
2694 barf("resurrectThreads: thread blocked in a strange way");
2699 /* -----------------------------------------------------------------------------
2700 performPendingThrowTos is called after garbage collection, and
2701 passed a list of threads that were found to have pending throwTos
2702 (tso->blocked_exceptions was not empty), and were blocked.
2703 Normally this doesn't happen, because we would deliver the
2704 exception directly if the target thread is blocked, but there are
2705 small windows where it might occur on a multiprocessor (see
2708 NB. we must be holding all the capabilities at this point, just
2709 like resurrectThreads().
2710 -------------------------------------------------------------------------- */
2713 performPendingThrowTos (StgTSO *threads)
2719 for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
2720 next = tso->global_link;
2722 step = Bdescr((P_)tso)->step;
2723 tso->global_link = step->threads;
2724 step->threads = tso;
2726 debugTrace(DEBUG_sched, "performing blocked throwTo to thread %lu", (unsigned long)tso->id);
2729 maybePerformBlockedException(cap, tso);