1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2005
5 * The scheduler and thread-related functionality
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
14 #include "BlockAlloc.h"
15 #include "OSThreads.h"
20 #include "StgMiscClosures.h"
21 #include "Interpreter.h"
22 #include "Exception.h"
24 #include "RtsSignals.h"
30 #include "ThreadLabels.h"
31 #include "LdvProfile.h"
34 #include "Proftimer.h"
37 #if defined(GRAN) || defined(PARALLEL_HASKELL)
38 # include "GranSimRts.h"
40 # include "ParallelRts.h"
41 # include "Parallel.h"
42 # include "ParallelDebug.h"
47 #include "Capability.h"
49 #include "AwaitEvent.h"
51 #ifdef HAVE_SYS_TYPES_H
52 #include <sys/types.h>
66 // Turn off inlining when debugging - it obfuscates things
69 # define STATIC_INLINE static
73 #define USED_WHEN_THREADED_RTS
74 #define USED_WHEN_NON_THREADED_RTS STG_UNUSED
76 #define USED_WHEN_THREADED_RTS STG_UNUSED
77 #define USED_WHEN_NON_THREADED_RTS
83 #define USED_WHEN_SMP STG_UNUSED
86 /* -----------------------------------------------------------------------------
88 * -------------------------------------------------------------------------- */
92 StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */
93 /* rtsTime TimeOfNextEvent, EndOfTimeSlice; now in GranSim.c */
96 In GranSim we have a runnable and a blocked queue for each processor.
97  In order to minimise code changes, new arrays run_queue_hds/tls
98 are created. run_queue_hd is then a short cut (macro) for
99 run_queue_hds[CurrentProc] (see GranSim.h).
102 StgTSO *run_queue_hds[MAX_PROC], *run_queue_tls[MAX_PROC];
103 StgTSO *blocked_queue_hds[MAX_PROC], *blocked_queue_tls[MAX_PROC];
104 StgTSO *ccalling_threadss[MAX_PROC];
105 /* We use the same global list of threads (all_threads) in GranSim as in
106 the std RTS (i.e. we are cheating). However, we don't use this list in
107 the GranSim specific code at the moment (so we are only potentially
112 #if !defined(THREADED_RTS)
113 // Blocked/sleeping threads
114 StgTSO *blocked_queue_hd = NULL;
115 StgTSO *blocked_queue_tl = NULL;
116 StgTSO *sleeping_queue = NULL; // perhaps replace with a hash table?
119 /* Threads blocked on blackholes.
120 * LOCK: sched_mutex+capability, or all capabilities
122 StgTSO *blackhole_queue = NULL;
125 /* The blackhole_queue should be checked for threads to wake up. See
126 * Schedule.h for more thorough comment.
127 * LOCK: none (doesn't matter if we miss an update)
129 rtsBool blackholes_need_checking = rtsFalse;
131 /* Linked list of all threads.
132 * Used for detecting garbage collected threads.
133 * LOCK: sched_mutex+capability, or all capabilities
135 StgTSO *all_threads = NULL;
137 /* flag set by signal handler to precipitate a context switch
138 * LOCK: none (just an advisory flag)
140 int context_switch = 0;
142 /* flag that tracks whether we have done any execution in this time slice.
143 * LOCK: currently none, perhaps we should lock (but needs to be
144 * updated in the fast path of the scheduler).
146 nat recent_activity = ACTIVITY_YES;
148 /* if this flag is set as well, give up execution
149 * LOCK: none (changes once, from false->true)
151 rtsBool interrupted = rtsFalse;
153 /* Next thread ID to allocate.
156 static StgThreadID next_thread_id = 1;
158 /* The smallest stack size that makes any sense is:
159 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
160 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
161 * + 1 (the closure to enter)
163 * + 1 (spare slot req'd by stg_ap_v_ret)
165 * A thread with this stack will bomb immediately with a stack
166 * overflow, which will increase its stack size.
168 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 3)
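/* A hedged note (not from the original source): the literal 3 in
 * MIN_STACK_WORDS stands for the single-word slots itemised in the
 * comment above (the closure to enter, plus the slots required by
 * stg_ap_v_ret).  For instance, if RESERVED_STACK_WORDS were 21 and
 * sizeofW(StgStopFrame) were 2, the minimum stack would be 26 words;
 * the real values are configuration-dependent. */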
174 /* This is used in `TSO.h' and gcc 2.96 insists that this variable actually
175 * exists - earlier gccs apparently didn't.
181 * Set to TRUE when entering a shutdown state (via shutdownHaskellAndExit()) --
182 * in an MT setting, needed to signal that a worker thread shouldn't hang around
183 * in the scheduler when it is out of work.
185 rtsBool shutting_down_scheduler = rtsFalse;
188 * This mutex protects most of the global scheduler data in
189  * the THREADED_RTS (including SMP) runtime.
191 #if defined(THREADED_RTS)
192 Mutex sched_mutex = INIT_MUTEX_VAR;
195 #if defined(PARALLEL_HASKELL)
197 rtsTime TimeOfLastYield;
198 rtsBool emitSchedule = rtsTrue;
201 /* -----------------------------------------------------------------------------
202 * static function prototypes
203 * -------------------------------------------------------------------------- */
205 static Capability *schedule (Capability *initialCapability, Task *task);
208 // These functions all encapsulate parts of the scheduler loop, and are
209 // abstracted only to make the structure and control flow of the
210 // scheduler clearer.
212 static void schedulePreLoop (void);
213 static void scheduleStartSignalHandlers (void);
214 static void scheduleCheckBlockedThreads (Capability *cap);
215 static void scheduleCheckBlackHoles (Capability *cap);
216 static void scheduleDetectDeadlock (Capability *cap, Task *task);
218 static StgTSO *scheduleProcessEvent(rtsEvent *event);
220 #if defined(PARALLEL_HASKELL)
221 static StgTSO *scheduleSendPendingMessages(void);
222 static void scheduleActivateSpark(void);
223 static rtsBool scheduleGetRemoteWork(rtsBool *receivedFinish);
225 #if defined(PAR) || defined(GRAN)
226 static void scheduleGranParReport(void);
228 static void schedulePostRunThread(void);
229 static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
230 static void scheduleHandleStackOverflow( Capability *cap, Task *task,
232 static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t,
233 nat prev_what_next );
234 static void scheduleHandleThreadBlocked( StgTSO *t );
235 static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
237 static rtsBool scheduleDoHeapProfile(rtsBool ready_to_gc);
238 static void scheduleDoGC(Capability *cap, Task *task, rtsBool force_major);
240 static void unblockThread(Capability *cap, StgTSO *tso);
241 static rtsBool checkBlackHoles(Capability *cap);
242 static void AllRoots(evac_fn evac);
244 static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
246 static void raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception,
247 rtsBool stop_at_atomically);
249 static void deleteThread (Capability *cap, StgTSO *tso);
250 static void deleteRunQueue (Capability *cap);
253 static void printThreadBlockage(StgTSO *tso);
254 static void printThreadStatus(StgTSO *tso);
255 void printThreadQueue(StgTSO *tso);
258 #if defined(PARALLEL_HASKELL)
259 StgTSO * createSparkThread(rtsSpark spark);
260 StgTSO * activateSpark (rtsSpark spark);
264 static char *whatNext_strs[] = {
274 /* -----------------------------------------------------------------------------
275 * Putting a thread on the run queue: different scheduling policies
276 * -------------------------------------------------------------------------- */
279 addToRunQueue( Capability *cap, StgTSO *t )
281 #if defined(PARALLEL_HASKELL)
282 if (RtsFlags.ParFlags.doFairScheduling) {
283 // this does round-robin scheduling; good for concurrency
284 appendToRunQueue(cap,t);
286 // this does unfair scheduling; good for parallelism
287 pushOnRunQueue(cap,t);
290 // this does round-robin scheduling; good for concurrency
291 appendToRunQueue(cap,t);
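/* Illustrative contrast of the two policies above (a sketch, using
 * only the queue primitives that appear elsewhere in this file):
 *
 *   appendToRunQueue(cap,t);  // t goes to the tail: runs after the
 *                             // threads already queued (round-robin)
 *   pushOnRunQueue(cap,t);    // t goes to the head: runs next,
 *                             // ahead of everything queued (unfair)
 */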
295 /* ---------------------------------------------------------------------------
296 Main scheduling loop.
298 We use round-robin scheduling, each thread returning to the
299 scheduler loop when one of these conditions is detected:
302 * timer expires (thread yields)
308 In a GranSim setup this loop iterates over the global event queue.
309 This revolves around the global event queue, which determines what
310 to do next. Therefore, it's more complicated than either the
311 concurrent or the parallel (GUM) setup.
314 GUM iterates over incoming messages.
315 It starts with nothing to do (thus CurrentTSO == END_TSO_QUEUE),
316 and sends out a fish whenever it has nothing to do; in-between
317 doing the actual reductions (shared code below) it processes the
318 incoming messages and deals with delayed operations
319 (see PendingFetches).
320 This is not the ugliest code you could imagine, but it's bloody close.
322 ------------------------------------------------------------------------ */
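/* A condensed, non-normative outline of the loop that follows (the
 * names are the ones used below; error paths and PAR/GRAN cases
 * omitted):
 *
 *   while (TERMINATION_CONDITION) {
 *       yieldCapability(&cap, task);        // THREADED_RTS only
 *       scheduleStartSignalHandlers();
 *       scheduleCheckBlackHoles(cap);       // only if run queue empty
 *       scheduleCheckBlockedThreads(cap);
 *       scheduleDetectDeadlock(cap, task);
 *       t = popRunQueue(cap);
 *       ret = StgRun(...) or interpretBCO(cap);
 *       dispatch on ret: HeapOverflow / StackOverflow / ThreadYielding
 *                        / ThreadBlocked / ThreadFinished;
 *       if (ready_to_gc) scheduleDoGC(cap, task, rtsFalse);
 *   }
 */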
325 schedule (Capability *initialCapability, Task *task)
329 StgThreadReturnCode ret;
332 #elif defined(PARALLEL_HASKELL)
335 rtsBool receivedFinish = rtsFalse;
337 nat tp_size, sp_size; // stats only
342 rtsBool first = rtsTrue;
344 cap = initialCapability;
346 // Pre-condition: this task owns initialCapability.
347 // The sched_mutex is *NOT* held
348 // NB. on return, we still hold a capability.
351 sched_belch("### NEW SCHEDULER LOOP (task: %p, cap: %p)",
352 task, initialCapability);
357 // -----------------------------------------------------------
358 // Scheduler loop starts here:
360 #if defined(PARALLEL_HASKELL)
361 #define TERMINATION_CONDITION (!receivedFinish)
363 #define TERMINATION_CONDITION ((event = get_next_event()) != (rtsEvent*)NULL)
365 #define TERMINATION_CONDITION rtsTrue
368 while (TERMINATION_CONDITION) {
371 /* Choose the processor with the next event */
372 CurrentProc = event->proc;
373 CurrentTSO = event->tso;
376 #if defined(THREADED_RTS)
378 // don't yield the first time, we want a chance to run this
379 // thread for a bit, even if there are others banging at the
383 // Yield the capability to higher-priority tasks if necessary.
384 yieldCapability(&cap, task);
388 ASSERT(cap->running_task == task);
389 ASSERT(task->cap == cap);
390 ASSERT(myTask() == task);
392 // Check whether we have re-entered the RTS from Haskell without
393     // going via suspendThread()/resumeThread() (i.e. a 'safe' foreign
395 if (cap->in_haskell) {
396 errorBelch("schedule: re-entered unsafely.\n"
397 " Perhaps a 'foreign import unsafe' should be 'safe'?");
398 stg_exit(EXIT_FAILURE);
402 // Test for interruption. If interrupted==rtsTrue, then either
403 // we received a keyboard interrupt (^C), or the scheduler is
404 // trying to shut down all the tasks (shutting_down_scheduler) in
409 if (shutting_down_scheduler) {
410 IF_DEBUG(scheduler, sched_belch("shutting down"));
411 // If we are a worker, just exit. If we're a bound thread
412 // then we will exit below when we've removed our TSO from
414 if (task->tso == NULL) {
418 IF_DEBUG(scheduler, sched_belch("interrupted"));
422 #if defined(not_yet) && defined(SMP)
424 // Top up the run queue from our spark pool. We try to make the
425 // number of threads in the run queue equal to the number of
426 // free capabilities.
430 if (emptyRunQueue()) {
431 spark = findSpark(rtsFalse);
433 break; /* no more sparks in the pool */
435 createSparkThread(spark);
437 sched_belch("==^^ turning spark of closure %p into a thread",
438 (StgClosure *)spark));
444 scheduleStartSignalHandlers();
446 // Only check the black holes here if we've nothing else to do.
447 // During normal execution, the black hole list only gets checked
448 // at GC time, to avoid repeatedly traversing this possibly long
449 // list each time around the scheduler.
450 if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
452 scheduleCheckBlockedThreads(cap);
454 scheduleDetectDeadlock(cap,task);
456 // Normally, the only way we can get here with no threads to
457     // run is if a keyboard interrupt was received during
458 // scheduleCheckBlockedThreads() or scheduleDetectDeadlock().
459 // Additionally, it is not fatal for the
460 // threaded RTS to reach here with no threads to run.
462 // win32: might be here due to awaitEvent() being abandoned
463 // as a result of a console event having been delivered.
464 if ( emptyRunQueue(cap) ) {
465 #if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
468 continue; // nothing to do
471 #if defined(PARALLEL_HASKELL)
472 scheduleSendPendingMessages();
473 if (emptyRunQueue(cap) && scheduleActivateSpark())
477 ASSERT(next_fish_to_send_at==0); // i.e. no delayed fishes left!
480 /* If we still have no work we need to send a FISH to get a spark
482 if (emptyRunQueue(cap)) {
483 if (!scheduleGetRemoteWork(&receivedFinish)) continue;
484 ASSERT(rtsFalse); // should not happen at the moment
486 // from here: non-empty run queue.
487 // TODO: merge above case with this, only one call processMessages() !
488 if (PacketsWaiting()) { /* process incoming messages, if
489 any pending... only in else
490 because getRemoteWork waits for
492 receivedFinish = processMessages();
497 scheduleProcessEvent(event);
501 // Get a thread to run
503 t = popRunQueue(cap);
505 #if defined(GRAN) || defined(PAR)
506     scheduleGranParReport(); // some kind of debugging output
508 // Sanity check the thread we're about to run. This can be
509 // expensive if there is lots of thread switching going on...
510 IF_DEBUG(sanity,checkTSO(t));
513 #if defined(THREADED_RTS)
514 // Check whether we can run this thread in the current task.
515 // If not, we have to pass our capability to the right task.
517 Task *bound = t->bound;
522 sched_belch("### Running thread %d in bound thread",
524 // yes, the Haskell thread is bound to the current native thread
527 sched_belch("### thread %d bound to another OS thread",
529 // no, bound to a different Haskell thread: pass to that thread
530 pushOnRunQueue(cap,t);
534 // The thread we want to run is unbound.
537 sched_belch("### this OS thread cannot run thread %d", t->id));
538 // no, the current native thread is bound to a different
539 // Haskell thread, so pass it to any worker thread
540 pushOnRunQueue(cap,t);
547 cap->r.rCurrentTSO = t;
549 /* context switches are initiated by the timer signal, unless
550 * the user specified "context switch as often as possible", with
553 if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
554 && !emptyThreadQueues(cap)) {
560 IF_DEBUG(scheduler, sched_belch("-->> running thread %ld %s ...",
561 (long)t->id, whatNext_strs[t->what_next]));
563 #if defined(PROFILING)
564 startHeapProfTimer();
567 // ----------------------------------------------------------------------
568 // Run the current thread
570 prev_what_next = t->what_next;
572 errno = t->saved_errno;
573 cap->in_haskell = rtsTrue;
575 recent_activity = ACTIVITY_YES;
577 switch (prev_what_next) {
581 /* Thread already finished, return to scheduler. */
582 ret = ThreadFinished;
588 r = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
589 cap = regTableToCapability(r);
594 case ThreadInterpret:
595 cap = interpretBCO(cap);
600 barf("schedule: invalid what_next field");
603 cap->in_haskell = rtsFalse;
606 // If ret is ThreadBlocked, and this Task is bound to the TSO that
607 // blocked, we are in limbo - the TSO is now owned by whatever it
608 // is blocked on, and may in fact already have been woken up,
609 // perhaps even on a different Capability. It may be the case
610 // that task->cap != cap. We better yield this Capability
611     // immediately and return to normality.
612 if (ret == ThreadBlocked) continue;
615 ASSERT(cap->running_task == task);
616 ASSERT(task->cap == cap);
617 ASSERT(myTask() == task);
619 // The TSO might have moved, eg. if it re-entered the RTS and a GC
620 // happened. So find the new location:
621 t = cap->r.rCurrentTSO;
623 // And save the current errno in this thread.
624 t->saved_errno = errno;
626 // ----------------------------------------------------------------------
628 // Costs for the scheduler are assigned to CCS_SYSTEM
629 #if defined(PROFILING)
634 // We have run some Haskell code: there might be blackhole-blocked
635 // threads to wake up now.
636 // Lock-free test here should be ok, we're just setting a flag.
637 if ( blackhole_queue != END_TSO_QUEUE ) {
638 blackholes_need_checking = rtsTrue;
641 #if defined(THREADED_RTS)
642 IF_DEBUG(scheduler,debugBelch("sched (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId()););
643 #elif !defined(GRAN) && !defined(PARALLEL_HASKELL)
644 IF_DEBUG(scheduler,debugBelch("sched: "););
647 schedulePostRunThread();
649 ready_to_gc = rtsFalse;
653 ready_to_gc = scheduleHandleHeapOverflow(cap,t);
657 scheduleHandleStackOverflow(cap,task,t);
661 if (scheduleHandleYield(cap, t, prev_what_next)) {
662 // shortcut for switching between compiler/interpreter:
668 scheduleHandleThreadBlocked(t);
672 if (scheduleHandleThreadFinished(cap, task, t)) return cap;
676 barf("schedule: invalid thread return code %d", (int)ret);
679 if (scheduleDoHeapProfile(ready_to_gc)) { ready_to_gc = rtsFalse; }
680 if (ready_to_gc) { scheduleDoGC(cap,task,rtsFalse); }
681 } /* end of while() */
683 IF_PAR_DEBUG(verbose,
684 debugBelch("== Leaving schedule() after having received Finish\n"));
687 /* ----------------------------------------------------------------------------
688 * Setting up the scheduler loop
689 * ------------------------------------------------------------------------- */
692 schedulePreLoop(void)
695 /* set up first event to get things going */
696 /* ToDo: assign costs for system setup and init MainTSO ! */
697 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
699 CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
702 debugBelch("GRAN: Init CurrentTSO (in schedule) = %p\n",
704 G_TSO(CurrentTSO, 5));
706 if (RtsFlags.GranFlags.Light) {
707 /* Save current time; GranSim Light only */
708 CurrentTSO->gran.clock = CurrentTime[CurrentProc];
713 /* ----------------------------------------------------------------------------
714 * Start any pending signal handlers
715 * ------------------------------------------------------------------------- */
718 scheduleStartSignalHandlers(void)
720 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
721 if (signals_pending()) { // safe outside the lock
722 startSignalHandlers();
727 /* ----------------------------------------------------------------------------
728 * Check for blocked threads that can be woken up.
729 * ------------------------------------------------------------------------- */
732 scheduleCheckBlockedThreads(Capability *cap USED_WHEN_NON_THREADED_RTS)
734 #if !defined(THREADED_RTS)
736 // Check whether any waiting threads need to be woken up. If the
737 // run queue is empty, and there are no other tasks running, we
738 // can wait indefinitely for something to happen.
740 if ( !emptyQueue(blocked_queue_hd) || !emptyQueue(sleeping_queue) )
742 awaitEvent( emptyRunQueue(cap) && !blackholes_need_checking );
748 /* ----------------------------------------------------------------------------
749 * Check for threads blocked on BLACKHOLEs that can be woken up
750 * ------------------------------------------------------------------------- */
752 scheduleCheckBlackHoles (Capability *cap)
754 if ( blackholes_need_checking ) // check without the lock first
756 ACQUIRE_LOCK(&sched_mutex);
757 if ( blackholes_need_checking ) {
758 checkBlackHoles(cap);
759 blackholes_need_checking = rtsFalse;
761 RELEASE_LOCK(&sched_mutex);
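    /* A hedged aside: the shape above is the classic double-checked
     * flag -- a cheap unlocked read, then a re-test under sched_mutex:
     *
     *   if (flag) {                  // may race, but only false-positives
     *       ACQUIRE_LOCK(&m);
     *       if (flag) { do_work(); flag = rtsFalse; }
     *       RELEASE_LOCK(&m);
     *   }
     *
     * Missing an update is harmless here because the flag is re-set
     * whenever Haskell code runs (see the main loop above). */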
765 /* ----------------------------------------------------------------------------
766 * Detect deadlock conditions and attempt to resolve them.
767 * ------------------------------------------------------------------------- */
770 scheduleDetectDeadlock (Capability *cap, Task *task)
773 #if defined(PARALLEL_HASKELL)
774 // ToDo: add deadlock detection in GUM (similar to SMP) -- HWL
779 * Detect deadlock: when we have no threads to run, there are no
780 * threads blocked, waiting for I/O, or sleeping, and all the
781 * other tasks are waiting for work, we must have a deadlock of
784 if ( emptyThreadQueues(cap) )
786 #if defined(THREADED_RTS)
788 * In the threaded RTS, we only check for deadlock if there
789 * has been no activity in a complete timeslice. This means
790 * we won't eagerly start a full GC just because we don't have
791 * any threads to run currently.
793 if (recent_activity != ACTIVITY_INACTIVE) return;
796 IF_DEBUG(scheduler, sched_belch("deadlocked, forcing major GC..."));
798 // Garbage collection can release some new threads due to
799 // either (a) finalizers or (b) threads resurrected because
800 // they are unreachable and will therefore be sent an
801 // exception. Any threads thus released will be immediately
803 scheduleDoGC( cap, task, rtsTrue/*force major GC*/ );
804 recent_activity = ACTIVITY_DONE_GC;
806 if ( !emptyRunQueue(cap) ) return;
808 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
809 /* If we have user-installed signal handlers, then wait
810	 * for signals to arrive rather than bombing out with a
813 if ( anyUserHandlers() ) {
815 sched_belch("still deadlocked, waiting for signals..."));
819 if (signals_pending()) {
820 startSignalHandlers();
823 // either we have threads to run, or we were interrupted:
824 ASSERT(!emptyRunQueue(cap) || interrupted);
828 #if !defined(THREADED_RTS)
829 /* Probably a real deadlock. Send the current main thread the
830 * Deadlock exception.
833 switch (task->tso->why_blocked) {
835 case BlockedOnBlackHole:
836 case BlockedOnException:
838 raiseAsync(cap, task->tso, (StgClosure *)NonTermination_closure);
841 barf("deadlock: main thread blocked in a strange way");
849 /* ----------------------------------------------------------------------------
850 * Process an event (GRAN only)
851 * ------------------------------------------------------------------------- */
855 scheduleProcessEvent(rtsEvent *event)
859 if (RtsFlags.GranFlags.Light)
860 GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc
862 /* adjust time based on time-stamp */
863 if (event->time > CurrentTime[CurrentProc] &&
864 event->evttype != ContinueThread)
865 CurrentTime[CurrentProc] = event->time;
867 /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */
868 if (!RtsFlags.GranFlags.Light)
871 IF_DEBUG(gran, debugBelch("GRAN: switch by event-type\n"));
873 /* main event dispatcher in GranSim */
874 switch (event->evttype) {
875 /* Should just be continuing execution */
877 IF_DEBUG(gran, debugBelch("GRAN: doing ContinueThread\n"));
878 /* ToDo: check assertion
879 ASSERT(run_queue_hd != (StgTSO*)NULL &&
880 run_queue_hd != END_TSO_QUEUE);
882 /* Ignore ContinueThreads for fetching threads (if synchr comm) */
883 if (!RtsFlags.GranFlags.DoAsyncFetch &&
884 procStatus[CurrentProc]==Fetching) {
885 debugBelch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]\n",
886 CurrentTSO->id, CurrentTSO, CurrentProc);
889 /* Ignore ContinueThreads for completed threads */
890 if (CurrentTSO->what_next == ThreadComplete) {
891 debugBelch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)\n",
892 CurrentTSO->id, CurrentTSO, CurrentProc);
895 /* Ignore ContinueThreads for threads that are being migrated */
896 if (PROCS(CurrentTSO)==Nowhere) {
897 debugBelch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)\n",
898 CurrentTSO->id, CurrentTSO, CurrentProc);
901 /* The thread should be at the beginning of the run queue */
902 if (CurrentTSO!=run_queue_hds[CurrentProc]) {
903 debugBelch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread\n",
904 CurrentTSO->id, CurrentTSO, CurrentProc);
905 break; // run the thread anyway
908 new_event(proc, proc, CurrentTime[proc],
910 (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
912 */ /* Catches superfluous CONTINUEs -- should be unnecessary */
913 break; // now actually run the thread; DaH Qu'vam yImuHbej
916 do_the_fetchnode(event);
917 goto next_thread; /* handle next event in event queue */
920 do_the_globalblock(event);
921 goto next_thread; /* handle next event in event queue */
924 do_the_fetchreply(event);
925 goto next_thread; /* handle next event in event queue */
927 case UnblockThread: /* Move from the blocked queue to the tail of */
928 do_the_unblock(event);
929 goto next_thread; /* handle next event in event queue */
931 case ResumeThread: /* Move from the blocked queue to the tail of */
932 /* the runnable queue ( i.e. Qu' SImqa'lu') */
933 event->tso->gran.blocktime +=
934 CurrentTime[CurrentProc] - event->tso->gran.blockedat;
935 do_the_startthread(event);
936 goto next_thread; /* handle next event in event queue */
939 do_the_startthread(event);
940 goto next_thread; /* handle next event in event queue */
943 do_the_movethread(event);
944 goto next_thread; /* handle next event in event queue */
947 do_the_movespark(event);
948 goto next_thread; /* handle next event in event queue */
951 do_the_findwork(event);
952 goto next_thread; /* handle next event in event queue */
955 barf("Illegal event type %u\n", event->evttype);
958 /* This point was scheduler_loop in the old RTS */
960 IF_DEBUG(gran, debugBelch("GRAN: after main switch\n"));
962 TimeOfLastEvent = CurrentTime[CurrentProc];
963 TimeOfNextEvent = get_time_of_next_event();
964 IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK
965 // CurrentTSO = ThreadQueueHd;
967 IF_DEBUG(gran, debugBelch("GRAN: time of next event is: %ld\n",
970 if (RtsFlags.GranFlags.Light)
971 GranSimLight_leave_system(event, &ActiveTSO);
973 EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice;
976 debugBelch("GRAN: end of time-slice is %#lx\n", EndOfTimeSlice));
978 /* in a GranSim setup the TSO stays on the run queue */
980 /* Take a thread from the run queue. */
981 POP_RUN_QUEUE(t); // take_off_run_queue(t);
984 debugBelch("GRAN: About to run current thread, which is\n");
987 context_switch = 0; // turned on via GranYield, checking events and time slice
990 DumpGranEvent(GR_SCHEDULE, t));
992 procStatus[CurrentProc] = Busy;
996 /* ----------------------------------------------------------------------------
997 * Send pending messages (PARALLEL_HASKELL only)
998 * ------------------------------------------------------------------------- */
1000 #if defined(PARALLEL_HASKELL)
1002 scheduleSendPendingMessages(void)
1008 # if defined(PAR) // global Mem.Mgmt., omit for now
1009 if (PendingFetches != END_BF_QUEUE) {
1014 if (RtsFlags.ParFlags.BufferTime) {
1015 // if we use message buffering, we must send away all message
1016 // packets which have become too old...
1022 /* ----------------------------------------------------------------------------
1023 * Activate spark threads (PARALLEL_HASKELL only)
1024 * ------------------------------------------------------------------------- */
1026 #if defined(PARALLEL_HASKELL)
1028 scheduleActivateSpark(void)
1031 ASSERT(emptyRunQueue());
1032 /* We get here if the run queue is empty and want some work.
1033 We try to turn a spark into a thread, and add it to the run queue,
1034 from where it will be picked up in the next iteration of the scheduler
1038 /* :-[ no local threads => look out for local sparks */
1039 /* the spark pool for the current PE */
1040 pool = &(cap.r.rSparks); // JB: cap = (old) MainCap
1041 if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
1042 pool->hd < pool->tl) {
1044 * ToDo: add GC code check that we really have enough heap afterwards!!
1046 * If we're here (no runnable threads) and we have pending
1047 * sparks, we must have a space problem. Get enough space
1048 * to turn one of those pending sparks into a
1052 spark = findSpark(rtsFalse); /* get a spark */
1053 if (spark != (rtsSpark) NULL) {
1054 tso = createThreadFromSpark(spark); /* turn the spark into a thread */
1055 IF_PAR_DEBUG(fish, // schedule,
1056 debugBelch("==== schedule: Created TSO %d (%p); %d threads active\n",
1057 tso->id, tso, advisory_thread_count));
1059 if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
1060 IF_PAR_DEBUG(fish, // schedule,
1061 debugBelch("==^^ failed to create thread from spark @ %lx\n",
1063 return rtsFalse; /* failed to generate a thread */
1064 } /* otherwise fall through & pick-up new tso */
1066 IF_PAR_DEBUG(fish, // schedule,
1067 debugBelch("==^^ no local sparks (spark pool contains only NFs: %d)\n",
1068 spark_queue_len(pool)));
1069 return rtsFalse; /* failed to generate a thread */
1071 return rtsTrue; /* success in generating a thread */
1072 } else { /* no more threads permitted or pool empty */
1073 return rtsFalse; /* failed to generateThread */
1076 tso = NULL; // avoid compiler warning only
1077 return rtsFalse; /* dummy in non-PAR setup */
1080 #endif // PARALLEL_HASKELL
1082 /* ----------------------------------------------------------------------------
1083 * Get work from a remote node (PARALLEL_HASKELL only)
1084 * ------------------------------------------------------------------------- */
1086 #if defined(PARALLEL_HASKELL)
1088 scheduleGetRemoteWork(rtsBool *receivedFinish)
1090 ASSERT(emptyRunQueue());
1092 if (RtsFlags.ParFlags.BufferTime) {
1093 IF_PAR_DEBUG(verbose,
1094 debugBelch("...send all pending data,"));
1097 for (i=1; i<=nPEs; i++)
1098 sendImmediately(i); // send all messages away immediately
1102 //++EDEN++ idle() , i.e. send all buffers, wait for work
1103 // suppress fishing in EDEN... just look for incoming messages
1104 // (blocking receive)
1105 IF_PAR_DEBUG(verbose,
1106 debugBelch("...wait for incoming messages...\n"));
1107 *receivedFinish = processMessages(); // blocking receive...
1109 // and reenter scheduling loop after having received something
1110 // (return rtsFalse below)
1112 # else /* activate SPARKS machinery */
1113 /* We get here, if we have no work, tried to activate a local spark, but still
1114 have no work. We try to get a remote spark, by sending a FISH message.
1115 Thread migration should be added here, and triggered when a sequence of
1116 fishes returns without work. */
1117 delay = (RtsFlags.ParFlags.fishDelay!=0ll ? RtsFlags.ParFlags.fishDelay : 0ll);
1119 /* =8-[ no local sparks => look for work on other PEs */
1121 * We really have absolutely no work. Send out a fish
1122 * (there may be some out there already), and wait for
1123 * something to arrive. We clearly can't run any threads
1124 * until a SCHEDULE or RESUME arrives, and so that's what
1125 * we're hoping to see. (Of course, we still have to
1126 * respond to other types of messages.)
1128 rtsTime now = msTime() /*CURRENT_TIME*/;
1129 IF_PAR_DEBUG(verbose,
1130 debugBelch("-- now=%ld\n", now));
1131 IF_PAR_DEBUG(fish, // verbose,
1132 if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
1133 (last_fish_arrived_at!=0 &&
1134 last_fish_arrived_at+delay > now)) {
1135 debugBelch("--$$ <%llu> delaying FISH until %llu (last fish %llu, delay %llu)\n",
1136 now, last_fish_arrived_at+delay,
1137 last_fish_arrived_at,
1141 if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
1142 advisory_thread_count < RtsFlags.ParFlags.maxThreads) { // send a FISH, but when?
1143 if (last_fish_arrived_at==0 ||
1144 (last_fish_arrived_at+delay <= now)) { // send FISH now!
1145 /* outstandingFishes is set in sendFish, processFish;
1146 avoid flooding system with fishes via delay */
1147 next_fish_to_send_at = 0;
1149 /* ToDo: this should be done in the main scheduling loop to avoid the
1150 busy wait here; not so bad if fish delay is very small */
1151 int iq = 0; // DEBUGGING -- HWL
1152 next_fish_to_send_at = last_fish_arrived_at+delay; // remember when to send
1153 /* send a fish when ready, but process messages that arrive in the meantime */
1155 if (PacketsWaiting()) {
1157 *receivedFinish = processMessages();
1160 } while (!*receivedFinish || now<next_fish_to_send_at);
1161 // JB: This means the fish could become obsolete, if we receive
1162 // work. Better check for work again?
1163 // last line: while (!receivedFinish || !haveWork || now<...)
1164 // next line: if (receivedFinish || haveWork )
1166 if (*receivedFinish) // no need to send a FISH if we are finishing anyway
1167 return rtsFalse; // NB: this will leave scheduler loop
1168 // immediately after return!
1170 IF_PAR_DEBUG(fish, // verbose,
1171 debugBelch("--$$ <%llu> sent delayed fish (%d processMessages); active/total threads=%d/%d\n",now,iq,run_queue_len(),advisory_thread_count));
1175 // JB: IMHO, this should all be hidden inside sendFish(...)
1177 sendFish(pe, thisPE, NEW_FISH_AGE, NEW_FISH_HISTORY,
1180 // Global statistics: count no. of fishes
1181 if (RtsFlags.ParFlags.ParStats.Global &&
1182 RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
1183 globalParStats.tot_fish_mess++;
1187 /* delayed fishes must have been sent by now! */
1188 next_fish_to_send_at = 0;
1191 *receivedFinish = processMessages();
1192 # endif /* SPARKS */
1195 /* NB: this function always returns rtsFalse, meaning the scheduler
1196 loop continues with the next iteration;
1198 return code means success in finding work; we enter this function
1199 if there is no local work, thus have to send a fish which takes
1200 time until it arrives with work; in the meantime we should process
1201 messages in the main loop;
1204 #endif // PARALLEL_HASKELL
1206 /* ----------------------------------------------------------------------------
1207 * PAR/GRAN: Report stats & debugging info(?)
1208 * ------------------------------------------------------------------------- */
1210 #if defined(PAR) || defined(GRAN)
1212 scheduleGranParReport(void)
1214 ASSERT(run_queue_hd != END_TSO_QUEUE);
1216 /* Take a thread from the run queue, if we have work */
1217 POP_RUN_QUEUE(t); // take_off_run_queue(END_TSO_QUEUE);
1219 /* If this TSO has got its outport closed in the meantime,
1220 * it mustn't be run. Instead, we have to clean it up as if it was finished.
1221 * It has to be marked as TH_DEAD for this purpose.
1222 * If it is TH_TERM instead, it is supposed to have finished in the normal way.
1224    JB: TODO: investigate whether the state change field could be nuked
1225    entirely and replaced by the normal tso state (what_next
1226 field). All we want to do is to kill tsos from outside.
1229 /* ToDo: write something to the log-file
1230 if (RTSflags.ParFlags.granSimStats && !sameThread)
1231 DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
1235 /* the spark pool for the current PE */
1236 pool = &(cap.r.rSparks); // cap = (old) MainCap
1239 debugBelch("--=^ %d threads, %d sparks on [%#x]\n",
1240 run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
1243 debugBelch("--=^ %d threads, %d sparks on [%#x]\n",
1244 run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
1246 if (RtsFlags.ParFlags.ParStats.Full &&
1247 (t->par.sparkname != (StgInt)0) && // only log spark generated threads
1248 (emitSchedule || // forced emit
1249 (t && LastTSO && t->id != LastTSO->id))) {
1251 we are running a different TSO, so write a schedule event to log file
1252 NB: If we use fair scheduling we also have to write a deschedule
1253 event for LastTSO; with unfair scheduling we know that the
1254 previous tso has blocked whenever we switch to another tso, so
1255 we don't need it in GUM for now
1257 IF_PAR_DEBUG(fish, // schedule,
1258 debugBelch("____ scheduling spark generated thread %d (%lx) (%lx) via a forced emit\n",t->id,t,t->par.sparkname));
1260 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
1261 GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
1262 emitSchedule = rtsFalse;
1267 /* ----------------------------------------------------------------------------
1268 * After running a thread...
1269 * ------------------------------------------------------------------------- */
1272 schedulePostRunThread(void)
1275 /* HACK 675: if the last thread didn't yield, make sure to print a
1276 SCHEDULE event to the log file when StgRunning the next thread, even
1277 if it is the same one as before */
1279 TimeOfLastYield = CURRENT_TIME;
1282 /* some statistics gathering in the parallel case */
1284 #if defined(GRAN) || defined(PAR) || defined(EDEN)
1288 IF_DEBUG(gran, DumpGranEvent(GR_DESCHEDULE, t));
1289 globalGranStats.tot_heapover++;
1291 globalParStats.tot_heapover++;
1298 DumpGranEvent(GR_DESCHEDULE, t));
1299 globalGranStats.tot_stackover++;
1302 // DumpGranEvent(GR_DESCHEDULE, t);
1303 globalParStats.tot_stackover++;
1307 case ThreadYielding:
1310 DumpGranEvent(GR_DESCHEDULE, t));
1311 globalGranStats.tot_yields++;
1314 // DumpGranEvent(GR_DESCHEDULE, t);
1315 globalParStats.tot_yields++;
1322 debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
1323 t->id, t, whatNext_strs[t->what_next], t->block_info.closure,
1324 (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
1325 if (t->block_info.closure!=(StgClosure*)NULL)
1326 print_bq(t->block_info.closure);
1329 // ??? needed; should emit block before
1331 DumpGranEvent(GR_DESCHEDULE, t));
1332 prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
1335 ASSERT(procStatus[CurrentProc]==Busy ||
1336 ((procStatus[CurrentProc]==Fetching) &&
1337 (t->block_info.closure!=(StgClosure*)NULL)));
1338 if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
1339 !(!RtsFlags.GranFlags.DoAsyncFetch &&
1340 procStatus[CurrentProc]==Fetching))
1341 procStatus[CurrentProc] = Idle;
1344 //++PAR++ blockThread() writes the event (change?)
1348 case ThreadFinished:
1352 barf("parGlobalStats: unknown return code");
1358 /* -----------------------------------------------------------------------------
1359  * Handle a thread that returned to the scheduler with ThreadHeapOverflow
1360 * -------------------------------------------------------------------------- */
1363 scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
1365 // did the task ask for a large block?
1366 if (cap->r.rHpAlloc > BLOCK_SIZE) {
1367 // if so, get one and push it on the front of the nursery.
1371 blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
1374 debugBelch("--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
1375 (long)t->id, whatNext_strs[t->what_next], blocks));
1377 // don't do this if the nursery is (nearly) full, we'll GC first.
1378 if (cap->r.rCurrentNursery->link != NULL ||
1379 cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop
1380 // if the nursery has only one block.
1383 bd = allocGroup( blocks );
1385 cap->r.rNursery->n_blocks += blocks;
1387 // link the new group into the list
1388 bd->link = cap->r.rCurrentNursery;
1389 bd->u.back = cap->r.rCurrentNursery->u.back;
1390 if (cap->r.rCurrentNursery->u.back != NULL) {
1391 cap->r.rCurrentNursery->u.back->link = bd;
1394 ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
1395 g0s0 == cap->r.rNursery);
1397 cap->r.rNursery->blocks = bd;
1399 cap->r.rCurrentNursery->u.back = bd;
1401 // initialise it as a nursery block. We initialise the
1402 // step, gen_no, and flags field of *every* sub-block in
1403 // this large block, because this is easier than making
1404 // sure that we always find the block head of a large
1405 // block whenever we call Bdescr() (eg. evacuate() and
1406 // isAlive() in the GC would both have to do this, at
1410 for (x = bd; x < bd + blocks; x++) {
1411 x->step = cap->r.rNursery;
1417 // This assert can be a killer if the app is doing lots
1418 // of large block allocations.
1419 IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
1421 // now update the nursery to point to the new block
1422 cap->r.rCurrentNursery = bd;
1424 // we might be unlucky and have another thread get on the
1425 // run queue before us and steal the large block, but in that
1426 // case the thread will just end up requesting another large
1428 pushOnRunQueue(cap,t);
1429 return rtsFalse; /* not actually GC'ing */
1434 debugBelch("--<< thread %ld (%s) stopped: HeapOverflow\n",
1435 (long)t->id, whatNext_strs[t->what_next]));
1437 ASSERT(!is_on_queue(t,CurrentProc));
1438 #elif defined(PARALLEL_HASKELL)
1439 /* Currently we emit a DESCHEDULE event before GC in GUM.
1440 ToDo: either add separate event to distinguish SYSTEM time from rest
1441 or just nuke this DESCHEDULE (and the following SCHEDULE) */
1442 if (0 && RtsFlags.ParFlags.ParStats.Full) {
1443 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
1444 GR_DESCHEDULE, t, (StgClosure *)NULL, 0, 0);
1445 emitSchedule = rtsTrue;
1449 pushOnRunQueue(cap,t);
1451 /* actual GC is done at the end of the while loop in schedule() */
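/* Worked example for the large-block path above (a sketch, assuming
 * the common BLOCK_SIZE of 4096 bytes): a request of rHpAlloc = 10000
 * bytes gives BLOCK_ROUND_UP(10000) = 12288, hence blocks = 3, and
 * allocGroup(3) returns one contiguous group that the code above
 * links in front of rCurrentNursery. */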
1454 /* -----------------------------------------------------------------------------
1455 * Handle a thread that returned to the scheduler with ThreadStackOverflow
1456 * -------------------------------------------------------------------------- */
1459 scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
1461 IF_DEBUG(scheduler,debugBelch("--<< thread %ld (%s) stopped, StackOverflow\n",
1462 (long)t->id, whatNext_strs[t->what_next]));
1463 /* just adjust the stack for this thread, then pop it back
1467 /* enlarge the stack */
1468 StgTSO *new_t = threadStackOverflow(cap, t);
1470 /* This TSO has moved, so update any pointers to it from the
1471 * main thread stack. It better not be on any other queues...
1472 * (it shouldn't be).
1474 if (task->tso != NULL) {
1477 pushOnRunQueue(cap,new_t);
1481 /* -----------------------------------------------------------------------------
1482 * Handle a thread that returned to the scheduler with ThreadYielding
1483 * -------------------------------------------------------------------------- */
1486 scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
1488 // Reset the context switch flag. We don't do this just before
1489 // running the thread, because that would mean we would lose ticks
1490 // during GC, which can lead to unfair scheduling (a thread hogs
1491 // the CPU because the tick always arrives during GC). This way
1492 // penalises threads that do a lot of allocation, but that seems
1493 // better than the alternative.
1496 /* put the thread back on the run queue. Then, if we're ready to
1497 * GC, check whether this is the last task to stop. If so, wake
1498 * up the GC thread. getThread will block during a GC until the
1502 if (t->what_next != prev_what_next) {
1503 debugBelch("--<< thread %ld (%s) stopped to switch evaluators\n",
1504 (long)t->id, whatNext_strs[t->what_next]);
1506 debugBelch("--<< thread %ld (%s) stopped, yielding\n",
1507 (long)t->id, whatNext_strs[t->what_next]);
1512 //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
1514 ASSERT(t->link == END_TSO_QUEUE);
1516 // Shortcut if we're just switching evaluators: don't bother
1517 // doing stack squeezing (which can be expensive), just run the
1519 if (t->what_next != prev_what_next) {
1524 ASSERT(!is_on_queue(t,CurrentProc));
1527 //debugBelch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
1528 checkThreadQsSanity(rtsTrue));
1532 addToRunQueue(cap,t);
1535 /* add a ContinueThread event to actually process the thread */
1536 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
1538 t, (StgClosure*)NULL, (rtsSpark*)NULL);
1540 debugBelch("GRAN: eventq and runnableq after adding yielded thread to queue again:\n");
1547 /* -----------------------------------------------------------------------------
1548 * Handle a thread that returned to the scheduler with ThreadBlocked
1549 * -------------------------------------------------------------------------- */
1552 scheduleHandleThreadBlocked( StgTSO *t
1553 #if !defined(GRAN) && !defined(DEBUG)
1560 debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: \n",
1561 t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
1562 if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
1564 // ??? needed; should emit block before
1566 DumpGranEvent(GR_DESCHEDULE, t));
1567 prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
1570 ASSERT(procStatus[CurrentProc]==Busy ||
1571 ((procStatus[CurrentProc]==Fetching) &&
1572 (t->block_info.closure!=(StgClosure*)NULL)));
1573 if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
1574 !(!RtsFlags.GranFlags.DoAsyncFetch &&
1575 procStatus[CurrentProc]==Fetching))
1576 procStatus[CurrentProc] = Idle;
1580 debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: \n",
1581 t->id, t, whatNext_strs[t->what_next], t->block_info.closure));
1584 if (t->block_info.closure!=(StgClosure*)NULL)
1585 print_bq(t->block_info.closure));
1587 /* Send a fetch (if BlockedOnGA) and dump event to log file */
1590 /* whatever we schedule next, we must log that schedule */
1591 emitSchedule = rtsTrue;
1595 // We don't need to do anything. The thread is blocked, and it
1596 // has tidied up its stack and placed itself on whatever queue
1597 // it needs to be on.
1600 ASSERT(t->why_blocked != NotBlocked);
1601 // This might not be true under SMP: we don't have
1602 // exclusive access to this TSO, so someone might have
1603 // woken it up by now. This actually happens: try
1604 // conc023 +RTS -N2.
1608 debugBelch("--<< thread %d (%s) stopped: ",
1609 t->id, whatNext_strs[t->what_next]);
1610 printThreadBlockage(t);
1613 /* Only for dumping event to log file
1614 ToDo: do I need this in GranSim, too?
1620 /* -----------------------------------------------------------------------------
1621 * Handle a thread that returned to the scheduler with ThreadFinished
1622 * -------------------------------------------------------------------------- */
1625 scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
1627 /* Need to check whether this was a main thread, and if so,
1628 * return with the return value.
1630 * We also end up here if the thread kills itself with an
1631 * uncaught exception, see Exception.cmm.
1633 IF_DEBUG(scheduler,debugBelch("--++ thread %d (%s) finished\n",
1634 t->id, whatNext_strs[t->what_next]));
1637 endThread(t, CurrentProc); // clean-up the thread
1638 #elif defined(PARALLEL_HASKELL)
1639 /* For now all are advisory -- HWL */
1640 //if(t->priority==AdvisoryPriority) ??
1641 advisory_thread_count--; // JB: Caution with this counter, buggy!
1644 if(t->dist.priority==RevalPriority)
1648 # if defined(EDENOLD)
1649 // the thread could still have an outport... (BUG)
1650 if (t->eden.outport != -1) {
1651 // delete the outport for the tso which has finished...
1652 IF_PAR_DEBUG(eden_ports,
1653 debugBelch("WARNING: Scheduler removes outport %d for TSO %d.\n",
1654 t->eden.outport, t->id));
1657 // thread still in the process (HEAVY BUG! since outport has just been closed...)
1658 if (t->eden.epid != -1) {
1659 IF_PAR_DEBUG(eden_ports,
1660 debugBelch("WARNING: Scheduler removes TSO %d from process %d .\n",
1661 t->id, t->eden.epid));
1662 removeTSOfromProcess(t);
1667 if (RtsFlags.ParFlags.ParStats.Full &&
1668 !RtsFlags.ParFlags.ParStats.Suppressed)
1669 DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);
1671 // t->par only contains statistics: left out for now...
1673 debugBelch("**** end thread: ended sparked thread %d (%lx); sparkname: %lx\n",
1674 t->id,t,t->par.sparkname));
1676 #endif // PARALLEL_HASKELL
1679 // Check whether the thread that just completed was a bound
1680 // thread, and if so return with the result.
1682 // There is an assumption here that all thread completion goes
1683 // through this point; we need to make sure that if a thread
1684 // ends up in the ThreadKilled state, that it stays on the run
1685 // queue so it can be dealt with here.
1690 if (t->bound != task) {
1691 #if !defined(THREADED_RTS)
1692 // Must be a bound thread that is not the topmost one. Leave
1693 // it on the run queue until the stack has unwound to the
1694 // point where we can deal with this. Leaving it on the run
1695 // queue also ensures that the garbage collector knows about
1696 // this thread and its return value (it gets dropped from the
1697 // all_threads list so there's no other way to find it).
1698 appendToRunQueue(cap,t);
1701 // this cannot happen in the threaded RTS, because a
1702 // bound thread can only be run by the appropriate Task.
1703 barf("finished bound thread that isn't mine");
1707 ASSERT(task->tso == t);
1709 if (t->what_next == ThreadComplete) {
1711 // NOTE: return val is tso->sp[1] (see StgStartup.hc)
1712 *(task->ret) = (StgClosure *)task->tso->sp[1];
1714 task->stat = Success;
1717 *(task->ret) = NULL;
1720 task->stat = Interrupted;
1722 task->stat = Killed;
1726 removeThreadLabel((StgWord)task->tso->id);
1728 return rtsTrue; // tells schedule() to return
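/* Illustrative pairing for the bound-thread case above (a sketch;
 * compare forkProcess below, which makes the same calls): a bound
 * task runs an action, and the code above fills in task->ret and
 * task->stat before schedule() returns:
 *
 *   cap = rts_evalStableIO(cap, entry, &ret);   // blocks in schedule()
 *   rts_checkSchedStatus("caller", cap);        // inspects task->stat
 */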
1734 /* -----------------------------------------------------------------------------
1735 * Perform a heap census, if PROFILING
1736 * -------------------------------------------------------------------------- */
1739 scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED )
1741 #if defined(PROFILING)
1742 // When we have +RTS -i0 and we're heap profiling, do a census at
1743 // every GC. This lets us get repeatable runs for debugging.
1744 if (performHeapProfile ||
1745 (RtsFlags.ProfFlags.profileInterval==0 &&
1746 RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
1747 GarbageCollect(GetRoots, rtsTrue);
1749 performHeapProfile = rtsFalse;
1750 return rtsTrue; // true <=> we already GC'd
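/* Usage note (hedged): e.g. running the program with
 *     ./prog +RTS -hc -i0
 * sets profileInterval to 0 while heap profiling, so the test above
 * makes every GC perform a census, giving repeatable profiles. */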
1756 /* -----------------------------------------------------------------------------
1757 * Perform a garbage collection if necessary
1758 * -------------------------------------------------------------------------- */
1761 scheduleDoGC( Capability *cap, Task *task USED_WHEN_SMP, rtsBool force_major )
1765 static volatile StgWord waiting_for_gc;
1766 rtsBool was_waiting;
1771 // In order to GC, there must be no threads running Haskell code.
1772 // Therefore, the GC thread needs to hold *all* the capabilities,
1773 // and release them after the GC has completed.
1775 // This seems to be the simplest way: previous attempts involved
1776 // making all the threads with capabilities give up their
1777 // capabilities and sleep except for the *last* one, which
1778 // actually did the GC. But it's quite hard to arrange for all
1779 // the other tasks to sleep and stay asleep.
1782 was_waiting = cas(&waiting_for_gc, 0, 1);
1783 if (was_waiting) return;
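    // A hedged aside: cas(&waiting_for_gc, 0, 1) is an atomic
    // compare-and-swap electing a single GC leader.  Exactly one task
    // sees the old value 0 and proceeds to collect the capabilities;
    // every other task gets back a non-zero was_waiting and backs off.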
1785 for (i=0; i < n_capabilities; i++) {
1786 IF_DEBUG(scheduler, sched_belch("ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities));
1787 if (cap != &capabilities[i]) {
1788 Capability *pcap = &capabilities[i];
1789 // we better hope this task doesn't get migrated to
1790 // another Capability while we're waiting for this one.
1791 // It won't, because load balancing happens while we have
1792 // all the Capabilities, but even so it's a slightly
1793 // unsavoury invariant.
1795 waitForReturnCapability(&pcap, task);
1796 if (pcap != &capabilities[i]) {
1797 barf("scheduleDoGC: got the wrong capability");
1802 waiting_for_gc = rtsFalse;
1805 /* Kick any transactions which are invalid back to their
1806 * atomically frames. When next scheduled they will try to
1807 * commit, this commit will fail and they will retry.
1812 for (t = all_threads; t != END_TSO_QUEUE; t = next) {
1813 if (t->what_next == ThreadRelocated) {
1816 next = t->global_link;
1817 if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
1818 if (!stmValidateNestOfTransactions (t -> trec)) {
1819 IF_DEBUG(stm, sched_belch("trec %p found wasting its time", t));
1821 // strip the stack back to the
1822 // ATOMICALLY_FRAME, aborting the (nested)
1823 // transaction, and saving the stack of any
1824 // partially-evaluated thunks on the heap.
1825 raiseAsync_(cap, t, NULL, rtsTrue);
1828 ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
1836 // so this happens periodically:
1837 scheduleCheckBlackHoles(cap);
1839 IF_DEBUG(scheduler, printAllThreads());
1841 /* everybody back, start the GC.
1842 * Could do it in this thread, or signal a condition var
1843 * to do it in another thread. Either way, we need to
1844 * broadcast on gc_pending_cond afterward.
1846 #if defined(THREADED_RTS)
1847 IF_DEBUG(scheduler,sched_belch("doing GC"));
1849 GarbageCollect(GetRoots, force_major);
1852 // release our stash of capabilities.
1853 for (i = 0; i < n_capabilities; i++) {
1854 if (cap != &capabilities[i]) {
1855 task->cap = &capabilities[i];
1856 releaseCapability(&capabilities[i]);
1863 /* add a ContinueThread event to continue execution of current thread */
1864 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
1866 t, (StgClosure*)NULL, (rtsSpark*)NULL);
1868 debugBelch("GRAN: eventq and runnableq after Garbage collection:\n\n");
1874 /* ---------------------------------------------------------------------------
1875 * rtsSupportsBoundThreads(): is the RTS built to support bound threads?
1876 * used by Control.Concurrent for error checking.
1877 * ------------------------------------------------------------------------- */
1880 rtsSupportsBoundThreads(void)
1882 #if defined(THREADED_RTS)
1889 /* ---------------------------------------------------------------------------
1890 * isThreadBound(tso): check whether tso is bound to an OS thread.
1891 * ------------------------------------------------------------------------- */
1894 isThreadBound(StgTSO* tso USED_WHEN_THREADED_RTS)
1896 #if defined(THREADED_RTS)
1897 return (tso->bound != NULL);
1902 /* ---------------------------------------------------------------------------
1903 * Singleton fork(). Do not copy any running threads.
1904 * ------------------------------------------------------------------------- */
1906 #if !defined(mingw32_HOST_OS) && !defined(SMP)
1907 #define FORKPROCESS_PRIMOP_SUPPORTED
1910 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
1912 deleteThreadImmediately(Capability *cap, StgTSO *tso);
1915 forkProcess(HsStablePtr *entry
1916 #ifndef FORKPROCESS_PRIMOP_SUPPORTED
1921 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
1927 IF_DEBUG(scheduler,sched_belch("forking!"));
1929 // ToDo: for SMP, we should probably acquire *all* the capabilities
1934 if (pid) { // parent
1936 // just return the pid
1941 // delete all threads
1942 cap->run_queue_hd = END_TSO_QUEUE;
1943 cap->run_queue_tl = END_TSO_QUEUE;
1945 for (t = all_threads; t != END_TSO_QUEUE; t = next) {
1948 // don't allow threads to catch the ThreadKilled exception
1949 deleteThreadImmediately(cap,t);
1952 // wipe the main thread list
1953 while ((task = all_tasks) != NULL) {
1954 all_tasks = task->all_link;
1958 cap = rts_evalStableIO(cap, entry, NULL); // run the action
1959 rts_checkSchedStatus("forkProcess",cap);
1962 hs_exit(); // clean up and exit
1963 stg_exit(EXIT_SUCCESS);
1965 #else /* !FORKPROCESS_PRIMOP_SUPPORTED */
1966 barf("forkProcess#: primop not supported on this platform, sorry!\n");
1971 /* ---------------------------------------------------------------------------
1972 * Delete the threads on the run queue of the current capability.
1973 * ------------------------------------------------------------------------- */
1976 deleteRunQueue (Capability *cap)
1979 for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = next) {
1980 ASSERT(t->what_next != ThreadRelocated);
1982 deleteThread(cap, t);
1986 /* startThread and insertThread are now in GranSim.c -- HWL */
1989 /* -----------------------------------------------------------------------------
1990 Managing the suspended_ccalling_tasks list.
1991 Locks required: sched_mutex
1992 -------------------------------------------------------------------------- */
1995 suspendTask (Capability *cap, Task *task)
1997 ASSERT(task->next == NULL && task->prev == NULL);
1998 task->next = cap->suspended_ccalling_tasks;
2000 if (cap->suspended_ccalling_tasks) {
2001 cap->suspended_ccalling_tasks->prev = task;
2003 cap->suspended_ccalling_tasks = task;
2007 recoverSuspendedTask (Capability *cap, Task *task)
2010 task->prev->next = task->next;
2012 ASSERT(cap->suspended_ccalling_tasks == task);
2013 cap->suspended_ccalling_tasks = task->next;
2016 task->next->prev = task->prev;
2018 task->next = task->prev = NULL;
2021 /* ---------------------------------------------------------------------------
2022 * Suspending & resuming Haskell threads.
2024 * When making a "safe" call to C (aka _ccall_GC), the task gives back
2025 * its capability before calling the C function. This allows another
2026 * task to pick up the capability and carry on running Haskell
2027 * threads. It also means that if the C call blocks, it won't lock
2030 * The Haskell thread making the C call is put to sleep for the
2031  * duration of the call, on the suspended_ccalling_tasks queue. We
2032 * give out a token to the task, which it can use to resume the thread
2033 * on return from the C function.
2034 * ------------------------------------------------------------------------- */
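/* Sketch of that protocol as the compiled code around a safe foreign
 * call performs it (illustrative; 'token' is the opaque value threaded
 * between the two calls, and some_c_function is hypothetical):
 *
 *   token = suspendThread(&cap->r);   // give up the Capability
 *   r = some_c_function(args);        // may block; RTS keeps running
 *   reg = resumeThread(token);        // reacquire a Capability + TSO
 */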
2037 suspendThread (StgRegTable *reg)
2040 int saved_errno = errno;
2044 /* assume that *reg is a pointer to the StgRegTable part of a Capability.
2046 cap = regTableToCapability(reg);
2048 task = cap->running_task;
2049 tso = cap->r.rCurrentTSO;
2052 sched_belch("thread %d did a safe foreign call", cap->r.rCurrentTSO->id));
2054 // XXX this might not be necessary --SDM
2055 tso->what_next = ThreadRunGHC;
2059 if(tso->blocked_exceptions == NULL) {
2060 tso->why_blocked = BlockedOnCCall;
2061 tso->blocked_exceptions = END_TSO_QUEUE;
2063 tso->why_blocked = BlockedOnCCall_NoUnblockExc;
2066 // Hand back capability
2067 task->suspended_tso = tso;
2069 ACQUIRE_LOCK(&cap->lock);
2071 suspendTask(cap,task);
2072 cap->in_haskell = rtsFalse;
2073 releaseCapability_(cap);
2075 RELEASE_LOCK(&cap->lock);
2077 #if defined(THREADED_RTS)
2078 /* Preparing to leave the RTS, so ensure there's a native thread/task
waiting to take over. */
2081 IF_DEBUG(scheduler, sched_belch("thread %d: leaving RTS", tso->id));
2084 errno = saved_errno;
2089 resumeThread (void *task_)
2093 int saved_errno = errno;
2097 // Wait for permission to re-enter the RTS with the result.
2098 waitForReturnCapability(&cap,task);
2099 // we might be on a different capability now... but if so, our
// entry on the suspended_ccalling_tasks list will also have been migrated.
2103 // Remove the thread from the suspended list
2104 recoverSuspendedTask(cap,task);
2106 tso = task->suspended_tso;
2107 task->suspended_tso = NULL;
2108 tso->link = END_TSO_QUEUE;
2109 IF_DEBUG(scheduler, sched_belch("thread %d: re-entering RTS", tso->id));
2111 if (tso->why_blocked == BlockedOnCCall) {
2112 awakenBlockedQueue(cap,tso->blocked_exceptions);
2113 tso->blocked_exceptions = NULL;
2116 /* Reset blocking status */
2117 tso->why_blocked = NotBlocked;
2119 cap->r.rCurrentTSO = tso;
2120 cap->in_haskell = rtsTrue;
2121 errno = saved_errno;
2126 /* ---------------------------------------------------------------------------
2127 * Comparing Thread ids.
2129 * This is used from STG land in the implementation of the
2130 * instances of Eq/Ord for ThreadIds.
2131 * ------------------------------------------------------------------------ */
2134 cmp_thread(StgPtr tso1, StgPtr tso2)
2136 StgThreadID id1 = ((StgTSO *)tso1)->id;
2137 StgThreadID id2 = ((StgTSO *)tso2)->id;
2139 if (id1 < id2) return (-1);
2140 if (id1 > id2) return 1;
2144 /* ---------------------------------------------------------------------------
2145 * Fetching the ThreadID from an StgTSO.
2147 * This is used in the implementation of Show for ThreadIds.
2148 * ------------------------------------------------------------------------ */
2150 rts_getThreadId(StgPtr tso)
2152 return ((StgTSO *)tso)->id;
2157 labelThread(StgPtr tso, char *label)
2162 /* Caveat: Once set, you can only set the thread name to "" */
2163 len = strlen(label)+1;
2164 buf = stgMallocBytes(len * sizeof(char), "Schedule.c:labelThread()");
2165 strncpy(buf,label,len);
2166 /* Update will free the old memory for us */
2167 updateThreadLabel(((StgTSO *)tso)->id,buf);
2171 /* ---------------------------------------------------------------------------
2172 Create a new thread.
2174 The new thread starts with the given stack size. Before the
2175 scheduler can run, however, this thread needs to have a closure
2176 (and possibly some arguments) pushed on its stack. See
2177 pushClosure() in Schedule.h.
2179 createGenThread() and createIOThread() (in SchedAPI.h) are
2180 convenient packaged versions of this function.
2182 currently pri (priority) is only used in a GRAN setup -- HWL
2183 ------------------------------------------------------------------------ */
2185 /* currently pri (priority) is only used in a GRAN setup -- HWL */
2187 createThread(nat size, StgInt pri)
2190 createThread(Capability *cap, nat size)
2196 /* sched_mutex is *not* required */
2198 /* First check whether we should create a thread at all */
2199 #if defined(PARALLEL_HASKELL)
2200 /* check that no more than RtsFlags.ParFlags.maxThreads threads are created */
2201 if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads) {
2203 debugBelch("{createThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)\n",
2204 RtsFlags.ParFlags.maxThreads, advisory_thread_count);
2205 return END_TSO_QUEUE;
2211 ASSERT(!RtsFlags.GranFlags.Light || CurrentProc==0);
2214 // ToDo: check whether size = stack_size - TSO_STRUCT_SIZEW
2216 /* catch ridiculously small stack sizes */
2217 if (size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
2218 size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
2221 stack_size = size - TSO_STRUCT_SIZEW;
2223 tso = (StgTSO *)allocateLocal(cap, size);
2224 TICK_ALLOC_TSO(stack_size, 0);
2226 SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);
2228 SET_GRAN_HDR(tso, ThisPE);
2231 // Always start with the compiled code evaluator
2232 tso->what_next = ThreadRunGHC;
2234 tso->why_blocked = NotBlocked;
2235 tso->blocked_exceptions = NULL;
2237 tso->saved_errno = 0;
2240 tso->stack_size = stack_size;
2241 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
2243 tso->sp = (P_)&(tso->stack) + stack_size;
2245 tso->trec = NO_TREC;
2248 tso->prof.CCCS = CCS_MAIN;
2251 /* put a stop frame on the stack */
2252 tso->sp -= sizeofW(StgStopFrame);
2253 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
2254 tso->link = END_TSO_QUEUE;
2258 /* uses more flexible routine in GranSim */
2259 insertThread(tso, CurrentProc);
/* In a non-GranSim setup the pushing of a TSO onto the runq is separated from the creation of the thread. */
2267 if (RtsFlags.GranFlags.GranSimStats.Full)
2268 DumpGranEvent(GR_START,tso);
2269 #elif defined(PARALLEL_HASKELL)
2270 if (RtsFlags.ParFlags.ParStats.Full)
2271 DumpGranEvent(GR_STARTQ,tso);
/* HACK to avoid SCHEDULE */
/* Link the new thread on the global thread list. */
2278 ACQUIRE_LOCK(&sched_mutex);
2279 tso->id = next_thread_id++; // while we have the mutex
2280 tso->global_link = all_threads;
2282 RELEASE_LOCK(&sched_mutex);
2285 tso->dist.priority = MandatoryPriority; //by default that is...
2289 tso->gran.pri = pri;
2291 tso->gran.magic = TSO_MAGIC; // debugging only
2293 tso->gran.sparkname = 0;
2294 tso->gran.startedat = CURRENT_TIME;
2295 tso->gran.exported = 0;
2296 tso->gran.basicblocks = 0;
2297 tso->gran.allocs = 0;
2298 tso->gran.exectime = 0;
2299 tso->gran.fetchtime = 0;
2300 tso->gran.fetchcount = 0;
2301 tso->gran.blocktime = 0;
2302 tso->gran.blockcount = 0;
2303 tso->gran.blockedat = 0;
2304 tso->gran.globalsparks = 0;
2305 tso->gran.localsparks = 0;
2306 if (RtsFlags.GranFlags.Light)
2307 tso->gran.clock = Now; /* local clock */
2309 tso->gran.clock = 0;
2311 IF_DEBUG(gran,printTSO(tso));
2312 #elif defined(PARALLEL_HASKELL)
2314 tso->par.magic = TSO_MAGIC; // debugging only
2316 tso->par.sparkname = 0;
2317 tso->par.startedat = CURRENT_TIME;
2318 tso->par.exported = 0;
2319 tso->par.basicblocks = 0;
2320 tso->par.allocs = 0;
2321 tso->par.exectime = 0;
2322 tso->par.fetchtime = 0;
2323 tso->par.fetchcount = 0;
2324 tso->par.blocktime = 0;
2325 tso->par.blockcount = 0;
2326 tso->par.blockedat = 0;
2327 tso->par.globalsparks = 0;
2328 tso->par.localsparks = 0;
2332 globalGranStats.tot_threads_created++;
2333 globalGranStats.threads_created_on_PE[CurrentProc]++;
2334 globalGranStats.tot_sq_len += spark_queue_len(CurrentProc);
2335 globalGranStats.tot_sq_probes++;
2336 #elif defined(PARALLEL_HASKELL)
2337 // collect parallel global statistics (currently done together with GC stats)
2338 if (RtsFlags.ParFlags.ParStats.Global &&
2339 RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
2340 //debugBelch("Creating thread %d @ %11.2f\n", tso->id, usertime());
2341 globalParStats.tot_threads_created++;
sched_belch("==__ schedule: Created TSO %d (%p) on PE %d;",
            tso->id, tso, CurrentProc));
2349 #elif defined(PARALLEL_HASKELL)
2350 IF_PAR_DEBUG(verbose,
sched_belch("==__ schedule: Created TSO %ld (%p); %d threads active",
            (long)tso->id, tso, advisory_thread_count));
2354 IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words",
2355 (long)tso->id, (long)tso->stack_size));
/* All parallel thread creation calls should fall through the following routine. */
2365 createThreadFromSpark(rtsSpark spark)
2367 ASSERT(spark != (rtsSpark)NULL);
2368 // JB: TAKE CARE OF THIS COUNTER! BUGGY
2369 if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads)
2371 barf("{createSparkThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)",
2372 RtsFlags.ParFlags.maxThreads, advisory_thread_count);
2373 return END_TSO_QUEUE;
2377 tso = createThread(RtsFlags.GcFlags.initialStkSize);
2378 if (tso==END_TSO_QUEUE)
2379 barf("createSparkThread: Cannot create TSO");
2381 tso->priority = AdvisoryPriority;
2383 pushClosure(tso,spark);
2385 advisory_thread_count++; // JB: TAKE CARE OF THIS COUNTER! BUGGY
/* Turn a spark into a thread.
   ToDo: fix for SMP (needs to acquire SCHED_MUTEX!) */
2397 activateSpark (rtsSpark spark)
2401 tso = createSparkThread(spark);
2402 if (RtsFlags.ParFlags.ParStats.Full) {
2403 //ASSERT(run_queue_hd == END_TSO_QUEUE); // I think ...
2404 IF_PAR_DEBUG(verbose,
2405 debugBelch("==^^ activateSpark: turning spark of closure %p (%s) into a thread\n",
2406 (StgClosure *)spark, info_type((StgClosure *)spark)));
2408 // ToDo: fwd info on local/global spark to thread -- HWL
2409 // tso->gran.exported = spark->exported;
2410 // tso->gran.locked = !spark->global;
2411 // tso->gran.sparkname = spark->name;
2417 /* ---------------------------------------------------------------------------
2420 * scheduleThread puts a thread on the end of the runnable queue.
2421 * This will usually be done immediately after a thread is created.
2422 * The caller of scheduleThread must create the thread using e.g.
2423 * createThread and push an appropriate closure
2424 * on this thread's stack before the scheduler is invoked.
2425 * ------------------------------------------------------------------------ */
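/* An illustrative caller (a sketch; createGenThread()/createIOThread()
 * in SchedAPI.h are the canonical wrappers, and `closure` here is a
 * placeholder for the closure to run):
 */
#if 0
{
    StgTSO *t = createThread(cap, RtsFlags.GcFlags.initialStkSize);
    pushClosure(t, (W_)closure);   // see pushClosure() in Schedule.h
    scheduleThread(cap, t);        // put it on the run queue
}
#endif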
2428 scheduleThread(Capability *cap, StgTSO *tso)
2430 // The thread goes at the *end* of the run-queue, to avoid possible
2431 // starvation of any threads already on the queue.
2432 appendToRunQueue(cap,tso);
2436 scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
2440 // We already created/initialised the Task
2441 task = cap->running_task;
2443 // This TSO is now a bound thread; make the Task and TSO
2444 // point to each other.
2449 task->stat = NoStatus;
2451 appendToRunQueue(cap,tso);
2453 IF_DEBUG(scheduler, sched_belch("new bound thread (%d)", tso->id));
2456 /* GranSim specific init */
2457 CurrentTSO = m->tso; // the TSO to run
2458 procStatus[MainProc] = Busy; // status of main PE
2459 CurrentProc = MainProc; // PE to run it on
2462 cap = schedule(cap,task);
2464 ASSERT(task->stat != NoStatus);
2466 IF_DEBUG(scheduler, sched_belch("bound thread (%d) finished", task->tso->id));
2470 /* ----------------------------------------------------------------------------
2472 * ------------------------------------------------------------------------- */
2474 #if defined(THREADED_RTS)
2476 workerStart(Task *task)
2480 // See startWorkerTask().
2481 ACQUIRE_LOCK(&task->lock);
2483 RELEASE_LOCK(&task->lock);
2485 // set the thread-local pointer to the Task:
2488 // schedule() runs without a lock.
2489 cap = schedule(cap,task);
2491 // On exit from schedule(), we have a Capability.
2492 releaseCapability(cap);
2497 /* ---------------------------------------------------------------------------
2500 * Initialise the scheduler. This resets all the queues - if the
queues contained any threads, they'll be garbage collected at the next pass.
2504 * ------------------------------------------------------------------------ */
for (i=0; i < MAX_PROC; i++) {   // NB: the queue arrays have MAX_PROC elements
2512 run_queue_hds[i] = END_TSO_QUEUE;
2513 run_queue_tls[i] = END_TSO_QUEUE;
2514 blocked_queue_hds[i] = END_TSO_QUEUE;
2515 blocked_queue_tls[i] = END_TSO_QUEUE;
2516 ccalling_threadss[i] = END_TSO_QUEUE;
2517 blackhole_queue[i] = END_TSO_QUEUE;
2518 sleeping_queue = END_TSO_QUEUE;
2520 #elif !defined(THREADED_RTS)
2521 blocked_queue_hd = END_TSO_QUEUE;
2522 blocked_queue_tl = END_TSO_QUEUE;
2523 sleeping_queue = END_TSO_QUEUE;
2526 blackhole_queue = END_TSO_QUEUE;
2527 all_threads = END_TSO_QUEUE;
2532 RtsFlags.ConcFlags.ctxtSwitchTicks =
2533 RtsFlags.ConcFlags.ctxtSwitchTime / TICK_MILLISECS;
2535 #if defined(THREADED_RTS)
/* Initialise the mutex and condition variables used by the scheduler. */
2538 initMutex(&sched_mutex);
2541 ACQUIRE_LOCK(&sched_mutex);
/* A capability holds the state a native thread needs in
 * order to execute STG code.  At least one capability is
 * floating around (only SMP builds have more than one). */
/* Eagerly start one worker to run each Capability, except for
 * Capability 0.  The idea is that we're probably going to start a
 * bound thread on Capability 0 pretty soon, so we don't want a
 * worker task hogging it. */
2561 for (i = 1; i < n_capabilities; i++) {
2562 cap = &capabilities[i];
2563 ACQUIRE_LOCK(&cap->lock);
2564 startWorkerTask(cap, workerStart);
2565 RELEASE_LOCK(&cap->lock);
2570 #if /* defined(SMP) ||*/ defined(PARALLEL_HASKELL)
2574 RELEASE_LOCK(&sched_mutex);
2578 exitScheduler( void )
2580 interrupted = rtsTrue;
2581 shutting_down_scheduler = rtsTrue;
2583 #if defined(THREADED_RTS)
2588 ACQUIRE_LOCK(&sched_mutex);
2589 task = newBoundTask();
2590 RELEASE_LOCK(&sched_mutex);
2592 for (i = 0; i < n_capabilities; i++) {
2593 shutdownCapability(&capabilities[i], task);
2595 boundTaskExiting(task);
2601 /* ---------------------------------------------------------------------------
2602 Where are the roots that we know about?
2604 - all the threads on the runnable queue
2605 - all the threads on the blocked queue
2606 - all the threads on the sleeping queue
- all the threads currently executing a _ccall_GC
2608 - all the "main threads"
2610 ------------------------------------------------------------------------ */
/* This has to be protected either by the scheduler monitor, or by the
   garbage collection monitor (probably the latter). */
2618 GetRoots( evac_fn evac )
2625 for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
2626 if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL)))
2627 evac((StgClosure **)&run_queue_hds[i]);
2628 if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL)))
2629 evac((StgClosure **)&run_queue_tls[i]);
2631 if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL)))
2632 evac((StgClosure **)&blocked_queue_hds[i]);
2633 if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL)))
2634 evac((StgClosure **)&blocked_queue_tls[i]);
2635 if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL)))
evac((StgClosure **)&ccalling_threadss[i]);
2643 for (i = 0; i < n_capabilities; i++) {
2644 cap = &capabilities[i];
2645 evac((StgClosure **)&cap->run_queue_hd);
2646 evac((StgClosure **)&cap->run_queue_tl);
2648 for (task = cap->suspended_ccalling_tasks; task != NULL;
2650 evac((StgClosure **)&task->suspended_tso);
2654 #if !defined(THREADED_RTS)
2655 evac((StgClosure **)&blocked_queue_hd);
2656 evac((StgClosure **)&blocked_queue_tl);
2657 evac((StgClosure **)&sleeping_queue);
2661 evac((StgClosure **)&blackhole_queue);
2663 #if defined(PARALLEL_HASKELL) || defined(GRAN)
2664 markSparkQueue(evac);
2667 #if defined(RTS_USER_SIGNALS)
2668 // mark the signal handlers (signals should be already blocked)
2669 markSignalHandlers(evac);
2673 /* -----------------------------------------------------------------------------
2676 This is the interface to the garbage collector from Haskell land.
2677 We provide this so that external C code can allocate and garbage
2678 collect when called from Haskell via _ccall_GC.
2680 It might be useful to provide an interface whereby the programmer
2681 can specify more roots (ToDo).
2683 This needs to be protected by the GC condition variable above. KH.
2684 -------------------------------------------------------------------------- */
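/* An illustrative extra-roots callback for performGCWithRoots() below
 * (a sketch; my_root and my_get_roots are hypothetical names):
 */
#if 0
static StgClosure *my_root;

static void my_get_roots (evac_fn evac)
{
    evac(&my_root);    // keep my_root alive across the collection
}

/* ... performGCWithRoots(my_get_roots); ... */
#endif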
2686 static void (*extra_roots)(evac_fn);
2692 // ToDo: we have to grab all the capabilities here.
2693 errorBelch("performGC not supported in threaded RTS (yet)");
2694 stg_exit(EXIT_FAILURE);
2696 /* Obligated to hold this lock upon entry */
2697 GarbageCollect(GetRoots,rtsFalse);
2701 performMajorGC(void)
errorBelch("performMajorGC not supported in threaded RTS (yet)");
2705 stg_exit(EXIT_FAILURE);
2707 GarbageCollect(GetRoots,rtsTrue);
2711 AllRoots(evac_fn evac)
2713 GetRoots(evac); // the scheduler's roots
2714 extra_roots(evac); // the user's roots
2718 performGCWithRoots(void (*get_roots)(evac_fn))
2721 errorBelch("performGCWithRoots not supported in threaded RTS (yet)");
2722 stg_exit(EXIT_FAILURE);
2724 extra_roots = get_roots;
2725 GarbageCollect(AllRoots,rtsFalse);
2728 /* -----------------------------------------------------------------------------
2731 If the thread has reached its maximum stack size, then raise the
2732 StackOverflow exception in the offending thread. Otherwise
2733 relocate the TSO into a larger chunk of memory and adjust its stack
2735 -------------------------------------------------------------------------- */
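/* The resizing arithmetic below, in outline: double the stack (capped
 * at max_stack_size), add TSO_STRUCT_SIZE, round the total up to a
 * whole number of blocks (BLOCK_ROUND_UP / round_to_mblocks), and the
 * usable stack of the new TSO is whatever remains after subtracting
 * TSO_STRUCT_SIZEW again.  The rounding means a thread may get a
 * slightly larger stack than straight doubling would suggest.
 */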
2738 threadStackOverflow(Capability *cap, StgTSO *tso)
2740 nat new_stack_size, stack_words;
2745 IF_DEBUG(sanity,checkTSO(tso));
2746 if (tso->stack_size >= tso->max_stack_size) {
2749 debugBelch("@@ threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)\n",
2750 (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
2751 /* If we're debugging, just print out the top of the stack */
2752 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2755 /* Send this thread the StackOverflow exception */
2756 raiseAsync(cap, tso, (StgClosure *)stackOverflow_closure);
/* Try to double the current stack size.  If that takes us over the
 * maximum stack size for this thread, then use the maximum instead.
 * Finally round up so the TSO ends up as a whole number of blocks. */
2764 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
2765 new_tso_size = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
2766 TSO_STRUCT_SIZE)/sizeof(W_);
2767 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
2768 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
IF_DEBUG(scheduler, sched_belch("increasing stack size from %ld words to %ld.\n", (long)tso->stack_size, (long)new_stack_size));
2772 dest = (StgTSO *)allocate(new_tso_size);
2773 TICK_ALLOC_TSO(new_stack_size,0);
2775 /* copy the TSO block and the old stack into the new area */
2776 memcpy(dest,tso,TSO_STRUCT_SIZE);
2777 stack_words = tso->stack + tso->stack_size - tso->sp;
2778 new_sp = (P_)dest + new_tso_size - stack_words;
2779 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
2781 /* relocate the stack pointers... */
2783 dest->stack_size = new_stack_size;
2785 /* Mark the old TSO as relocated. We have to check for relocated
2786 * TSOs in the garbage collector and any primops that deal with TSOs.
2788 * It's important to set the sp value to just beyond the end
* of the stack, so we don't attempt to scavenge any part of the
* dead TSO's stack. */
2792 tso->what_next = ThreadRelocated;
2794 tso->sp = (P_)&(tso->stack[tso->stack_size]);
2795 tso->why_blocked = NotBlocked;
2797 IF_PAR_DEBUG(verbose,
2798 debugBelch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld\n",
2799 tso->id, tso, tso->stack_size);
2800 /* If we're debugging, just print out the top of the stack */
2801 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2804 IF_DEBUG(sanity,checkTSO(tso));
2806 IF_DEBUG(scheduler,printTSO(dest));
2812 /* ---------------------------------------------------------------------------
2813 Wake up a queue that was blocked on some resource.
2814 ------------------------------------------------------------------------ */
2818 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
2821 #elif defined(PARALLEL_HASKELL)
2823 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
2825 /* write RESUME events to log file and
2826 update blocked and fetch time (depending on type of the orig closure) */
2827 if (RtsFlags.ParFlags.ParStats.Full) {
2828 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
2829 GR_RESUMEQ, ((StgTSO *)bqe), ((StgTSO *)bqe)->block_info.closure,
2830 0, 0 /* spark_queue_len(ADVISORY_POOL) */);
2831 if (emptyRunQueue())
2832 emitSchedule = rtsTrue;
2834 switch (get_itbl(node)->type) {
2836 ((StgTSO *)bqe)->par.fetchtime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
2841 ((StgTSO *)bqe)->par.blocktime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
2848 barf("{unblockOne}Daq Qagh: unexpected closure in blocking queue");
2855 StgBlockingQueueElement *
2856 unblockOne(StgBlockingQueueElement *bqe, StgClosure *node)
2859 PEs node_loc, tso_loc;
2861 node_loc = where_is(node); // should be lifted out of loop
2862 tso = (StgTSO *)bqe; // wastes an assignment to get the type right
2863 tso_loc = where_is((StgClosure *)tso);
2864 if (IS_LOCAL_TO(PROCS(node),tso_loc)) { // TSO is local
/* !fake_fetch => the TSO being on CurrentProc is the same as IS_LOCAL_TO */
2866 ASSERT(CurrentProc!=node_loc || tso_loc==CurrentProc);
2867 CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.lunblocktime;
2868 // insertThread(tso, node_loc);
2869 new_event(tso_loc, tso_loc, CurrentTime[CurrentProc],
2871 tso, node, (rtsSpark*)NULL);
2872 tso->link = END_TSO_QUEUE; // overwrite link just to be sure
2875 } else { // TSO is remote (actually should be FMBQ)
2876 CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.mpacktime +
2877 RtsFlags.GranFlags.Costs.gunblocktime +
2878 RtsFlags.GranFlags.Costs.latency;
2879 new_event(tso_loc, CurrentProc, CurrentTime[CurrentProc],
2881 tso, node, (rtsSpark*)NULL);
2882 tso->link = END_TSO_QUEUE; // overwrite link just to be sure
2885 /* the thread-queue-overhead is accounted for in either Resume or UnblockThread */
2887 debugBelch(" %s TSO %d (%p) [PE %d] (block_info.closure=%p) (next=%p) ,",
2888 (node_loc==tso_loc ? "Local" : "Global"),
2889 tso->id, tso, CurrentProc, tso->block_info.closure, tso->link));
2890 tso->block_info.closure = NULL;
2891 IF_DEBUG(scheduler,debugBelch("-- Waking up thread %ld (%p)\n",
2894 #elif defined(PARALLEL_HASKELL)
2895 StgBlockingQueueElement *
2896 unblockOne(StgBlockingQueueElement *bqe, StgClosure *node)
2898 StgBlockingQueueElement *next;
2900 switch (get_itbl(bqe)->type) {
2902 ASSERT(((StgTSO *)bqe)->why_blocked != NotBlocked);
2903 /* if it's a TSO just push it onto the run_queue */
2905 ((StgTSO *)bqe)->link = END_TSO_QUEUE; // debugging?
2906 APPEND_TO_RUN_QUEUE((StgTSO *)bqe);
2908 unblockCount(bqe, node);
2909 /* reset blocking status after dumping event */
2910 ((StgTSO *)bqe)->why_blocked = NotBlocked;
2914 /* if it's a BLOCKED_FETCH put it on the PendingFetches list */
2916 bqe->link = (StgBlockingQueueElement *)PendingFetches;
2917 PendingFetches = (StgBlockedFetch *)bqe;
2921 /* can ignore this case in a non-debugging setup;
2922 see comments on RBHSave closures above */
2924 /* check that the closure is an RBHSave closure */
2925 ASSERT(get_itbl((StgClosure *)bqe) == &stg_RBH_Save_0_info ||
2926 get_itbl((StgClosure *)bqe) == &stg_RBH_Save_1_info ||
2927 get_itbl((StgClosure *)bqe) == &stg_RBH_Save_2_info);
2931 barf("{unblockOne}Daq Qagh: Unexpected IP (%#lx; %s) in blocking queue at %#lx\n",
2932 get_itbl((StgClosure *)bqe), info_type((StgClosure *)bqe),
2936 IF_PAR_DEBUG(bq, debugBelch(", %p (%s)\n", bqe, info_type((StgClosure*)bqe)));
2942 unblockOne(Capability *cap, StgTSO *tso)
2946 ASSERT(get_itbl(tso)->type == TSO);
2947 ASSERT(tso->why_blocked != NotBlocked);
2948 tso->why_blocked = NotBlocked;
2950 tso->link = END_TSO_QUEUE;
2952 // We might have just migrated this TSO to our Capability:
2954 tso->bound->cap = cap;
2957 appendToRunQueue(cap,tso);
2959 // we're holding a newly woken thread, make sure we context switch
2960 // quickly so we can migrate it if necessary.
2962 IF_DEBUG(scheduler,sched_belch("waking up thread %ld", (long)tso->id));
2969 awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
2971 StgBlockingQueueElement *bqe;
2976 debugBelch("##-_ AwBQ for node %p on PE %d @ %ld by TSO %d (%p): \n", \
2977 node, CurrentProc, CurrentTime[CurrentProc],
2978 CurrentTSO->id, CurrentTSO));
2980 node_loc = where_is(node);
2982 ASSERT(q == END_BQ_QUEUE ||
2983 get_itbl(q)->type == TSO || // q is either a TSO or an RBHSave
2984 get_itbl(q)->type == CONSTR); // closure (type constructor)
2985 ASSERT(is_unique(node));
2987 /* FAKE FETCH: magically copy the node to the tso's proc;
2988 no Fetch necessary because in reality the node should not have been
moved to the other PE in the first place. */
2991 if (CurrentProc!=node_loc) {
2993 debugBelch("## node %p is on PE %d but CurrentProc is %d (TSO %d); assuming fake fetch and adjusting bitmask (old: %#x)\n",
2994 node, node_loc, CurrentProc, CurrentTSO->id,
2995 // CurrentTSO, where_is(CurrentTSO),
2996 node->header.gran.procs));
2997 node->header.gran.procs = (node->header.gran.procs) | PE_NUMBER(CurrentProc);
2999 debugBelch("## new bitmask of node %p is %#x\n",
3000 node, node->header.gran.procs));
3001 if (RtsFlags.GranFlags.GranSimStats.Global) {
3002 globalGranStats.tot_fake_fetches++;
3007 // ToDo: check: ASSERT(CurrentProc==node_loc);
3008 while (get_itbl(bqe)->type==TSO) { // q != END_TSO_QUEUE) {
/* bqe points to the current element in the queue;
   next points to the next element in the queue */
3014 //tso = (StgTSO *)bqe; // wastes an assignment to get the type right
3015 //tso_loc = where_is(tso);
3017 bqe = unblockOne(bqe, node);
3020 /* if this is the BQ of an RBH, we have to put back the info ripped out of
3021 the closure to make room for the anchor of the BQ */
3022 if (bqe!=END_BQ_QUEUE) {
3023 ASSERT(get_itbl(node)->type == RBH && get_itbl(bqe)->type == CONSTR);
3025 ASSERT((info_ptr==&RBH_Save_0_info) ||
3026 (info_ptr==&RBH_Save_1_info) ||
3027 (info_ptr==&RBH_Save_2_info));
3029 /* cf. convertToRBH in RBH.c for writing the RBHSave closure */
3030 ((StgRBH *)node)->blocking_queue = (StgBlockingQueueElement *)((StgRBHSave *)bqe)->payload[0];
3031 ((StgRBH *)node)->mut_link = (StgMutClosure *)((StgRBHSave *)bqe)->payload[1];
3034 debugBelch("## Filled in RBH_Save for %p (%s) at end of AwBQ\n",
3035 node, info_type(node)));
3038 /* statistics gathering */
3039 if (RtsFlags.GranFlags.GranSimStats.Global) {
3040 // globalGranStats.tot_bq_processing_time += bq_processing_time;
3041 globalGranStats.tot_bq_len += len; // total length of all bqs awakened
3042 // globalGranStats.tot_bq_len_local += len_local; // same for local TSOs only
3043 globalGranStats.tot_awbq++; // total no. of bqs awakened
3046 debugBelch("## BQ Stats of %p: [%d entries] %s\n",
3047 node, len, (bqe!=END_BQ_QUEUE) ? "RBH" : ""));
3049 #elif defined(PARALLEL_HASKELL)
3051 awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
3053 StgBlockingQueueElement *bqe;
3055 IF_PAR_DEBUG(verbose,
3056 debugBelch("##-_ AwBQ for node %p on [%x]: \n",
3060 if(get_itbl(q)->type == CONSTR || q==END_BQ_QUEUE) {
3061 IF_PAR_DEBUG(verbose, debugBelch("## ... nothing to unblock so lets just return. RFP (BUG?)\n"));
3066 ASSERT(q == END_BQ_QUEUE ||
3067 get_itbl(q)->type == TSO ||
3068 get_itbl(q)->type == BLOCKED_FETCH ||
3069 get_itbl(q)->type == CONSTR);
3072 while (get_itbl(bqe)->type==TSO ||
3073 get_itbl(bqe)->type==BLOCKED_FETCH) {
3074 bqe = unblockOne(bqe, node);
3078 #else /* !GRAN && !PARALLEL_HASKELL */
3081 awakenBlockedQueue(Capability *cap, StgTSO *tso)
3083 if (tso == NULL) return; // hack; see bug #1235728, and comments in
3085 while (tso != END_TSO_QUEUE) {
3086 tso = unblockOne(cap,tso);
3091 /* ---------------------------------------------------------------------------
3093 - usually called inside a signal handler so it mustn't do anything fancy.
3094 ------------------------------------------------------------------------ */
3097 interruptStgRts(void)
3101 #if defined(THREADED_RTS)
3102 prodAllCapabilities();
3106 /* -----------------------------------------------------------------------------
This is for use when we raise an exception in another thread, which may be blocked.
3111 This has nothing to do with the UnblockThread event in GranSim. -- HWL
3112 -------------------------------------------------------------------------- */
3114 #if defined(GRAN) || defined(PARALLEL_HASKELL)
/* NB: only the type of the blocking queue is different in GranSim and GUM;
   the operations on the queue-elements are the same;
   long live polymorphism!

   Locks: sched_mutex is held upon entry and exit. */
3124 unblockThread(Capability *cap, StgTSO *tso)
3126 StgBlockingQueueElement *t, **last;
3128 switch (tso->why_blocked) {
3131 return; /* not blocked */
3134 // Be careful: nothing to do here! We tell the scheduler that the thread
3135 // is runnable and we leave it to the stack-walking code to abort the
3136 // transaction while unwinding the stack. We should perhaps have a debugging
3137 // test to make sure that this really happens and that the 'zombie' transaction
3138 // does not get committed.
3142 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
3144 StgBlockingQueueElement *last_tso = END_BQ_QUEUE;
3145 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
3147 last = (StgBlockingQueueElement **)&mvar->head;
3148 for (t = (StgBlockingQueueElement *)mvar->head;
3150 last = &t->link, last_tso = t, t = t->link) {
3151 if (t == (StgBlockingQueueElement *)tso) {
3152 *last = (StgBlockingQueueElement *)tso->link;
3153 if (mvar->tail == tso) {
3154 mvar->tail = (StgTSO *)last_tso;
3159 barf("unblockThread (MVAR): TSO not found");
3162 case BlockedOnBlackHole:
3163 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
3165 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
3167 last = &bq->blocking_queue;
3168 for (t = bq->blocking_queue;
3170 last = &t->link, t = t->link) {
3171 if (t == (StgBlockingQueueElement *)tso) {
3172 *last = (StgBlockingQueueElement *)tso->link;
3176 barf("unblockThread (BLACKHOLE): TSO not found");
3179 case BlockedOnException:
3181 StgTSO *target = tso->block_info.tso;
3183 ASSERT(get_itbl(target)->type == TSO);
3185 if (target->what_next == ThreadRelocated) {
3186 target = target->link;
3187 ASSERT(get_itbl(target)->type == TSO);
3190 ASSERT(target->blocked_exceptions != NULL);
3192 last = (StgBlockingQueueElement **)&target->blocked_exceptions;
3193 for (t = (StgBlockingQueueElement *)target->blocked_exceptions;
3195 last = &t->link, t = t->link) {
3196 ASSERT(get_itbl(t)->type == TSO);
3197 if (t == (StgBlockingQueueElement *)tso) {
3198 *last = (StgBlockingQueueElement *)tso->link;
3202 barf("unblockThread (Exception): TSO not found");
3206 case BlockedOnWrite:
3207 #if defined(mingw32_HOST_OS)
3208 case BlockedOnDoProc:
3211 /* take TSO off blocked_queue */
3212 StgBlockingQueueElement *prev = NULL;
3213 for (t = (StgBlockingQueueElement *)blocked_queue_hd; t != END_BQ_QUEUE;
3214 prev = t, t = t->link) {
3215 if (t == (StgBlockingQueueElement *)tso) {
3217 blocked_queue_hd = (StgTSO *)t->link;
3218 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
3219 blocked_queue_tl = END_TSO_QUEUE;
3222 prev->link = t->link;
3223 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
3224 blocked_queue_tl = (StgTSO *)prev;
3227 #if defined(mingw32_HOST_OS)
/* (Cooperatively) signal that the worker thread should abort the request. */
3231 abandonWorkRequest(tso->block_info.async_result->reqID);
3236 barf("unblockThread (I/O): TSO not found");
3239 case BlockedOnDelay:
3241 /* take TSO off sleeping_queue */
3242 StgBlockingQueueElement *prev = NULL;
3243 for (t = (StgBlockingQueueElement *)sleeping_queue; t != END_BQ_QUEUE;
3244 prev = t, t = t->link) {
3245 if (t == (StgBlockingQueueElement *)tso) {
3247 sleeping_queue = (StgTSO *)t->link;
3249 prev->link = t->link;
3254 barf("unblockThread (delay): TSO not found");
3258 barf("unblockThread");
3262 tso->link = END_TSO_QUEUE;
3263 tso->why_blocked = NotBlocked;
3264 tso->block_info.closure = NULL;
3265 pushOnRunQueue(cap,tso);
3269 unblockThread(Capability *cap, StgTSO *tso)
3273 /* To avoid locking unnecessarily. */
3274 if (tso->why_blocked == NotBlocked) {
3278 switch (tso->why_blocked) {
3281 // Be careful: nothing to do here! We tell the scheduler that the thread
3282 // is runnable and we leave it to the stack-walking code to abort the
3283 // transaction while unwinding the stack. We should perhaps have a debugging
3284 // test to make sure that this really happens and that the 'zombie' transaction
3285 // does not get committed.
3289 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
3291 StgTSO *last_tso = END_TSO_QUEUE;
3292 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
3295 for (t = mvar->head; t != END_TSO_QUEUE;
3296 last = &t->link, last_tso = t, t = t->link) {
3299 if (mvar->tail == tso) {
3300 mvar->tail = last_tso;
3305 barf("unblockThread (MVAR): TSO not found");
3308 case BlockedOnBlackHole:
3310 last = &blackhole_queue;
3311 for (t = blackhole_queue; t != END_TSO_QUEUE;
3312 last = &t->link, t = t->link) {
3318 barf("unblockThread (BLACKHOLE): TSO not found");
3321 case BlockedOnException:
3323 StgTSO *target = tso->block_info.tso;
3325 ASSERT(get_itbl(target)->type == TSO);
3327 while (target->what_next == ThreadRelocated) {
3328 target = target->link;
3329 ASSERT(get_itbl(target)->type == TSO);
3332 ASSERT(target->blocked_exceptions != NULL);
3334 last = &target->blocked_exceptions;
3335 for (t = target->blocked_exceptions; t != END_TSO_QUEUE;
3336 last = &t->link, t = t->link) {
3337 ASSERT(get_itbl(t)->type == TSO);
3343 barf("unblockThread (Exception): TSO not found");
3346 #if !defined(THREADED_RTS)
3348 case BlockedOnWrite:
3349 #if defined(mingw32_HOST_OS)
3350 case BlockedOnDoProc:
3353 StgTSO *prev = NULL;
3354 for (t = blocked_queue_hd; t != END_TSO_QUEUE;
3355 prev = t, t = t->link) {
3358 blocked_queue_hd = t->link;
3359 if (blocked_queue_tl == t) {
3360 blocked_queue_tl = END_TSO_QUEUE;
3363 prev->link = t->link;
3364 if (blocked_queue_tl == t) {
3365 blocked_queue_tl = prev;
3368 #if defined(mingw32_HOST_OS)
/* (Cooperatively) signal that the worker thread should abort the request. */
3372 abandonWorkRequest(tso->block_info.async_result->reqID);
3377 barf("unblockThread (I/O): TSO not found");
3380 case BlockedOnDelay:
3382 StgTSO *prev = NULL;
3383 for (t = sleeping_queue; t != END_TSO_QUEUE;
3384 prev = t, t = t->link) {
3387 sleeping_queue = t->link;
3389 prev->link = t->link;
3394 barf("unblockThread (delay): TSO not found");
3399 barf("unblockThread");
3403 tso->link = END_TSO_QUEUE;
3404 tso->why_blocked = NotBlocked;
3405 tso->block_info.closure = NULL;
3406 appendToRunQueue(cap,tso);
3410 /* -----------------------------------------------------------------------------
3413 * Check the blackhole_queue for threads that can be woken up. We do
* this periodically: before every GC, and whenever the run queue is empty.
3417 * An elegant solution might be to just wake up all the blocked
3418 * threads with awakenBlockedQueue occasionally: they'll go back to
3419 * sleep again if the object is still a BLACKHOLE. Unfortunately this
3420 * doesn't give us a way to tell whether we've actually managed to
3421 * wake up any threads, so we would be busy-waiting.
3423 * -------------------------------------------------------------------------- */
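/* Illustrative call site (a sketch): the scheduler does something like
 *
 *     if (blackholes_need_checking) {
 *         blackholes_need_checking = rtsFalse;
 *         checkBlackHoles(cap);
 *     }
 *
 * before a GC and whenever the run queue is found empty.
 */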
3426 checkBlackHoles (Capability *cap)
3429 rtsBool any_woke_up = rtsFalse;
3432 // blackhole_queue is global:
3433 ASSERT_LOCK_HELD(&sched_mutex);
3435 IF_DEBUG(scheduler, sched_belch("checking threads blocked on black holes"));
3437 // ASSUMES: sched_mutex
3438 prev = &blackhole_queue;
3439 t = blackhole_queue;
3440 while (t != END_TSO_QUEUE) {
3441 ASSERT(t->why_blocked == BlockedOnBlackHole);
3442 type = get_itbl(t->block_info.closure)->type;
3443 if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
3444 IF_DEBUG(sanity,checkTSO(t));
3445 t = unblockOne(cap, t);
3446 // urk, the threads migrate to the current capability
3447 // here, but we'd like to keep them on the original one.
3449 any_woke_up = rtsTrue;
3459 /* -----------------------------------------------------------------------------
3462 * The following function implements the magic for raising an
3463 * asynchronous exception in an existing thread.
3465 * We first remove the thread from any queue on which it might be
3466 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
3468 * We strip the stack down to the innermost CATCH_FRAME, building
3469 * thunks in the heap for all the active computations, so they can
3470 * be restarted if necessary. When we reach a CATCH_FRAME, we build
3471 * an application of the handler to the exception, and push it on
3472 * the top of the stack.
3474 * How exactly do we save all the active computations? We create an
3475 * AP_STACK for every UpdateFrame on the stack. Entering one of these
3476 * AP_STACKs pushes everything from the corresponding update frame
3477 * upwards onto the stack. (Actually, it pushes everything up to the
3478 * next update frame plus a pointer to the next AP_STACK object.
3479 * Entering the next AP_STACK object pushes more onto the stack until we
3480 * reach the last AP_STACK object - at which point the stack should look
3481 * exactly as it did when we killed the TSO and we can continue
3482 * execution by entering the closure on top of the stack.
3484 * We can also kill a thread entirely - this happens if either (a) the
3485 * exception passed to raiseAsync is NULL, or (b) there's no
3486 * CATCH_FRAME on the stack. In either case, we strip the entire
3487 * stack and replace the thread with a zombie.
3489 * ToDo: in SMP mode, this function is only safe if either (a) we hold
3490 * all the Capabilities (eg. in GC), or (b) we own the Capability that
3491 * the TSO is currently blocked on or on the run queue of.
3493 * -------------------------------------------------------------------------- */
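/* A sketch of the stack transformation (illustrative): given
 *
 *    ... | UPDATE_FRAME u2 | frames2 | UPDATE_FRAME u1 | frames1 | <- Sp
 *
 * we build an AP_STACK ap1 capturing frames1, overwrite u1's updatee
 * with an indirection to ap1, then build ap2 capturing frames2 (which
 * now end in a pointer to ap1), overwrite u2's updatee, and so on,
 * until a CATCH_FRAME (enter the handler) or STOP_FRAME (kill the
 * thread) is reached.
 */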
3496 raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception)
3498 raiseAsync_(cap, tso, exception, rtsFalse);
3502 raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception,
3503 rtsBool stop_at_atomically)
3505 StgRetInfoTable *info;
3508 // Thread already dead?
3509 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
3514 sched_belch("raising exception in thread %ld.", (long)tso->id));
3516 // Remove it from any blocking queues
3517 unblockThread(cap,tso);
3521 // The stack freezing code assumes there's a closure pointer on
3522 // the top of the stack, so we have to arrange that this is the case...
3524 if (sp[0] == (W_)&stg_enter_info) {
3528 sp[0] = (W_)&stg_dummy_ret_closure;
3534 // 1. Let the top of the stack be the "current closure"
3536 // 2. Walk up the stack until we find either an UPDATE_FRAME or a
3539 // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the
3540 // current closure applied to the chunk of stack up to (but not
3541 // including) the update frame. This closure becomes the "current
3542 // closure". Go back to step 2.
3544 // 4. If it's a CATCH_FRAME, then leave the exception handler on
3545 // top of the stack applied to the exception.
3547 // 5. If it's a STOP_FRAME, then kill the thread.
// NB: if we pass an ATOMICALLY_FRAME then abort the associated transaction.
3556 info = get_ret_itbl((StgClosure *)frame);
3558 while (info->i.type != UPDATE_FRAME
3559 && (info->i.type != CATCH_FRAME || exception == NULL)
3560 && info->i.type != STOP_FRAME
3561 && (info->i.type != ATOMICALLY_FRAME || stop_at_atomically == rtsFalse))
3563 if (info->i.type == CATCH_RETRY_FRAME || info->i.type == ATOMICALLY_FRAME) {
3564 // IF we find an ATOMICALLY_FRAME then we abort the
3565 // current transaction and propagate the exception. In
3566 // this case (unlike ordinary exceptions) we do not care
3567 // whether the transaction is valid or not because its
3568 // possible validity cannot have caused the exception
3569 // and will not be visible after the abort.
3571 debugBelch("Found atomically block delivering async exception\n"));
3572 stmAbortTransaction(tso -> trec);
3573 tso -> trec = stmGetEnclosingTRec(tso -> trec);
3575 frame += stack_frame_sizeW((StgClosure *)frame);
3576 info = get_ret_itbl((StgClosure *)frame);
3579 switch (info->i.type) {
3581 case ATOMICALLY_FRAME:
3582 ASSERT(stop_at_atomically);
3583 ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
3584 stmCondemnTransaction(tso -> trec);
3588 // R1 is not a register: the return convention for IO in
3589 // this case puts the return value on the stack, so we
3590 // need to set up the stack to return to the atomically
3591 // frame properly...
3592 tso->sp = frame - 2;
3593 tso->sp[1] = (StgWord) &stg_NO_FINALIZER_closure; // why not?
3594 tso->sp[0] = (StgWord) &stg_ut_1_0_unreg_info;
3596 tso->what_next = ThreadRunGHC;
3600 // If we find a CATCH_FRAME, and we've got an exception to raise,
3601 // then build the THUNK raise(exception), and leave it on
3602 // top of the CATCH_FRAME ready to enter.
3606 StgCatchFrame *cf = (StgCatchFrame *)frame;
3610 // we've got an exception to raise, so let's pass it to the
3611 // handler in this frame.
3613 raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+MIN_UPD_SIZE);
3614 TICK_ALLOC_SE_THK(1,0);
3615 SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
3616 raise->payload[0] = exception;
3618 // throw away the stack from Sp up to the CATCH_FRAME.
/* Ensure that async exceptions are blocked now, so we don't get
 * a surprise exception before we get around to executing the
 * handler. */
3626 if (tso->blocked_exceptions == NULL) {
3627 tso->blocked_exceptions = END_TSO_QUEUE;
3630 /* Put the newly-built THUNK on top of the stack, ready to execute
* when the thread restarts. */
3634 sp[-1] = (W_)&stg_enter_info;
3636 tso->what_next = ThreadRunGHC;
3637 IF_DEBUG(sanity, checkTSO(tso));
3646 // First build an AP_STACK consisting of the stack chunk above the
// current update frame, with the top word on the stack as the fun field.
3650 words = frame - sp - 1;
3651 ap = (StgAP_STACK *)allocateLocal(cap,AP_STACK_sizeW(words));
3654 ap->fun = (StgClosure *)sp[0];
3656 for(i=0; i < (nat)words; ++i) {
3657 ap->payload[i] = (StgClosure *)*sp++;
3660 SET_HDR(ap,&stg_AP_STACK_info,
3661 ((StgClosure *)frame)->header.prof.ccs /* ToDo */);
3662 TICK_ALLOC_UP_THK(words+1,0);
3665 debugBelch("sched: Updating ");
3666 printPtr((P_)((StgUpdateFrame *)frame)->updatee);
3667 debugBelch(" with ");
3668 printObj((StgClosure *)ap);
3671 // Replace the updatee with an indirection - happily
3672 // this will also wake up any threads currently
3673 // waiting on the result.
3675 // Warning: if we're in a loop, more than one update frame on
3676 // the stack may point to the same object. Be careful not to
3677 // overwrite an IND_OLDGEN in this case, because we'll screw
3678 // up the mutable lists. To be on the safe side, don't
3679 // overwrite any kind of indirection at all. See also
// threadSqueezeStack in GC.c, where we have to make a similar check.
3683 if (!closure_IND(((StgUpdateFrame *)frame)->updatee)) {
3684 // revert the black hole
3685 UPD_IND_NOLOCK(((StgUpdateFrame *)frame)->updatee,
3688 sp += sizeofW(StgUpdateFrame) - 1;
3689 sp[0] = (W_)ap; // push onto stack
3694 // We've stripped the entire stack, the thread is now dead.
3695 sp += sizeofW(StgStopFrame);
3696 tso->what_next = ThreadKilled;
3707 /* -----------------------------------------------------------------------------
3710 This is used for interruption (^C) and forking, and corresponds to
raising an exception but without letting the thread catch the exception.
3713 -------------------------------------------------------------------------- */
3716 deleteThread (Capability *cap, StgTSO *tso)
3718 if (tso->why_blocked != BlockedOnCCall &&
3719 tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
3720 raiseAsync(cap,tso,NULL);
3724 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
3726 deleteThreadImmediately(Capability *cap, StgTSO *tso)
3727 { // for forkProcess only:
3728 // delete thread without giving it a chance to catch the KillThread exception
3730 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
3734 if (tso->why_blocked != BlockedOnCCall &&
3735 tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
3736 unblockThread(cap,tso);
3739 tso->what_next = ThreadKilled;
3743 /* -----------------------------------------------------------------------------
3744 raiseExceptionHelper
This function is called by the raise# primitive, just so that we can
move some of the tricky bits of raising an exception from C-- into
C.  Who knows, it might be a useful reusable thing here too.
3749 -------------------------------------------------------------------------- */
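/* An illustrative caller (a sketch; the real caller is the raise#
 * primop in C--, and the CATCH_FRAME/STOP_FRAME returns are assumed
 * here).  The return value identifies the frame found, and tso->sp is
 * left pointing at it:
 */
#if 0
switch (raiseExceptionHelper(reg, tso, exception)) {
case CATCH_FRAME:      /* enter the handler with the exception */  break;
case CATCH_STM_FRAME:  /* run the catchSTM handler */              break;
case ATOMICALLY_FRAME: /* abort the transaction and re-raise */    break;
case STOP_FRAME:       /* uncaught: the thread dies */             break;
}
#endif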
3752 raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
3754 Capability *cap = regTableToCapability(reg);
3755 StgThunk *raise_closure = NULL;
3757 StgRetInfoTable *info;
// This closure represents the expression 'raise# E' where E
// is the exception raised.  It is used to overwrite all the
// thunks which are currently under evaluation.
3765 // LDV profiling: stg_raise_info has THUNK as its closure
3766 // type. Since a THUNK takes at least MIN_UPD_SIZE words in its
// payload, MIN_UPD_SIZE is more appropriate than 1.  It seems that
3768 // 1 does not cause any problem unless profiling is performed.
3769 // However, when LDV profiling goes on, we need to linearly scan
3770 // small object pool, where raise_closure is stored, so we should
3771 // use MIN_UPD_SIZE.
3773 // raise_closure = (StgClosure *)RET_STGCALL1(P_,allocate,
3774 // sizeofW(StgClosure)+1);
3778 // Walk up the stack, looking for the catch frame. On the way,
3779 // we update any closures pointed to from update frames with the
3780 // raise closure that we just built.
3784 info = get_ret_itbl((StgClosure *)p);
3785 next = p + stack_frame_sizeW((StgClosure *)p);
3786 switch (info->i.type) {
3789 // Only create raise_closure if we need to.
3790 if (raise_closure == NULL) {
3792 (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+MIN_UPD_SIZE);
3793 SET_HDR(raise_closure, &stg_raise_info, CCCS);
3794 raise_closure->payload[0] = exception;
3796 UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
3800 case ATOMICALLY_FRAME:
3801 IF_DEBUG(stm, debugBelch("Found ATOMICALLY_FRAME at %p\n", p));
3803 return ATOMICALLY_FRAME;
3809 case CATCH_STM_FRAME:
3810 IF_DEBUG(stm, debugBelch("Found CATCH_STM_FRAME at %p\n", p));
3812 return CATCH_STM_FRAME;
3818 case CATCH_RETRY_FRAME:
3827 /* -----------------------------------------------------------------------------
3828 findRetryFrameHelper
3830 This function is called by the retry# primitive. It traverses the stack
3831 leaving tso->sp referring to the frame which should handle the retry.
3833 This should either be a CATCH_RETRY_FRAME (if the retry# is within an orElse#)
or should be an ATOMICALLY_FRAME (if the retry# reaches the top level).
3836 We skip CATCH_STM_FRAMEs because retries are not considered to be exceptions,
3837 despite the similar implementation.
3839 We should not expect to see CATCH_FRAME or STOP_FRAME because those should
3840 not be created within memory transactions.
3841 -------------------------------------------------------------------------- */
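/* An illustrative caller (a sketch; the real caller is the retry#
 * primop in C--):
 */
#if 0
switch (findRetryFrameHelper(tso)) {
case CATCH_RETRY_FRAME: /* retry inside orElse#: try the alternative */ break;
case ATOMICALLY_FRAME:  /* top-level retry: block on the TVars read  */ break;
}
#endif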
3844 findRetryFrameHelper (StgTSO *tso)
3847 StgRetInfoTable *info;
3851 info = get_ret_itbl((StgClosure *)p);
3852 next = p + stack_frame_sizeW((StgClosure *)p);
3853 switch (info->i.type) {
3855 case ATOMICALLY_FRAME:
IF_DEBUG(stm, debugBelch("Found ATOMICALLY_FRAME at %p during retry\n", p));
3858 return ATOMICALLY_FRAME;
3860 case CATCH_RETRY_FRAME:
IF_DEBUG(stm, debugBelch("Found CATCH_RETRY_FRAME at %p during retry\n", p));
3863 return CATCH_RETRY_FRAME;
3865 case CATCH_STM_FRAME:
3867 ASSERT(info->i.type != CATCH_FRAME);
3868 ASSERT(info->i.type != STOP_FRAME);
3875 /* -----------------------------------------------------------------------------
3876 resurrectThreads is called after garbage collection on the list of
3877 threads found to be garbage. Each of these threads will be woken
3878 up and sent a signal: BlockedOnDeadMVar if the thread was blocked
on an MVar, or NonTermination if the thread was blocked on a Black Hole.
3882 Locks: assumes we hold *all* the capabilities.
3883 -------------------------------------------------------------------------- */
3886 resurrectThreads (StgTSO *threads)
3891 for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
3892 next = tso->global_link;
3893 tso->global_link = all_threads;
3895 IF_DEBUG(scheduler, sched_belch("resurrecting thread %d", tso->id));
3897 // Wake up the thread on the Capability it was last on for a
3898 // bound thread, or last_free_capability otherwise.
3900 cap = tso->bound->cap;
3902 cap = last_free_capability;
3905 switch (tso->why_blocked) {
3907 case BlockedOnException:
3908 /* Called by GC - sched_mutex lock is currently held. */
3909 raiseAsync(cap, tso,(StgClosure *)BlockedOnDeadMVar_closure);
3911 case BlockedOnBlackHole:
3912 raiseAsync(cap, tso,(StgClosure *)NonTermination_closure);
3915 raiseAsync(cap, tso,(StgClosure *)BlockedIndefinitely_closure);
3918 /* This might happen if the thread was blocked on a black hole
3919 * belonging to a thread that we've just woken up (raiseAsync
* can wake up threads, remember...). */
3924 barf("resurrectThreads: thread blocked in a strange way");
3929 /* ----------------------------------------------------------------------------
3930 * Debugging: why is a thread blocked
3931 * [Also provides useful information when debugging threaded programs
3932 * at the Haskell source code level, so enable outside of DEBUG. --sof 7/02]
3933 ------------------------------------------------------------------------- */
3937 printThreadBlockage(StgTSO *tso)
3939 switch (tso->why_blocked) {
3941 debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd));
3943 case BlockedOnWrite:
3944 debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd));
3946 #if defined(mingw32_HOST_OS)
3947 case BlockedOnDoProc:
3948 debugBelch("is blocked on proc (request: %ld)", tso->block_info.async_result->reqID);
3951 case BlockedOnDelay:
3952 debugBelch("is blocked until %ld", (long)(tso->block_info.target));
3955 debugBelch("is blocked on an MVar @ %p", tso->block_info.closure);
3957 case BlockedOnException:
3958 debugBelch("is blocked on delivering an exception to thread %d",
3959 tso->block_info.tso->id);
3961 case BlockedOnBlackHole:
3962 debugBelch("is blocked on a black hole");
3965 debugBelch("is not blocked");
3967 #if defined(PARALLEL_HASKELL)
3969 debugBelch("is blocked on global address; local FM_BQ is %p (%s)",
3970 tso->block_info.closure, info_type(tso->block_info.closure));
3972 case BlockedOnGA_NoSend:
3973 debugBelch("is blocked on global address (no send); local FM_BQ is %p (%s)",
3974 tso->block_info.closure, info_type(tso->block_info.closure));
3977 case BlockedOnCCall:
3978 debugBelch("is blocked on an external call");
3980 case BlockedOnCCall_NoUnblockExc:
3981 debugBelch("is blocked on an external call (exceptions were already blocked)");
3984 debugBelch("is blocked on an STM operation");
barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%p)",
     tso->why_blocked, tso->id, tso);
3993 printThreadStatus(StgTSO *tso)
3995 switch (tso->what_next) {
3997 debugBelch("has been killed");
3999 case ThreadComplete:
4000 debugBelch("has completed");
4003 printThreadBlockage(tso);
4008 printAllThreads(void)
4013 char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN];
4014 ullong_format_string(TIME_ON_PROC(CurrentProc),
4015 time_string, rtsFalse/*no commas!*/);
4017 debugBelch("all threads at [%s]:\n", time_string);
4018 # elif defined(PARALLEL_HASKELL)
4019 char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN];
4020 ullong_format_string(CURRENT_TIME,
4021 time_string, rtsFalse/*no commas!*/);
4023 debugBelch("all threads at [%s]:\n", time_string);
4025 debugBelch("all threads:\n");
4028 for (t = all_threads; t != END_TSO_QUEUE; ) {
4029 debugBelch("\tthread %4d @ %p ", t->id, (void *)t);
4031 void *label = lookupThreadLabel(t->id);
4032 if (label) debugBelch("[\"%s\"] ",(char *)label);
4034 if (t->what_next == ThreadRelocated) {
4035 debugBelch("has been relocated...\n");
4038 printThreadStatus(t);
4047 printThreadQueue(StgTSO *t)
4050 for (; t != END_TSO_QUEUE; t = t->link) {
4051 debugBelch("\tthread %d @ %p ", t->id, (void *)t);
4052 if (t->what_next == ThreadRelocated) {
4053 debugBelch("has been relocated...\n");
4055 printThreadStatus(t);
4060 debugBelch("%d threads on queue\n", i);
4064 Print a whole blocking queue attached to node (debugging only).
4066 # if defined(PARALLEL_HASKELL)
4068 print_bq (StgClosure *node)
4070 StgBlockingQueueElement *bqe;
4074 debugBelch("## BQ of closure %p (%s): ",
4075 node, info_type(node));
4077 /* should cover all closures that may have a blocking queue */
4078 ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
4079 get_itbl(node)->type == FETCH_ME_BQ ||
4080 get_itbl(node)->type == RBH ||
4081 get_itbl(node)->type == MVAR);
4083 ASSERT(node!=(StgClosure*)NULL); // sanity check
4085 print_bqe(((StgBlockingQueue*)node)->blocking_queue);
4089 Print a whole blocking queue starting with the element bqe.
4092 print_bqe (StgBlockingQueueElement *bqe)
4097 NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure;
4099 for (end = (bqe==END_BQ_QUEUE);
4100 !end; // iterate until bqe points to a CONSTR
4101 end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE),
4102 bqe = end ? END_BQ_QUEUE : bqe->link) {
4103 ASSERT(bqe != END_BQ_QUEUE); // sanity check
4104 ASSERT(bqe != (StgBlockingQueueElement *)NULL); // sanity check
4105 /* types of closures that may appear in a blocking queue */
4106 ASSERT(get_itbl(bqe)->type == TSO ||
4107 get_itbl(bqe)->type == BLOCKED_FETCH ||
4108 get_itbl(bqe)->type == CONSTR);
4109 /* only BQs of an RBH end with an RBH_Save closure */
4110 //ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);
4112 switch (get_itbl(bqe)->type) {
debugBelch(" TSO %u (%p),",
           ((StgTSO *)bqe)->id, ((StgTSO *)bqe));
4118 debugBelch(" BF (node=%p, ga=((%x, %d, %x)),",
4119 ((StgBlockedFetch *)bqe)->node,
4120 ((StgBlockedFetch *)bqe)->ga.payload.gc.gtid,
4121 ((StgBlockedFetch *)bqe)->ga.payload.gc.slot,
4122 ((StgBlockedFetch *)bqe)->ga.weight);
4125 debugBelch(" %s (IP %p),",
4126 (get_itbl(bqe) == &stg_RBH_Save_0_info ? "RBH_Save_0" :
4127 get_itbl(bqe) == &stg_RBH_Save_1_info ? "RBH_Save_1" :
4128 get_itbl(bqe) == &stg_RBH_Save_2_info ? "RBH_Save_2" :
4129 "RBH_Save_?"), get_itbl(bqe));
4132 barf("Unexpected closure type %s in blocking queue", // of %p (%s)",
4133 info_type((StgClosure *)bqe)); // , node, info_type(node));
4139 # elif defined(GRAN)
4141 print_bq (StgClosure *node)
4143 StgBlockingQueueElement *bqe;
4144 PEs node_loc, tso_loc;
4147 /* should cover all closures that may have a blocking queue */
4148 ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
4149 get_itbl(node)->type == FETCH_ME_BQ ||
4150 get_itbl(node)->type == RBH);
4152 ASSERT(node!=(StgClosure*)NULL); // sanity check
4153 node_loc = where_is(node);
4155 debugBelch("## BQ of closure %p (%s) on [PE %d]: ",
4156 node, info_type(node), node_loc);
4159 NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure;
4161 for (bqe = ((StgBlockingQueue*)node)->blocking_queue, end = (bqe==END_BQ_QUEUE);
4162 !end; // iterate until bqe points to a CONSTR
4163 end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE), bqe = end ? END_BQ_QUEUE : bqe->link) {
4164 ASSERT(bqe != END_BQ_QUEUE); // sanity check
4165 ASSERT(bqe != (StgBlockingQueueElement *)NULL); // sanity check
4166 /* types of closures that may appear in a blocking queue */
4167 ASSERT(get_itbl(bqe)->type == TSO ||
4168 get_itbl(bqe)->type == CONSTR);
4169 /* only BQs of an RBH end with an RBH_Save closure */
4170 ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);
4172 tso_loc = where_is((StgClosure *)bqe);
4173 switch (get_itbl(bqe)->type) {
4175 debugBelch(" TSO %d (%p) on [PE %d],",
4176 ((StgTSO *)bqe)->id, (StgTSO *)bqe, tso_loc);
4179 debugBelch(" %s (IP %p),",
4180 (get_itbl(bqe) == &stg_RBH_Save_0_info ? "RBH_Save_0" :
4181 get_itbl(bqe) == &stg_RBH_Save_1_info ? "RBH_Save_1" :
4182 get_itbl(bqe) == &stg_RBH_Save_2_info ? "RBH_Save_2" :
4183 "RBH_Save_?"), get_itbl(bqe));
4186 barf("Unexpected closure type %s in blocking queue of %p (%s)",
4187 info_type((StgClosure *)bqe), node, info_type(node));
4195 #if defined(PARALLEL_HASKELL)
4202 for (i=0, tso=run_queue_hd;
4203 tso != END_TSO_QUEUE;
4204 i++, tso=tso->link) {
4213 sched_belch(char *s, ...)
4218 debugBelch("sched (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId());
4219 #elif defined(PARALLEL_HASKELL)
4222 debugBelch("sched: ");