1 /* ---------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.74 2000/08/03 11:28:35 simonmar Exp $
4 * (c) The GHC Team, 1998-2000
8 * The main scheduling code in GranSim is quite different from that in std
9 * (concurrent) Haskell: while concurrent Haskell just iterates over the
10 * threads in the runnable queue, GranSim is event driven, i.e. it iterates
11 * over the events in the global event queue. -- HWL
12 * --------------------------------------------------------------------------*/
14 //@node Main scheduling code, , ,
15 //@section Main scheduling code
17 /* Version with scheduler monitor support for SMPs.
19 This design provides a high-level API to create and schedule threads etc.
20 as documented in the SMP design document.
22 It uses a monitor design controlled by a single mutex to exercise control
23 over accesses to shared data structures, and builds on the Posix threads library.
26 The majority of state is shared. In order to keep essential per-task state,
27 there is a Capability structure, which contains all the information
28 needed to run a thread: its STG registers, a pointer to its TSO, a
29 nursery etc. During STG execution, a pointer to the capability is
30 kept in a register (BaseReg).
32 In a non-SMP build, there is one global capability, namely MainRegTable.
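
   For orientation, here is a sketch (an approximation for this comment
   only, not the real declaration) of the parts of a Capability that the
   code below relies on; the actual structure also holds the STG register
   table and the nursery:

       typedef struct Capability_ {
           StgTSO             *rCurrentTSO;  // the TSO being run
           struct Capability_ *link;         // free-list link (SMP only)
           // ... STG registers, nursery, spark pool (rSparks), ...
       } Capability;

   In an SMP build, a task pops a capability off free_capabilities before
   calling StgRun(), and pushes it back when the thread returns to the
   scheduler.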
39 //* Variables and Data structures::
40 //* Main scheduling loop::
41 //* Suspend and Resume::
43 //* Garbage Collection Routines::
44 //* Blocking Queue Routines::
45 //* Exception Handling Routines::
46 //* Debugging Routines::
50 //@node Includes, Variables and Data structures, Main scheduling code, Main scheduling code
51 //@subsection Includes
59 #include "StgStartup.h"
63 #include "StgMiscClosures.h"
65 #include "Evaluator.h"
66 #include "Exception.h"
74 #if defined(GRAN) || defined(PAR)
75 # include "GranSimRts.h"
77 # include "ParallelRts.h"
78 # include "Parallel.h"
79 # include "ParallelDebug.h"
87 //@node Variables and Data structures, Prototypes, Includes, Main scheduling code
88 //@subsection Variables and Data structures
92 * These are the threads which clients have requested that we run.
94 * In an SMP build, we might have several concurrent clients all
95 * waiting for results, and each one will wait on a condition variable
96 * until the result is available.
98 * In non-SMP, clients are strictly nested: the first client calls
99 * into the RTS, which might call out again to C with a _ccall_GC, and
100 * eventually re-enter the RTS.
102 * Information about the main threads is kept in a linked list:
104 //@cindex StgMainThread
105 typedef struct StgMainThread_ {
107 SchedulerStatus stat;
110 pthread_cond_t wakeup;
112 struct StgMainThread_ *link;
115 /* Main thread queue.
116 * Locks required: sched_mutex.
118 static StgMainThread *main_threads;
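/* A sketch of how a client blocks on this list in the SMP build (this is
 * essentially what waitThread() below does): link a new StgMainThread
 * onto main_threads and sleep on its private condition variable until
 * the scheduler fills in m->stat:
 *
 *     ACQUIRE_LOCK(&sched_mutex);
 *     m->link = main_threads;
 *     main_threads = m;
 *     do {
 *         pthread_cond_wait(&m->wakeup, &sched_mutex);
 *     } while (m->stat == NoStatus);
 *     RELEASE_LOCK(&sched_mutex);
 */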
121 * Locks required: sched_mutex.
125 StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */
126 /* rtsTime TimeOfNextEvent, EndOfTimeSlice; now in GranSim.c */
129 In GranSim we have a runnable and a blocked queue for each processor.
130 In order to minimise code changes, new arrays run_queue_hds/tls
131 are created. run_queue_hd is then a shortcut (macro) for
132 run_queue_hds[CurrentProc] (see GranSim.h).
135 StgTSO *run_queue_hds[MAX_PROC], *run_queue_tls[MAX_PROC];
136 StgTSO *blocked_queue_hds[MAX_PROC], *blocked_queue_tls[MAX_PROC];
137 StgTSO *ccalling_threadss[MAX_PROC];
138 /* We use the same global list of threads (all_threads) in GranSim as in
139 the std RTS (i.e. we are cheating). However, we don't use this list in
140 the GranSim-specific code at the moment (so we are only potentially cheating).
145 StgTSO *run_queue_hd, *run_queue_tl;
146 StgTSO *blocked_queue_hd, *blocked_queue_tl;
150 /* Linked list of all threads.
151 * Used for detecting garbage collected threads.
155 /* Threads suspended in _ccall_GC.
157 static StgTSO *suspended_ccalling_threads;
159 static void GetRoots(void);
160 static StgTSO *threadStackOverflow(StgTSO *tso);
162 /* KH: The following two flags are shared memory locations. There is no need
163 to lock them, since they are only unset at the end of a scheduler operation.
167 /* flag set by signal handler to precipitate a context switch */
168 //@cindex context_switch
171 /* if this flag is set as well, give up execution */
172 //@cindex interrupted
175 /* Next thread ID to allocate.
176 * Locks required: sched_mutex
178 //@cindex next_thread_id
179 StgThreadID next_thread_id = 1;
182 * Pointers to the state of the current thread.
183 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
184 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
187 /* The smallest stack size that makes any sense is:
188 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
189 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
190 * + 1 (the realworld token for an IO thread)
191 * + 1 (the closure to enter)
193 * A thread with this stack will bomb immediately with a stack
194 * overflow, which will increase its stack size.
197 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
199 /* Free capability list.
200 * Locks required: sched_mutex.
203 //@cindex free_capabilities
204 //@cindex n_free_capabilities
205 Capability *free_capabilities; /* Available capabilities for running threads */
206 nat n_free_capabilities; /* total number of available capabilities */
208 //@cindex MainRegTable
209 Capability MainRegTable; /* for non-SMP, we have one global capability */
218 /* All our current task ids, saved in case we need to kill them later.
225 void addToBlockedQueue ( StgTSO *tso );
227 static void schedule ( void );
228 void interruptStgRts ( void );
230 static StgTSO * createThread_ ( nat size, rtsBool have_lock, StgInt pri );
232 static StgTSO * createThread_ ( nat size, rtsBool have_lock );
235 static void detectBlackHoles ( void );
238 static void sched_belch(char *s, ...);
242 //@cindex sched_mutex
244 //@cindex thread_ready_cond
245 //@cindex gc_pending_cond
246 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
247 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
248 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
249 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
256 rtsTime TimeOfLastYield;
260 char *whatNext_strs[] = {
268 char *threadReturnCode_strs[] = {
269 "HeapOverflow", /* might also be StackOverflow */
278 * The thread state for the main thread.
279 // ToDo: check whether this is still needed
283 //@node Main scheduling loop, Suspend and Resume, Prototypes, Main scheduling code
284 //@subsection Main scheduling loop
286 /* ---------------------------------------------------------------------------
287 Main scheduling loop.
289 We use round-robin scheduling, each thread returning to the
290 scheduler loop when one of these conditions is detected:
293 * timer expires (thread yields)
298 Locking notes: we acquire the scheduler lock once at the beginning
299 of the scheduler loop, and release it when
301 * running a thread, or
302 * waiting for work, or
303 * waiting for a GC to complete.
306 In a GranSim setup this loop iterates over the global event queue.
307 The event queue determines what the scheduler does next. Therefore,
308 this setup is more complicated than either the concurrent or the
309 parallel (GUM) setup.
312 GUM iterates over incoming messages.
313 It starts with nothing to do (thus CurrentTSO == END_TSO_QUEUE),
314 and sends out a fish whenever it has nothing to do; in-between
315 doing the actual reductions (shared code below) it processes the
316 incoming messages and deals with delayed operations
317 (see PendingFetches).
318 This is not the ugliest code you could imagine, but it's bloody close.
320 ------------------------------------------------------------------------ */
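/* The SMP lock discipline, in outline (a sketch of the loop below, not
 * additional logic):
 *
 *     ACQUIRE_LOCK(&sched_mutex);
 *     for (;;) {
 *         // wake finished main threads; check signals, deadlock, GC
 *         t = POP_RUN_QUEUE();            // grab work and a capability
 *         RELEASE_LOCK(&sched_mutex);     // run Haskell code unlocked
 *         ret = StgRun(..., cap);
 *         ACQUIRE_LOCK(&sched_mutex);     // re-enter the monitor
 *         // dispatch on ret: HeapOverflow, StackOverflow, yield, block, finish
 *     }
 */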
327 StgThreadReturnCode ret;
336 rtsBool was_interrupted = rtsFalse;
338 ACQUIRE_LOCK(&sched_mutex);
342 /* set up first event to get things going */
343 /* ToDo: assign costs for system setup and init MainTSO ! */
344 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
346 CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
349 fprintf(stderr, "GRAN: Init CurrentTSO (in schedule) = %p\n", CurrentTSO);
350 G_TSO(CurrentTSO, 5));
352 if (RtsFlags.GranFlags.Light) {
353 /* Save current time; GranSim Light only */
354 CurrentTSO->gran.clock = CurrentTime[CurrentProc];
357 event = get_next_event();
359 while (event!=(rtsEvent*)NULL) {
360 /* Choose the processor with the next event */
361 CurrentProc = event->proc;
362 CurrentTSO = event->tso;
366 while (!GlobalStopPending) { /* GlobalStopPending set in par_exit */
374 IF_DEBUG(scheduler, printAllThreads());
376 /* If we're interrupted (the user pressed ^C, or some other
377 * termination condition occurred), kill all the currently running threads.
381 IF_DEBUG(scheduler, sched_belch("interrupted"));
382 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
385 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
388 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
389 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
390 interrupted = rtsFalse;
391 was_interrupted = rtsTrue;
394 /* Go through the list of main threads and wake up any
395 * clients whose computations have finished. ToDo: this
396 * should be done more efficiently without a linear scan
397 * of the main threads list, somehow...
401 StgMainThread *m, **prev;
402 prev = &main_threads;
403 for (m = main_threads; m != NULL; m = m->link) {
404 switch (m->tso->what_next) {
407 *(m->ret) = (StgClosure *)m->tso->sp[0];
411 pthread_cond_broadcast(&m->wakeup);
415 if (was_interrupted) {
416 m->stat = Interrupted;
420 pthread_cond_broadcast(&m->wakeup);
430 /* in GUM do this only on the Main PE */
433 /* If our main thread has finished or been killed, return.
436 StgMainThread *m = main_threads;
437 if (m->tso->what_next == ThreadComplete
438 || m->tso->what_next == ThreadKilled) {
439 main_threads = main_threads->link;
440 if (m->tso->what_next == ThreadComplete) {
441 /* we finished successfully, fill in the return value */
442 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
446 if (was_interrupted) {
447 m->stat = Interrupted;
457 /* Top up the run queue from our spark pool. We try to make the
458 * number of threads in the run queue equal to the number of
463 nat n = n_free_capabilities;
464 StgTSO *tso = run_queue_hd;
466 /* Count the run queue */
467 while (n > 0 && tso != END_TSO_QUEUE) {
476 break; /* no more sparks in the pool */
478 /* I'd prefer this to be done in activateSpark -- HWL */
479 /* tricky - it needs to hold the scheduler lock and
480 * not try to re-acquire it -- SDM */
482 tso = createThread_(RtsFlags.GcFlags.initialStkSize, rtsTrue);
483 pushClosure(tso,spark);
484 PUSH_ON_RUN_QUEUE(tso);
486 advisory_thread_count++;
490 sched_belch("turning spark of closure %p into a thread",
491 (StgClosure *)spark));
494 /* We need to wake up the other tasks if we just created some
497 if (n_free_capabilities - n > 1) {
498 pthread_cond_signal(&thread_ready_cond);
503 /* Check whether any waiting threads need to be woken up. If the
504 * run queue is empty, and there are no other tasks running, we
505 * can wait indefinitely for something to happen.
506 * ToDo: what if another client comes along & requests another main thread?
509 if (blocked_queue_hd != END_TSO_QUEUE) {
511 (run_queue_hd == END_TSO_QUEUE)
513 && (n_free_capabilities == RtsFlags.ParFlags.nNodes)
518 /* check for signals each time around the scheduler */
519 #ifndef mingw32_TARGET_OS
520 if (signals_pending()) {
521 start_signal_handlers();
526 * Detect deadlock: when we have no threads to run, no threads are
527 * waiting on I/O or sleeping, and all the other tasks are waiting for
528 * work, we must have a deadlock of some description.
530 * We first try to find threads blocked on themselves (ie. black
531 * holes), and generate NonTermination exceptions where necessary.
533 * If no threads are black holed, we have a deadlock situation, so
534 * inform all the main threads.
537 if (blocked_queue_hd == END_TSO_QUEUE
538 && run_queue_hd == END_TSO_QUEUE
539 && (n_free_capabilities == RtsFlags.ParFlags.nNodes))
541 IF_DEBUG(scheduler, sched_belch("deadlocked, checking for black holes..."));
543 if (run_queue_hd == END_TSO_QUEUE) {
545 for (m = main_threads; m != NULL; m = m->link) {
548 pthread_cond_broadcast(&m->wakeup);
554 if (blocked_queue_hd == END_TSO_QUEUE
555 && run_queue_hd == END_TSO_QUEUE)
557 IF_DEBUG(scheduler, sched_belch("deadlocked, checking for black holes..."));
559 if (run_queue_hd == END_TSO_QUEUE) {
560 StgMainThread *m = main_threads;
563 main_threads = m->link;
570 /* If there's a GC pending, don't do anything until it has
574 IF_DEBUG(scheduler,sched_belch("waiting for GC"));
575 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
578 /* block until we've got a thread on the run queue and a free
581 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
582 IF_DEBUG(scheduler, sched_belch("waiting for work"));
583 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
584 IF_DEBUG(scheduler, sched_belch("work now available"));
590 if (RtsFlags.GranFlags.Light)
591 GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc
593 /* adjust time based on time-stamp */
594 if (event->time > CurrentTime[CurrentProc] &&
595 event->evttype != ContinueThread)
596 CurrentTime[CurrentProc] = event->time;
598 /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */
599 if (!RtsFlags.GranFlags.Light)
602 IF_DEBUG(gran, fprintf(stderr, "GRAN: switch by event-type\n"))
604 /* main event dispatcher in GranSim */
605 switch (event->evttype) {
606 /* Should just be continuing execution */
608 IF_DEBUG(gran, fprintf(stderr, "GRAN: doing ContinueThread\n"));
609 /* ToDo: check assertion
610 ASSERT(run_queue_hd != (StgTSO*)NULL &&
611 run_queue_hd != END_TSO_QUEUE);
613 /* Ignore ContinueThreads for fetching threads (if synchronous communication) */
614 if (!RtsFlags.GranFlags.DoAsyncFetch &&
615 procStatus[CurrentProc]==Fetching) {
616 belch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]",
617 CurrentTSO->id, CurrentTSO, CurrentProc);
620 /* Ignore ContinueThreads for completed threads */
621 if (CurrentTSO->what_next == ThreadComplete) {
622 belch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)",
623 CurrentTSO->id, CurrentTSO, CurrentProc);
626 /* Ignore ContinueThreads for threads that are being migrated */
627 if (PROCS(CurrentTSO)==Nowhere) {
628 belch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)",
629 CurrentTSO->id, CurrentTSO, CurrentProc);
632 /* The thread should be at the beginning of the run queue */
633 if (CurrentTSO!=run_queue_hds[CurrentProc]) {
634 belch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread",
635 CurrentTSO->id, CurrentTSO, CurrentProc);
636 break; // run the thread anyway
639 new_event(proc, proc, CurrentTime[proc],
641 (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
643 */ /* Catches superfluous CONTINUEs -- should be unnecessary */
644 break; // now actually run the thread; DaH Qu'vam yImuHbej
647 do_the_fetchnode(event);
648 goto next_thread; /* handle next event in event queue */
651 do_the_globalblock(event);
652 goto next_thread; /* handle next event in event queue */
655 do_the_fetchreply(event);
656 goto next_thread; /* handle next event in event queue */
658 case UnblockThread: /* Move from the blocked queue to the tail of the run queue */
659 do_the_unblock(event);
660 goto next_thread; /* handle next event in event queue */
662 case ResumeThread: /* Move from the blocked queue to the tail of */
663 /* the runnable queue (i.e. Qu' SImqa'lu') */
664 event->tso->gran.blocktime +=
665 CurrentTime[CurrentProc] - event->tso->gran.blockedat;
666 do_the_startthread(event);
667 goto next_thread; /* handle next event in event queue */
670 do_the_startthread(event);
671 goto next_thread; /* handle next event in event queue */
674 do_the_movethread(event);
675 goto next_thread; /* handle next event in event queue */
678 do_the_movespark(event);
679 goto next_thread; /* handle next event in event queue */
682 do_the_findwork(event);
683 goto next_thread; /* handle next event in event queue */
686 barf("Illegal event type %u\n", event->evttype);
689 /* This point was scheduler_loop in the old RTS */
691 IF_DEBUG(gran, belch("GRAN: after main switch"));
693 TimeOfLastEvent = CurrentTime[CurrentProc];
694 TimeOfNextEvent = get_time_of_next_event();
695 IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK
696 // CurrentTSO = ThreadQueueHd;
698 IF_DEBUG(gran, belch("GRAN: time of next event is: %ld",
701 if (RtsFlags.GranFlags.Light)
702 GranSimLight_leave_system(event, &ActiveTSO);
704 EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice;
707 belch("GRAN: end of time-slice is %#lx", EndOfTimeSlice));
709 /* in a GranSim setup the TSO stays on the run queue */
711 /* Take a thread from the run queue. */
712 t = POP_RUN_QUEUE(); // take_off_run_queue(t);
715 fprintf(stderr, "GRAN: About to run current thread, which is\n");
718 context_switch = 0; // turned on via GranYield, checking events and time slice
721 DumpGranEvent(GR_SCHEDULE, t));
723 procStatus[CurrentProc] = Busy;
727 if (PendingFetches != END_BF_QUEUE) {
731 /* ToDo: perhaps merge with spark activation above */
732 /* check whether we have local work and send requests if we have none */
733 if (run_queue_hd == END_TSO_QUEUE) { /* no runnable threads */
734 /* :-[ no local threads => look out for local sparks */
735 /* the spark pool for the current PE */
736 pool = &(MainRegTable.rSparks); // generalise to cap = &MainRegTable
737 if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
738 pool->hd < pool->tl) {
740 * ToDo: add GC code check that we really have enough heap afterwards!!
742 * If we're here (no runnable threads) and we have pending
743 * sparks, we must have a space problem. Get enough space
744 * to turn one of those pending sparks into a
748 spark = findSpark(); /* get a spark */
749 if (spark != (rtsSpark) NULL) {
750 tso = activateSpark(spark); /* turn the spark into a thread */
751 IF_PAR_DEBUG(schedule,
752 belch("==== schedule: Created TSO %d (%p); %d threads active",
753 tso->id, tso, advisory_thread_count));
755 if (tso==END_TSO_QUEUE) { /* failed to activate spark -> back to loop */
756 belch("==^^ failed to activate spark");
758 } /* otherwise fall through & pick-up new tso */
760 IF_PAR_DEBUG(verbose,
761 belch("==^^ no local sparks (spark pool contains only NFs: %d)",
762 spark_queue_len(pool)));
766 /* =8-[ no local sparks => look for work on other PEs */
769 * We really have absolutely no work. Send out a fish
770 * (there may be some out there already), and wait for
771 * something to arrive. We clearly can't run any threads
772 * until a SCHEDULE or RESUME arrives, and so that's what
773 * we're hoping to see. (Of course, we still have to
774 * respond to other types of messages.)
777 outstandingFishes < RtsFlags.ParFlags.maxFishes ) { // &&
778 // (last_fish_arrived_at+FISH_DELAY < CURRENT_TIME)) {
779 /* fishing set in sendFish, processFish;
780 avoid flooding system with fishes via delay */
782 sendFish(pe, mytid, NEW_FISH_AGE, NEW_FISH_HISTORY,
790 } else if (PacketsWaiting()) { /* Look for incoming messages */
794 /* Now we are sure that we have some work available */
795 ASSERT(run_queue_hd != END_TSO_QUEUE);
796 /* Take a thread from the run queue, if we have work */
797 t = POP_RUN_QUEUE(); // take_off_run_queue(END_TSO_QUEUE);
799 /* ToDo: write something to the log-file
800 if (RTSflags.ParFlags.granSimStats && !sameThread)
801 DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
805 /* the spark pool for the current PE */
806 pool = &(MainRegTable.rSparks); // generalise to cap = &MainRegTable
808 IF_DEBUG(scheduler, belch("--^^ %d sparks on [%#x] (hd=%x; tl=%x; base=%x, lim=%x)",
809 spark_queue_len(pool),
811 pool->hd, pool->tl, pool->base, pool->lim));
813 IF_DEBUG(scheduler, belch("--== %d threads on [%#x] (hd=%x; tl=%x)",
814 run_queue_len(), CURRENT_PROC,
815 run_queue_hd, run_queue_tl));
820 we are running a different TSO, so write a schedule event to log file
821 NB: If we use fair scheduling we also have to write a deschedule
822 event for LastTSO; with unfair scheduling we know that the
823 previous tso has blocked whenever we switch to another tso, so
824 we don't need it in GUM for now
826 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
827 GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
831 #else /* !GRAN && !PAR */
833 /* grab a thread from the run queue
835 ASSERT(run_queue_hd != END_TSO_QUEUE);
837 IF_DEBUG(sanity,checkTSO(t));
844 cap = free_capabilities;
845 free_capabilities = cap->link;
846 n_free_capabilities--;
851 cap->rCurrentTSO = t;
853 /* context switches are now initiated by the timer signal, unless
854 * the user specified "context switch as often as possible", with
857 if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
858 && (run_queue_hd != END_TSO_QUEUE
859 || blocked_queue_hd != END_TSO_QUEUE))
864 RELEASE_LOCK(&sched_mutex);
866 IF_DEBUG(scheduler, sched_belch("-->> Running TSO %ld (%p) %s ...",
867 t->id, t, whatNext_strs[t->what_next]));
869 /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
870 /* Run the current thread
872 switch (cap->rCurrentTSO->what_next) {
875 /* Thread already finished, return to scheduler. */
876 ret = ThreadFinished;
879 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
882 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
884 case ThreadEnterHugs:
888 IF_DEBUG(scheduler,sched_belch("entering Hugs"));
889 c = (StgClosure *)(cap->rCurrentTSO->sp[0]);
890 cap->rCurrentTSO->sp += 1;
895 barf("Panic: entered a BCO but no bytecode interpreter in this build");
898 barf("schedule: invalid what_next field");
900 /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
902 /* Costs for the scheduler are assigned to CCS_SYSTEM */
907 ACQUIRE_LOCK(&sched_mutex);
910 IF_DEBUG(scheduler,fprintf(stderr,"scheduler (task %ld): ", pthread_self()););
911 #elif !defined(GRAN) && !defined(PAR)
912 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: "););
914 t = cap->rCurrentTSO;
917 /* HACK 675: if the last thread didn't yield, make sure to print a
918 SCHEDULE event to the log file when StgRunning the next thread, even
919 if it is the same one as before */
920 LastTSO = t; //(ret == ThreadBlocked) ? END_TSO_QUEUE : t;
921 TimeOfLastYield = CURRENT_TIME;
926 /* make all the running tasks block on a condition variable,
927 * maybe set context_switch and wait till they all pile in,
928 * then have them wait on a GC condition variable.
930 IF_DEBUG(scheduler,belch("--<< thread %ld (%p; %s) stopped: HeapOverflow",
931 t->id, t, whatNext_strs[t->what_next]));
934 ASSERT(!is_on_queue(t,CurrentProc));
937 ready_to_gc = rtsTrue;
938 context_switch = 1; /* stop other threads ASAP */
939 PUSH_ON_RUN_QUEUE(t);
940 /* actual GC is done at the end of the while loop */
944 IF_DEBUG(scheduler,belch("--<< thread %ld (%p; %s) stopped, StackOverflow",
945 t->id, t, whatNext_strs[t->what_next]));
946 /* just adjust the stack for this thread, then pop it back on the run queue.
952 /* enlarge the stack */
953 StgTSO *new_t = threadStackOverflow(t);
955 /* This TSO has moved, so update any pointers to it from the
956 * main thread stack. It better not be on any other queues...
959 for (m = main_threads; m != NULL; m = m->link) {
965 PUSH_ON_RUN_QUEUE(new_t);
972 DumpGranEvent(GR_DESCHEDULE, t));
973 globalGranStats.tot_yields++;
976 DumpGranEvent(GR_DESCHEDULE, t));
978 /* put the thread back on the run queue. Then, if we're ready to
979 * GC, check whether this is the last task to stop. If so, wake
980 * up the GC thread. getThread will block during a GC until the GC is finished.
984 if (t->what_next == ThreadEnterHugs) {
985 /* ToDo: or maybe a timer expired when we were in Hugs?
986 * or maybe someone hit ctrl-C
988 belch("--<< thread %ld (%p; %s) stopped to switch to Hugs",
989 t->id, t, whatNext_strs[t->what_next]);
991 belch("--<< thread %ld (%p; %s) stopped, yielding",
992 t->id, t, whatNext_strs[t->what_next]);
999 //belch("&& Doing sanity check on yielding TSO %ld.", t->id);
1001 ASSERT(t->link == END_TSO_QUEUE);
1003 ASSERT(!is_on_queue(t,CurrentProc));
1006 //belch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
1007 checkThreadQsSanity(rtsTrue));
1009 APPEND_TO_RUN_QUEUE(t);
1011 /* add a ContinueThread event to actually process the thread */
1012 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
1014 t, (StgClosure*)NULL, (rtsSpark*)NULL);
1016 belch("GRAN: eventq and runnableq after adding yielded thread to queue again:");
1025 belch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
1026 t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
1027 if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
1029 // ??? needed; should emit block before
1031 DumpGranEvent(GR_DESCHEDULE, t));
1032 prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
1035 ASSERT(procStatus[CurrentProc]==Busy ||
1036 ((procStatus[CurrentProc]==Fetching) &&
1037 (t->block_info.closure!=(StgClosure*)NULL)));
1038 if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
1039 !(!RtsFlags.GranFlags.DoAsyncFetch &&
1040 procStatus[CurrentProc]==Fetching))
1041 procStatus[CurrentProc] = Idle;
1045 DumpGranEvent(GR_DESCHEDULE, t));
1047 /* Send a fetch (if BlockedOnGA) and dump event to log file */
1051 belch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: ",
1052 t->id, t, whatNext_strs[t->what_next], t->block_info.closure);
1053 if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
1056 /* don't need to do anything. Either the thread is blocked on
1057 * I/O, in which case we'll have called addToBlockedQueue
1058 * previously, or it's blocked on an MVar or Blackhole, in which
1059 * case it'll be on the relevant queue already.
1062 fprintf(stderr, "--<< thread %d (%p) stopped: ", t->id, t);
1063 printThreadBlockage(t);
1064 fprintf(stderr, "\n"));
1066 /* Only for dumping event to log file
1067 ToDo: do I need this in GranSim, too?
1074 case ThreadFinished:
1075 /* Need to check whether this was a main thread, and if so, signal
1076 * the task that started it with the return value. If we have no
1077 * more main threads, we probably need to stop all the tasks until more work arrives.
1080 /* We also end up here if the thread kills itself with an
1081 * uncaught exception, see Exception.hc.
1083 IF_DEBUG(scheduler,belch("--++ thread %d (%p) finished", t->id, t));
1085 endThread(t, CurrentProc); // clean-up the thread
1087 advisory_thread_count--;
1088 if (RtsFlags.ParFlags.ParStats.Full)
1089 DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);
1094 barf("schedule: invalid thread return code %d", (int)ret);
1098 cap->link = free_capabilities;
1099 free_capabilities = cap;
1100 n_free_capabilities++;
1104 if (ready_to_gc && n_free_capabilities == RtsFlags.ParFlags.nNodes)
1109 /* everybody back, start the GC.
1110 * Could do it in this thread, or signal a condition var
1111 * to do it in another thread. Either way, we need to
1112 * broadcast on gc_pending_cond afterward.
1115 IF_DEBUG(scheduler,sched_belch("doing GC"));
1117 GarbageCollect(GetRoots,rtsFalse);
1118 ready_to_gc = rtsFalse;
1120 pthread_cond_broadcast(&gc_pending_cond);
1123 /* add a ContinueThread event to continue execution of current thread */
1124 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
1126 t, (StgClosure*)NULL, (rtsSpark*)NULL);
1128 fprintf(stderr, "GRAN: eventq and runnableq after Garbage collection:\n");
1135 IF_GRAN_DEBUG(unused,
1136 print_eventq(EventHd));
1138 event = get_next_event();
1142 /* ToDo: wait for next message to arrive rather than busy wait */
1147 t = take_off_run_queue(END_TSO_QUEUE);
1150 } /* end of while(1) */
1153 /* A hack for Hugs concurrency support. Needs sanitisation (?) */
1154 void deleteAllThreads ( void )
1157 IF_DEBUG(scheduler,sched_belch("deleteAllThreads()"));
1158 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
1161 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
1164 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
1165 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
1168 /* startThread and insertThread are now in GranSim.c -- HWL */
1170 //@node Suspend and Resume, Run queue code, Main scheduling loop, Main scheduling code
1171 //@subsection Suspend and Resume
1173 /* ---------------------------------------------------------------------------
1174 * Suspending & resuming Haskell threads.
1176 * When making a "safe" call to C (aka _ccall_GC), the task gives back
1177 * its capability before calling the C function. This allows another
1178 * task to pick up the capability and carry on running Haskell
1179 * threads. It also means that if the C call blocks, it won't lock the whole system.
1182 * The Haskell thread making the C call is put to sleep for the
1183 * duration of the call, on the suspended_ccalling_threads queue. We
1184 * give out a token to the task, which it can use to resume the thread
1185 * on return from the C function.
1186 * ------------------------------------------------------------------------- */
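/* A sketch of the token protocol from the task's point of view, where
 * foo() stands for some foreign function declared as a "safe" call:
 *
 *     StgInt tok;
 *     tok = suspendThread(cap);    // give up the capability
 *     r   = foo(...);              // may block, or call back into the RTS
 *     cap = resumeThread(tok);     // get a capability back, resume the TSO
 */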
1189 suspendThread( Capability *cap )
1193 ACQUIRE_LOCK(&sched_mutex);
1196 sched_belch("thread %d did a _ccall_gc", cap->rCurrentTSO->id));
1198 threadPaused(cap->rCurrentTSO);
1199 cap->rCurrentTSO->link = suspended_ccalling_threads;
1200 suspended_ccalling_threads = cap->rCurrentTSO;
1202 /* Use the thread ID as the token; it should be unique */
1203 tok = cap->rCurrentTSO->id;
1206 cap->link = free_capabilities;
1207 free_capabilities = cap;
1208 n_free_capabilities++;
1211 RELEASE_LOCK(&sched_mutex);
1216 resumeThread( StgInt tok )
1218 StgTSO *tso, **prev;
1221 ACQUIRE_LOCK(&sched_mutex);
1223 prev = &suspended_ccalling_threads;
1224 for (tso = suspended_ccalling_threads;
1225 tso != END_TSO_QUEUE;
1226 prev = &tso->link, tso = tso->link) {
1227 if (tso->id == (StgThreadID)tok) {
1232 if (tso == END_TSO_QUEUE) {
1233 barf("resumeThread: thread not found");
1237 while (free_capabilities == NULL) {
1238 IF_DEBUG(scheduler, sched_belch("waiting to resume"));
1239 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
1240 IF_DEBUG(scheduler, sched_belch("resuming thread %d", tso->id));
1242 cap = free_capabilities;
1243 free_capabilities = cap->link;
1244 n_free_capabilities--;
1246 cap = &MainRegTable;
1249 cap->rCurrentTSO = tso;
1251 RELEASE_LOCK(&sched_mutex);
1256 /* ---------------------------------------------------------------------------
1258 * ------------------------------------------------------------------------ */
1259 static void unblockThread(StgTSO *tso);
1261 /* ---------------------------------------------------------------------------
1262 * Comparing Thread ids.
1264 * This is used from STG land in the implementation of the
1265 * instances of Eq/Ord for ThreadIds.
1266 * ------------------------------------------------------------------------ */
1268 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
1270 StgThreadID id1 = tso1->id;
1271 StgThreadID id2 = tso2->id;
1273 if (id1 < id2) return (-1);
1274 if (id1 > id2) return 1;
1278 /* ---------------------------------------------------------------------------
1279 Create a new thread.
1281 The new thread starts with the given stack size. Before the
1282 scheduler can run, however, this thread needs to have a closure
1283 (and possibly some arguments) pushed on its stack. See
1284 pushClosure() in Schedule.h.
1286 createGenThread() and createIOThread() (in SchedAPI.h) are
1287 convenient packaged versions of this function.
1289 currently pri (priority) is only used in a GRAN setup -- HWL
1290 ------------------------------------------------------------------------ */
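/* A sketch of the intended calling sequence (in a GranSim setup
 * createThread() additionally takes a priority; `closure' stands for
 * whatever the client wants evaluated):
 *
 *     StgTSO *tso = createThread(RtsFlags.GcFlags.initialStkSize);
 *     pushClosure(tso, closure);   // see pushClosure() in Schedule.h
 *     scheduleThread(tso);         // put it on the run queue
 */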
1291 //@cindex createThread
1293 /* currently pri (priority) is only used in a GRAN setup -- HWL */
1295 createThread(nat stack_size, StgInt pri)
1297 return createThread_(stack_size, rtsFalse, pri);
1301 createThread_(nat size, rtsBool have_lock, StgInt pri)
1305 createThread(nat stack_size)
1307 return createThread_(stack_size, rtsFalse);
1311 createThread_(nat size, rtsBool have_lock)
1318 /* First check whether we should create a thread at all */
1320 /* check that no more than RtsFlags.ParFlags.maxThreads threads are created */
1321 if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads) {
1323 belch("{createThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)",
1324 RtsFlags.ParFlags.maxThreads, advisory_thread_count);
1325 return END_TSO_QUEUE;
1331 ASSERT(!RtsFlags.GranFlags.Light || CurrentProc==0);
1334 // ToDo: check whether size = stack_size - TSO_STRUCT_SIZEW
1336 /* catch ridiculously small stack sizes */
1337 if (size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
1338 size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
1341 stack_size = size - TSO_STRUCT_SIZEW;
1343 tso = (StgTSO *)allocate(size);
1344 TICK_ALLOC_TSO(size-TSO_STRUCT_SIZEW, 0);
1346 SET_HDR(tso, &TSO_info, CCS_SYSTEM);
1348 SET_GRAN_HDR(tso, ThisPE);
1350 tso->what_next = ThreadEnterGHC;
1352 /* tso->id needs to be unique. For now we use a heavyweight mutex to
1353 * protect the increment operation on next_thread_id.
1354 * In future, we could use an atomic increment instead.
1356 if (!have_lock) { ACQUIRE_LOCK(&sched_mutex); }
1357 tso->id = next_thread_id++;
1358 if (!have_lock) { RELEASE_LOCK(&sched_mutex); }
1360 tso->why_blocked = NotBlocked;
1361 tso->blocked_exceptions = NULL;
1363 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
1364 tso->stack_size = stack_size;
1365 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
1367 tso->sp = (P_)&(tso->stack) + stack_size;
1370 tso->prof.CCCS = CCS_MAIN;
1373 /* put a stop frame on the stack */
1374 tso->sp -= sizeofW(StgStopFrame);
1375 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
1376 tso->su = (StgUpdateFrame*)tso->sp;
1380 tso->link = END_TSO_QUEUE;
1381 /* uses more flexible routine in GranSim */
1382 insertThread(tso, CurrentProc);
1384 /* In a non-GranSim setup the pushing of a TSO onto the runq is separated
1389 #if defined(GRAN) || defined(PAR)
1390 DumpGranEvent(GR_START,tso);
1393 /* Link the new thread on the global thread list.
1395 tso->global_link = all_threads;
1399 tso->gran.pri = pri;
1401 tso->gran.magic = TSO_MAGIC; // debugging only
1403 tso->gran.sparkname = 0;
1404 tso->gran.startedat = CURRENT_TIME;
1405 tso->gran.exported = 0;
1406 tso->gran.basicblocks = 0;
1407 tso->gran.allocs = 0;
1408 tso->gran.exectime = 0;
1409 tso->gran.fetchtime = 0;
1410 tso->gran.fetchcount = 0;
1411 tso->gran.blocktime = 0;
1412 tso->gran.blockcount = 0;
1413 tso->gran.blockedat = 0;
1414 tso->gran.globalsparks = 0;
1415 tso->gran.localsparks = 0;
1416 if (RtsFlags.GranFlags.Light)
1417 tso->gran.clock = Now; /* local clock */
1419 tso->gran.clock = 0;
1421 IF_DEBUG(gran,printTSO(tso));
1424 tso->par.magic = TSO_MAGIC; // debugging only
1426 tso->par.sparkname = 0;
1427 tso->par.startedat = CURRENT_TIME;
1428 tso->par.exported = 0;
1429 tso->par.basicblocks = 0;
1430 tso->par.allocs = 0;
1431 tso->par.exectime = 0;
1432 tso->par.fetchtime = 0;
1433 tso->par.fetchcount = 0;
1434 tso->par.blocktime = 0;
1435 tso->par.blockcount = 0;
1436 tso->par.blockedat = 0;
1437 tso->par.globalsparks = 0;
1438 tso->par.localsparks = 0;
1442 globalGranStats.tot_threads_created++;
1443 globalGranStats.threads_created_on_PE[CurrentProc]++;
1444 globalGranStats.tot_sq_len += spark_queue_len(CurrentProc);
1445 globalGranStats.tot_sq_probes++;
1450 belch("==__ schedule: Created TSO %d (%p) [PE %d];",
1451 tso->id, tso, CurrentProc));
1453 IF_PAR_DEBUG(verbose,
1454 belch("==__ schedule: Created TSO %d (%p); %d threads active",
1455 tso->id, tso, advisory_thread_count));
1457 IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words",
1458 tso->id, tso->stack_size));
1464 Turn a spark into a thread.
1465 ToDo: fix for SMP (needs to acquire SCHED_MUTEX!)
1468 //@cindex activateSpark
1470 activateSpark (rtsSpark spark)
1474 ASSERT(spark != (rtsSpark)NULL);
1475 tso = createThread_(RtsFlags.GcFlags.initialStkSize, rtsTrue);
1476 if (tso!=END_TSO_QUEUE) {
1477 pushClosure(tso,spark);
1478 PUSH_ON_RUN_QUEUE(tso);
1479 advisory_thread_count++;
1481 if (RtsFlags.ParFlags.ParStats.Full) {
1482 //ASSERT(run_queue_hd == END_TSO_QUEUE); // I think ...
1483 IF_PAR_DEBUG(verbose,
1484 belch("==^^ activateSpark: turning spark of closure %p (%s) into a thread",
1485 (StgClosure *)spark, info_type((StgClosure *)spark)));
1488 barf("activateSpark: Cannot create TSO");
1490 // ToDo: fwd info on local/global spark to thread -- HWL
1491 // tso->gran.exported = spark->exported;
1492 // tso->gran.locked = !spark->global;
1493 // tso->gran.sparkname = spark->name;
1499 /* ---------------------------------------------------------------------------
1502 * scheduleThread puts a thread on the head of the runnable queue.
1503 * This will usually be done immediately after a thread is created.
1504 * The caller of scheduleThread must create the thread using e.g.
1505 * createThread and push an appropriate closure
1506 * on this thread's stack before the scheduler is invoked.
1507 * ------------------------------------------------------------------------ */
1510 scheduleThread(StgTSO *tso)
1512 if (tso==END_TSO_QUEUE){
1517 ACQUIRE_LOCK(&sched_mutex);
1519 /* Put the new thread on the head of the runnable queue. The caller
1520 * better push an appropriate closure on this thread's stack
1521 * beforehand. In the SMP case, the thread may start running as
1522 * soon as we release the scheduler lock below.
1524 PUSH_ON_RUN_QUEUE(tso);
1528 IF_DEBUG(scheduler,printTSO(tso));
1530 RELEASE_LOCK(&sched_mutex);
1533 /* ---------------------------------------------------------------------------
1536 * Start up Posix threads to run each of the scheduler tasks.
1537 * I believe the task ids are not needed in the system as defined.
1539 * ------------------------------------------------------------------------ */
1541 #if defined(PAR) || defined(SMP)
1543 taskStart( void *arg STG_UNUSED )
1545 rts_evalNothing(NULL);
1549 /* ---------------------------------------------------------------------------
1552 * Initialise the scheduler. This resets all the queues - if the
1553 * queues contained any threads, they'll be garbage collected at the
1556 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
1557 * ------------------------------------------------------------------------ */
1561 term_handler(int sig STG_UNUSED)
1564 ACQUIRE_LOCK(&term_mutex);
1566 RELEASE_LOCK(&term_mutex);
1571 //@cindex initScheduler
1578 for (i=0; i<MAX_PROC; i++) {
1579 run_queue_hds[i] = END_TSO_QUEUE;
1580 run_queue_tls[i] = END_TSO_QUEUE;
1581 blocked_queue_hds[i] = END_TSO_QUEUE;
1582 blocked_queue_tls[i] = END_TSO_QUEUE;
1583 ccalling_threadss[i] = END_TSO_QUEUE;
1586 run_queue_hd = END_TSO_QUEUE;
1587 run_queue_tl = END_TSO_QUEUE;
1588 blocked_queue_hd = END_TSO_QUEUE;
1589 blocked_queue_tl = END_TSO_QUEUE;
1592 suspended_ccalling_threads = END_TSO_QUEUE;
1594 main_threads = NULL;
1595 all_threads = END_TSO_QUEUE;
1600 RtsFlags.ConcFlags.ctxtSwitchTicks =
1601 RtsFlags.ConcFlags.ctxtSwitchTime / TICK_MILLISECS;
1604 ecafList = END_ECAF_LIST;
1608 /* Install the SIGTERM handler */
1611 struct sigaction action,oact;
1613 action.sa_handler = term_handler;
1614 sigemptyset(&action.sa_mask);
1615 action.sa_flags = 0;
1616 if (sigaction(SIGTERM, &action, &oact) != 0) {
1617 barf("can't install TERM handler");
1623 /* Allocate N Capabilities */
1626 Capability *cap, *prev;
1629 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1630 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
1634 free_capabilities = cap;
1635 n_free_capabilities = RtsFlags.ParFlags.nNodes;
1637 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Allocated %d capabilities\n",
1638 n_free_capabilities););
1641 #if defined(SMP) || defined(PAR)
1654 /* make some space for saving all the thread ids */
1655 task_ids = stgMallocBytes(RtsFlags.ParFlags.nNodes * sizeof(task_info),
1656 "initScheduler:task_ids");
1658 /* and create all the threads */
1659 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1660 r = pthread_create(&tid,NULL,taskStart,NULL);
1662 barf("startTasks: Can't create new Posix thread");
1664 task_ids[i].id = tid;
1665 task_ids[i].mut_time = 0.0;
1666 task_ids[i].mut_etime = 0.0;
1667 task_ids[i].gc_time = 0.0;
1668 task_ids[i].gc_etime = 0.0;
1669 task_ids[i].elapsedtimestart = elapsedtime();
1670 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Started task: %ld\n",tid););
1676 exitScheduler( void )
1681 /* Don't want to use pthread_cancel, since we'd have to install
1682 * these silly exception handlers (pthread_cleanup_{push,pop}) around all our lock acquisitions.
1686 /* Cancel all our tasks */
1687 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1688 pthread_cancel(task_ids[i].id);
1691 /* Wait for all the tasks to terminate */
1692 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1693 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: waiting for task %ld\n",
1695 pthread_join(task_ids[i].id, NULL);
1699 /* Send 'em all a SIGTERM. That should shut 'em up.
1701 await_death = RtsFlags.ParFlags.nNodes;
1702 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1703 pthread_kill(task_ids[i].id,SIGTERM);
1705 while (await_death > 0) {
1711 /* -----------------------------------------------------------------------------
1712 Managing the per-task allocation areas.
1714 Each capability comes with an allocation area. These are
1715 fixed-length block lists into which allocation can be done.
1717 ToDo: no support for two-space collection at the moment???
1718 -------------------------------------------------------------------------- */
1720 /* -----------------------------------------------------------------------------
1721 * waitThread is the external interface for running a new computation
1722 * and waiting for the result.
1724 * In the non-SMP case, we create a new main thread, push it on the
1725 * main-thread stack, and invoke the scheduler to run it. The
1726 * scheduler will return when the top main thread on the stack has
1727 * completed or died, and fill in the necessary fields of the
1728 * main_thread structure.
1730 * In the SMP case, we create a main thread as before, but we then
1731 * create a new condition variable and sleep on it. When our new
1732 * main thread has completed, we'll be woken up and the status/result
1733 * will be in the main_thread struct.
1734 * -------------------------------------------------------------------------- */
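/* A sketch of the full external sequence, assuming createIOThread() from
 * SchedAPI.h takes a stack size and the closure to run (`p' stands for
 * the client's closure):
 *
 *     StgClosure *ret;
 *     SchedulerStatus stat;
 *     StgTSO *tso = createIOThread(RtsFlags.GcFlags.initialStkSize, p);
 *     scheduleThread(tso);
 *     stat = waitThread(tso, &ret);  // blocks until tso completes or dies
 */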
1737 howManyThreadsAvail ( void )
1741 for (q = run_queue_hd; q != END_TSO_QUEUE; q = q->link)
1743 for (q = blocked_queue_hd; q != END_TSO_QUEUE; q = q->link)
1749 finishAllThreads ( void )
1752 while (run_queue_hd != END_TSO_QUEUE) {
1753 waitThread ( run_queue_hd, NULL );
1755 while (blocked_queue_hd != END_TSO_QUEUE) {
1756 waitThread ( blocked_queue_hd, NULL );
1759 (blocked_queue_hd != END_TSO_QUEUE ||
1760 run_queue_hd != END_TSO_QUEUE);
1764 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
1767 SchedulerStatus stat;
1769 ACQUIRE_LOCK(&sched_mutex);
1771 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
1777 pthread_cond_init(&m->wakeup, NULL);
1780 m->link = main_threads;
1783 IF_DEBUG(scheduler, fprintf(stderr, "scheduler: new main thread (%d)\n",
1788 pthread_cond_wait(&m->wakeup, &sched_mutex);
1789 } while (m->stat == NoStatus);
1791 /* GranSim specific init */
1792 CurrentTSO = m->tso; // the TSO to run
1793 procStatus[MainProc] = Busy; // status of main PE
1794 CurrentProc = MainProc; // PE to run it on
1799 ASSERT(m->stat != NoStatus);
1805 pthread_cond_destroy(&m->wakeup);
1808 IF_DEBUG(scheduler, fprintf(stderr, "scheduler: main thread (%d) finished\n",
1812 RELEASE_LOCK(&sched_mutex);
1817 //@node Run queue code, Garbage Collection Routines, Suspend and Resume, Main scheduling code
1818 //@subsection Run queue code
1822 NB: In GranSim we have many run queues; run_queue_hd is actually a macro
1823 unfolding to run_queue_hds[CurrentProc], thus CurrentProc is an
1824 implicit global variable that has to be correct when calling these functions.
1828 /* Put the new thread on the head of the runnable queue.
1829 * The caller of createThread better push an appropriate closure
1830 * on this thread's stack before the scheduler is invoked.
1832 static /* inline */ void
1833 add_to_run_queue(tso)
1836 ASSERT(tso!=run_queue_hd && tso!=run_queue_tl);
1837 tso->link = run_queue_hd;
1839 if (run_queue_tl == END_TSO_QUEUE) {
1844 /* Put the new thread at the end of the runnable queue. */
1845 static /* inline */ void
1846 push_on_run_queue(tso)
1849 ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
1850 ASSERT(run_queue_hd!=NULL && run_queue_tl!=NULL);
1851 ASSERT(tso!=run_queue_hd && tso!=run_queue_tl);
1852 if (run_queue_hd == END_TSO_QUEUE) {
1855 run_queue_tl->link = tso;
1861 Should be inlined because it's used very often in schedule. The tso
1862 argument is actually only needed in GranSim, where we want to have the
1863 possibility to schedule *any* TSO on the run queue, irrespective of the
1864 actual ordering. Therefore, if tso is not the nil TSO then we traverse
1865 the run queue and dequeue the tso, adjusting the links in the queue.
1867 //@cindex take_off_run_queue
1868 static /* inline */ StgTSO*
1869 take_off_run_queue(StgTSO *tso) {
1873 qetlaHbogh Qu' ngaSbogh ghomDaQ {tso} yIteq!
1875 if tso is specified, unlink that tso from the run_queue (doesn't have
1876 to be at the beginning of the queue); GranSim only
1878 if (tso!=END_TSO_QUEUE) {
1879 /* find tso in queue */
1880 for (t=run_queue_hd, prev=END_TSO_QUEUE;
1881 t!=END_TSO_QUEUE && t!=tso;
1885 /* now actually dequeue the tso */
1886 if (prev!=END_TSO_QUEUE) {
1887 ASSERT(run_queue_hd!=t);
1888 prev->link = t->link;
1890 /* t is at beginning of thread queue */
1891 ASSERT(run_queue_hd==t);
1892 run_queue_hd = t->link;
1894 /* t is at end of thread queue */
1895 if (t->link==END_TSO_QUEUE) {
1896 ASSERT(t==run_queue_tl);
1897 run_queue_tl = prev;
1899 ASSERT(run_queue_tl!=t);
1901 t->link = END_TSO_QUEUE;
1903 /* take tso from the beginning of the queue; std concurrent code */
1905 if (t != END_TSO_QUEUE) {
1906 run_queue_hd = t->link;
1907 t->link = END_TSO_QUEUE;
1908 if (run_queue_hd == END_TSO_QUEUE) {
1909 run_queue_tl = END_TSO_QUEUE;
1918 //@node Garbage Collection Routines, Blocking Queue Routines, Run queue code, Main scheduling code
1919 //@subsection Garbage Collection Routines
1921 /* ---------------------------------------------------------------------------
1922 Where are the roots that we know about?
1924 - all the threads on the runnable queue
1925 - all the threads on the blocked queue
1926 - all the threads currently executing a _ccall_GC
1927 - all the "main threads"
1929 ------------------------------------------------------------------------ */
1931 /* This has to be protected either by the scheduler monitor, or by the
1932 garbage collection monitor (probably the latter).
1936 static void GetRoots(void)
1943 for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
1944 if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL)))
1945 run_queue_hds[i] = (StgTSO *)MarkRoot((StgClosure *)run_queue_hds[i]);
1946 if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL)))
1947 run_queue_tls[i] = (StgTSO *)MarkRoot((StgClosure *)run_queue_tls[i]);
1949 if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL)))
1950 blocked_queue_hds[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hds[i]);
1951 if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL)))
1952 blocked_queue_tls[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tls[i]);
1953 if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL)))
1954 ccalling_threadss[i] = (StgTSO *)MarkRoot((StgClosure *)ccalling_threadss[i]);
1961 if (run_queue_hd != END_TSO_QUEUE) {
1962 ASSERT(run_queue_tl != END_TSO_QUEUE);
1963 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
1964 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
1967 if (blocked_queue_hd != END_TSO_QUEUE) {
1968 ASSERT(blocked_queue_tl != END_TSO_QUEUE);
1969 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
1970 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
1974 for (m = main_threads; m != NULL; m = m->link) {
1975 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
1977 if (suspended_ccalling_threads != END_TSO_QUEUE)
1978 suspended_ccalling_threads =
1979 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
1981 #if defined(SMP) || defined(PAR) || defined(GRAN)
1986 /* -----------------------------------------------------------------------------
1989 This is the interface to the garbage collector from Haskell land.
1990 We provide this so that external C code can allocate and garbage
1991 collect when called from Haskell via _ccall_GC.
1993 It might be useful to provide an interface whereby the programmer
1994 can specify more roots (ToDo).
1996 This needs to be protected by the GC condition variable above. KH.
1997 -------------------------------------------------------------------------- */
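/* A sketch of how foreign code might keep an extra root alive across a
 * collection (my_root and my_get_roots are hypothetical names):
 *
 *     static StgClosure *my_root;
 *
 *     static void my_get_roots(void) {
 *         my_root = (StgClosure *)MarkRoot(my_root);
 *     }
 *
 *     ...
 *     performGCWithRoots(my_get_roots);
 */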
1999 void (*extra_roots)(void);
2004 GarbageCollect(GetRoots,rtsFalse);
2008 performMajorGC(void)
2010 GarbageCollect(GetRoots,rtsTrue);
2016 GetRoots(); /* the scheduler's roots */
2017 extra_roots(); /* the user's roots */
2021 performGCWithRoots(void (*get_roots)(void))
2023 extra_roots = get_roots;
2025 GarbageCollect(AllRoots,rtsFalse);
2028 /* -----------------------------------------------------------------------------
2031 If the thread has reached its maximum stack size, then raise the
2032 StackOverflow exception in the offending thread. Otherwise
2033 relocate the TSO into a larger chunk of memory and adjust its stack
2035 -------------------------------------------------------------------------- */
2038 threadStackOverflow(StgTSO *tso)
2040 nat new_stack_size, new_tso_size, diff, stack_words;
2044 IF_DEBUG(sanity,checkTSO(tso));
2045 if (tso->stack_size >= tso->max_stack_size) {
2048 belch("@@ threadStackOverflow of TSO %d (%p): stack too large (now %ld; max is %ld)",
2049 tso->id, tso, tso->stack_size, tso->max_stack_size);
2050 /* If we're debugging, just print out the top of the stack */
2051 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2055 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
2058 /* Send this thread the StackOverflow exception */
2059 raiseAsync(tso, (StgClosure *)stackOverflow_closure);
2064 /* Try to double the current stack size. If that takes us over the
2065 * maximum stack size for this thread, then use the maximum instead.
2066 * Finally round up so the TSO ends up as a whole number of blocks.
2068 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
2069 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
2070 TSO_STRUCT_SIZE)/sizeof(W_);
2071 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
2072 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
2074 IF_DEBUG(scheduler, fprintf(stderr,"scheduler: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
2076 dest = (StgTSO *)allocate(new_tso_size);
2077 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
2079 /* copy the TSO block and the old stack into the new area */
2080 memcpy(dest,tso,TSO_STRUCT_SIZE);
2081 stack_words = tso->stack + tso->stack_size - tso->sp;
2082 new_sp = (P_)dest + new_tso_size - stack_words;
2083 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
2085 /* relocate the stack pointers... */
2086 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
2087 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
2089 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
2090 dest->stack_size = new_stack_size;
2092 /* and relocate the update frame list */
2093 relocate_TSO(tso, dest);
2095 /* Mark the old TSO as relocated. We have to check for relocated
2096 * TSOs in the garbage collector and any primops that deal with TSOs.
2098 * It's important to set the sp and su values to just beyond the end
2099 * of the stack, so we don't attempt to scavenge any part of the
2102 tso->what_next = ThreadRelocated;
2104 tso->sp = (P_)&(tso->stack[tso->stack_size]);
2105 tso->su = (StgUpdateFrame *)tso->sp;
2106 tso->why_blocked = NotBlocked;
2107 dest->mut_link = NULL;
2109 IF_PAR_DEBUG(verbose,
2110 belch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld",
2111 tso->id, tso, tso->stack_size);
2112 /* If we're debugging, just print out the top of the stack */
2113 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2116 IF_DEBUG(sanity,checkTSO(tso));
2118 IF_DEBUG(scheduler,printTSO(dest));
2124 //@node Blocking Queue Routines, Exception Handling Routines, Garbage Collection Routines, Main scheduling code
2125 //@subsection Blocking Queue Routines
2127 /* ---------------------------------------------------------------------------
2128 Wake up a queue that was blocked on some resource.
2129 ------------------------------------------------------------------------ */
2131 /* ToDo: check push_on_run_queue vs. PUSH_ON_RUN_QUEUE */
2135 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
2140 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
2142 /* write RESUME events to log file and
2143 update blocked and fetch time (depending on the type of the original closure) */
2144 if (RtsFlags.ParFlags.ParStats.Full) {
2145 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
2146 GR_RESUME, ((StgTSO *)bqe), ((StgTSO *)bqe)->block_info.closure,
2147 0, 0 /* spark_queue_len(ADVISORY_POOL) */);
2149 switch (get_itbl(node)->type) {
2151 ((StgTSO *)bqe)->par.fetchtime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
2156 ((StgTSO *)bqe)->par.blocktime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
2159 barf("{unblockOneLocked}Daq Qagh: unexpected closure in blocking queue");
2166 static StgBlockingQueueElement *
2167 unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
2170 PEs node_loc, tso_loc;
2172 node_loc = where_is(node); // should be lifted out of loop
2173 tso = (StgTSO *)bqe; // wastes an assignment to get the type right
2174 tso_loc = where_is((StgClosure *)tso);
2175 if (IS_LOCAL_TO(PROCS(node),tso_loc)) { // TSO is local
2176 /* without fake fetches, the TSO being on CurrentProc is the same as IS_LOCAL_TO */
2177 ASSERT(CurrentProc!=node_loc || tso_loc==CurrentProc);
2178 CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.lunblocktime;
2179 // insertThread(tso, node_loc);
2180 new_event(tso_loc, tso_loc, CurrentTime[CurrentProc],
2182 tso, node, (rtsSpark*)NULL);
2183 tso->link = END_TSO_QUEUE; // overwrite link just to be sure
2186 } else { // TSO is remote (actually should be FMBQ)
2187 CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.mpacktime +
2188 RtsFlags.GranFlags.Costs.gunblocktime +
2189 RtsFlags.GranFlags.Costs.latency;
2190 new_event(tso_loc, CurrentProc, CurrentTime[CurrentProc],
2192 tso, node, (rtsSpark*)NULL);
2193 tso->link = END_TSO_QUEUE; // overwrite link just to be sure
2196 /* the thread-queue-overhead is accounted for in either Resume or UnblockThread */
2198 fprintf(stderr," %s TSO %d (%p) [PE %d] (block_info.closure=%p) (next=%p) ,",
2199 (node_loc==tso_loc ? "Local" : "Global"),
2200 tso->id, tso, CurrentProc, tso->block_info.closure, tso->link));
2201 tso->block_info.closure = NULL;
2202 IF_DEBUG(scheduler,belch("-- Waking up thread %ld (%p)",
#elif defined(PAR)
static StgBlockingQueueElement *
unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
{
  StgBlockingQueueElement *next;

  switch (get_itbl(bqe)->type) {
  case TSO:
    ASSERT(((StgTSO *)bqe)->why_blocked != NotBlocked);
    /* if it's a TSO just push it onto the run_queue */
    next = bqe->link;
    // ((StgTSO *)bqe)->link = END_TSO_QUEUE; // debugging?
    PUSH_ON_RUN_QUEUE((StgTSO *)bqe);
    unblockCount(bqe, node);
    /* reset blocking status after dumping event */
    ((StgTSO *)bqe)->why_blocked = NotBlocked;
    break;

  case BLOCKED_FETCH:
    /* if it's a BLOCKED_FETCH put it on the PendingFetches list */
    next = bqe->link;
    bqe->link = PendingFetches;
    PendingFetches = bqe;
    break;

# if defined(DEBUG)
  case CONSTR:
    /* can ignore this case in a non-debugging setup;
       see comments on RBHSave closures above */
    /* check that the closure is an RBHSave closure */
    ASSERT(get_itbl((StgClosure *)bqe) == &RBH_Save_0_info ||
	   get_itbl((StgClosure *)bqe) == &RBH_Save_1_info ||
	   get_itbl((StgClosure *)bqe) == &RBH_Save_2_info);
    next = bqe->link;
    break;

  default:
    barf("{unblockOneLocked}Daq Qagh: Unexpected IP (%#lx; %s) in blocking queue at %#lx\n",
	 get_itbl((StgClosure *)bqe), info_type((StgClosure *)bqe),
	 (StgClosure *)bqe);
# endif
  }
  // IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
  return next;
}
#else /* !GRAN && !PAR */
static StgTSO *
unblockOneLocked(StgTSO *tso)
{
  StgTSO *next;

  ASSERT(get_itbl(tso)->type == TSO);
  ASSERT(tso->why_blocked != NotBlocked);
  tso->why_blocked = NotBlocked;
  next = tso->link;
  PUSH_ON_RUN_QUEUE(tso);
  IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
  return next;
}
#endif
#if defined(GRAN) || defined(PAR)
inline StgBlockingQueueElement *
unblockOne(StgBlockingQueueElement *bqe, StgClosure *node)
{
  ACQUIRE_LOCK(&sched_mutex);
  bqe = unblockOneLocked(bqe, node);
  RELEASE_LOCK(&sched_mutex);
  return bqe;
}
#else
inline StgTSO *
unblockOne(StgTSO *tso)
{
  ACQUIRE_LOCK(&sched_mutex);
  tso = unblockOneLocked(tso);
  RELEASE_LOCK(&sched_mutex);
  return tso;
}
#endif
#if defined(GRAN)
void
awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
{
  StgBlockingQueueElement *bqe;
  PEs node_loc;
  nat len = 0;

  IF_GRAN_DEBUG(bq,
	belch("## AwBQ for node %p on PE %d @ %ld by TSO %d (%p): ",
	      node, CurrentProc, CurrentTime[CurrentProc],
	      CurrentTSO->id, CurrentTSO));

  node_loc = where_is(node);

  ASSERT(get_itbl(q)->type == TSO ||   // q is either a TSO or an RBHSave
	 get_itbl(q)->type == CONSTR); // closure (type constructor)
  ASSERT(is_unique(node));

  /* FAKE FETCH: magically copy the node to the tso's proc;
     no Fetch necessary because in reality the node should not have been
     moved to the other PE in the first place
  */
  if (CurrentProc!=node_loc) {
    IF_GRAN_DEBUG(bq,
	  belch("## node %p is on PE %d but CurrentProc is %d (TSO %d); assuming fake fetch and adjusting bitmask (old: %#x)",
		node, node_loc, CurrentProc, CurrentTSO->id,
		// CurrentTSO, where_is(CurrentTSO),
		node->header.gran.procs));
    node->header.gran.procs = (node->header.gran.procs) | PE_NUMBER(CurrentProc);
    IF_GRAN_DEBUG(bq,
	  belch("## new bitmask of node %p is %#x",
		node, node->header.gran.procs));
    if (RtsFlags.GranFlags.GranSimStats.Global) {
      globalGranStats.tot_fake_fetches++;
    }
  }

  bqe = q;
  // ToDo: check: ASSERT(CurrentProc==node_loc);
  while (get_itbl(bqe)->type==TSO) { // q != END_TSO_QUEUE) {
    /* bqe points to the current element in the queue
       next points to the next element in the queue
    */
    //tso = (StgTSO *)bqe; // wastes an assignment to get the type right
    //tso_loc = where_is(tso);
    len++;
    bqe = unblockOneLocked(bqe, node);
  }

  /* if this is the BQ of an RBH, we have to put back the info ripped out of
     the closure to make room for the anchor of the BQ */
  if (bqe!=END_BQ_QUEUE) {
    ASSERT(get_itbl(node)->type == RBH && get_itbl(bqe)->type == CONSTR);

    ASSERT((get_itbl(bqe)==&RBH_Save_0_info) ||
	   (get_itbl(bqe)==&RBH_Save_1_info) ||
	   (get_itbl(bqe)==&RBH_Save_2_info));

    /* cf. convertToRBH in RBH.c for writing the RBHSave closure */
    ((StgRBH *)node)->blocking_queue = (StgBlockingQueueElement *)((StgRBHSave *)bqe)->payload[0];
    ((StgRBH *)node)->mut_link       = (StgMutClosure *)((StgRBHSave *)bqe)->payload[1];

    IF_GRAN_DEBUG(bq,
	belch("## Filled in RBH_Save for %p (%s) at end of AwBQ",
	      node, info_type(node)));
  }

  /* statistics gathering */
  if (RtsFlags.GranFlags.GranSimStats.Global) {
    // globalGranStats.tot_bq_processing_time += bq_processing_time;
    globalGranStats.tot_bq_len += len;  // total length of all bqs awakened
    // globalGranStats.tot_bq_len_local += len_local;  // same for local TSOs only
    globalGranStats.tot_awbq++;         // total no. of bqs awakened
  }
  IF_GRAN_DEBUG(bq,
	fprintf(stderr,"## BQ Stats of %p: [%d entries] %s\n",
		node, len, (bqe!=END_BQ_QUEUE) ? "RBH" : ""));
}
#elif defined(PAR)
void
awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
{
  StgBlockingQueueElement *bqe, *next;

  ACQUIRE_LOCK(&sched_mutex);

  IF_PAR_DEBUG(verbose,
	belch("## AwBQ for node %p on [%x]: ",
	      node, mytid));

  ASSERT(get_itbl(q)->type == TSO ||
	 get_itbl(q)->type == BLOCKED_FETCH ||
	 get_itbl(q)->type == CONSTR);

  bqe = q;
  while (get_itbl(bqe)->type==TSO ||
	 get_itbl(bqe)->type==BLOCKED_FETCH) {
    bqe = unblockOneLocked(bqe, node);
  }
  RELEASE_LOCK(&sched_mutex);
}
#else   /* !GRAN && !PAR */
void
awakenBlockedQueue(StgTSO *tso)
{
  ACQUIRE_LOCK(&sched_mutex);
  while (tso != END_TSO_QUEUE) {
    tso = unblockOneLocked(tso);
  }
  RELEASE_LOCK(&sched_mutex);
}
#endif
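
/* The wake-up pattern above is easier to see stripped of RTS detail: a
   blocked queue is a singly-linked list of threads, and waking it means
   repeatedly detaching the head and appending it to the run queue.  The
   following stand-alone sketch (disabled; ToyTSO, wake_one and wake_all
   are invented toy names standing in for StgTSO, unblockOneLocked and
   awakenBlockedQueue) compiles with any C compiler:
*/
#if 0   /* stand-alone example, not compiled into the RTS */
#include <stdio.h>
#include <stddef.h>

typedef struct ToyTSO_ {
    int id;
    struct ToyTSO_ *link;       /* next thread in whatever queue we are on */
} ToyTSO;

static ToyTSO *run_hd = NULL, *run_tl = NULL;   /* toy run queue */

/* cf. unblockOneLocked: detach one thread, make it runnable, return rest */
static ToyTSO *wake_one(ToyTSO *t)
{
    ToyTSO *next = t->link;     /* remember the rest of the blocked queue */
    t->link = NULL;             /* append t to the run queue */
    if (run_tl != NULL) run_tl->link = t; else run_hd = t;
    run_tl = t;
    return next;
}

/* cf. awakenBlockedQueue: wake every thread on a blocked queue */
static void wake_all(ToyTSO *blocked)
{
    while (blocked != NULL)
        blocked = wake_one(blocked);
}

int main(void)
{
    ToyTSO c = {3, NULL}, b = {2, &c}, a = {1, &b};
    ToyTSO *t;

    wake_all(&a);
    for (t = run_hd; t != NULL; t = t->link)
        printf("runnable: thread %d\n", t->id);
    return 0;
}
#endif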
//@node Exception Handling Routines, Debugging Routines, Blocking Queue Routines, Main scheduling code
//@subsection Exception Handling Routines

/* ---------------------------------------------------------------------------
   Interrupt execution
   - usually called inside a signal handler so it mustn't do anything fancy.
   ------------------------------------------------------------------------ */

void
interruptStgRts(void)
{
    interrupted    = 1;
    context_switch = 1;
}
/* -----------------------------------------------------------------------------
   Unblock a thread

   This is for use when we raise an exception in another thread, which
   may be blocked.

   This has nothing to do with the UnblockThread event in GranSim. -- HWL
   -------------------------------------------------------------------------- */

#if defined(GRAN) || defined(PAR)
/*
  NB: only the type of the blocking queue is different in GranSim and GUM
      the operations on the queue-elements are the same
      long live polymorphism!
*/
static void
unblockThread(StgTSO *tso)
{
  StgBlockingQueueElement *t, **last;

  ACQUIRE_LOCK(&sched_mutex);
  switch (tso->why_blocked) {
  case NotBlocked:
    RELEASE_LOCK(&sched_mutex);
    return;  /* not blocked */

  case BlockedOnMVar:
    ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
    {
      StgBlockingQueueElement *last_tso = END_BQ_QUEUE;
      StgMVar *mvar = (StgMVar *)(tso->block_info.closure);

      last = (StgBlockingQueueElement **)&mvar->head;
      for (t = (StgBlockingQueueElement *)mvar->head;
	   t != END_BQ_QUEUE;
	   last = &t->link, last_tso = t, t = t->link) {
	if (t == (StgBlockingQueueElement *)tso) {
	  *last = (StgBlockingQueueElement *)tso->link;
	  if (mvar->tail == tso) {
	    mvar->tail = (StgTSO *)last_tso;
	  }
	  goto done;
	}
      }
      barf("unblockThread (MVAR): TSO not found");
    }

  case BlockedOnBlackHole:
    ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
    {
      StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);

      last = &bq->blocking_queue;
      for (t = bq->blocking_queue;
	   t != END_BQ_QUEUE;
	   last = &t->link, t = t->link) {
	if (t == (StgBlockingQueueElement *)tso) {
	  *last = (StgBlockingQueueElement *)tso->link;
	  goto done;
	}
      }
      barf("unblockThread (BLACKHOLE): TSO not found");
    }

  case BlockedOnException:
    {
      StgTSO *target  = tso->block_info.tso;

      ASSERT(get_itbl(target)->type == TSO);
      ASSERT(target->blocked_exceptions != NULL);

      last = (StgBlockingQueueElement **)&target->blocked_exceptions;
      for (t = (StgBlockingQueueElement *)target->blocked_exceptions;
	   t != END_BQ_QUEUE;
	   last = &t->link, t = t->link) {
	ASSERT(get_itbl(t)->type == TSO);
	if (t == (StgBlockingQueueElement *)tso) {
	  *last = (StgBlockingQueueElement *)tso->link;
	  goto done;
	}
      }
      barf("unblockThread (Exception): TSO not found");
    }

  case BlockedOnDelay:
  case BlockedOnRead:
  case BlockedOnWrite:
    {
      StgBlockingQueueElement *prev = NULL;
      for (t = (StgBlockingQueueElement *)blocked_queue_hd; t != END_BQ_QUEUE;
	   prev = t, t = t->link) {
	if (t == (StgBlockingQueueElement *)tso) {
	  if (prev == NULL) {
	    blocked_queue_hd = (StgTSO *)t->link;
	    if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
	      blocked_queue_tl = END_TSO_QUEUE;
	    }
	  } else {
	    prev->link = t->link;
	    if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
	      blocked_queue_tl = (StgTSO *)prev;
	    }
	  }
	  goto done;
	}
      }
      barf("unblockThread (I/O): TSO not found");
    }

  default:
    barf("unblockThread");
  }

 done:
  tso->link = END_TSO_QUEUE;
  tso->why_blocked = NotBlocked;
  tso->block_info.closure = NULL;
  PUSH_ON_RUN_QUEUE(tso);
  RELEASE_LOCK(&sched_mutex);
}
#else /* !GRAN && !PAR */
static void
unblockThread(StgTSO *tso)
{
  StgTSO *t, **last;

  ACQUIRE_LOCK(&sched_mutex);
  switch (tso->why_blocked) {
  case NotBlocked:
    RELEASE_LOCK(&sched_mutex);
    return;  /* not blocked */

  case BlockedOnMVar:
    ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
    {
      StgTSO *last_tso = END_TSO_QUEUE;
      StgMVar *mvar = (StgMVar *)(tso->block_info.closure);

      last = &mvar->head;
      for (t = mvar->head; t != END_TSO_QUEUE;
	   last = &t->link, last_tso = t, t = t->link) {
	if (t == tso) {
	  *last = tso->link;
	  if (mvar->tail == tso) {
	    mvar->tail = last_tso;
	  }
	  goto done;
	}
      }
      barf("unblockThread (MVAR): TSO not found");
    }

  case BlockedOnBlackHole:
    ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
    {
      StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);

      last = &bq->blocking_queue;
      for (t = bq->blocking_queue; t != END_TSO_QUEUE;
	   last = &t->link, t = t->link) {
	if (t == tso) {
	  *last = tso->link;
	  goto done;
	}
      }
      barf("unblockThread (BLACKHOLE): TSO not found");
    }

  case BlockedOnException:
    {
      StgTSO *target  = tso->block_info.tso;

      ASSERT(get_itbl(target)->type == TSO);
      ASSERT(target->blocked_exceptions != NULL);

      last = &target->blocked_exceptions;
      for (t = target->blocked_exceptions; t != END_TSO_QUEUE;
	   last = &t->link, t = t->link) {
	ASSERT(get_itbl(t)->type == TSO);
	if (t == tso) {
	  *last = tso->link;
	  goto done;
	}
      }
      barf("unblockThread (Exception): TSO not found");
    }

  case BlockedOnDelay:
  case BlockedOnRead:
  case BlockedOnWrite:
    {
      StgTSO *prev = NULL;
      for (t = blocked_queue_hd; t != END_TSO_QUEUE;
	   prev = t, t = t->link) {
	if (t == tso) {
	  if (prev == NULL) {
	    blocked_queue_hd = t->link;
	    if (blocked_queue_tl == t) {
	      blocked_queue_tl = END_TSO_QUEUE;
	    }
	  } else {
	    prev->link = t->link;
	    if (blocked_queue_tl == t) {
	      blocked_queue_tl = prev;
	    }
	  }
	  goto done;
	}
      }
      barf("unblockThread (I/O): TSO not found");
    }

  default:
    barf("unblockThread");
  }

 done:
  tso->link = END_TSO_QUEUE;
  tso->why_blocked = NotBlocked;
  tso->block_info.closure = NULL;
  PUSH_ON_RUN_QUEUE(tso);
  RELEASE_LOCK(&sched_mutex);
}
#endif
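
/* All the removal cases above use the same idiom: `last` always holds the
   address of the link field that points at the current element, so removing
   the current element is the single store `*last = t->link`, with no special
   case for the queue head.  A stand-alone sketch of just that idiom (Node
   and remove_from are invented toy names, not RTS code):
*/
#if 0   /* stand-alone example, not compiled into the RTS */
#include <assert.h>
#include <stddef.h>

typedef struct Node_ {
    int id;
    struct Node_ *link;
} Node;

/* Remove the node with the given id from the list rooted at *head;
 * returns 1 on success, 0 if not found (the RTS barf()s instead). */
static int remove_from(Node **head, int id)
{
    Node **last = head;         /* address of the link field to patch */
    Node *t;

    for (t = *head; t != NULL; last = &t->link, t = t->link) {
        if (t->id == id) {
            *last = t->link;    /* unchain in a single store */
            t->link = NULL;
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    Node c = {3, NULL}, b = {2, &c}, a = {1, &b};
    Node *q = &a;

    assert(remove_from(&q, 2));          /* works in the middle...   */
    assert(q == &a && a.link == &c);
    assert(remove_from(&q, 1));          /* ...and at the head...    */
    assert(q == &c);
    assert(!remove_from(&q, 42));        /* ...and reports absence   */
    return 0;
}
#endif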
/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked.  The possible blockages are MVARs and BLACKHOLE_BQs.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary.  When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations?  We create an
 * AP_UPD for every UpdateFrame on the stack.  Entering one of these
 * AP_UPDs pushes everything from the corresponding update frame
 * upwards onto the stack.  (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_UPD object.
 * Entering the next AP_UPD object pushes more onto the stack until we
 * reach the last AP_UPD object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.)
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack.  In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * -------------------------------------------------------------------------- */
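
/* The freezing scheme described above is easier to see with concrete toy
   types.  In this stand-alone sketch (Chunk, freeze and replay are invented
   toy names; it models only the bookkeeping, not real closures or update
   semantics), each "update frame" delimits a chunk of stack; freezing
   copies each chunk into a heap record chained to the record below it, and
   replaying the chain rebuilds the stack exactly as it was:
*/
#if 0   /* stand-alone example, not compiled into the RTS */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct Chunk_ {
    size_t n;                   /* number of saved stack words            */
    int *w;                     /* the saved words themselves             */
    struct Chunk_ *next;        /* record for the next chunk down, if any */
} Chunk;

/* Freeze stack[0..depth) into chunks delimited by the (ascending) update-
 * frame offsets in frame[0..nframes); returns the topmost chunk. */
static Chunk *freeze(const int *stack, size_t depth,
                     const size_t *frame, size_t nframes)
{
    Chunk *top = NULL, **tail = &top;
    size_t i, from = 0, to;

    for (i = 0; i <= nframes; i++) {
        Chunk *c = malloc(sizeof *c);
        to = (i < nframes) ? frame[i] : depth;
        c->n = to - from;
        c->w = malloc(c->n * sizeof *c->w);
        memcpy(c->w, stack + from, c->n * sizeof *c->w);
        c->next = NULL;
        *tail = c;              /* chain this chunk below the previous one */
        tail = &c->next;
        from = to;
    }
    return top;
}

/* Replay the chunk chain onto an empty stack; afterwards it matches the
 * original, so execution could continue exactly where it left off. */
static size_t replay(const Chunk *c, int *stack)
{
    size_t sp = 0;
    for (; c != NULL; c = c->next) {
        memcpy(stack + sp, c->w, c->n * sizeof *stack);
        sp += c->n;
    }
    return sp;
}

int main(void)
{
    int stack[8] = {1,2,3,4,5,6,7,8};
    size_t frames[2] = {3, 6};  /* two toy "update frames" */
    int rebuilt[8];
    Chunk *c = freeze(stack, 8, frames, 2);

    printf("%s\n", (replay(c, rebuilt) == 8 &&
                    memcmp(stack, rebuilt, sizeof stack) == 0)
                   ? "stack rebuilt intact" : "mismatch");
    return 0;
}
#endif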
void
deleteThread(StgTSO *tso)
{
  raiseAsync(tso,NULL);
}
void
raiseAsync(StgTSO *tso, StgClosure *exception)
{
  StgUpdateFrame* su = tso->su;
  StgPtr          sp = tso->sp;

  /* Thread already dead? */
  if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
    return;
  }

  IF_DEBUG(scheduler, sched_belch("raising exception in thread %ld.", tso->id));

  /* Remove it from any blocking queues */
  unblockThread(tso);

  /* The stack freezing code assumes there's a closure pointer on
   * the top of the stack.  This isn't always the case with compiled
   * code, so we have to push a dummy closure on the top which just
   * returns to the next return address on the stack.
   */
  if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
    *(--sp) = (W_)&dummy_ret_closure;
  }

  while (1) {
    int words = ((P_)su - (P_)sp) - 1;
    nat i;
    StgAP_UPD * ap;

    /* If we find a CATCH_FRAME, and we've got an exception to raise,
     * then build PAP(handler,exception,realworld#), and leave it on
     * top of the stack ready to enter.
     */
    if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
      StgCatchFrame *cf = (StgCatchFrame *)su;
      /* we've got an exception to raise, so let's pass it to the
       * handler in this frame.
       */
      ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 2);
      TICK_ALLOC_UPD_PAP(3,0);
      SET_HDR(ap,&PAP_info,cf->header.prof.ccs);

      ap->n_args = 2;
      ap->fun = cf->handler;       /* :: Exception -> IO a */
      ap->payload[0] = (P_)exception;
      ap->payload[1] = ARG_TAG(0); /* realworld token */

      /* throw away the stack from Sp up to and including the
       * CATCH_FRAME.
       */
      sp = (P_)su + sizeofW(StgCatchFrame) - 1;
      tso->su = cf->link;

      /* Restore the blocked/unblocked state for asynchronous exceptions
       * at the CATCH_FRAME.
       *
       * If exceptions were unblocked at the catch, arrange that they
       * are unblocked again after executing the handler by pushing an
       * unblockAsyncExceptions_ret stack frame.
       */
      if (!cf->exceptions_blocked) {
	*(sp--) = (W_)&unblockAsyncExceptionszh_ret_info;
      }

      /* Ensure that async exceptions are blocked when running the handler.
       */
      if (tso->blocked_exceptions == NULL) {
	tso->blocked_exceptions = END_TSO_QUEUE;
      }

      /* Put the newly-built PAP on top of the stack, ready to execute
       * when the thread restarts.
       */
      sp[0] = (W_)ap;
      tso->sp = sp;
      tso->what_next = ThreadEnterGHC;
      IF_DEBUG(sanity, checkTSO(tso));
      return;
    }

    /* First build an AP_UPD consisting of the stack chunk above the
     * current update frame, with the top word on the stack as the
     * fun field.
     */
    ap = (StgAP_UPD *)allocate(AP_sizeW(words));
    ASSERT(words >= 0);

    ap->n_args = words;
    ap->fun    = (StgClosure *)sp[0];
    sp++;
    for(i=0; i < (nat)words; ++i) {
      ap->payload[i] = (P_)*sp++;
    }

    switch (get_itbl(su)->type) {

    case UPDATE_FRAME:
      {
	SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
	TICK_ALLOC_UP_THK(words+1,0);

	IF_DEBUG(scheduler,
		 fprintf(stderr, "scheduler: Updating ");
		 printPtr((P_)su->updatee);
		 fprintf(stderr, " with ");
		 printObj((StgClosure *)ap);
		 );

	/* Replace the updatee with an indirection - happily
	 * this will also wake up any threads currently
	 * waiting on the result.
	 */
	UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
	su = su->link;
	sp += sizeofW(StgUpdateFrame) -1;
	sp[0] = (W_)ap; /* push onto stack */
	break;
      }

    case CATCH_FRAME:
      {
	StgCatchFrame *cf = (StgCatchFrame *)su;
	StgClosure* o;

	/* We want a PAP, not an AP_UPD.  Fortunately, the
	 * layout's the same.
	 */
	SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
	TICK_ALLOC_UPD_PAP(words+1,0);

	/* now build o = FUN(catch,ap,handler) */
	o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
	TICK_ALLOC_FUN(2,0);
	SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
	o->payload[0] = (StgClosure *)ap;
	o->payload[1] = cf->handler;

	IF_DEBUG(scheduler,
		 fprintf(stderr, "scheduler: Built ");
		 printObj((StgClosure *)o);
		 );

	/* pop the old handler and put o on the stack */
	su = cf->link;
	sp += sizeofW(StgCatchFrame) - 1;
	sp[0] = (W_)o;
	break;
      }

    case SEQ_FRAME:
      {
	StgSeqFrame *sf = (StgSeqFrame *)su;
	StgClosure* o;

	SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
	TICK_ALLOC_UPD_PAP(words+1,0);

	/* now build o = FUN(seq,ap) */
	o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
	TICK_ALLOC_SE_THK(1,0);
	SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
	o->payload[0] = (StgClosure *)ap;

	IF_DEBUG(scheduler,
		 fprintf(stderr, "scheduler: Built ");
		 printObj((StgClosure *)o);
		 );

	/* pop the old handler and put o on the stack */
	su = sf->link;
	sp += sizeofW(StgSeqFrame) - 1;
	sp[0] = (W_)o;
	break;
      }

    case STOP_FRAME:
      /* We've stripped the entire stack, the thread is now dead. */
      sp += sizeofW(StgStopFrame) - 1;
      sp[0] = (W_)exception; /* save the exception */
      tso->what_next = ThreadKilled;
      tso->su = (StgUpdateFrame *)(sp+1);
      tso->sp = sp;
      return;

    default:
      barf("raiseAsync");
    }
  }
}
/* -----------------------------------------------------------------------------
   resurrectThreads is called after garbage collection on the list of
   threads found to be garbage.  Each of these threads will be woken
   up and sent a signal: BlockedOnDeadMVar if the thread was blocked
   on an MVar, or NonTermination if the thread was blocked on a Black
   Hole.
   -------------------------------------------------------------------------- */
void
resurrectThreads( StgTSO *threads )
{
  StgTSO *tso, *next;

  for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
    next = tso->global_link;
    tso->global_link = all_threads;
    all_threads = tso;
    IF_DEBUG(scheduler, sched_belch("resurrecting thread %d", tso->id));

    switch (tso->why_blocked) {
    case BlockedOnMVar:
    case BlockedOnException:
      raiseAsync(tso,(StgClosure *)BlockedOnDeadMVar_closure);
      break;
    case BlockedOnBlackHole:
      raiseAsync(tso,(StgClosure *)NonTermination_closure);
      break;
    case NotBlocked:
      /* This might happen if the thread was blocked on a black hole
       * belonging to a thread that we've just woken up (raiseAsync
       * can wake up threads, remember...).
       */
      continue;
    default:
      barf("resurrectThreads: thread blocked in a strange way");
    }
  }
}
/* -----------------------------------------------------------------------------
 * Blackhole detection: if we reach a deadlock, test whether any
 * threads are blocked on themselves.  Any threads which are found to
 * be self-blocked get sent a NonTermination exception.
 *
 * This is only done in a deadlock situation in order to avoid
 * performance overhead in the normal case.
 * -------------------------------------------------------------------------- */
static void
detectBlackHoles( void )
{
    StgTSO *t = all_threads;
    StgUpdateFrame *frame;
    StgClosure *blocked_on;

    for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {

	if (t->why_blocked != BlockedOnBlackHole) {
	    continue;
	}

	blocked_on = t->block_info.closure;

	for (frame = t->su; ; frame = frame->link) {
	    switch (get_itbl(frame)->type) {
	    case UPDATE_FRAME:
		if (frame->updatee == blocked_on) {
		    /* We are blocking on one of our own computations, so
		     * send this thread the NonTermination exception.
		     */
		    IF_DEBUG(scheduler,
			     sched_belch("thread %d is blocked on itself", t->id));
		    raiseAsync(t, (StgClosure *)NonTermination_closure);
		    goto done;
		}
		break;
	    case STOP_FRAME:
		goto done;
	    }
	}
    done: ;
    }
}
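
/* The test above amounts to: walk the thread's own chain of update frames
   and see whether any frame promises to update the very closure the thread
   is blocked on.  A stand-alone sketch of that check (Frame, Thread and
   blocked_on_itself are invented toy names, not RTS code):
*/
#if 0   /* stand-alone example, not compiled into the RTS */
#include <stdio.h>

typedef struct Frame_ {
    void *updatee;              /* closure this frame will update */
    struct Frame_ *link;        /* next frame down the stack      */
} Frame;

typedef struct {
    void *blocked_on;           /* closure the thread is blocked on */
    Frame *frames;              /* chain of its update frames       */
} Thread;

/* 1 if the thread is blocked on a closure one of its own frames owns */
static int blocked_on_itself(const Thread *t)
{
    const Frame *f;
    for (f = t->frames; f != NULL; f = f->link)
        if (f->updatee == t->blocked_on)
            return 1;
    return 0;
}

int main(void)
{
    int closureA, closureB;
    Frame f2 = { &closureA, NULL };        /* will update closureA */
    Frame f1 = { &closureB, &f2 };
    Thread self  = { &closureA, &f1 };     /* blocked on its own updatee */
    Thread other = { &closureB, &f2 };     /* blocked on someone else's  */

    printf("self: %d, other: %d\n",
           blocked_on_itself(&self), blocked_on_itself(&other));
    return 0;
}
#endif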
//@node Debugging Routines, Index, Exception Handling Routines, Main scheduling code
//@subsection Debugging Routines

/* -----------------------------------------------------------------------------
   Debugging: why is a thread blocked
   -------------------------------------------------------------------------- */

#ifdef DEBUG
static void
printThreadBlockage(StgTSO *tso)
{
  switch (tso->why_blocked) {
  case BlockedOnRead:
    fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
    break;
  case BlockedOnWrite:
    fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
    break;
  case BlockedOnDelay:
#if defined(HAVE_SETITIMER) || defined(mingw32_TARGET_OS)
    fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
#else
    fprintf(stderr,"blocked on delay of %d ms",
	    tso->block_info.target - getourtimeofday());
#endif
    break;
  case BlockedOnMVar:
    fprintf(stderr,"blocked on an MVar");
    break;
  case BlockedOnException:
    fprintf(stderr,"blocked on delivering an exception to thread %d",
	    tso->block_info.tso->id);
    break;
  case BlockedOnBlackHole:
    fprintf(stderr,"blocked on a black hole");
    break;
  case NotBlocked:
    fprintf(stderr,"not blocked");
    break;
#if defined(PAR)
  case BlockedOnGA:
    fprintf(stderr,"blocked on global address; local FM_BQ is %p (%s)",
	    tso->block_info.closure, info_type(tso->block_info.closure));
    break;
  case BlockedOnGA_NoSend:
    fprintf(stderr,"blocked on global address (no send); local FM_BQ is %p (%s)",
	    tso->block_info.closure, info_type(tso->block_info.closure));
    break;
#endif
  default:
    barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%p)",
	 tso->why_blocked, tso->id, tso);
  }
}
static void
printThreadStatus(StgTSO *tso)
{
  switch (tso->what_next) {
  case ThreadKilled:
    fprintf(stderr,"has been killed");
    break;
  case ThreadComplete:
    fprintf(stderr,"has completed");
    break;
  default:
    printThreadBlockage(tso);
  }
}
void
printAllThreads(void)
{
  StgTSO *t;

  sched_belch("all threads:");
  for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
    fprintf(stderr, "\tthread %d is ", t->id);
    printThreadStatus(t);
    fprintf(stderr,"\n");
  }
}
/*
   Print a whole blocking queue attached to node (debugging only).
*/
# if defined(PAR)
void
print_bq (StgClosure *node)
{
  StgBlockingQueueElement *bqe;
  rtsBool end;

  fprintf(stderr,"## BQ of closure %p (%s): ",
	  node, info_type(node));

  /* should cover all closures that may have a blocking queue */
  ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
	 get_itbl(node)->type == FETCH_ME_BQ ||
	 get_itbl(node)->type == RBH);

  ASSERT(node!=(StgClosure*)NULL); // sanity check
  /*
    NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure;
  */
  for (bqe = ((StgBlockingQueue*)node)->blocking_queue, end = (bqe==END_BQ_QUEUE);
       !end; // iterate until bqe points to a CONSTR
       end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE),
       bqe = end ? END_BQ_QUEUE : bqe->link) {
    ASSERT(bqe != END_BQ_QUEUE);                    // sanity check
    ASSERT(bqe != (StgBlockingQueueElement *)NULL); // sanity check
    /* types of closures that may appear in a blocking queue */
    ASSERT(get_itbl(bqe)->type == TSO ||
	   get_itbl(bqe)->type == BLOCKED_FETCH ||
	   get_itbl(bqe)->type == CONSTR);
    /* only BQs of an RBH end with an RBH_Save closure */
    ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);

    switch (get_itbl(bqe)->type) {
    case TSO:
      fprintf(stderr," TSO %d (%p),",
	      ((StgTSO *)bqe)->id, ((StgTSO *)bqe));
      break;
    case BLOCKED_FETCH:
      fprintf(stderr," BF (node=%p, ga=((%x, %d, %x)),",
	      ((StgBlockedFetch *)bqe)->node,
	      ((StgBlockedFetch *)bqe)->ga.payload.gc.gtid,
	      ((StgBlockedFetch *)bqe)->ga.payload.gc.slot,
	      ((StgBlockedFetch *)bqe)->ga.weight);
      break;
    case CONSTR:
      fprintf(stderr," %s (IP %p),",
	      (get_itbl(bqe) == &RBH_Save_0_info ? "RBH_Save_0" :
	       get_itbl(bqe) == &RBH_Save_1_info ? "RBH_Save_1" :
	       get_itbl(bqe) == &RBH_Save_2_info ? "RBH_Save_2" :
	       "RBH_Save_?"), get_itbl(bqe));
      break;
    default:
      barf("Unexpected closure type %s in blocking queue of %p (%s)",
	   info_type((StgClosure *)bqe), node, info_type(node));
    }
  }
  fputc('\n', stderr);
}
# elif defined(GRAN)
void
print_bq (StgClosure *node)
{
  StgBlockingQueueElement *bqe;
  PEs node_loc, tso_loc;
  rtsBool end;

  /* should cover all closures that may have a blocking queue */
  ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
	 get_itbl(node)->type == FETCH_ME_BQ ||
	 get_itbl(node)->type == RBH);

  ASSERT(node!=(StgClosure*)NULL); // sanity check
  node_loc = where_is(node);

  fprintf(stderr,"## BQ of closure %p (%s) on [PE %d]: ",
	  node, info_type(node), node_loc);

  /*
    NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure;
  */
  for (bqe = ((StgBlockingQueue*)node)->blocking_queue, end = (bqe==END_BQ_QUEUE);
       !end; // iterate until bqe points to a CONSTR
       end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE),
       bqe = end ? END_BQ_QUEUE : bqe->link) {
    ASSERT(bqe != END_BQ_QUEUE);                    // sanity check
    ASSERT(bqe != (StgBlockingQueueElement *)NULL); // sanity check
    /* types of closures that may appear in a blocking queue */
    ASSERT(get_itbl(bqe)->type == TSO ||
	   get_itbl(bqe)->type == CONSTR);
    /* only BQs of an RBH end with an RBH_Save closure */
    ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);

    tso_loc = where_is((StgClosure *)bqe);
    switch (get_itbl(bqe)->type) {
    case TSO:
      fprintf(stderr," TSO %d (%p) on [PE %d],",
	      ((StgTSO *)bqe)->id, (StgTSO *)bqe, tso_loc);
      break;
    case CONSTR:
      fprintf(stderr," %s (IP %p),",
	      (get_itbl(bqe) == &RBH_Save_0_info ? "RBH_Save_0" :
	       get_itbl(bqe) == &RBH_Save_1_info ? "RBH_Save_1" :
	       get_itbl(bqe) == &RBH_Save_2_info ? "RBH_Save_2" :
	       "RBH_Save_?"), get_itbl(bqe));
      break;
    default:
      barf("Unexpected closure type %s in blocking queue of %p (%s)",
	   info_type((StgClosure *)bqe), node, info_type(node));
    }
  }
  fputc('\n', stderr);
}
# else  /* !GRAN && !PAR */
/*
   Nice and easy: only TSOs on the blocking queue
*/
void
print_bq (StgClosure *node)
{
  StgTSO *tso;

  ASSERT(node!=(StgClosure*)NULL); // sanity check
  for (tso = ((StgBlockingQueue*)node)->blocking_queue;
       tso != END_TSO_QUEUE;
       tso = tso->link) {
    ASSERT(tso!=NULL && tso!=END_TSO_QUEUE); // sanity check
    ASSERT(get_itbl(tso)->type == TSO);      // guess what, sanity check
    fprintf(stderr," TSO %d (%p),", tso->id, tso);
  }
  fputc('\n', stderr);
}
# endif
static nat
run_queue_len(void)
{
  nat i;
  StgTSO *tso;

  for (i=0, tso=run_queue_hd;
       tso != END_TSO_QUEUE;
       i++, tso=tso->link)
    /* nothing */;

  return i;
}
void
sched_belch(char *s, ...)
{
  va_list ap;
  va_start(ap,s);
#ifdef SMP
  fprintf(stderr, "scheduler (task %ld): ", pthread_self());
#else
  fprintf(stderr, "scheduler: ");
#endif
  vfprintf(stderr, s, ap);
  fprintf(stderr, "\n");
  va_end(ap);
}

#endif /* DEBUG */
//@node Index,  , Debugging Routines, Main scheduling code
//@subsection Index

//@index
//* MainRegTable::  @cindex\s-+MainRegTable
//* StgMainThread::  @cindex\s-+StgMainThread
//* awaken_blocked_queue::  @cindex\s-+awaken_blocked_queue
//* blocked_queue_hd::  @cindex\s-+blocked_queue_hd
//* blocked_queue_tl::  @cindex\s-+blocked_queue_tl
//* context_switch::  @cindex\s-+context_switch
//* createThread::  @cindex\s-+createThread
//* free_capabilities::  @cindex\s-+free_capabilities
//* gc_pending_cond::  @cindex\s-+gc_pending_cond
//* initScheduler::  @cindex\s-+initScheduler
//* interrupted::  @cindex\s-+interrupted
//* n_free_capabilities::  @cindex\s-+n_free_capabilities
//* next_thread_id::  @cindex\s-+next_thread_id
//* print_bq::  @cindex\s-+print_bq
//* run_queue_hd::  @cindex\s-+run_queue_hd
//* run_queue_tl::  @cindex\s-+run_queue_tl
//* sched_mutex::  @cindex\s-+sched_mutex
//* schedule::  @cindex\s-+schedule
//* take_off_run_queue::  @cindex\s-+take_off_run_queue
//* task_ids::  @cindex\s-+task_ids
//* term_mutex::  @cindex\s-+term_mutex
//* thread_ready_cond::  @cindex\s-+thread_ready_cond
//@end index