1 /* ---------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.70 2000/05/08 15:57:01 simonmar Exp $
4 * (c) The GHC Team, 1998-2000
8 * The main scheduling code in GranSim is quite different from that in std
9 * (concurrent) Haskell: while concurrent Haskell just iterates over the
10 * threads in the runnable queue, GranSim is event driven, i.e. it iterates
11 * over the events in the global event queue. -- HWL
12 * --------------------------------------------------------------------------*/
14 //@node Main scheduling code, , ,
15 //@section Main scheduling code
17 /* Version with scheduler monitor support for SMPs.
19 This design provides a high-level API to create and schedule threads etc.
20 as documented in the SMP design document.
22 It uses a monitor design controlled by a single mutex to exercise control
23 over accesses to shared data structures, and builds on the Posix threads
26 The majority of state is shared. In order to keep essential per-task state,
27 there is a Capability structure, which contains all the information
28 needed to run a thread: its STG registers, a pointer to its TSO, a
29 nursery etc. During STG execution, a pointer to the capability is
30 kept in a register (BaseReg).
32 In a non-SMP build, there is one global capability, namely MainRegTable.
39 //* Variables and Data structures::
40 //* Main scheduling loop::
41 //* Suspend and Resume::
43 //* Garbage Collection Routines::
44 //* Blocking Queue Routines::
45 //* Exception Handling Routines::
46 //* Debugging Routines::
50 //@node Includes, Variables and Data structures, Main scheduling code, Main scheduling code
51 //@subsection Includes
59 #include "StgStartup.h"
63 #include "StgMiscClosures.h"
65 #include "Evaluator.h"
66 #include "Exception.h"
74 #if defined(GRAN) || defined(PAR)
75 # include "GranSimRts.h"
77 # include "ParallelRts.h"
78 # include "Parallel.h"
79 # include "ParallelDebug.h"
87 //@node Variables and Data structures, Prototypes, Includes, Main scheduling code
88 //@subsection Variables and Data structures
92 * These are the threads which clients have requested that we run.
94 * In an SMP build, we might have several concurrent clients all
95 * waiting for results, and each one will wait on a condition variable
96 * until the result is available.
98 * In non-SMP, clients are strictly nested: the first client calls
99 * into the RTS, which might call out again to C with a _ccall_GC, and
100 * eventually re-enter the RTS.
102 * Information about main threads is kept in a linked list:
104 //@cindex StgMainThread
105 typedef struct StgMainThread_ {
107 SchedulerStatus stat;
110 pthread_cond_t wakeup;
112 struct StgMainThread_ *link;
115 /* Main thread queue.
116 * Locks required: sched_mutex.
118 static StgMainThread *main_threads;
121 * Locks required: sched_mutex.
125 StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */
126 /* rtsTime TimeOfNextEvent, EndOfTimeSlice; now in GranSim.c */
129    In GranSim we have a runnable and a blocked queue for each processor.
130 In order to minimise code changes new arrays run_queue_hds/tls
131 are created. run_queue_hd is then a short cut (macro) for
132 run_queue_hds[CurrentProc] (see GranSim.h).
135 StgTSO *run_queue_hds[MAX_PROC], *run_queue_tls[MAX_PROC];
136 StgTSO *blocked_queue_hds[MAX_PROC], *blocked_queue_tls[MAX_PROC];
137 StgTSO *ccalling_threadss[MAX_PROC];
138 /* We use the same global list of threads (all_threads) in GranSim as in
139 the std RTS (i.e. we are cheating). However, we don't use this list in
140 the GranSim specific code at the moment (so we are only potentially
145 StgTSO *run_queue_hd, *run_queue_tl;
146 StgTSO *blocked_queue_hd, *blocked_queue_tl;
150 /* Linked list of all threads.
151 * Used for detecting garbage collected threads.
155 /* Threads suspended in _ccall_GC.
157 static StgTSO *suspended_ccalling_threads;
159 static void GetRoots(void);
160 static StgTSO *threadStackOverflow(StgTSO *tso);
162 /* KH: The following two flags are shared memory locations. There is no need
163 to lock them, since they are only unset at the end of a scheduler
167 /* flag set by signal handler to precipitate a context switch */
168 //@cindex context_switch
171 /* if this flag is set as well, give up execution */
172 //@cindex interrupted
175 /* Next thread ID to allocate.
176 * Locks required: sched_mutex
178 //@cindex next_thread_id
179 StgThreadID next_thread_id = 1;
182 * Pointers to the state of the current thread.
183 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
184 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
187 /* The smallest stack size that makes any sense is:
188 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
189 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
190 * + 1 (the realworld token for an IO thread)
191 * + 1 (the closure to enter)
193 * A thread with this stack will bomb immediately with a stack
194 * overflow, which will increase its stack size.
197 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
199 /* Free capability list.
200 * Locks required: sched_mutex.
203 //@cindex free_capabilities
204 //@cindex n_free_capabilities
205 Capability *free_capabilities; /* Available capabilities for running threads */
206 nat n_free_capabilities; /* total number of available capabilities */
208 //@cindex MainRegTable
209 Capability MainRegTable; /* for non-SMP, we have one global capability */
218 /* All our current task ids, saved in case we need to kill them later.
225 void addToBlockedQueue ( StgTSO *tso );
227 static void schedule ( void );
228 void interruptStgRts ( void );
230 static StgTSO * createThread_ ( nat size, rtsBool have_lock, StgInt pri );
232 static StgTSO * createThread_ ( nat size, rtsBool have_lock );
235 static void detectBlackHoles ( void );
238 static void sched_belch(char *s, ...);
242 //@cindex sched_mutex
244 //@cindex thread_ready_cond
245 //@cindex gc_pending_cond
246 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
247 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
248 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
249 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
256 rtsTime TimeOfLastYield;
260 char *whatNext_strs[] = {
268 char *threadReturnCode_strs[] = {
269 "HeapOverflow", /* might also be StackOverflow */
278 * The thread state for the main thread.
279 // ToDo: check whether this is still needed
283 //@node Main scheduling loop, Suspend and Resume, Prototypes, Main scheduling code
284 //@subsection Main scheduling loop
286 /* ---------------------------------------------------------------------------
287 Main scheduling loop.
289 We use round-robin scheduling, each thread returning to the
290 scheduler loop when one of these conditions is detected:
293 * timer expires (thread yields)
298 Locking notes: we acquire the scheduler lock once at the beginning
299 of the scheduler loop, and release it when
301 * running a thread, or
302 * waiting for work, or
303 * waiting for a GC to complete.
306    In a GranSim setup this loop iterates over the global event queue,
307    which determines what to do next.  Therefore, it's more complicated
308    than either the concurrent or the parallel (GUM) setup.
312 GUM iterates over incoming messages.
313 It starts with nothing to do (thus CurrentTSO == END_TSO_QUEUE),
314 and sends out a fish whenever it has nothing to do; in-between
315 doing the actual reductions (shared code below) it processes the
316 incoming messages and deals with delayed operations
317 (see PendingFetches).
318 This is not the ugliest code you could imagine, but it's bloody close.
320 ------------------------------------------------------------------------ */
327 StgThreadReturnCode ret;
336 rtsBool was_interrupted = rtsFalse;
338 ACQUIRE_LOCK(&sched_mutex);
342 /* set up first event to get things going */
343 /* ToDo: assign costs for system setup and init MainTSO ! */
344 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
346 CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
349 fprintf(stderr, "GRAN: Init CurrentTSO (in schedule) = %p\n", CurrentTSO);
350 G_TSO(CurrentTSO, 5));
352 if (RtsFlags.GranFlags.Light) {
353 /* Save current time; GranSim Light only */
354 CurrentTSO->gran.clock = CurrentTime[CurrentProc];
357 event = get_next_event();
359 while (event!=(rtsEvent*)NULL) {
360 /* Choose the processor with the next event */
361 CurrentProc = event->proc;
362 CurrentTSO = event->tso;
366 while (!GlobalStopPending) { /* GlobalStopPending set in par_exit */
374 IF_DEBUG(scheduler, printAllThreads());
376 /* If we're interrupted (the user pressed ^C, or some other
377 * termination condition occurred), kill all the currently running
381 IF_DEBUG(scheduler, sched_belch("interrupted"));
382 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
385 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
388 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
389 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
390 interrupted = rtsFalse;
391 was_interrupted = rtsTrue;
394 /* Go through the list of main threads and wake up any
395 * clients whose computations have finished. ToDo: this
396 * should be done more efficiently without a linear scan
397 * of the main threads list, somehow...
401 StgMainThread *m, **prev;
402 prev = &main_threads;
403 for (m = main_threads; m != NULL; m = m->link) {
404 switch (m->tso->what_next) {
407 *(m->ret) = (StgClosure *)m->tso->sp[0];
411 pthread_cond_broadcast(&m->wakeup);
415 if (was_interrupted) {
416 m->stat = Interrupted;
420 pthread_cond_broadcast(&m->wakeup);
430 /* in GUM do this only on the Main PE */
433 /* If our main thread has finished or been killed, return.
436 StgMainThread *m = main_threads;
437 if (m->tso->what_next == ThreadComplete
438 || m->tso->what_next == ThreadKilled) {
439 main_threads = main_threads->link;
440 if (m->tso->what_next == ThreadComplete) {
441 /* we finished successfully, fill in the return value */
442 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
446 if (was_interrupted) {
447 m->stat = Interrupted;
457 /* Top up the run queue from our spark pool. We try to make the
458 * number of threads in the run queue equal to the number of
463 nat n = n_free_capabilities;
464 StgTSO *tso = run_queue_hd;
466 /* Count the run queue */
467 while (n > 0 && tso != END_TSO_QUEUE) {
476 break; /* no more sparks in the pool */
478 /* I'd prefer this to be done in activateSpark -- HWL */
479 /* tricky - it needs to hold the scheduler lock and
480 * not try to re-acquire it -- SDM */
482 tso = createThread_(RtsFlags.GcFlags.initialStkSize, rtsTrue);
483 pushClosure(tso,spark);
484 PUSH_ON_RUN_QUEUE(tso);
486 advisory_thread_count++;
490 sched_belch("turning spark of closure %p into a thread",
491 (StgClosure *)spark));
494 /* We need to wake up the other tasks if we just created some
497 if (n_free_capabilities - n > 1) {
498 pthread_cond_signal(&thread_ready_cond);
503 /* Check whether any waiting threads need to be woken up. If the
504 * run queue is empty, and there are no other tasks running, we
505 * can wait indefinitely for something to happen.
506 * ToDo: what if another client comes along & requests another
509 if (blocked_queue_hd != END_TSO_QUEUE) {
511 (run_queue_hd == END_TSO_QUEUE)
513 && (n_free_capabilities == RtsFlags.ParFlags.nNodes)
518 /* check for signals each time around the scheduler */
520 if (signals_pending()) {
521 start_signal_handlers();
526 * Detect deadlock: when we have no threads to run, there are no
527 * threads waiting on I/O or sleeping, and all the other tasks are
528 * waiting for work, we must have a deadlock of some description.
530 * We first try to find threads blocked on themselves (ie. black
531 * holes), and generate NonTermination exceptions where necessary.
533 * If no threads are black holed, we have a deadlock situation, so
534 * inform all the main threads.
537 if (blocked_queue_hd == END_TSO_QUEUE
538 && run_queue_hd == END_TSO_QUEUE
539 && (n_free_capabilities == RtsFlags.ParFlags.nNodes))
541 IF_DEBUG(scheduler, sched_belch("deadlocked, checking for black holes..."));
543 if (run_queue_hd == END_TSO_QUEUE) {
545 for (m = main_threads; m != NULL; m = m->link) {
548 pthread_cond_broadcast(&m->wakeup);
554 if (blocked_queue_hd == END_TSO_QUEUE
555 && run_queue_hd == END_TSO_QUEUE)
557 IF_DEBUG(scheduler, sched_belch("deadlocked, checking for black holes..."));
559 if (run_queue_hd == END_TSO_QUEUE) {
560 StgMainThread *m = main_threads;
563 main_threads = m->link;
570 /* If there's a GC pending, don't do anything until it has
574 IF_DEBUG(scheduler,sched_belch("waiting for GC"));
575 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
578 /* block until we've got a thread on the run queue and a free
581 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
582 IF_DEBUG(scheduler, sched_belch("waiting for work"));
583 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
584 IF_DEBUG(scheduler, sched_belch("work now available"));
590 if (RtsFlags.GranFlags.Light)
591 GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc
593 /* adjust time based on time-stamp */
594 if (event->time > CurrentTime[CurrentProc] &&
595 event->evttype != ContinueThread)
596 CurrentTime[CurrentProc] = event->time;
598 /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */
599 if (!RtsFlags.GranFlags.Light)
602 IF_DEBUG(gran, fprintf(stderr, "GRAN: switch by event-type\n"))
604 /* main event dispatcher in GranSim */
605 switch (event->evttype) {
606 /* Should just be continuing execution */
608 IF_DEBUG(gran, fprintf(stderr, "GRAN: doing ContinueThread\n"));
609 /* ToDo: check assertion
610 ASSERT(run_queue_hd != (StgTSO*)NULL &&
611 run_queue_hd != END_TSO_QUEUE);
613 /* Ignore ContinueThreads for fetching threads (if synchr comm) */
614 if (!RtsFlags.GranFlags.DoAsyncFetch &&
615 procStatus[CurrentProc]==Fetching) {
616 belch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]",
617 CurrentTSO->id, CurrentTSO, CurrentProc);
620 /* Ignore ContinueThreads for completed threads */
621 if (CurrentTSO->what_next == ThreadComplete) {
622 belch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)",
623 CurrentTSO->id, CurrentTSO, CurrentProc);
626 /* Ignore ContinueThreads for threads that are being migrated */
627 if (PROCS(CurrentTSO)==Nowhere) {
628 belch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)",
629 CurrentTSO->id, CurrentTSO, CurrentProc);
632 /* The thread should be at the beginning of the run queue */
633 if (CurrentTSO!=run_queue_hds[CurrentProc]) {
634 belch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread",
635 CurrentTSO->id, CurrentTSO, CurrentProc);
636 break; // run the thread anyway
639 new_event(proc, proc, CurrentTime[proc],
641 (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
643 */ /* Catches superfluous CONTINUEs -- should be unnecessary */
644 break; // now actually run the thread; DaH Qu'vam yImuHbej
647 do_the_fetchnode(event);
648 goto next_thread; /* handle next event in event queue */
651 do_the_globalblock(event);
652 goto next_thread; /* handle next event in event queue */
655 do_the_fetchreply(event);
656 goto next_thread; /* handle next event in event queue */
658 case UnblockThread: /* Move from the blocked queue to the tail of the run queue */
659 do_the_unblock(event);
660 goto next_thread; /* handle next event in event queue */
662 case ResumeThread: /* Move from the blocked queue to the tail of */
663 /* the runnable queue ( i.e. Qu' SImqa'lu') */
664 event->tso->gran.blocktime +=
665 CurrentTime[CurrentProc] - event->tso->gran.blockedat;
666 do_the_startthread(event);
667 goto next_thread; /* handle next event in event queue */
670 do_the_startthread(event);
671 goto next_thread; /* handle next event in event queue */
674 do_the_movethread(event);
675 goto next_thread; /* handle next event in event queue */
678 do_the_movespark(event);
679 goto next_thread; /* handle next event in event queue */
682 do_the_findwork(event);
683 goto next_thread; /* handle next event in event queue */
686 barf("Illegal event type %u\n", event->evttype);
689 /* This point was scheduler_loop in the old RTS */
691 IF_DEBUG(gran, belch("GRAN: after main switch"));
693 TimeOfLastEvent = CurrentTime[CurrentProc];
694 TimeOfNextEvent = get_time_of_next_event();
695 IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK
696 // CurrentTSO = ThreadQueueHd;
698 IF_DEBUG(gran, belch("GRAN: time of next event is: %ld",
701 if (RtsFlags.GranFlags.Light)
702 GranSimLight_leave_system(event, &ActiveTSO);
704 EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice;
707 belch("GRAN: end of time-slice is %#lx", EndOfTimeSlice));
709 /* in a GranSim setup the TSO stays on the run queue */
711 /* Take a thread from the run queue. */
712 t = POP_RUN_QUEUE(); // take_off_run_queue(t);
715 fprintf(stderr, "GRAN: About to run current thread, which is\n");
718 context_switch = 0; // turned on via GranYield, checking events and time slice
721 DumpGranEvent(GR_SCHEDULE, t));
723 procStatus[CurrentProc] = Busy;
727 if (PendingFetches != END_BF_QUEUE) {
731 /* ToDo: perhaps merge with spark activation above */
732 /* check whether we have local work and send requests if we have none */
733 if (run_queue_hd == END_TSO_QUEUE) { /* no runnable threads */
734 /* :-[ no local threads => look out for local sparks */
735 /* the spark pool for the current PE */
736 pool = &(MainRegTable.rSparks); // generalise to cap = &MainRegTable
737 if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
738 pool->hd < pool->tl) {
740 * ToDo: add GC code check that we really have enough heap afterwards!!
742 * If we're here (no runnable threads) and we have pending
743 * sparks, we must have a space problem. Get enough space
744 * to turn one of those pending sparks into a
748 spark = findSpark(); /* get a spark */
749 if (spark != (rtsSpark) NULL) {
750 tso = activateSpark(spark); /* turn the spark into a thread */
751 IF_PAR_DEBUG(schedule,
752 belch("==== schedule: Created TSO %d (%p); %d threads active",
753 tso->id, tso, advisory_thread_count));
755 if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
756 belch("==^^ failed to activate spark");
758 } /* otherwise fall through & pick-up new tso */
760 IF_PAR_DEBUG(verbose,
761 belch("==^^ no local sparks (spark pool contains only NFs: %d)",
762 spark_queue_len(pool)));
766 /* =8-[ no local sparks => look for work on other PEs */
769 * We really have absolutely no work. Send out a fish
770 * (there may be some out there already), and wait for
771 * something to arrive. We clearly can't run any threads
772 * until a SCHEDULE or RESUME arrives, and so that's what
773 * we're hoping to see. (Of course, we still have to
774 * respond to other types of messages.)
777 outstandingFishes < RtsFlags.ParFlags.maxFishes ) { // &&
778 // (last_fish_arrived_at+FISH_DELAY < CURRENT_TIME)) {
779 /* fishing set in sendFish, processFish;
780 avoid flooding system with fishes via delay */
782 sendFish(pe, mytid, NEW_FISH_AGE, NEW_FISH_HISTORY,
790 } else if (PacketsWaiting()) { /* Look for incoming messages */
794 /* Now we are sure that we have some work available */
795 ASSERT(run_queue_hd != END_TSO_QUEUE);
796 /* Take a thread from the run queue, if we have work */
797 t = POP_RUN_QUEUE(); // take_off_run_queue(END_TSO_QUEUE);
799 /* ToDo: write something to the log-file
800 if (RTSflags.ParFlags.granSimStats && !sameThread)
801 DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
805 /* the spark pool for the current PE */
806 pool = &(MainRegTable.rSparks); // generalise to cap = &MainRegTable
808 IF_DEBUG(scheduler, belch("--^^ %d sparks on [%#x] (hd=%x; tl=%x; base=%x, lim=%x)",
809 spark_queue_len(pool),
811 pool->hd, pool->tl, pool->base, pool->lim));
813 IF_DEBUG(scheduler, belch("--== %d threads on [%#x] (hd=%x; tl=%x)",
814 run_queue_len(), CURRENT_PROC,
815 run_queue_hd, run_queue_tl));
820 we are running a different TSO, so write a schedule event to log file
821 NB: If we use fair scheduling we also have to write a deschedule
822 event for LastTSO; with unfair scheduling we know that the
823 previous tso has blocked whenever we switch to another tso, so
824 we don't need it in GUM for now
826 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
827 GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
831 #else /* !GRAN && !PAR */
833 /* grab a thread from the run queue
835 ASSERT(run_queue_hd != END_TSO_QUEUE);
837 IF_DEBUG(sanity,checkTSO(t));
844 cap = free_capabilities;
845 free_capabilities = cap->link;
846 n_free_capabilities--;
851 cap->rCurrentTSO = t;
853 /* set the context_switch flag
855 if (run_queue_hd == END_TSO_QUEUE)
860 RELEASE_LOCK(&sched_mutex);
862 IF_DEBUG(scheduler, sched_belch("-->> Running TSO %ld (%p) %s ...",
863 t->id, t, whatNext_strs[t->what_next]));
865 /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
866 /* Run the current thread
868 switch (cap->rCurrentTSO->what_next) {
871 /* Thread already finished, return to scheduler. */
872 ret = ThreadFinished;
875 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
878 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
880 case ThreadEnterHugs:
884 IF_DEBUG(scheduler,sched_belch("entering Hugs"));
885 c = (StgClosure *)(cap->rCurrentTSO->sp[0]);
886 cap->rCurrentTSO->sp += 1;
891 barf("Panic: entered a BCO but no bytecode interpreter in this build");
894 barf("schedule: invalid what_next field");
896 /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
898 /* Costs for the scheduler are assigned to CCS_SYSTEM */
903 ACQUIRE_LOCK(&sched_mutex);
906 IF_DEBUG(scheduler,fprintf(stderr,"scheduler (task %ld): ", pthread_self()););
907 #elif !defined(GRAN) && !defined(PAR)
908 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: "););
910 t = cap->rCurrentTSO;
913 /* HACK 675: if the last thread didn't yield, make sure to print a
914 SCHEDULE event to the log file when StgRunning the next thread, even
915 if it is the same one as before */
916 LastTSO = t; //(ret == ThreadBlocked) ? END_TSO_QUEUE : t;
917 TimeOfLastYield = CURRENT_TIME;
922 /* make all the running tasks block on a condition variable,
923 * maybe set context_switch and wait till they all pile in,
924 * then have them wait on a GC condition variable.
926 IF_DEBUG(scheduler,belch("--<< thread %ld (%p; %s) stopped: HeapOverflow",
927 t->id, t, whatNext_strs[t->what_next]));
930 ASSERT(!is_on_queue(t,CurrentProc));
933 ready_to_gc = rtsTrue;
934 context_switch = 1; /* stop other threads ASAP */
935 PUSH_ON_RUN_QUEUE(t);
936 /* actual GC is done at the end of the while loop */
940 IF_DEBUG(scheduler,belch("--<< thread %ld (%p; %s) stopped, StackOverflow",
941 t->id, t, whatNext_strs[t->what_next]));
942 /* just adjust the stack for this thread, then pop it back
948 /* enlarge the stack */
949 StgTSO *new_t = threadStackOverflow(t);
951 /* This TSO has moved, so update any pointers to it from the
952 * main thread stack. It better not be on any other queues...
955 for (m = main_threads; m != NULL; m = m->link) {
961 PUSH_ON_RUN_QUEUE(new_t);
968 DumpGranEvent(GR_DESCHEDULE, t));
969 globalGranStats.tot_yields++;
972 DumpGranEvent(GR_DESCHEDULE, t));
974 /* put the thread back on the run queue. Then, if we're ready to
975 * GC, check whether this is the last task to stop. If so, wake
976 * up the GC thread. getThread will block during a GC until the
980 if (t->what_next == ThreadEnterHugs) {
981 /* ToDo: or maybe a timer expired when we were in Hugs?
982 * or maybe someone hit ctrl-C
984 belch("--<< thread %ld (%p; %s) stopped to switch to Hugs",
985 t->id, t, whatNext_strs[t->what_next]);
987 belch("--<< thread %ld (%p; %s) stopped, yielding",
988 t->id, t, whatNext_strs[t->what_next]);
995 //belch("&& Doing sanity check on yielding TSO %ld.", t->id);
997 ASSERT(t->link == END_TSO_QUEUE);
999 ASSERT(!is_on_queue(t,CurrentProc));
1002 //belch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
1003 checkThreadQsSanity(rtsTrue));
1005 APPEND_TO_RUN_QUEUE(t);
1007 /* add a ContinueThread event to actually process the thread */
1008 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
1010 t, (StgClosure*)NULL, (rtsSpark*)NULL);
1012 belch("GRAN: eventq and runnableq after adding yielded thread to queue again:");
1021 belch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
1022 t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
1023 if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
1025 // ??? needed; should emit block before
1027 DumpGranEvent(GR_DESCHEDULE, t));
1028 prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
1031 ASSERT(procStatus[CurrentProc]==Busy ||
1032 ((procStatus[CurrentProc]==Fetching) &&
1033 (t->block_info.closure!=(StgClosure*)NULL)));
1034 if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
1035 !(!RtsFlags.GranFlags.DoAsyncFetch &&
1036 procStatus[CurrentProc]==Fetching))
1037 procStatus[CurrentProc] = Idle;
1041 DumpGranEvent(GR_DESCHEDULE, t));
1043 /* Send a fetch (if BlockedOnGA) and dump event to log file */
1047 belch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: ",
1048 t->id, t, whatNext_strs[t->what_next], t->block_info.closure);
1049 if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
1052 /* don't need to do anything. Either the thread is blocked on
1053 * I/O, in which case we'll have called addToBlockedQueue
1054 * previously, or it's blocked on an MVar or Blackhole, in which
1055 * case it'll be on the relevant queue already.
1058 fprintf(stderr, "--<< thread %d (%p) stopped: ", t->id, t);
1059 printThreadBlockage(t);
1060 fprintf(stderr, "\n"));
1062 /* Only for dumping event to log file
1063 ToDo: do I need this in GranSim, too?
1070 case ThreadFinished:
1071 /* Need to check whether this was a main thread, and if so, signal
1072 * the task that started it with the return value. If we have no
1073 * more main threads, we probably need to stop all the tasks until
1076 /* We also end up here if the thread kills itself with an
1077 * uncaught exception, see Exception.hc.
1079 IF_DEBUG(scheduler,belch("--++ thread %d (%p) finished", t->id, t));
1081 endThread(t, CurrentProc); // clean-up the thread
1083 advisory_thread_count--;
1084 if (RtsFlags.ParFlags.ParStats.Full)
1085 DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);
1090 barf("schedule: invalid thread return code %d", (int)ret);
1094 cap->link = free_capabilities;
1095 free_capabilities = cap;
1096 n_free_capabilities++;
1100 if (ready_to_gc && n_free_capabilities == RtsFlags.ParFlags.nNodes)
1105 /* everybody back, start the GC.
1106 * Could do it in this thread, or signal a condition var
1107 * to do it in another thread. Either way, we need to
1108 * broadcast on gc_pending_cond afterward.
1111 IF_DEBUG(scheduler,sched_belch("doing GC"));
1113 GarbageCollect(GetRoots,rtsFalse);
1114 ready_to_gc = rtsFalse;
1116 pthread_cond_broadcast(&gc_pending_cond);
1119 /* add a ContinueThread event to continue execution of current thread */
1120 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
1122 t, (StgClosure*)NULL, (rtsSpark*)NULL);
1124 fprintf(stderr, "GRAN: eventq and runnableq after Garbage collection:\n");
1131 IF_GRAN_DEBUG(unused,
1132 print_eventq(EventHd));
1134 event = get_next_event();
1138 /* ToDo: wait for next message to arrive rather than busy wait */
1143 t = take_off_run_queue(END_TSO_QUEUE);
1146 } /* end of while(1) */
1149 /* A hack for Hugs concurrency support. Needs sanitisation (?) */
1150 void deleteAllThreads ( void )
1153 IF_DEBUG(scheduler,sched_belch("deleteAllThreads()"));
1154 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
1157 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
1160 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
1161 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
1164 /* startThread and insertThread are now in GranSim.c -- HWL */
1166 //@node Suspend and Resume, Run queue code, Main scheduling loop, Main scheduling code
1167 //@subsection Suspend and Resume
1169 /* ---------------------------------------------------------------------------
1170 * Suspending & resuming Haskell threads.
1172 * When making a "safe" call to C (aka _ccall_GC), the task gives back
1173 * its capability before calling the C function. This allows another
1174 * task to pick up the capability and carry on running Haskell
1175 * threads. It also means that if the C call blocks, it won't lock
1178 * The Haskell thread making the C call is put to sleep for the
1179 * duration of the call, on the suspended_ccalling_threads queue. We
1180 * give out a token to the task, which it can use to resume the thread
1181 * on return from the C function.
1182 * ------------------------------------------------------------------------- */
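/* Illustrative sketch only (not part of the RTS): how a "safe" foreign
 * call (_ccall_GC) might bracket the actual C call with suspendThread()
 * and resumeThread().  We assume, as the definitions below suggest, that
 * suspendThread returns the token and resumeThread returns a Capability;
 * the wrapper name and the use of getchar() as the blocking C call are
 * made up for illustration.
 */
#if 0 /* example only */
static int
example_safe_ccall( Capability *cap )
{
  StgInt tok;
  int    c;

  tok = suspendThread(cap);  /* give back the capability, get a token     */
  c   = getchar();           /* the C call may block; other tasks can run
                                Haskell threads in the meantime           */
  cap = resumeThread(tok);   /* wait for a capability, reattach our TSO   */
  return c;
}
#endif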
1185 suspendThread( Capability *cap )
1189 ACQUIRE_LOCK(&sched_mutex);
1192 sched_belch("thread %d did a _ccall_gc\n", cap->rCurrentTSO->id));
1194 threadPaused(cap->rCurrentTSO);
1195 cap->rCurrentTSO->link = suspended_ccalling_threads;
1196 suspended_ccalling_threads = cap->rCurrentTSO;
1198 /* Use the thread ID as the token; it should be unique */
1199 tok = cap->rCurrentTSO->id;
1202 cap->link = free_capabilities;
1203 free_capabilities = cap;
1204 n_free_capabilities++;
1207 RELEASE_LOCK(&sched_mutex);
1212 resumeThread( StgInt tok )
1214 StgTSO *tso, **prev;
1217 ACQUIRE_LOCK(&sched_mutex);
1219 prev = &suspended_ccalling_threads;
1220 for (tso = suspended_ccalling_threads;
1221 tso != END_TSO_QUEUE;
1222 prev = &tso->link, tso = tso->link) {
1223 if (tso->id == (StgThreadID)tok) {
1228 if (tso == END_TSO_QUEUE) {
1229 barf("resumeThread: thread not found");
1233 while (free_capabilities == NULL) {
1234 IF_DEBUG(scheduler, sched_belch("waiting to resume"));
1235 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
1236 IF_DEBUG(scheduler, sched_belch("resuming thread %d", tso->id));
1238 cap = free_capabilities;
1239 free_capabilities = cap->link;
1240 n_free_capabilities--;
1242 cap = &MainRegTable;
1245 cap->rCurrentTSO = tso;
1247 RELEASE_LOCK(&sched_mutex);
1252 /* ---------------------------------------------------------------------------
1254 * ------------------------------------------------------------------------ */
1255 static void unblockThread(StgTSO *tso);
1257 /* ---------------------------------------------------------------------------
1258 * Comparing Thread ids.
1260 * This is used from STG land in the implementation of the
1261 * instances of Eq/Ord for ThreadIds.
1262 * ------------------------------------------------------------------------ */
1264 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
1266 StgThreadID id1 = tso1->id;
1267 StgThreadID id2 = tso2->id;
1269 if (id1 < id2) return (-1);
1270 if (id1 > id2) return 1;
1274 /* ---------------------------------------------------------------------------
1275 Create a new thread.
1277 The new thread starts with the given stack size. Before the
1278 scheduler can run, however, this thread needs to have a closure
1279 (and possibly some arguments) pushed on its stack. See
1280 pushClosure() in Schedule.h.
1282 createGenThread() and createIOThread() (in SchedAPI.h) are
1283 convenient packaged versions of this function.
1285 currently pri (priority) is only used in a GRAN setup -- HWL
1286 ------------------------------------------------------------------------ */
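/* A minimal sketch (not the actual SchedAPI.h code) of how a packaged
 * wrapper in the style of createGenThread might combine createThread and
 * pushClosure.  The wrapper name and the default priority of 0 are
 * assumptions; the caller still has to hand the new TSO to
 * scheduleThread (see below).
 */
#if 0 /* example only */
static StgTSO *
example_createGenThread( nat stack_size, StgClosure *closure )
{
  StgTSO *t;
#if defined(GRAN)
  t = createThread(stack_size, 0 /* assumed default priority */);
#else
  t = createThread(stack_size);
#endif
  pushClosure(t, closure);   /* the closure the new thread will enter */
  return t;
}
#endif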
1287 //@cindex createThread
1289 /* currently pri (priority) is only used in a GRAN setup -- HWL */
1291 createThread(nat stack_size, StgInt pri)
1293 return createThread_(stack_size, rtsFalse, pri);
1297 createThread_(nat size, rtsBool have_lock, StgInt pri)
1301 createThread(nat stack_size)
1303 return createThread_(stack_size, rtsFalse);
1307 createThread_(nat size, rtsBool have_lock)
1314 /* First check whether we should create a thread at all */
1316 /* check that no more than RtsFlags.ParFlags.maxThreads threads are created */
1317 if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads) {
1319 belch("{createThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)",
1320 RtsFlags.ParFlags.maxThreads, advisory_thread_count);
1321 return END_TSO_QUEUE;
1327 ASSERT(!RtsFlags.GranFlags.Light || CurrentProc==0);
1330 // ToDo: check whether size = stack_size - TSO_STRUCT_SIZEW
1332 /* catch ridiculously small stack sizes */
1333 if (size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
1334 size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
1337 stack_size = size - TSO_STRUCT_SIZEW;
1339 tso = (StgTSO *)allocate(size);
1340 TICK_ALLOC_TSO(size-TSO_STRUCT_SIZEW, 0);
1342 SET_HDR(tso, &TSO_info, CCS_SYSTEM);
1344 SET_GRAN_HDR(tso, ThisPE);
1346 tso->what_next = ThreadEnterGHC;
1348 /* tso->id needs to be unique. For now we use a heavyweight mutex to
1349 * protect the increment operation on next_thread_id.
1350 * In future, we could use an atomic increment instead.
1352 if (!have_lock) { ACQUIRE_LOCK(&sched_mutex); }
1353 tso->id = next_thread_id++;
1354 if (!have_lock) { RELEASE_LOCK(&sched_mutex); }
1356 tso->why_blocked = NotBlocked;
1357 tso->blocked_exceptions = NULL;
1359 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
1360 tso->stack_size = stack_size;
1361 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
1363 tso->sp = (P_)&(tso->stack) + stack_size;
1366 tso->prof.CCCS = CCS_MAIN;
1369 /* put a stop frame on the stack */
1370 tso->sp -= sizeofW(StgStopFrame);
1371 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
1372 tso->su = (StgUpdateFrame*)tso->sp;
1376 tso->link = END_TSO_QUEUE;
1377 /* uses more flexible routine in GranSim */
1378 insertThread(tso, CurrentProc);
1380 /* In a non-GranSim setup the pushing of a TSO onto the runq is separated
1385 #if defined(GRAN) || defined(PAR)
1386 DumpGranEvent(GR_START,tso);
1389 /* Link the new thread on the global thread list.
1391 tso->global_link = all_threads;
1395 tso->gran.pri = pri;
1397 tso->gran.magic = TSO_MAGIC; // debugging only
1399 tso->gran.sparkname = 0;
1400 tso->gran.startedat = CURRENT_TIME;
1401 tso->gran.exported = 0;
1402 tso->gran.basicblocks = 0;
1403 tso->gran.allocs = 0;
1404 tso->gran.exectime = 0;
1405 tso->gran.fetchtime = 0;
1406 tso->gran.fetchcount = 0;
1407 tso->gran.blocktime = 0;
1408 tso->gran.blockcount = 0;
1409 tso->gran.blockedat = 0;
1410 tso->gran.globalsparks = 0;
1411 tso->gran.localsparks = 0;
1412 if (RtsFlags.GranFlags.Light)
1413 tso->gran.clock = Now; /* local clock */
1415 tso->gran.clock = 0;
1417 IF_DEBUG(gran,printTSO(tso));
1420 tso->par.magic = TSO_MAGIC; // debugging only
1422 tso->par.sparkname = 0;
1423 tso->par.startedat = CURRENT_TIME;
1424 tso->par.exported = 0;
1425 tso->par.basicblocks = 0;
1426 tso->par.allocs = 0;
1427 tso->par.exectime = 0;
1428 tso->par.fetchtime = 0;
1429 tso->par.fetchcount = 0;
1430 tso->par.blocktime = 0;
1431 tso->par.blockcount = 0;
1432 tso->par.blockedat = 0;
1433 tso->par.globalsparks = 0;
1434 tso->par.localsparks = 0;
1438 globalGranStats.tot_threads_created++;
1439 globalGranStats.threads_created_on_PE[CurrentProc]++;
1440 globalGranStats.tot_sq_len += spark_queue_len(CurrentProc);
1441 globalGranStats.tot_sq_probes++;
1446 belch("==__ schedule: [PE %d] Created TSO %d (%p);",
1447 CurrentProc, tso->id, tso));
1449 IF_PAR_DEBUG(verbose,
1450 belch("==__ schedule: Created TSO %d (%p); %d threads active",
1451 tso->id, tso, advisory_thread_count));
1453 IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words",
1454 tso->id, tso->stack_size));
1460 Turn a spark into a thread.
1461 ToDo: fix for SMP (needs to acquire SCHED_MUTEX!)
1464 //@cindex activateSpark
1466 activateSpark (rtsSpark spark)
1470 ASSERT(spark != (rtsSpark)NULL);
1471 tso = createThread_(RtsFlags.GcFlags.initialStkSize, rtsTrue);
1472 if (tso!=END_TSO_QUEUE) {
1473 pushClosure(tso,spark);
1474 PUSH_ON_RUN_QUEUE(tso);
1475 advisory_thread_count++;
1477 if (RtsFlags.ParFlags.ParStats.Full) {
1478 //ASSERT(run_queue_hd == END_TSO_QUEUE); // I think ...
1479 IF_PAR_DEBUG(verbose,
1480 belch("==^^ activateSpark: turning spark of closure %p (%s) into a thread",
1481 (StgClosure *)spark, info_type((StgClosure *)spark)));
1484 barf("activateSpark: Cannot create TSO");
1486 // ToDo: fwd info on local/global spark to thread -- HWL
1487 // tso->gran.exported = spark->exported;
1488 // tso->gran.locked = !spark->global;
1489 // tso->gran.sparkname = spark->name;
1495 /* ---------------------------------------------------------------------------
1498 * scheduleThread puts a thread on the head of the runnable queue.
1499 * This will usually be done immediately after a thread is created.
1500 * The caller of scheduleThread must create the thread using e.g.
1501 * createThread and push an appropriate closure
1502 * on this thread's stack before the scheduler is invoked.
1503 * ------------------------------------------------------------------------ */
1506 scheduleThread(StgTSO *tso)
1508 if (tso==END_TSO_QUEUE){
1513 ACQUIRE_LOCK(&sched_mutex);
1515 /* Put the new thread on the head of the runnable queue. The caller
1516 * better push an appropriate closure on this thread's stack
1517 * beforehand. In the SMP case, the thread may start running as
1518 * soon as we release the scheduler lock below.
1520 PUSH_ON_RUN_QUEUE(tso);
1524 IF_DEBUG(scheduler,printTSO(tso));
1526 RELEASE_LOCK(&sched_mutex);
1529 /* ---------------------------------------------------------------------------
1532 * Start up Posix threads to run each of the scheduler tasks.
1533 * I believe the task ids are not needed in the system as defined.
1535 * ------------------------------------------------------------------------ */
1537 #if defined(PAR) || defined(SMP)
1539 taskStart( void *arg STG_UNUSED )
1541 rts_evalNothing(NULL);
1545 /* ---------------------------------------------------------------------------
1548 * Initialise the scheduler. This resets all the queues - if the
1549 * queues contained any threads, they'll be garbage collected at the
1552 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
1553 * ------------------------------------------------------------------------ */
1557 term_handler(int sig STG_UNUSED)
1560 ACQUIRE_LOCK(&term_mutex);
1562 RELEASE_LOCK(&term_mutex);
1567 //@cindex initScheduler
1574 for (i=0; i<=MAX_PROC; i++) {
1575 run_queue_hds[i] = END_TSO_QUEUE;
1576 run_queue_tls[i] = END_TSO_QUEUE;
1577 blocked_queue_hds[i] = END_TSO_QUEUE;
1578 blocked_queue_tls[i] = END_TSO_QUEUE;
1579 ccalling_threadss[i] = END_TSO_QUEUE;
1582 run_queue_hd = END_TSO_QUEUE;
1583 run_queue_tl = END_TSO_QUEUE;
1584 blocked_queue_hd = END_TSO_QUEUE;
1585 blocked_queue_tl = END_TSO_QUEUE;
1588 suspended_ccalling_threads = END_TSO_QUEUE;
1590 main_threads = NULL;
1591 all_threads = END_TSO_QUEUE;
1597 ecafList = END_ECAF_LIST;
1601 /* Install the SIGTERM handler */
1604 struct sigaction action,oact;
1606 action.sa_handler = term_handler;
1607 sigemptyset(&action.sa_mask);
1608 action.sa_flags = 0;
1609 if (sigaction(SIGTERM, &action, &oact) != 0) {
1610 barf("can't install TERM handler");
1616 /* Allocate N Capabilities */
1619 Capability *cap, *prev;
1622 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1623 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
1627 free_capabilities = cap;
1628 n_free_capabilities = RtsFlags.ParFlags.nNodes;
1630 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Allocated %d capabilities\n",
1631 n_free_capabilities););
1634 #if defined(SMP) || defined(PAR)
1647 /* make some space for saving all the thread ids */
1648 task_ids = stgMallocBytes(RtsFlags.ParFlags.nNodes * sizeof(task_info),
1649 "initScheduler:task_ids");
1651 /* and create all the threads */
1652 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1653 r = pthread_create(&tid,NULL,taskStart,NULL);
1655 barf("startTasks: Can't create new Posix thread");
1657 task_ids[i].id = tid;
1658 task_ids[i].mut_time = 0.0;
1659 task_ids[i].mut_etime = 0.0;
1660 task_ids[i].gc_time = 0.0;
1661 task_ids[i].gc_etime = 0.0;
1662 task_ids[i].elapsedtimestart = elapsedtime();
1663 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Started task: %ld\n",tid););
1669 exitScheduler( void )
1674 /* Don't want to use pthread_cancel, since we'd have to install
1675 * these silly exception handlers (pthread_cleanup_{push,pop}) around
1679 /* Cancel all our tasks */
1680 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1681 pthread_cancel(task_ids[i].id);
1684 /* Wait for all the tasks to terminate */
1685 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1686 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: waiting for task %ld\n",
1688 pthread_join(task_ids[i].id, NULL);
1692 /* Send 'em all a SIGTERM. That should shut 'em up.
1694 await_death = RtsFlags.ParFlags.nNodes;
1695 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1696 pthread_kill(task_ids[i].id,SIGTERM);
1698 while (await_death > 0) {
1704 /* -----------------------------------------------------------------------------
1705 Managing the per-task allocation areas.
1707 Each capability comes with an allocation area. These are
1708 fixed-length block lists into which allocation can be done.
1710 ToDo: no support for two-space collection at the moment???
1711 -------------------------------------------------------------------------- */
1713 /* -----------------------------------------------------------------------------
1714 * waitThread is the external interface for running a new computation
1715 * and waiting for the result.
1717 * In the non-SMP case, we create a new main thread, push it on the
1718 * main-thread stack, and invoke the scheduler to run it. The
1719 * scheduler will return when the top main thread on the stack has
1720 * completed or died, and fill in the necessary fields of the
1721 * main_thread structure.
1723 * In the SMP case, we create a main thread as before, but we then
1724 * create a new condition variable and sleep on it. When our new
1725 * main thread has completed, we'll be woken up and the status/result
1726 * will be in the main_thread struct.
1727 * -------------------------------------------------------------------------- */
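/* Illustrative sketch only: the typical client-side sequence for running
 * a computation to completion with waitThread.  The wrapper name is made
 * up, "closure" stands for whatever the caller wants evaluated, and the
 * non-GRAN variant of createThread is assumed.
 */
#if 0 /* example only */
static SchedulerStatus
example_runAndWait( StgClosure *closure, /*out*/ StgClosure **result )
{
  StgTSO *tso;

  tso = createThread(RtsFlags.GcFlags.initialStkSize);
  pushClosure(tso, closure);       /* what the new thread will evaluate */
  scheduleThread(tso);             /* put it on the run queue           */
  return waitThread(tso, result);  /* block until it completes or dies  */
}
#endif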
1730 howManyThreadsAvail ( void )
1734 for (q = run_queue_hd; q != END_TSO_QUEUE; q = q->link)
1736 for (q = blocked_queue_hd; q != END_TSO_QUEUE; q = q->link)
1742 finishAllThreads ( void )
1745 while (run_queue_hd != END_TSO_QUEUE) {
1746 waitThread ( run_queue_hd, NULL );
1748 while (blocked_queue_hd != END_TSO_QUEUE) {
1749 waitThread ( blocked_queue_hd, NULL );
1752 (blocked_queue_hd != END_TSO_QUEUE ||
1753 run_queue_hd != END_TSO_QUEUE);
1757 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
1760 SchedulerStatus stat;
1762 ACQUIRE_LOCK(&sched_mutex);
1764 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
1770 pthread_cond_init(&m->wakeup, NULL);
1773 m->link = main_threads;
1776 IF_DEBUG(scheduler, fprintf(stderr, "scheduler: new main thread (%d)\n",
1781 pthread_cond_wait(&m->wakeup, &sched_mutex);
1782 } while (m->stat == NoStatus);
1784 /* GranSim specific init */
1785 CurrentTSO = m->tso; // the TSO to run
1786 procStatus[MainProc] = Busy; // status of main PE
1787 CurrentProc = MainProc; // PE to run it on
1792 ASSERT(m->stat != NoStatus);
1798 pthread_cond_destroy(&m->wakeup);
1801 IF_DEBUG(scheduler, fprintf(stderr, "scheduler: main thread (%d) finished\n",
1805 RELEASE_LOCK(&sched_mutex);
1810 //@node Run queue code, Garbage Collection Routines, Suspend and Resume, Main scheduling code
1811 //@subsection Run queue code
1815 NB: In GranSim we have many run queues; run_queue_hd is actually a macro
1816 unfolding to run_queue_hds[CurrentProc], thus CurrentProc is an
1817 implicit global variable that has to be correct when calling these
1821 /* Put the new thread on the head of the runnable queue.
1822 * The caller of createThread better push an appropriate closure
1823 * on this thread's stack before the scheduler is invoked.
1825 static /* inline */ void
1826 add_to_run_queue(tso)
1829 ASSERT(tso!=run_queue_hd && tso!=run_queue_tl);
1830 tso->link = run_queue_hd;
1832 if (run_queue_tl == END_TSO_QUEUE) {
1837 /* Put the new thread at the end of the runnable queue. */
1838 static /* inline */ void
1839 push_on_run_queue(tso)
1842 ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
1843 ASSERT(run_queue_hd!=NULL && run_queue_tl!=NULL);
1844 ASSERT(tso!=run_queue_hd && tso!=run_queue_tl);
1845 if (run_queue_hd == END_TSO_QUEUE) {
1848 run_queue_tl->link = tso;
1854 Should be inlined because it's used very often in schedule. The tso
1855 argument is actually only needed in GranSim, where we want to have the
1856 possibility to schedule *any* TSO on the run queue, irrespective of the
1857 actual ordering. Therefore, if tso is not the nil TSO then we traverse
1858 the run queue and dequeue the tso, adjusting the links in the queue.
1860 //@cindex take_off_run_queue
1861 static /* inline */ StgTSO*
1862 take_off_run_queue(StgTSO *tso) {
1866 qetlaHbogh Qu' ngaSbogh ghomDaQ {tso} yIteq!
1868 if tso is specified, unlink that tso from the run_queue (doesn't have
1869 to be at the beginning of the queue); GranSim only
1871 if (tso!=END_TSO_QUEUE) {
1872 /* find tso in queue */
1873 for (t=run_queue_hd, prev=END_TSO_QUEUE;
1874 t!=END_TSO_QUEUE && t!=tso;
1878 /* now actually dequeue the tso */
1879 if (prev!=END_TSO_QUEUE) {
1880 ASSERT(run_queue_hd!=t);
1881 prev->link = t->link;
1883 /* t is at beginning of thread queue */
1884 ASSERT(run_queue_hd==t);
1885 run_queue_hd = t->link;
1887 /* t is at end of thread queue */
1888 if (t->link==END_TSO_QUEUE) {
1889 ASSERT(t==run_queue_tl);
1890 run_queue_tl = prev;
1892 ASSERT(run_queue_tl!=t);
1894 t->link = END_TSO_QUEUE;
1896 /* take tso from the beginning of the queue; std concurrent code */
1898 if (t != END_TSO_QUEUE) {
1899 run_queue_hd = t->link;
1900 t->link = END_TSO_QUEUE;
1901 if (run_queue_hd == END_TSO_QUEUE) {
1902 run_queue_tl = END_TSO_QUEUE;
1911 //@node Garbage Collection Routines, Blocking Queue Routines, Run queue code, Main scheduling code
1912 //@subsection Garbage Collection Routines
1914 /* ---------------------------------------------------------------------------
1915 Where are the roots that we know about?
1917 - all the threads on the runnable queue
1918 - all the threads on the blocked queue
1919 - all the threads currently executing a _ccall_GC
1920 - all the "main threads"
1922 ------------------------------------------------------------------------ */
1924 /* This has to be protected either by the scheduler monitor, or by the
1925 garbage collection monitor (probably the latter).
1929 static void GetRoots(void)
1936 for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
1937 if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL)))
1938 run_queue_hds[i] = (StgTSO *)MarkRoot((StgClosure *)run_queue_hds[i]);
1939 if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL)))
1940 run_queue_tls[i] = (StgTSO *)MarkRoot((StgClosure *)run_queue_tls[i]);
1942 if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL)))
1943 blocked_queue_hds[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hds[i]);
1944 if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL)))
1945 blocked_queue_tls[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tls[i]);
1946 if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL)))
1947 ccalling_threadss[i] = (StgTSO *)MarkRoot((StgClosure *)ccalling_threadss[i]);
1954 if (run_queue_hd != END_TSO_QUEUE) {
1955 ASSERT(run_queue_tl != END_TSO_QUEUE);
1956 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
1957 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
1960 if (blocked_queue_hd != END_TSO_QUEUE) {
1961 ASSERT(blocked_queue_tl != END_TSO_QUEUE);
1962 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
1963 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
1967 for (m = main_threads; m != NULL; m = m->link) {
1968 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
1970 if (suspended_ccalling_threads != END_TSO_QUEUE)
1971 suspended_ccalling_threads =
1972 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
1974 #if defined(SMP) || defined(PAR) || defined(GRAN)
1979 /* -----------------------------------------------------------------------------
1982 This is the interface to the garbage collector from Haskell land.
1983 We provide this so that external C code can allocate and garbage
1984 collect when called from Haskell via _ccall_GC.
1986 It might be useful to provide an interface whereby the programmer
1987 can specify more roots (ToDo).
1989 This needs to be protected by the GC condition variable above. KH.
1990 -------------------------------------------------------------------------- */
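/* Illustrative sketch only: a user-supplied roots function for
 * performGCWithRoots().  "my_global_closure" is a hypothetical closure
 * held from C that must survive the collection; MarkRoot is assumed to
 * return the (possibly moved) closure, as in GetRoots() above.
 */
#if 0 /* example only */
static StgClosure *my_global_closure = NULL;

static void
my_extra_roots( void )
{
  if (my_global_closure != NULL) {
    my_global_closure = MarkRoot(my_global_closure);
  }
}

/* ... and then, from foreign C code entered via _ccall_GC: */
/*     performGCWithRoots(my_extra_roots);                  */
#endif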
1992 void (*extra_roots)(void);
1997 GarbageCollect(GetRoots,rtsFalse);
2001 performMajorGC(void)
2003 GarbageCollect(GetRoots,rtsTrue);
2009 GetRoots(); /* the scheduler's roots */
2010 extra_roots(); /* the user's roots */
2014 performGCWithRoots(void (*get_roots)(void))
2016 extra_roots = get_roots;
2018 GarbageCollect(AllRoots,rtsFalse);
2021 /* -----------------------------------------------------------------------------
2024 If the thread has reached its maximum stack size, then raise the
2025 StackOverflow exception in the offending thread. Otherwise
2026 relocate the TSO into a larger chunk of memory and adjust its stack
2028 -------------------------------------------------------------------------- */
2031 threadStackOverflow(StgTSO *tso)
2033 nat new_stack_size, new_tso_size, diff, stack_words;
2037 IF_DEBUG(sanity,checkTSO(tso));
2038 if (tso->stack_size >= tso->max_stack_size) {
2041 belch("@@ threadStackOverflow of TSO %d (%p): stack too large (now %ld; max is %ld)",
2042 tso->id, tso, tso->stack_size, tso->max_stack_size);
2043 /* If we're debugging, just print out the top of the stack */
2044 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2048 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
2051 /* Send this thread the StackOverflow exception */
2052 raiseAsync(tso, (StgClosure *)stackOverflow_closure);
2057 /* Try to double the current stack size. If that takes us over the
2058 * maximum stack size for this thread, then use the maximum instead.
2059 * Finally round up so the TSO ends up as a whole number of blocks.
2061 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
2062 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
2063 TSO_STRUCT_SIZE)/sizeof(W_);
2064 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
2065 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
2067 IF_DEBUG(scheduler, fprintf(stderr,"scheduler: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
2069 dest = (StgTSO *)allocate(new_tso_size);
2070 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
2072 /* copy the TSO block and the old stack into the new area */
2073 memcpy(dest,tso,TSO_STRUCT_SIZE);
2074 stack_words = tso->stack + tso->stack_size - tso->sp;
2075 new_sp = (P_)dest + new_tso_size - stack_words;
2076 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
2078 /* relocate the stack pointers... */
2079 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
2080 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
2082 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
2083 dest->stack_size = new_stack_size;
2085 /* and relocate the update frame list */
2086 relocate_TSO(tso, dest);
2088 /* Mark the old TSO as relocated. We have to check for relocated
2089 * TSOs in the garbage collector and any primops that deal with TSOs.
2091 * It's important to set the sp and su values to just beyond the end
2092 * of the stack, so we don't attempt to scavenge any part of the
2095 tso->what_next = ThreadRelocated;
2097 tso->sp = (P_)&(tso->stack[tso->stack_size]);
2098 tso->su = (StgUpdateFrame *)tso->sp;
2099 tso->why_blocked = NotBlocked;
2100 dest->mut_link = NULL;
2102 IF_PAR_DEBUG(verbose,
2103 belch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld",
2104 tso->id, tso, tso->stack_size);
2105 /* If we're debugging, just print out the top of the stack */
2106 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2109 IF_DEBUG(sanity,checkTSO(tso));
2111 IF_DEBUG(scheduler,printTSO(dest));
2117 //@node Blocking Queue Routines, Exception Handling Routines, Garbage Collection Routines, Main scheduling code
2118 //@subsection Blocking Queue Routines
2120 /* ---------------------------------------------------------------------------
2121 Wake up a queue that was blocked on some resource.
2122 ------------------------------------------------------------------------ */
2124 /* ToDo: check push_on_run_queue vs. PUSH_ON_RUN_QUEUE */
2128 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
2133 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
2135 /* write RESUME events to log file and
2136 update blocked and fetch time (depending on type of the orig closure) */
2137 if (RtsFlags.ParFlags.ParStats.Full) {
2138 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
2139 GR_RESUME, ((StgTSO *)bqe), ((StgTSO *)bqe)->block_info.closure,
2140 0, 0 /* spark_queue_len(ADVISORY_POOL) */);
2142 switch (get_itbl(node)->type) {
2144 ((StgTSO *)bqe)->par.fetchtime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
2149 ((StgTSO *)bqe)->par.blocktime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
2152 barf("{unblockOneLocked}Daq Qagh: unexpected closure in blocking queue");
2159 static StgBlockingQueueElement *
2160 unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
2163 PEs node_loc, tso_loc;
2165 node_loc = where_is(node); // should be lifted out of loop
2166 tso = (StgTSO *)bqe; // wastes an assignment to get the type right
2167 tso_loc = where_is((StgClosure *)tso);
2168 if (IS_LOCAL_TO(PROCS(node),tso_loc)) { // TSO is local
2169 /* !fake_fetch => "TSO is on CurrentProc" is the same as IS_LOCAL_TO */
2170 ASSERT(CurrentProc!=node_loc || tso_loc==CurrentProc);
2171 CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.lunblocktime;
2172 // insertThread(tso, node_loc);
2173 new_event(tso_loc, tso_loc, CurrentTime[CurrentProc],
2175 tso, node, (rtsSpark*)NULL);
2176 tso->link = END_TSO_QUEUE; // overwrite link just to be sure
2179 } else { // TSO is remote (actually should be FMBQ)
2180 CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.mpacktime +
2181 RtsFlags.GranFlags.Costs.gunblocktime +
2182 RtsFlags.GranFlags.Costs.latency;
2183 new_event(tso_loc, CurrentProc, CurrentTime[CurrentProc],
2185 tso, node, (rtsSpark*)NULL);
2186 tso->link = END_TSO_QUEUE; // overwrite link just to be sure
2189 /* the thread-queue-overhead is accounted for in either Resume or UnblockThread */
2191 fprintf(stderr," %s TSO %d (%p) [PE %d] (block_info.closure=%p) (next=%p) ,",
2192 (node_loc==tso_loc ? "Local" : "Global"),
2193 tso->id, tso, CurrentProc, tso->block_info.closure, tso->link));
2194 tso->block_info.closure = NULL;
2195 IF_DEBUG(scheduler,belch("-- Waking up thread %ld (%p)",
2199 static StgBlockingQueueElement *
2200 unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
2202 StgBlockingQueueElement *next;
2204 switch (get_itbl(bqe)->type) {
2206 ASSERT(((StgTSO *)bqe)->why_blocked != NotBlocked);
2207 /* if it's a TSO just push it onto the run_queue */
2209 // ((StgTSO *)bqe)->link = END_TSO_QUEUE; // debugging?
2210 PUSH_ON_RUN_QUEUE((StgTSO *)bqe);
2212 unblockCount(bqe, node);
2213 /* reset blocking status after dumping event */
2214 ((StgTSO *)bqe)->why_blocked = NotBlocked;
2218 /* if it's a BLOCKED_FETCH put it on the PendingFetches list */
2220 bqe->link = PendingFetches;
2221 PendingFetches = bqe;
2225 /* can ignore this case in a non-debugging setup;
2226 see comments on RBHSave closures above */
2228 /* check that the closure is an RBHSave closure */
2229 ASSERT(get_itbl((StgClosure *)bqe) == &RBH_Save_0_info ||
2230 get_itbl((StgClosure *)bqe) == &RBH_Save_1_info ||
2231 get_itbl((StgClosure *)bqe) == &RBH_Save_2_info);
2235 barf("{unblockOneLocked}Daq Qagh: Unexpected IP (%#lx; %s) in blocking queue at %#lx\n",
2236 get_itbl((StgClosure *)bqe), info_type((StgClosure *)bqe),
2240 // IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
2244 #else /* !GRAN && !PAR */
2246 unblockOneLocked(StgTSO *tso)
2250 ASSERT(get_itbl(tso)->type == TSO);
2251 ASSERT(tso->why_blocked != NotBlocked);
2252 tso->why_blocked = NotBlocked;
2254 PUSH_ON_RUN_QUEUE(tso);
2256 IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
2261 #if defined(GRAN) || defined(PAR)
2262 inline StgBlockingQueueElement *
2263 unblockOne(StgBlockingQueueElement *bqe, StgClosure *node)
2265 ACQUIRE_LOCK(&sched_mutex);
2266 bqe = unblockOneLocked(bqe, node);
2267 RELEASE_LOCK(&sched_mutex);
2272 unblockOne(StgTSO *tso)
2274 ACQUIRE_LOCK(&sched_mutex);
2275 tso = unblockOneLocked(tso);
2276 RELEASE_LOCK(&sched_mutex);
2283 awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
2285 StgBlockingQueueElement *bqe;
2290 belch("## AwBQ for node %p on PE %d @ %ld by TSO %d (%p): ", \
2291 node, CurrentProc, CurrentTime[CurrentProc],
2292 CurrentTSO->id, CurrentTSO));
2294 node_loc = where_is(node);
2296 ASSERT(get_itbl(q)->type == TSO || // q is either a TSO or an RBHSave
2297 get_itbl(q)->type == CONSTR); // closure (type constructor)
2298 ASSERT(is_unique(node));
2300 /* FAKE FETCH: magically copy the node to the tso's proc;
2301 no Fetch necessary because in reality the node should not have been
2302 moved to the other PE in the first place
2304 if (CurrentProc!=node_loc) {
2306 belch("## node %p is on PE %d but CurrentProc is %d (TSO %d); assuming fake fetch and adjusting bitmask (old: %#x)",
2307 node, node_loc, CurrentProc, CurrentTSO->id,
2308 // CurrentTSO, where_is(CurrentTSO),
2309 node->header.gran.procs));
2310 node->header.gran.procs = (node->header.gran.procs) | PE_NUMBER(CurrentProc);
2312 belch("## new bitmask of node %p is %#x",
2313 node, node->header.gran.procs));
2314 if (RtsFlags.GranFlags.GranSimStats.Global) {
2315 globalGranStats.tot_fake_fetches++;
2320 // ToDo: check: ASSERT(CurrentProc==node_loc);
2321 while (get_itbl(bqe)->type==TSO) { // q != END_TSO_QUEUE) {
2324 bqe points to the current element in the queue
2325 next points to the next element in the queue
2327 //tso = (StgTSO *)bqe; // wastes an assignment to get the type right
2328 //tso_loc = where_is(tso);
2330 bqe = unblockOneLocked(bqe, node);
2333 /* if this is the BQ of an RBH, we have to put back the info ripped out of
2334 the closure to make room for the anchor of the BQ */
2335 if (bqe!=END_BQ_QUEUE) {
2336 ASSERT(get_itbl(node)->type == RBH && get_itbl(bqe)->type == CONSTR);
2338 ASSERT((info_ptr==&RBH_Save_0_info) ||
2339 (info_ptr==&RBH_Save_1_info) ||
2340 (info_ptr==&RBH_Save_2_info));
2342 /* cf. convertToRBH in RBH.c for writing the RBHSave closure */
2343 ((StgRBH *)node)->blocking_queue = (StgBlockingQueueElement *)((StgRBHSave *)bqe)->payload[0];
2344 ((StgRBH *)node)->mut_link = (StgMutClosure *)((StgRBHSave *)bqe)->payload[1];
2347 belch("## Filled in RBH_Save for %p (%s) at end of AwBQ",
2348 node, info_type(node)));
2351 /* statistics gathering */
2352 if (RtsFlags.GranFlags.GranSimStats.Global) {
2353 // globalGranStats.tot_bq_processing_time += bq_processing_time;
2354 globalGranStats.tot_bq_len += len; // total length of all bqs awakened
2355 // globalGranStats.tot_bq_len_local += len_local; // same for local TSOs only
2356 globalGranStats.tot_awbq++; // total no. of bqs awakened
2359 fprintf(stderr,"## BQ Stats of %p: [%d entries] %s\n",
2360 node, len, (bqe!=END_BQ_QUEUE) ? "RBH" : ""));
2364 awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
2366 StgBlockingQueueElement *bqe, *next;
2368 ACQUIRE_LOCK(&sched_mutex);
2370 IF_PAR_DEBUG(verbose,
2371 belch("## AwBQ for node %p on [%x]: ",
2374 ASSERT(get_itbl(q)->type == TSO ||
2375 get_itbl(q)->type == BLOCKED_FETCH ||
2376 get_itbl(q)->type == CONSTR);
2379 while (get_itbl(bqe)->type==TSO ||
2380 get_itbl(bqe)->type==BLOCKED_FETCH) {
2381 bqe = unblockOneLocked(bqe, node);
2383 RELEASE_LOCK(&sched_mutex);
2386 #else /* !GRAN && !PAR */
2388 awakenBlockedQueue(StgTSO *tso)
2390 ACQUIRE_LOCK(&sched_mutex);
2391 while (tso != END_TSO_QUEUE) {
2392 tso = unblockOneLocked(tso);
2394 RELEASE_LOCK(&sched_mutex);
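/* Hypothetical, simplified call site for the variant above: when an update
 * overwrites a closure of type BLACKHOLE_BQ, every thread queued on it can
 * run again.  `old' is an assumed local name for the closure being updated.
 */
#if 0
if (get_itbl(old)->type == BLACKHOLE_BQ) {
    awakenBlockedQueue(((StgBlockingQueue *)old)->blocking_queue);
}
#endif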
2398 //@node Exception Handling Routines, Debugging Routines, Blocking Queue Routines, Main scheduling code
2399 //@subsection Exception Handling Routines
2401 /* ---------------------------------------------------------------------------
2403 - usually called inside a signal handler so it mustn't do anything fancy.
2404 ------------------------------------------------------------------------ */
2407 interruptStgRts(void)
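/* A minimal sketch of what "nothing fancy" amounts to, assuming the usual
 * flag-based approach: only set flags that the scheduler already polls.
 * Both names below (interrupted, context_switch) appear elsewhere in this
 * file; this is an illustration, not the definitive body.
 */
#if 0
    interrupted    = 1;   /* checked by schedule() */
    context_switch = 1;   /* make the running thread stop at its next heap check */
#endif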
2413 /* -----------------------------------------------------------------------------
2416    This is for use when we raise an exception in another thread, which
        may be blocked.
2418 This has nothing to do with the UnblockThread event in GranSim. -- HWL
2419 -------------------------------------------------------------------------- */
2421 #if defined(GRAN) || defined(PAR)
2423 NB: only the type of the blocking queue is different in GranSim and GUM
2424 the operations on the queue-elements are the same
2425 long live polymorphism!
2428 unblockThread(StgTSO *tso)
2430 StgBlockingQueueElement *t, **last;
2432 ACQUIRE_LOCK(&sched_mutex);
2433 switch (tso->why_blocked) {
2436     RELEASE_LOCK(&sched_mutex); return;  /* not blocked */
2439 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
2441 StgBlockingQueueElement *last_tso = END_BQ_QUEUE;
2442 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
2444 last = (StgBlockingQueueElement **)&mvar->head;
2445 for (t = (StgBlockingQueueElement *)mvar->head;
2447 last = &t->link, last_tso = t, t = t->link) {
2448 if (t == (StgBlockingQueueElement *)tso) {
2449 *last = (StgBlockingQueueElement *)tso->link;
2450 if (mvar->tail == tso) {
2451 mvar->tail = (StgTSO *)last_tso;
2456 barf("unblockThread (MVAR): TSO not found");
2459 case BlockedOnBlackHole:
2460 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
2462 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
2464 last = &bq->blocking_queue;
2465 for (t = bq->blocking_queue;
2467 last = &t->link, t = t->link) {
2468 if (t == (StgBlockingQueueElement *)tso) {
2469 *last = (StgBlockingQueueElement *)tso->link;
2473 barf("unblockThread (BLACKHOLE): TSO not found");
2476 case BlockedOnException:
2478 StgTSO *target = tso->block_info.tso;
2480 ASSERT(get_itbl(target)->type == TSO);
2481 ASSERT(target->blocked_exceptions != NULL);
2483 last = (StgBlockingQueueElement **)&target->blocked_exceptions;
2484 for (t = (StgBlockingQueueElement *)target->blocked_exceptions;
2486 last = &t->link, t = t->link) {
2487 ASSERT(get_itbl(t)->type == TSO);
2488 if (t == (StgBlockingQueueElement *)tso) {
2489 *last = (StgBlockingQueueElement *)tso->link;
2493 barf("unblockThread (Exception): TSO not found");
2496 case BlockedOnDelay:
2498 case BlockedOnWrite:
2500 StgBlockingQueueElement *prev = NULL;
2501 for (t = (StgBlockingQueueElement *)blocked_queue_hd; t != END_BQ_QUEUE;
2502 prev = t, t = t->link) {
2503 if (t == (StgBlockingQueueElement *)tso) {
2505 blocked_queue_hd = (StgTSO *)t->link;
2506 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
2507 blocked_queue_tl = END_TSO_QUEUE;
2510 prev->link = t->link;
2511 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
2512 blocked_queue_tl = (StgTSO *)prev;
2518 barf("unblockThread (I/O): TSO not found");
2522 barf("unblockThread");
2526 tso->link = END_TSO_QUEUE;
2527 tso->why_blocked = NotBlocked;
2528 tso->block_info.closure = NULL;
2529 PUSH_ON_RUN_QUEUE(tso);
2530 RELEASE_LOCK(&sched_mutex);
2534 unblockThread(StgTSO *tso)
2538 ACQUIRE_LOCK(&sched_mutex);
2539 switch (tso->why_blocked) {
2542     RELEASE_LOCK(&sched_mutex); return;  /* not blocked */
2545 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
2547 StgTSO *last_tso = END_TSO_QUEUE;
2548 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
2551 for (t = mvar->head; t != END_TSO_QUEUE;
2552 last = &t->link, last_tso = t, t = t->link) {
2555 if (mvar->tail == tso) {
2556 mvar->tail = last_tso;
2561 barf("unblockThread (MVAR): TSO not found");
2564 case BlockedOnBlackHole:
2565 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
2567 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
2569 last = &bq->blocking_queue;
2570 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
2571 last = &t->link, t = t->link) {
2577 barf("unblockThread (BLACKHOLE): TSO not found");
2580 case BlockedOnException:
2582 StgTSO *target = tso->block_info.tso;
2584 ASSERT(get_itbl(target)->type == TSO);
2585 ASSERT(target->blocked_exceptions != NULL);
2587 last = &target->blocked_exceptions;
2588 for (t = target->blocked_exceptions; t != END_TSO_QUEUE;
2589 last = &t->link, t = t->link) {
2590 ASSERT(get_itbl(t)->type == TSO);
2596 barf("unblockThread (Exception): TSO not found");
2599 case BlockedOnDelay:
2601 case BlockedOnWrite:
2603 StgTSO *prev = NULL;
2604 for (t = blocked_queue_hd; t != END_TSO_QUEUE;
2605 prev = t, t = t->link) {
2608 blocked_queue_hd = t->link;
2609 if (blocked_queue_tl == t) {
2610 blocked_queue_tl = END_TSO_QUEUE;
2613 prev->link = t->link;
2614 if (blocked_queue_tl == t) {
2615 blocked_queue_tl = prev;
2621 barf("unblockThread (I/O): TSO not found");
2625 barf("unblockThread");
2629 tso->link = END_TSO_QUEUE;
2630 tso->why_blocked = NotBlocked;
2631 tso->block_info.closure = NULL;
2632 PUSH_ON_RUN_QUEUE(tso);
2633 RELEASE_LOCK(&sched_mutex);
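/* All of the cases above use the same unlinking idiom: walk the queue while
 * keeping a pointer to the link field that points at the current element,
 * so removal is a single store.  Stand-alone, hypothetical illustration
 * (removeFromQueue is not part of the RTS):
 */
#if 0
static void
removeFromQueue (StgTSO **head, StgTSO *tso)
{
    StgTSO **last, *t;
    for (last = head, t = *head; t != END_TSO_QUEUE;
	 last = &t->link, t = t->link) {
	if (t == tso) {
	    *last = t->link;	/* unlink it */
	    return;
	}
    }
    barf("removeFromQueue: TSO not found");
}
#endif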
2637 /* -----------------------------------------------------------------------------
2640 * The following function implements the magic for raising an
2641 * asynchronous exception in an existing thread.
2643 * We first remove the thread from any queue on which it might be
2644  * blocked.  The possible blockages are MVARs, BLACKHOLE_BQs, blocked-exception
  * queues, and the blocked-on-I/O/delay queue.
2646 * We strip the stack down to the innermost CATCH_FRAME, building
2647 * thunks in the heap for all the active computations, so they can
2648 * be restarted if necessary. When we reach a CATCH_FRAME, we build
2649 * an application of the handler to the exception, and push it on
2650 * the top of the stack.
2652 * How exactly do we save all the active computations? We create an
2653 * AP_UPD for every UpdateFrame on the stack. Entering one of these
2654 * AP_UPDs pushes everything from the corresponding update frame
2655 * upwards onto the stack. (Actually, it pushes everything up to the
2656  * next update frame plus a pointer to the next AP_UPD object.)
2657 * Entering the next AP_UPD object pushes more onto the stack until we
2658 * reach the last AP_UPD object - at which point the stack should look
2659 * exactly as it did when we killed the TSO and we can continue
2660 * execution by entering the closure on top of the stack.
2662 * We can also kill a thread entirely - this happens if either (a) the
2663 * exception passed to raiseAsync is NULL, or (b) there's no
2664 * CATCH_FRAME on the stack. In either case, we strip the entire
2665 * stack and replace the thread with a zombie.
2667 * -------------------------------------------------------------------------- */
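/* Very roughly, the frame-walking part of raiseAsync below amounts to the
 * following loop (a hypothetical condensation, using the locals su, sp and
 * ap declared in raiseAsync; the real code also handles CATCH_FRAMEs,
 * SEQ_FRAMEs and the STOP_FRAME inside the loop):
 */
#if 0
while (get_itbl(su)->type == UPDATE_FRAME) {
    int words = ((P_)su - (P_)sp) - 1;        /* stack chunk above this frame */
    ap = (StgAP_UPD *)allocate(AP_sizeW(words));
    /* ... copy the chunk into ap->payload[], overwrite su->updatee with an
     *     indirection to ap ... */
    sp = (P_)su + sizeofW(StgUpdateFrame) - 1;
    sp[0] = (W_)ap;                           /* leave ap on top of the stack */
    su = su->link;                            /* move outwards to the next frame */
}
#endif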
2670 deleteThread(StgTSO *tso)
2672 raiseAsync(tso,NULL);
2676 raiseAsync(StgTSO *tso, StgClosure *exception)
2678 StgUpdateFrame* su = tso->su;
2679 StgPtr sp = tso->sp;
2681 /* Thread already dead? */
2682 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
2686 IF_DEBUG(scheduler, sched_belch("raising exception in thread %ld.", tso->id));
2688 /* Remove it from any blocking queues */
2691 /* The stack freezing code assumes there's a closure pointer on
2692 * the top of the stack. This isn't always the case with compiled
2693 * code, so we have to push a dummy closure on the top which just
2694 * returns to the next return address on the stack.
2696 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
2697 *(--sp) = (W_)&dummy_ret_closure;
2701 int words = ((P_)su - (P_)sp) - 1;
2705 /* If we find a CATCH_FRAME, and we've got an exception to raise,
2706 * then build PAP(handler,exception,realworld#), and leave it on
2707 * top of the stack ready to enter.
2709 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
2710 StgCatchFrame *cf = (StgCatchFrame *)su;
2711 /* we've got an exception to raise, so let's pass it to the
2712 * handler in this frame.
2714 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 2);
2715 TICK_ALLOC_UPD_PAP(3,0);
2716 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
2719 ap->fun = cf->handler; /* :: Exception -> IO a */
2720 ap->payload[0] = (P_)exception;
2721 ap->payload[1] = ARG_TAG(0); /* realworld token */
2723	/* throw away the stack from Sp up to and including the CATCH_FRAME. */
2726 sp = (P_)su + sizeofW(StgCatchFrame) - 1;
2729 /* Restore the blocked/unblocked state for asynchronous exceptions
2730 * at the CATCH_FRAME.
2732 * If exceptions were unblocked at the catch, arrange that they
2733 * are unblocked again after executing the handler by pushing an
2734 * unblockAsyncExceptions_ret stack frame.
2736 if (!cf->exceptions_blocked) {
2737 *(sp--) = (W_)&unblockAsyncExceptionszh_ret_info;
2740 /* Ensure that async exceptions are blocked when running the handler.
2742 if (tso->blocked_exceptions == NULL) {
2743 tso->blocked_exceptions = END_TSO_QUEUE;
2746 /* Put the newly-built PAP on top of the stack, ready to execute
2747 * when the thread restarts.
2751 tso->what_next = ThreadEnterGHC;
2752 IF_DEBUG(sanity, checkTSO(tso));
2756 /* First build an AP_UPD consisting of the stack chunk above the
2757     * current update frame, with the top word on the stack as the
      * fun field. */
2760 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
2765 ap->fun = (StgClosure *)sp[0];
2767 for(i=0; i < (nat)words; ++i) {
2768 ap->payload[i] = (P_)*sp++;
2771 switch (get_itbl(su)->type) {
2775 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
2776 TICK_ALLOC_UP_THK(words+1,0);
2779 fprintf(stderr, "scheduler: Updating ");
2780 printPtr((P_)su->updatee);
2781 fprintf(stderr, " with ");
2782 printObj((StgClosure *)ap);
2785 /* Replace the updatee with an indirection - happily
2786 * this will also wake up any threads currently
2787 * waiting on the result.
2789 UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
2791 sp += sizeofW(StgUpdateFrame) -1;
2792 sp[0] = (W_)ap; /* push onto stack */
2798 StgCatchFrame *cf = (StgCatchFrame *)su;
2801 /* We want a PAP, not an AP_UPD. Fortunately, the
2802 * layout's the same.
2804 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
2805 TICK_ALLOC_UPD_PAP(words+1,0);
2807 /* now build o = FUN(catch,ap,handler) */
2808 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
2809 TICK_ALLOC_FUN(2,0);
2810 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
2811 o->payload[0] = (StgClosure *)ap;
2812 o->payload[1] = cf->handler;
2815 fprintf(stderr, "scheduler: Built ");
2816 printObj((StgClosure *)o);
2819 /* pop the old handler and put o on the stack */
2821 sp += sizeofW(StgCatchFrame) - 1;
2828 StgSeqFrame *sf = (StgSeqFrame *)su;
2831 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
2832 TICK_ALLOC_UPD_PAP(words+1,0);
2834 /* now build o = FUN(seq,ap) */
2835 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
2836 TICK_ALLOC_SE_THK(1,0);
2837 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
2838 o->payload[0] = (StgClosure *)ap;
2841 fprintf(stderr, "scheduler: Built ");
2842 printObj((StgClosure *)o);
2845 /* pop the old handler and put o on the stack */
2847 sp += sizeofW(StgSeqFrame) - 1;
2853 /* We've stripped the entire stack, the thread is now dead. */
2854 sp += sizeofW(StgStopFrame) - 1;
2855 sp[0] = (W_)exception; /* save the exception */
2856 tso->what_next = ThreadKilled;
2857 tso->su = (StgUpdateFrame *)(sp+1);
2868 /* -----------------------------------------------------------------------------
2869 resurrectThreads is called after garbage collection on the list of
2870 threads found to be garbage. Each of these threads will be woken
2871 up and sent a signal: BlockedOnDeadMVar if the thread was blocked
2872   on an MVar, or NonTermination if the thread was blocked on a Black Hole.
2874 -------------------------------------------------------------------------- */
2877 resurrectThreads( StgTSO *threads )
2881 for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
2882 next = tso->global_link;
2883 tso->global_link = all_threads;
2885 IF_DEBUG(scheduler, sched_belch("resurrecting thread %d", tso->id));
2887 switch (tso->why_blocked) {
2889 case BlockedOnException:
2890 raiseAsync(tso,(StgClosure *)BlockedOnDeadMVar_closure);
2892 case BlockedOnBlackHole:
2893 raiseAsync(tso,(StgClosure *)NonTermination_closure);
2896 /* This might happen if the thread was blocked on a black hole
2897 * belonging to a thread that we've just woken up (raiseAsync
2898 * can wake up threads, remember...).
2902 barf("resurrectThreads: thread blocked in a strange way");
2907 /* -----------------------------------------------------------------------------
2908 * Blackhole detection: if we reach a deadlock, test whether any
2909 * threads are blocked on themselves. Any threads which are found to
2910 * be self-blocked get sent a NonTermination exception.
2912 * This is only done in a deadlock situation in order to avoid
2913 * performance overhead in the normal case.
2914 * -------------------------------------------------------------------------- */
2917 detectBlackHoles( void )
2919 StgTSO *t = all_threads;
2920 StgUpdateFrame *frame;
2921 StgClosure *blocked_on;
2923 for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
2925 if (t->why_blocked != BlockedOnBlackHole) {
2929 blocked_on = t->block_info.closure;
2931 for (frame = t->su; ; frame = frame->link) {
2932 switch (get_itbl(frame)->type) {
2935 if (frame->updatee == blocked_on) {
2936 /* We are blocking on one of our own computations, so
2937 * send this thread the NonTermination exception.
2940 sched_belch("thread %d is blocked on itself", t->id));
2941 raiseAsync(t, (StgClosure *)NonTermination_closure);
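/* Hypothetical caller, per the comment above -- only when the scheduler
 * believes it is deadlocked (no runnable and no blocked threads left):
 */
#if 0
if (run_queue_hd == END_TSO_QUEUE && blocked_queue_hd == END_TSO_QUEUE) {
    detectBlackHoles();
}
#endif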
2962 //@node Debugging Routines, Index, Exception Handling Routines, Main scheduling code
2963 //@subsection Debugging Routines
2965 /* -----------------------------------------------------------------------------
2966 Debugging: why is a thread blocked
2967 -------------------------------------------------------------------------- */
2972 printThreadBlockage(StgTSO *tso)
2974 switch (tso->why_blocked) {
2976 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
2978 case BlockedOnWrite:
2979 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
2981 case BlockedOnDelay:
2982 #if defined(HAVE_SETITIMER) || defined(mingw32_TARGET_OS)
2983 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
2985 fprintf(stderr,"blocked on delay of %d ms",
2986 tso->block_info.target - getourtimeofday());
2990 fprintf(stderr,"blocked on an MVar");
2992 case BlockedOnException:
2993 fprintf(stderr,"blocked on delivering an exception to thread %d",
2994 tso->block_info.tso->id);
2996 case BlockedOnBlackHole:
2997 fprintf(stderr,"blocked on a black hole");
3000 fprintf(stderr,"not blocked");
3004 fprintf(stderr,"blocked on global address; local FM_BQ is %p (%s)",
3005 tso->block_info.closure, info_type(tso->block_info.closure));
3007 case BlockedOnGA_NoSend:
3008 fprintf(stderr,"blocked on global address (no send); local FM_BQ is %p (%s)",
3009 tso->block_info.closure, info_type(tso->block_info.closure));
3013     barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%p)",
3014 tso->why_blocked, tso->id, tso);
3019 printThreadStatus(StgTSO *tso)
3021 switch (tso->what_next) {
3023 fprintf(stderr,"has been killed");
3025 case ThreadComplete:
3026 fprintf(stderr,"has completed");
3029 printThreadBlockage(tso);
3034 printAllThreads(void)
3038 sched_belch("all threads:");
3039 for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
3040 fprintf(stderr, "\tthread %d is ", t->id);
3041 printThreadStatus(t);
3042 fprintf(stderr,"\n");
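/* Illustrative output, pieced together from the format strings above
 * (the thread numbers are made up):
 *
 *   scheduler: all threads:
 *	thread 1 is blocked on an MVar
 *	thread 2 is blocked on delivering an exception to thread 1
 *	thread 3 is not blocked
 */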
3047 Print a whole blocking queue attached to node (debugging only).
3052 print_bq (StgClosure *node)
3054 StgBlockingQueueElement *bqe;
3058 fprintf(stderr,"## BQ of closure %p (%s): ",
3059 node, info_type(node));
3061 /* should cover all closures that may have a blocking queue */
3062 ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
3063 get_itbl(node)->type == FETCH_ME_BQ ||
3064 get_itbl(node)->type == RBH);
3066 ASSERT(node!=(StgClosure*)NULL); // sanity check
3068 NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure;
3070 for (bqe = ((StgBlockingQueue*)node)->blocking_queue, end = (bqe==END_BQ_QUEUE);
3071 !end; // iterate until bqe points to a CONSTR
3072 end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE), bqe = end ? END_BQ_QUEUE : bqe->link) {
3073 ASSERT(bqe != END_BQ_QUEUE); // sanity check
3074 ASSERT(bqe != (StgTSO*)NULL); // sanity check
3075 /* types of closures that may appear in a blocking queue */
3076 ASSERT(get_itbl(bqe)->type == TSO ||
3077 get_itbl(bqe)->type == BLOCKED_FETCH ||
3078 get_itbl(bqe)->type == CONSTR);
3079 /* only BQs of an RBH end with an RBH_Save closure */
3080 ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);
3082 switch (get_itbl(bqe)->type) {
3084 fprintf(stderr," TSO %d (%x),",
3085 ((StgTSO *)bqe)->id, ((StgTSO *)bqe));
3088 fprintf(stderr," BF (node=%p, ga=((%x, %d, %x)),",
3089 ((StgBlockedFetch *)bqe)->node,
3090 ((StgBlockedFetch *)bqe)->ga.payload.gc.gtid,
3091 ((StgBlockedFetch *)bqe)->ga.payload.gc.slot,
3092 ((StgBlockedFetch *)bqe)->ga.weight);
3095 fprintf(stderr," %s (IP %p),",
3096 (get_itbl(bqe) == &RBH_Save_0_info ? "RBH_Save_0" :
3097 get_itbl(bqe) == &RBH_Save_1_info ? "RBH_Save_1" :
3098 get_itbl(bqe) == &RBH_Save_2_info ? "RBH_Save_2" :
3099 "RBH_Save_?"), get_itbl(bqe));
3102 barf("Unexpected closure type %s in blocking queue of %p (%s)",
3103 info_type(bqe), node, info_type(node));
3107 fputc('\n', stderr);
3109 # elif defined(GRAN)
3111 print_bq (StgClosure *node)
3113 StgBlockingQueueElement *bqe;
3114 PEs node_loc, tso_loc;
3117 /* should cover all closures that may have a blocking queue */
3118 ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
3119 get_itbl(node)->type == FETCH_ME_BQ ||
3120 get_itbl(node)->type == RBH);
3122 ASSERT(node!=(StgClosure*)NULL); // sanity check
3123 node_loc = where_is(node);
3125 fprintf(stderr,"## BQ of closure %p (%s) on [PE %d]: ",
3126 node, info_type(node), node_loc);
3129 NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure;
3131 for (bqe = ((StgBlockingQueue*)node)->blocking_queue, end = (bqe==END_BQ_QUEUE);
3132 !end; // iterate until bqe points to a CONSTR
3133 end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE), bqe = end ? END_BQ_QUEUE : bqe->link) {
3134 ASSERT(bqe != END_BQ_QUEUE); // sanity check
3135 ASSERT(bqe != (StgBlockingQueueElement *)NULL); // sanity check
3136 /* types of closures that may appear in a blocking queue */
3137 ASSERT(get_itbl(bqe)->type == TSO ||
3138 get_itbl(bqe)->type == CONSTR);
3139 /* only BQs of an RBH end with an RBH_Save closure */
3140 ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);
3142 tso_loc = where_is((StgClosure *)bqe);
3143 switch (get_itbl(bqe)->type) {
3145 fprintf(stderr," TSO %d (%p) on [PE %d],",
3146 ((StgTSO *)bqe)->id, (StgTSO *)bqe, tso_loc);
3149 fprintf(stderr," %s (IP %p),",
3150 (get_itbl(bqe) == &RBH_Save_0_info ? "RBH_Save_0" :
3151 get_itbl(bqe) == &RBH_Save_1_info ? "RBH_Save_1" :
3152 get_itbl(bqe) == &RBH_Save_2_info ? "RBH_Save_2" :
3153 "RBH_Save_?"), get_itbl(bqe));
3156 barf("Unexpected closure type %s in blocking queue of %p (%s)",
3157 info_type((StgClosure *)bqe), node, info_type(node));
3161 fputc('\n', stderr);
3165 Nice and easy: only TSOs on the blocking queue
3168 print_bq (StgClosure *node)
3172 ASSERT(node!=(StgClosure*)NULL); // sanity check
3173 for (tso = ((StgBlockingQueue*)node)->blocking_queue;
3174 tso != END_TSO_QUEUE;
3176 ASSERT(tso!=NULL && tso!=END_TSO_QUEUE); // sanity check
3177 ASSERT(get_itbl(tso)->type == TSO); // guess what, sanity check
3178 fprintf(stderr," TSO %d (%p),", tso->id, tso);
3180 fputc('\n', stderr);
3191 for (i=0, tso=run_queue_hd;
3192 tso != END_TSO_QUEUE;
3201 sched_belch(char *s, ...)
3206 fprintf(stderr, "scheduler (task %ld): ", pthread_self());
3208 fprintf(stderr, "scheduler: ");
3210 vfprintf(stderr, s, ap);
3211 fprintf(stderr, "\n");
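/* Typical use, as elsewhere in this file: wrap the call in IF_DEBUG so the
 * message only appears when the scheduler debug flag is on, e.g.
 *
 *   IF_DEBUG(scheduler, sched_belch("resurrecting thread %d", tso->id));
 */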
3217 //@node Index, , Debugging Routines, Main scheduling code
3221 //* MainRegTable:: @cindex\s-+MainRegTable
3222 //* StgMainThread:: @cindex\s-+StgMainThread
3223 //* awaken_blocked_queue:: @cindex\s-+awaken_blocked_queue
3224 //* blocked_queue_hd:: @cindex\s-+blocked_queue_hd
3225 //* blocked_queue_tl:: @cindex\s-+blocked_queue_tl
3226 //* context_switch:: @cindex\s-+context_switch
3227 //* createThread:: @cindex\s-+createThread
3228 //* free_capabilities:: @cindex\s-+free_capabilities
3229 //* gc_pending_cond:: @cindex\s-+gc_pending_cond
3230 //* initScheduler:: @cindex\s-+initScheduler
3231 //* interrupted:: @cindex\s-+interrupted
3232 //* n_free_capabilities:: @cindex\s-+n_free_capabilities
3233 //* next_thread_id:: @cindex\s-+next_thread_id
3234 //* print_bq:: @cindex\s-+print_bq
3235 //* run_queue_hd:: @cindex\s-+run_queue_hd
3236 //* run_queue_tl:: @cindex\s-+run_queue_tl
3237 //* sched_mutex:: @cindex\s-+sched_mutex
3238 //* schedule:: @cindex\s-+schedule
3239 //* take_off_run_queue:: @cindex\s-+take_off_run_queue
3240 //* task_ids:: @cindex\s-+task_ids
3241 //* term_mutex:: @cindex\s-+term_mutex
3242 //* thread_ready_cond:: @cindex\s-+thread_ready_cond