1 /* ---------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.76 2000/08/15 14:18:43 simonmar Exp $
4 * (c) The GHC Team, 1998-2000
8 * The main scheduling code in GranSim is quite different from that in std
9 * (concurrent) Haskell: while concurrent Haskell just iterates over the
10 * threads in the runnable queue, GranSim is event driven, i.e. it iterates
11 * over the events in the global event queue. -- HWL
12 * --------------------------------------------------------------------------*/
14 //@node Main scheduling code, , ,
15 //@section Main scheduling code
17 /* Version with scheduler monitor support for SMPs.
19 This design provides a high-level API to create and schedule threads etc.
20 as documented in the SMP design document.
22 It uses a monitor design controlled by a single mutex to exercise control
23 over accesses to shared data structures, and builds on the Posix threads
26 The majority of state is shared. In order to keep essential per-task state,
27 there is a Capability structure, which contains all the information
28 needed to run a thread: its STG registers, a pointer to its TSO, a
29 nursery etc. During STG execution, a pointer to the capability is
30 kept in a register (BaseReg).
32 In a non-SMP build, there is one global capability, namely MainRegTable.
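/* A minimal sketch of how scheduler code picks up a capability in the two
 * builds; this just mirrors the grab/release code further down in this file
 * and is not a definitive interface description:
 *
 *   Capability *cap;
 * #ifdef SMP
 *   cap = free_capabilities;         // take one off the free list
 *   free_capabilities = cap->link;   // (while holding sched_mutex)
 *   n_free_capabilities--;
 * #else
 *   cap = &MainRegTable;             // the single global capability
 * #endif
 *   cap->rCurrentTSO = t;            // hand it the thread to run
 */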
39 //* Variables and Data structures::
40 //* Main scheduling loop::
41 //* Suspend and Resume::
43 //* Garbage Collection Routines::
44 //* Blocking Queue Routines::
45 //* Exception Handling Routines::
46 //* Debugging Routines::
50 //@node Includes, Variables and Data structures, Main scheduling code, Main scheduling code
51 //@subsection Includes
59 #include "StgStartup.h"
63 #include "StgMiscClosures.h"
65 #include "Evaluator.h"
66 #include "Exception.h"
74 #if defined(GRAN) || defined(PAR)
75 # include "GranSimRts.h"
77 # include "ParallelRts.h"
78 # include "Parallel.h"
79 # include "ParallelDebug.h"
87 //@node Variables and Data structures, Prototypes, Includes, Main scheduling code
88 //@subsection Variables and Data structures
92 * These are the threads which clients have requested that we run.
94 * In an SMP build, we might have several concurrent clients all
95 * waiting for results, and each one will wait on a condition variable
96 * until the result is available.
98 * In non-SMP, clients are strictly nested: the first client calls
99 * into the RTS, which might call out again to C with a _ccall_GC, and
100 * eventually re-enter the RTS.
102 * Information about main threads is kept in a linked list:
104 //@cindex StgMainThread
105 typedef struct StgMainThread_ {
107 SchedulerStatus stat;
110 pthread_cond_t wakeup;
112 struct StgMainThread_ *link;
115 /* Main thread queue.
116 * Locks required: sched_mutex.
118 static StgMainThread *main_threads;
121 * Locks required: sched_mutex.
125 StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */
126 /* rtsTime TimeOfNextEvent, EndOfTimeSlice; now in GranSim.c */
129 In GranSim we have a runnable and a blocked queue for each processor.
130 In order to minimise code changes, new arrays run_queue_hds/tls
131 are created. run_queue_hd is then a shortcut (macro) for
132 run_queue_hds[CurrentProc] (see GranSim.h).
135 StgTSO *run_queue_hds[MAX_PROC], *run_queue_tls[MAX_PROC];
136 StgTSO *blocked_queue_hds[MAX_PROC], *blocked_queue_tls[MAX_PROC];
137 StgTSO *ccalling_threadss[MAX_PROC];
138 /* We use the same global list of threads (all_threads) in GranSim as in
139 the std RTS (i.e. we are cheating). However, we don't use this list in
140 the GranSim specific code at the moment (so we are only potentially
145 StgTSO *run_queue_hd, *run_queue_tl;
146 StgTSO *blocked_queue_hd, *blocked_queue_tl;
150 /* Linked list of all threads.
151 * Used for detecting garbage collected threads.
155 /* Threads suspended in _ccall_GC.
157 static StgTSO *suspended_ccalling_threads;
159 static void GetRoots(void);
160 static StgTSO *threadStackOverflow(StgTSO *tso);
162 /* KH: The following two flags are shared memory locations. There is no need
163 to lock them, since they are only unset at the end of a scheduler
167 /* flag set by signal handler to precipitate a context switch */
168 //@cindex context_switch
171 /* if this flag is set as well, give up execution */
172 //@cindex interrupted
175 /* Next thread ID to allocate.
176 * Locks required: sched_mutex
178 //@cindex next_thread_id
179 StgThreadID next_thread_id = 1;
182 * Pointers to the state of the current thread.
183 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
184 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
187 /* The smallest stack size that makes any sense is:
188 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
189 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
190 * + 1 (the realworld token for an IO thread)
191 * + 1 (the closure to enter)
193 * A thread with this stack will bomb immediately with a stack
194 * overflow, which will increase its stack size.
197 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
199 /* Free capability list.
200 * Locks required: sched_mutex.
203 //@cindex free_capabilities
204 //@cindex n_free_capabilities
205 Capability *free_capabilities; /* Available capabilities for running threads */
206 nat n_free_capabilities; /* total number of available capabilities */
208 //@cindex MainRegTable
209 Capability MainRegTable; /* for non-SMP, we have one global capability */
218 /* All our current task ids, saved in case we need to kill them later.
225 void addToBlockedQueue ( StgTSO *tso );
227 static void schedule ( void );
228 void interruptStgRts ( void );
230 static StgTSO * createThread_ ( nat size, rtsBool have_lock, StgInt pri );
232 static StgTSO * createThread_ ( nat size, rtsBool have_lock );
235 static void detectBlackHoles ( void );
238 static void sched_belch(char *s, ...);
242 //@cindex sched_mutex
244 //@cindex thread_ready_cond
245 //@cindex gc_pending_cond
246 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
247 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
248 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
249 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
256 rtsTime TimeOfLastYield;
260 char *whatNext_strs[] = {
268 char *threadReturnCode_strs[] = {
269 "HeapOverflow", /* might also be StackOverflow */
278 * The thread state for the main thread.
279 // ToDo: check whether this is still needed
283 //@node Main scheduling loop, Suspend and Resume, Prototypes, Main scheduling code
284 //@subsection Main scheduling loop
286 /* ---------------------------------------------------------------------------
287 Main scheduling loop.
289 We use round-robin scheduling, each thread returning to the
290 scheduler loop when one of these conditions is detected:
293 * timer expires (thread yields)
298 Locking notes: we acquire the scheduler lock once at the beginning
299 of the scheduler loop, and release it when
301 * running a thread, or
302 * waiting for work, or
303 * waiting for a GC to complete.
306 In a GranSim setup this loop iterates over the global event queue,
307 which determines what to do next. Therefore, it's more complicated
308 than either the
309 concurrent or the parallel (GUM) setup.
312 GUM iterates over incoming messages.
313 It starts with nothing to do (thus CurrentTSO == END_TSO_QUEUE),
314 and sends out a fish whenever it has nothing to do; in-between
315 doing the actual reductions (shared code below) it processes the
316 incoming messages and deals with delayed operations
317 (see PendingFetches).
318 This is not the ugliest code you could imagine, but it's bloody close.
320 ------------------------------------------------------------------------ */
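/* A condensed sketch of the locking discipline described above; the real
 * loop below interleaves this with the GranSim/GUM-specific work:
 *
 *   ACQUIRE_LOCK(&sched_mutex);
 *   while (1) {
 *     ... pick a thread t and a capability cap ...
 *     RELEASE_LOCK(&sched_mutex);      // never hold the lock while running
 *     ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
 *     ACQUIRE_LOCK(&sched_mutex);      // re-acquire to act on the result
 *     ... handle ret: HeapOverflow, StackOverflow, ThreadFinished, ...
 *   }
 */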
327 StgThreadReturnCode ret;
336 rtsBool was_interrupted = rtsFalse;
338 ACQUIRE_LOCK(&sched_mutex);
342 /* set up first event to get things going */
343 /* ToDo: assign costs for system setup and init MainTSO ! */
344 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
346 CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
349 fprintf(stderr, "GRAN: Init CurrentTSO (in schedule) = %p\n", CurrentTSO);
350 G_TSO(CurrentTSO, 5));
352 if (RtsFlags.GranFlags.Light) {
353 /* Save current time; GranSim Light only */
354 CurrentTSO->gran.clock = CurrentTime[CurrentProc];
357 event = get_next_event();
359 while (event!=(rtsEvent*)NULL) {
360 /* Choose the processor with the next event */
361 CurrentProc = event->proc;
362 CurrentTSO = event->tso;
366 while (!GlobalStopPending) { /* GlobalStopPending set in par_exit */
374 IF_DEBUG(scheduler, printAllThreads());
376 /* If we're interrupted (the user pressed ^C, or some other
377 * termination condition occurred), kill all the currently running
381 IF_DEBUG(scheduler, sched_belch("interrupted"));
382 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
385 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
388 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
389 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
390 interrupted = rtsFalse;
391 was_interrupted = rtsTrue;
394 /* Go through the list of main threads and wake up any
395 * clients whose computations have finished. ToDo: this
396 * should be done more efficiently without a linear scan
397 * of the main threads list, somehow...
401 StgMainThread *m, **prev;
402 prev = &main_threads;
403 for (m = main_threads; m != NULL; m = m->link) {
404 switch (m->tso->what_next) {
407 *(m->ret) = (StgClosure *)m->tso->sp[0];
411 pthread_cond_broadcast(&m->wakeup);
415 if (was_interrupted) {
416 m->stat = Interrupted;
420 pthread_cond_broadcast(&m->wakeup);
430 /* in GUM do this only on the Main PE */
433 /* If our main thread has finished or been killed, return.
436 StgMainThread *m = main_threads;
437 if (m->tso->what_next == ThreadComplete
438 || m->tso->what_next == ThreadKilled) {
439 main_threads = main_threads->link;
440 if (m->tso->what_next == ThreadComplete) {
441 /* we finished successfully, fill in the return value */
442 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
446 if (was_interrupted) {
447 m->stat = Interrupted;
457 /* Top up the run queue from our spark pool. We try to make the
458 * number of threads in the run queue equal to the number of
463 nat n = n_free_capabilities;
464 StgTSO *tso = run_queue_hd;
466 /* Count the run queue */
467 while (n > 0 && tso != END_TSO_QUEUE) {
476 break; /* no more sparks in the pool */
478 /* I'd prefer this to be done in activateSpark -- HWL */
479 /* tricky - it needs to hold the scheduler lock and
480 * not try to re-acquire it -- SDM */
482 tso = createThread_(RtsFlags.GcFlags.initialStkSize, rtsTrue);
483 pushClosure(tso,spark);
484 PUSH_ON_RUN_QUEUE(tso);
486 advisory_thread_count++;
490 sched_belch("turning spark of closure %p into a thread",
491 (StgClosure *)spark));
494 /* We need to wake up the other tasks if we just created some
497 if (n_free_capabilities - n > 1) {
498 pthread_cond_signal(&thread_ready_cond);
503 /* Check whether any waiting threads need to be woken up. If the
504 * run queue is empty, and there are no other tasks running, we
505 * can wait indefinitely for something to happen.
506 * ToDo: what if another client comes along & requests another
509 if (blocked_queue_hd != END_TSO_QUEUE) {
511 (run_queue_hd == END_TSO_QUEUE)
513 && (n_free_capabilities == RtsFlags.ParFlags.nNodes)
518 /* check for signals each time around the scheduler */
519 #ifndef mingw32_TARGET_OS
520 if (signals_pending()) {
521 start_signal_handlers();
526 * Detect deadlock: when we have no threads to run, there are no
527 * threads waiting on I/O or sleeping, and all the other tasks are
528 * waiting for work, we must have a deadlock of some description.
530 * We first try to find threads blocked on themselves (ie. black
531 * holes), and generate NonTermination exceptions where necessary.
533 * If no threads are black holed, we have a deadlock situation, so
534 * inform all the main threads.
537 if (blocked_queue_hd == END_TSO_QUEUE
538 && run_queue_hd == END_TSO_QUEUE
539 && (n_free_capabilities == RtsFlags.ParFlags.nNodes))
541 IF_DEBUG(scheduler, sched_belch("deadlocked, checking for black holes..."));
543 if (run_queue_hd == END_TSO_QUEUE) {
545 for (m = main_threads; m != NULL; m = m->link) {
548 pthread_cond_broadcast(&m->wakeup);
554 if (blocked_queue_hd == END_TSO_QUEUE
555 && run_queue_hd == END_TSO_QUEUE)
557 IF_DEBUG(scheduler, sched_belch("deadlocked, checking for black holes..."));
559 if (run_queue_hd == END_TSO_QUEUE) {
560 StgMainThread *m = main_threads;
563 main_threads = m->link;
570 /* If there's a GC pending, don't do anything until it has
574 IF_DEBUG(scheduler,sched_belch("waiting for GC"));
575 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
578 /* block until we've got a thread on the run queue and a free
581 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
582 IF_DEBUG(scheduler, sched_belch("waiting for work"));
583 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
584 IF_DEBUG(scheduler, sched_belch("work now available"));
590 if (RtsFlags.GranFlags.Light)
591 GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc
593 /* adjust time based on time-stamp */
594 if (event->time > CurrentTime[CurrentProc] &&
595 event->evttype != ContinueThread)
596 CurrentTime[CurrentProc] = event->time;
598 /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */
599 if (!RtsFlags.GranFlags.Light)
602 IF_DEBUG(gran, fprintf(stderr, "GRAN: switch by event-type\n"))
604 /* main event dispatcher in GranSim */
605 switch (event->evttype) {
606 /* Should just be continuing execution */
608 IF_DEBUG(gran, fprintf(stderr, "GRAN: doing ContinueThread\n"));
609 /* ToDo: check assertion
610 ASSERT(run_queue_hd != (StgTSO*)NULL &&
611 run_queue_hd != END_TSO_QUEUE);
613 /* Ignore ContinueThreads for fetching threads (if synchr comm) */
614 if (!RtsFlags.GranFlags.DoAsyncFetch &&
615 procStatus[CurrentProc]==Fetching) {
616 belch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]",
617 CurrentTSO->id, CurrentTSO, CurrentProc);
620 /* Ignore ContinueThreads for completed threads */
621 if (CurrentTSO->what_next == ThreadComplete) {
622 belch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)",
623 CurrentTSO->id, CurrentTSO, CurrentProc);
626 /* Ignore ContinueThreads for threads that are being migrated */
627 if (PROCS(CurrentTSO)==Nowhere) {
628 belch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)",
629 CurrentTSO->id, CurrentTSO, CurrentProc);
632 /* The thread should be at the beginning of the run queue */
633 if (CurrentTSO!=run_queue_hds[CurrentProc]) {
634 belch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread",
635 CurrentTSO->id, CurrentTSO, CurrentProc);
636 break; // run the thread anyway
639 new_event(proc, proc, CurrentTime[proc],
641 (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
643 */ /* Catches superfluous CONTINUEs -- should be unnecessary */
644 break; // now actually run the thread; DaH Qu'vam yImuHbej
647 do_the_fetchnode(event);
648 goto next_thread; /* handle next event in event queue */
651 do_the_globalblock(event);
652 goto next_thread; /* handle next event in event queue */
655 do_the_fetchreply(event);
656 goto next_thread; /* handle next event in event queue */
658 case UnblockThread: /* Move from the blocked queue to the tail of */
659 do_the_unblock(event);
660 goto next_thread; /* handle next event in event queue */
662 case ResumeThread: /* Move from the blocked queue to the tail of */
663 /* the runnable queue ( i.e. Qu' SImqa'lu') */
664 event->tso->gran.blocktime +=
665 CurrentTime[CurrentProc] - event->tso->gran.blockedat;
666 do_the_startthread(event);
667 goto next_thread; /* handle next event in event queue */
670 do_the_startthread(event);
671 goto next_thread; /* handle next event in event queue */
674 do_the_movethread(event);
675 goto next_thread; /* handle next event in event queue */
678 do_the_movespark(event);
679 goto next_thread; /* handle next event in event queue */
682 do_the_findwork(event);
683 goto next_thread; /* handle next event in event queue */
686 barf("Illegal event type %u\n", event->evttype);
689 /* This point was scheduler_loop in the old RTS */
691 IF_DEBUG(gran, belch("GRAN: after main switch"));
693 TimeOfLastEvent = CurrentTime[CurrentProc];
694 TimeOfNextEvent = get_time_of_next_event();
695 IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK
696 // CurrentTSO = ThreadQueueHd;
698 IF_DEBUG(gran, belch("GRAN: time of next event is: %ld",
701 if (RtsFlags.GranFlags.Light)
702 GranSimLight_leave_system(event, &ActiveTSO);
704 EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice;
707 belch("GRAN: end of time-slice is %#lx", EndOfTimeSlice));
709 /* in a GranSim setup the TSO stays on the run queue */
711 /* Take a thread from the run queue. */
712 t = POP_RUN_QUEUE(); // take_off_run_queue(t);
715 fprintf(stderr, "GRAN: About to run current thread, which is\n");
718 context_switch = 0; // turned on via GranYield, checking events and time slice
721 DumpGranEvent(GR_SCHEDULE, t));
723 procStatus[CurrentProc] = Busy;
727 if (PendingFetches != END_BF_QUEUE) {
731 /* ToDo: perhaps merge with spark activation above */
732 /* check whether we have local work and send requests if we have none */
733 if (run_queue_hd == END_TSO_QUEUE) { /* no runnable threads */
734 /* :-[ no local threads => look out for local sparks */
735 /* the spark pool for the current PE */
736 pool = &(MainRegTable.rSparks); // generalise to cap = &MainRegTable
737 if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
738 pool->hd < pool->tl) {
740 * ToDo: add GC code check that we really have enough heap afterwards!!
742 * If we're here (no runnable threads) and we have pending
743 * sparks, we must have a space problem. Get enough space
744 * to turn one of those pending sparks into a
748 spark = findSpark(); /* get a spark */
749 if (spark != (rtsSpark) NULL) {
750 tso = activateSpark(spark); /* turn the spark into a thread */
751 IF_PAR_DEBUG(schedule,
752 belch("==== schedule: Created TSO %d (%p); %d threads active",
753 tso->id, tso, advisory_thread_count));
755 if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
756 belch("==^^ failed to activate spark");
758 } /* otherwise fall through & pick-up new tso */
760 IF_PAR_DEBUG(verbose,
761 belch("==^^ no local sparks (spark pool contains only NFs: %d)",
762 spark_queue_len(pool)));
766 /* =8-[ no local sparks => look for work on other PEs */
769 * We really have absolutely no work. Send out a fish
770 * (there may be some out there already), and wait for
771 * something to arrive. We clearly can't run any threads
772 * until a SCHEDULE or RESUME arrives, and so that's what
773 * we're hoping to see. (Of course, we still have to
774 * respond to other types of messages.)
777 outstandingFishes < RtsFlags.ParFlags.maxFishes ) { // &&
778 // (last_fish_arrived_at+FISH_DELAY < CURRENT_TIME)) {
779 /* fishing set in sendFish, processFish;
780 avoid flooding system with fishes via delay */
782 sendFish(pe, mytid, NEW_FISH_AGE, NEW_FISH_HISTORY,
790 } else if (PacketsWaiting()) { /* Look for incoming messages */
794 /* Now we are sure that we have some work available */
795 ASSERT(run_queue_hd != END_TSO_QUEUE);
796 /* Take a thread from the run queue, if we have work */
797 t = POP_RUN_QUEUE(); // take_off_run_queue(END_TSO_QUEUE);
799 /* ToDo: write something to the log-file
800 if (RTSflags.ParFlags.granSimStats && !sameThread)
801 DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
805 /* the spark pool for the current PE */
806 pool = &(MainRegTable.rSparks); // generalise to cap = &MainRegTable
808 IF_DEBUG(scheduler, belch("--^^ %d sparks on [%#x] (hd=%x; tl=%x; base=%x, lim=%x)",
809 spark_queue_len(pool),
811 pool->hd, pool->tl, pool->base, pool->lim));
813 IF_DEBUG(scheduler, belch("--== %d threads on [%#x] (hd=%x; tl=%x)",
814 run_queue_len(), CURRENT_PROC,
815 run_queue_hd, run_queue_tl));
820 we are running a different TSO, so write a schedule event to log file
821 NB: If we use fair scheduling we also have to write a deschedule
822 event for LastTSO; with unfair scheduling we know that the
823 previous tso has blocked whenever we switch to another tso, so
824 we don't need it in GUM for now
826 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
827 GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
831 #else /* !GRAN && !PAR */
833 /* grab a thread from the run queue
835 ASSERT(run_queue_hd != END_TSO_QUEUE);
837 IF_DEBUG(sanity,checkTSO(t));
844 cap = free_capabilities;
845 free_capabilities = cap->link;
846 n_free_capabilities--;
851 cap->rCurrentTSO = t;
853 /* context switches are now initiated by the timer signal, unless
854 * the user specified "context switch as often as possible", with
857 if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
858 && (run_queue_hd != END_TSO_QUEUE
859 || blocked_queue_hd != END_TSO_QUEUE))
864 RELEASE_LOCK(&sched_mutex);
866 IF_DEBUG(scheduler, sched_belch("-->> Running TSO %ld (%p) %s ...",
867 t->id, t, whatNext_strs[t->what_next]));
869 /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
870 /* Run the current thread
872 switch (cap->rCurrentTSO->what_next) {
875 /* Thread already finished, return to scheduler. */
876 ret = ThreadFinished;
879 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
882 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
884 case ThreadEnterHugs:
888 IF_DEBUG(scheduler,sched_belch("entering Hugs"));
889 c = (StgClosure *)(cap->rCurrentTSO->sp[0]);
890 cap->rCurrentTSO->sp += 1;
895 barf("Panic: entered a BCO but no bytecode interpreter in this build");
898 barf("schedule: invalid what_next field");
900 /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
902 /* Costs for the scheduler are assigned to CCS_SYSTEM */
907 ACQUIRE_LOCK(&sched_mutex);
910 IF_DEBUG(scheduler,fprintf(stderr,"scheduler (task %ld): ", pthread_self()););
911 #elif !defined(GRAN) && !defined(PAR)
912 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: "););
914 t = cap->rCurrentTSO;
917 /* HACK 675: if the last thread didn't yield, make sure to print a
918 SCHEDULE event to the log file when StgRunning the next thread, even
919 if it is the same one as before */
920 LastTSO = t; //(ret == ThreadBlocked) ? END_TSO_QUEUE : t;
921 TimeOfLastYield = CURRENT_TIME;
926 /* make all the running tasks block on a condition variable,
927 * maybe set context_switch and wait till they all pile in,
928 * then have them wait on a GC condition variable.
930 IF_DEBUG(scheduler,belch("--<< thread %ld (%p; %s) stopped: HeapOverflow",
931 t->id, t, whatNext_strs[t->what_next]));
934 ASSERT(!is_on_queue(t,CurrentProc));
937 ready_to_gc = rtsTrue;
938 context_switch = 1; /* stop other threads ASAP */
939 PUSH_ON_RUN_QUEUE(t);
940 /* actual GC is done at the end of the while loop */
944 IF_DEBUG(scheduler,belch("--<< thread %ld (%p; %s) stopped, StackOverflow",
945 t->id, t, whatNext_strs[t->what_next]));
946 /* just adjust the stack for this thread, then pop it back
952 /* enlarge the stack */
953 StgTSO *new_t = threadStackOverflow(t);
955 /* This TSO has moved, so update any pointers to it from the
956 * main thread stack. It better not be on any other queues...
959 for (m = main_threads; m != NULL; m = m->link) {
965 PUSH_ON_RUN_QUEUE(new_t);
972 DumpGranEvent(GR_DESCHEDULE, t));
973 globalGranStats.tot_yields++;
976 DumpGranEvent(GR_DESCHEDULE, t));
978 /* put the thread back on the run queue. Then, if we're ready to
979 * GC, check whether this is the last task to stop. If so, wake
980 * up the GC thread. getThread will block during a GC until the
984 if (t->what_next == ThreadEnterHugs) {
985 /* ToDo: or maybe a timer expired when we were in Hugs?
986 * or maybe someone hit ctrl-C
988 belch("--<< thread %ld (%p; %s) stopped to switch to Hugs",
989 t->id, t, whatNext_strs[t->what_next]);
991 belch("--<< thread %ld (%p; %s) stopped, yielding",
992 t->id, t, whatNext_strs[t->what_next]);
999 //belch("&& Doing sanity check on yielding TSO %ld.", t->id);
1001 ASSERT(t->link == END_TSO_QUEUE);
1003 ASSERT(!is_on_queue(t,CurrentProc));
1006 //belch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
1007 checkThreadQsSanity(rtsTrue));
1009 APPEND_TO_RUN_QUEUE(t);
1011 /* add a ContinueThread event to actually process the thread */
1012 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
1014 t, (StgClosure*)NULL, (rtsSpark*)NULL);
1016 belch("GRAN: eventq and runnableq after adding yielded thread to queue again:");
1025 belch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
1026 t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
1027 if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
1029 // ??? needed; should emit block before
1031 DumpGranEvent(GR_DESCHEDULE, t));
1032 prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
1035 ASSERT(procStatus[CurrentProc]==Busy ||
1036 ((procStatus[CurrentProc]==Fetching) &&
1037 (t->block_info.closure!=(StgClosure*)NULL)));
1038 if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
1039 !(!RtsFlags.GranFlags.DoAsyncFetch &&
1040 procStatus[CurrentProc]==Fetching))
1041 procStatus[CurrentProc] = Idle;
1045 DumpGranEvent(GR_DESCHEDULE, t));
1047 /* Send a fetch (if BlockedOnGA) and dump event to log file */
1051 belch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: ",
1052 t->id, t, whatNext_strs[t->what_next], t->block_info.closure);
1053 if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
1056 /* don't need to do anything. Either the thread is blocked on
1057 * I/O, in which case we'll have called addToBlockedQueue
1058 * previously, or it's blocked on an MVar or Blackhole, in which
1059 * case it'll be on the relevant queue already.
1062 fprintf(stderr, "--<< thread %d (%p) stopped: ", t->id, t);
1063 printThreadBlockage(t);
1064 fprintf(stderr, "\n"));
1066 /* Only for dumping event to log file
1067 ToDo: do I need this in GranSim, too?
1074 case ThreadFinished:
1075 /* Need to check whether this was a main thread, and if so, signal
1076 * the task that started it with the return value. If we have no
1077 * more main threads, we probably need to stop all the tasks until
1080 /* We also end up here if the thread kills itself with an
1081 * uncaught exception, see Exception.hc.
1083 IF_DEBUG(scheduler,belch("--++ thread %d (%p) finished", t->id, t));
1085 endThread(t, CurrentProc); // clean-up the thread
1087 advisory_thread_count--;
1088 if (RtsFlags.ParFlags.ParStats.Full)
1089 DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);
1094 barf("schedule: invalid thread return code %d", (int)ret);
1098 cap->link = free_capabilities;
1099 free_capabilities = cap;
1100 n_free_capabilities++;
1104 if (ready_to_gc && n_free_capabilities == RtsFlags.ParFlags.nNodes)
1109 /* everybody back, start the GC.
1110 * Could do it in this thread, or signal a condition var
1111 * to do it in another thread. Either way, we need to
1112 * broadcast on gc_pending_cond afterward.
1115 IF_DEBUG(scheduler,sched_belch("doing GC"));
1117 GarbageCollect(GetRoots,rtsFalse);
1118 ready_to_gc = rtsFalse;
1120 pthread_cond_broadcast(&gc_pending_cond);
1123 /* add a ContinueThread event to continue execution of current thread */
1124 new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
1126 t, (StgClosure*)NULL, (rtsSpark*)NULL);
1128 fprintf(stderr, "GRAN: eventq and runnableq after Garbage collection:\n");
1135 IF_GRAN_DEBUG(unused,
1136 print_eventq(EventHd));
1138 event = get_next_event();
1142 /* ToDo: wait for next message to arrive rather than busy wait */
1147 t = take_off_run_queue(END_TSO_QUEUE);
1150 } /* end of while(1) */
1153 /* A hack for Hugs concurrency support. Needs sanitisation (?) */
1154 void deleteAllThreads ( void )
1157 IF_DEBUG(scheduler,sched_belch("deleteAllThreads()"));
1158 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
1161 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
1164 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
1165 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
1168 /* startThread and insertThread are now in GranSim.c -- HWL */
1170 //@node Suspend and Resume, Run queue code, Main scheduling loop, Main scheduling code
1171 //@subsection Suspend and Resume
1173 /* ---------------------------------------------------------------------------
1174 * Suspending & resuming Haskell threads.
1176 * When making a "safe" call to C (aka _ccall_GC), the task gives back
1177 * its capability before calling the C function. This allows another
1178 * task to pick up the capability and carry on running Haskell
1179 * threads. It also means that if the C call blocks, it won't lock
1182 * The Haskell thread making the C call is put to sleep for the
1183 * duration of the call, on the suspended_ccalling_threads queue. We
1184 * give out a token to the task, which it can use to resume the thread
1185 * on return from the C function.
1186 * ------------------------------------------------------------------------- */
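/* A hedged usage sketch: roughly what the glue around a "safe" _ccall_GC
 * does with the two functions below (some_blocking_c_call is a hypothetical
 * name for the foreign function being called):
 *
 *   StgInt tok;
 *   tok = suspendThread(cap);       // give the capability back to the RTS
 *   r   = some_blocking_c_call(...);
 *   cap = resumeThread(tok);        // get a capability back and carry on
 */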
1189 suspendThread( Capability *cap )
1193 ACQUIRE_LOCK(&sched_mutex);
1196 sched_belch("thread %d did a _ccall_gc", cap->rCurrentTSO->id));
1198 threadPaused(cap->rCurrentTSO);
1199 cap->rCurrentTSO->link = suspended_ccalling_threads;
1200 suspended_ccalling_threads = cap->rCurrentTSO;
1202 /* Use the thread ID as the token; it should be unique */
1203 tok = cap->rCurrentTSO->id;
1206 cap->link = free_capabilities;
1207 free_capabilities = cap;
1208 n_free_capabilities++;
1211 RELEASE_LOCK(&sched_mutex);
1216 resumeThread( StgInt tok )
1218 StgTSO *tso, **prev;
1221 ACQUIRE_LOCK(&sched_mutex);
1223 prev = &suspended_ccalling_threads;
1224 for (tso = suspended_ccalling_threads;
1225 tso != END_TSO_QUEUE;
1226 prev = &tso->link, tso = tso->link) {
1227 if (tso->id == (StgThreadID)tok) {
1232 if (tso == END_TSO_QUEUE) {
1233 barf("resumeThread: thread not found");
1237 while (free_capabilities == NULL) {
1238 IF_DEBUG(scheduler, sched_belch("waiting to resume"));
1239 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
1240 IF_DEBUG(scheduler, sched_belch("resuming thread %d", tso->id));
1242 cap = free_capabilities;
1243 free_capabilities = cap->link;
1244 n_free_capabilities--;
1246 cap = &MainRegTable;
1249 cap->rCurrentTSO = tso;
1251 RELEASE_LOCK(&sched_mutex);
1256 /* ---------------------------------------------------------------------------
1258 * ------------------------------------------------------------------------ */
1259 static void unblockThread(StgTSO *tso);
1261 /* ---------------------------------------------------------------------------
1262 * Comparing Thread ids.
1264 * This is used from STG land in the implementation of the
1265 * instances of Eq/Ord for ThreadIds.
1266 * ------------------------------------------------------------------------ */
1268 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
1270 StgThreadID id1 = tso1->id;
1271 StgThreadID id2 = tso2->id;
1273 if (id1 < id2) return (-1);
1274 if (id1 > id2) return 1;
1278 /* ---------------------------------------------------------------------------
1279 Create a new thread.
1281 The new thread starts with the given stack size. Before the
1282 scheduler can run, however, this thread needs to have a closure
1283 (and possibly some arguments) pushed on its stack. See
1284 pushClosure() in Schedule.h.
1286 createGenThread() and createIOThread() (in SchedAPI.h) are
1287 convenient packaged versions of this function.
1289 currently pri (priority) is only used in a GRAN setup -- HWL
1290 ------------------------------------------------------------------------ */
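/* A small usage sketch, assuming a non-GRAN build (so no priority argument):
 *
 *   StgTSO *t = createThread(RtsFlags.GcFlags.initialStkSize);
 *   pushClosure(t, closure);        // give it something to evaluate
 *
 * Requests smaller than MIN_STACK_WORDS + TSO_STRUCT_SIZEW are rounded up
 * inside createThread_ below.
 */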
1291 //@cindex createThread
1293 /* currently pri (priority) is only used in a GRAN setup -- HWL */
1295 createThread(nat stack_size, StgInt pri)
1297 return createThread_(stack_size, rtsFalse, pri);
1301 createThread_(nat size, rtsBool have_lock, StgInt pri)
1305 createThread(nat stack_size)
1307 return createThread_(stack_size, rtsFalse);
1311 createThread_(nat size, rtsBool have_lock)
1318 /* First check whether we should create a thread at all */
1320 /* check that no more than RtsFlags.ParFlags.maxThreads threads are created */
1321 if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads) {
1323 belch("{createThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)",
1324 RtsFlags.ParFlags.maxThreads, advisory_thread_count);
1325 return END_TSO_QUEUE;
1331 ASSERT(!RtsFlags.GranFlags.Light || CurrentProc==0);
1334 // ToDo: check whether size = stack_size - TSO_STRUCT_SIZEW
1336 /* catch ridiculously small stack sizes */
1337 if (size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
1338 size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
1341 stack_size = size - TSO_STRUCT_SIZEW;
1343 tso = (StgTSO *)allocate(size);
1344 TICK_ALLOC_TSO(size-TSO_STRUCT_SIZEW, 0);
1346 SET_HDR(tso, &TSO_info, CCS_SYSTEM);
1348 SET_GRAN_HDR(tso, ThisPE);
1350 tso->what_next = ThreadEnterGHC;
1352 /* tso->id needs to be unique. For now we use a heavyweight mutex to
1353 * protect the increment operation on next_thread_id.
1354 * In future, we could use an atomic increment instead.
1356 if (!have_lock) { ACQUIRE_LOCK(&sched_mutex); }
1357 tso->id = next_thread_id++;
1358 if (!have_lock) { RELEASE_LOCK(&sched_mutex); }
1360 tso->why_blocked = NotBlocked;
1361 tso->blocked_exceptions = NULL;
1363 tso->stack_size = stack_size;
1364 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
1366 tso->sp = (P_)&(tso->stack) + stack_size;
1369 tso->prof.CCCS = CCS_MAIN;
1372 /* put a stop frame on the stack */
1373 tso->sp -= sizeofW(StgStopFrame);
1374 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
1375 tso->su = (StgUpdateFrame*)tso->sp;
1379 tso->link = END_TSO_QUEUE;
1380 /* uses more flexible routine in GranSim */
1381 insertThread(tso, CurrentProc);
1383 /* In a non-GranSim setup the pushing of a TSO onto the runq is separated
1388 #if defined(GRAN) || defined(PAR)
1389 DumpGranEvent(GR_START,tso);
1392 /* Link the new thread on the global thread list.
1394 tso->global_link = all_threads;
1398 tso->gran.pri = pri;
1400 tso->gran.magic = TSO_MAGIC; // debugging only
1402 tso->gran.sparkname = 0;
1403 tso->gran.startedat = CURRENT_TIME;
1404 tso->gran.exported = 0;
1405 tso->gran.basicblocks = 0;
1406 tso->gran.allocs = 0;
1407 tso->gran.exectime = 0;
1408 tso->gran.fetchtime = 0;
1409 tso->gran.fetchcount = 0;
1410 tso->gran.blocktime = 0;
1411 tso->gran.blockcount = 0;
1412 tso->gran.blockedat = 0;
1413 tso->gran.globalsparks = 0;
1414 tso->gran.localsparks = 0;
1415 if (RtsFlags.GranFlags.Light)
1416 tso->gran.clock = Now; /* local clock */
1418 tso->gran.clock = 0;
1420 IF_DEBUG(gran,printTSO(tso));
1423 tso->par.magic = TSO_MAGIC; // debugging only
1425 tso->par.sparkname = 0;
1426 tso->par.startedat = CURRENT_TIME;
1427 tso->par.exported = 0;
1428 tso->par.basicblocks = 0;
1429 tso->par.allocs = 0;
1430 tso->par.exectime = 0;
1431 tso->par.fetchtime = 0;
1432 tso->par.fetchcount = 0;
1433 tso->par.blocktime = 0;
1434 tso->par.blockcount = 0;
1435 tso->par.blockedat = 0;
1436 tso->par.globalsparks = 0;
1437 tso->par.localsparks = 0;
1441 globalGranStats.tot_threads_created++;
1442 globalGranStats.threads_created_on_PE[CurrentProc]++;
1443 globalGranStats.tot_sq_len += spark_queue_len(CurrentProc);
1444 globalGranStats.tot_sq_probes++;
1449 belch("==__ schedule: Created TSO %d (%p) on PE %d;",
1450 tso->id, tso, CurrentProc));
1452 IF_PAR_DEBUG(verbose,
1453 belch("==__ schedule: Created TSO %d (%p); %d threads active",
1454 tso->id, tso, advisory_thread_count));
1456 IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words",
1457 tso->id, tso->stack_size));
1463 Turn a spark into a thread.
1464 ToDo: fix for SMP (needs to acquire SCHED_MUTEX!)
1467 //@cindex activateSpark
1469 activateSpark (rtsSpark spark)
1473 ASSERT(spark != (rtsSpark)NULL);
1474 tso = createThread_(RtsFlags.GcFlags.initialStkSize, rtsTrue);
1475 if (tso!=END_TSO_QUEUE) {
1476 pushClosure(tso,spark);
1477 PUSH_ON_RUN_QUEUE(tso);
1478 advisory_thread_count++;
1480 if (RtsFlags.ParFlags.ParStats.Full) {
1481 //ASSERT(run_queue_hd == END_TSO_QUEUE); // I think ...
1482 IF_PAR_DEBUG(verbose,
1483 belch("==^^ activateSpark: turning spark of closure %p (%s) into a thread",
1484 (StgClosure *)spark, info_type((StgClosure *)spark)));
1487 barf("activateSpark: Cannot create TSO");
1489 // ToDo: fwd info on local/global spark to thread -- HWL
1490 // tso->gran.exported = spark->exported;
1491 // tso->gran.locked = !spark->global;
1492 // tso->gran.sparkname = spark->name;
1498 /* ---------------------------------------------------------------------------
1501 * scheduleThread puts a thread on the head of the runnable queue.
1502 * This will usually be done immediately after a thread is created.
1503 * The caller of scheduleThread must create the thread using e.g.
1504 * createThread and push an appropriate closure
1505 * on this thread's stack before the scheduler is invoked.
1506 * ------------------------------------------------------------------------ */
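/* A hedged sketch of the typical client sequence: create, push, schedule,
 * then wait for the result (waitThread is defined further down):
 *
 *   StgClosure *result;
 *   StgTSO *t = createThread(RtsFlags.GcFlags.initialStkSize);
 *   pushClosure(t, closure);
 *   scheduleThread(t);
 *   SchedulerStatus stat = waitThread(t, &result);
 */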
1509 scheduleThread(StgTSO *tso)
1511 if (tso==END_TSO_QUEUE) {
1516 ACQUIRE_LOCK(&sched_mutex);
1518 /* Put the new thread on the head of the runnable queue. The caller
1519 * better push an appropriate closure on this thread's stack
1520 * beforehand. In the SMP case, the thread may start running as
1521 * soon as we release the scheduler lock below.
1523 PUSH_ON_RUN_QUEUE(tso);
1527 IF_DEBUG(scheduler,printTSO(tso));
1529 RELEASE_LOCK(&sched_mutex);
1532 /* ---------------------------------------------------------------------------
1535 * Start up Posix threads to run each of the scheduler tasks.
1536 * I believe the task ids are not needed in the system as defined.
1538 * ------------------------------------------------------------------------ */
1540 #if defined(PAR) || defined(SMP)
1542 taskStart( void *arg STG_UNUSED )
1544 rts_evalNothing(NULL);
1548 /* ---------------------------------------------------------------------------
1551 * Initialise the scheduler. This resets all the queues - if the
1552 * queues contained any threads, they'll be garbage collected at the
1555 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
1556 * ------------------------------------------------------------------------ */
1560 term_handler(int sig STG_UNUSED)
1563 ACQUIRE_LOCK(&term_mutex);
1565 RELEASE_LOCK(&term_mutex);
1570 //@cindex initScheduler
1577 for (i=0; i<MAX_PROC; i++) {
1578 run_queue_hds[i] = END_TSO_QUEUE;
1579 run_queue_tls[i] = END_TSO_QUEUE;
1580 blocked_queue_hds[i] = END_TSO_QUEUE;
1581 blocked_queue_tls[i] = END_TSO_QUEUE;
1582 ccalling_threadss[i] = END_TSO_QUEUE;
1585 run_queue_hd = END_TSO_QUEUE;
1586 run_queue_tl = END_TSO_QUEUE;
1587 blocked_queue_hd = END_TSO_QUEUE;
1588 blocked_queue_tl = END_TSO_QUEUE;
1591 suspended_ccalling_threads = END_TSO_QUEUE;
1593 main_threads = NULL;
1594 all_threads = END_TSO_QUEUE;
1599 RtsFlags.ConcFlags.ctxtSwitchTicks =
1600 RtsFlags.ConcFlags.ctxtSwitchTime / TICK_MILLISECS;
1603 ecafList = END_ECAF_LIST;
1607 /* Install the SIGTERM handler */
1610 struct sigaction action,oact;
1612 action.sa_handler = term_handler;
1613 sigemptyset(&action.sa_mask);
1614 action.sa_flags = 0;
1615 if (sigaction(SIGTERM, &action, &oact) != 0) {
1616 barf("can't install TERM handler");
1622 /* Allocate N Capabilities */
1625 Capability *cap, *prev;
1628 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1629 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
1633 free_capabilities = cap;
1634 n_free_capabilities = RtsFlags.ParFlags.nNodes;
1636 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Allocated %d capabilities\n",
1637 n_free_capabilities););
1640 #if defined(SMP) || defined(PAR)
1653 /* make some space for saving all the thread ids */
1654 task_ids = stgMallocBytes(RtsFlags.ParFlags.nNodes * sizeof(task_info),
1655 "initScheduler:task_ids");
1657 /* and create all the threads */
1658 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1659 r = pthread_create(&tid,NULL,taskStart,NULL);
1661 barf("startTasks: Can't create new Posix thread");
1663 task_ids[i].id = tid;
1664 task_ids[i].mut_time = 0.0;
1665 task_ids[i].mut_etime = 0.0;
1666 task_ids[i].gc_time = 0.0;
1667 task_ids[i].gc_etime = 0.0;
1668 task_ids[i].elapsedtimestart = elapsedtime();
1669 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Started task: %ld\n",tid););
1675 exitScheduler( void )
1680 /* Don't want to use pthread_cancel, since we'd have to install
1681 * these silly exception handlers (pthread_cleanup_{push,pop}) around
1685 /* Cancel all our tasks */
1686 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1687 pthread_cancel(task_ids[i].id);
1690 /* Wait for all the tasks to terminate */
1691 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1692 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: waiting for task %ld\n",
1694 pthread_join(task_ids[i].id, NULL);
1698 /* Send 'em all a SIGTERM. That should shut 'em up.
1700 await_death = RtsFlags.ParFlags.nNodes;
1701 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1702 pthread_kill(task_ids[i].id,SIGTERM);
1704 while (await_death > 0) {
1710 /* -----------------------------------------------------------------------------
1711 Managing the per-task allocation areas.
1713 Each capability comes with an allocation area. These are
1714 fixed-length block lists into which allocation can be done.
1716 ToDo: no support for two-space collection at the moment???
1717 -------------------------------------------------------------------------- */
1719 /* -----------------------------------------------------------------------------
1720 * waitThread is the external interface for running a new computation
1721 * and waiting for the result.
1723 * In the non-SMP case, we create a new main thread, push it on the
1724 * main-thread stack, and invoke the scheduler to run it. The
1725 * scheduler will return when the top main thread on the stack has
1726 * completed or died, and fill in the necessary fields of the
1727 * main_thread structure.
1729 * In the SMP case, we create a main thread as before, but we then
1730 * create a new condition variable and sleep on it. When our new
1731 * main thread has completed, we'll be woken up and the status/result
1732 * will be in the main_thread struct.
1733 * -------------------------------------------------------------------------- */
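/* In outline (SMP build), the caller links a new StgMainThread onto
 * main_threads and then sleeps on its private condition variable until the
 * scheduler fills in the result; a condensed sketch of the code below:
 *
 *   m->link = main_threads;  main_threads = m;
 *   do {
 *     pthread_cond_wait(&m->wakeup, &sched_mutex);
 *   } while (m->stat == NoStatus);
 */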
1736 howManyThreadsAvail ( void )
1740 for (q = run_queue_hd; q != END_TSO_QUEUE; q = q->link)
1742 for (q = blocked_queue_hd; q != END_TSO_QUEUE; q = q->link)
1748 finishAllThreads ( void )
1751 while (run_queue_hd != END_TSO_QUEUE) {
1752 waitThread ( run_queue_hd, NULL );
1754 while (blocked_queue_hd != END_TSO_QUEUE) {
1755 waitThread ( blocked_queue_hd, NULL );
1758 (blocked_queue_hd != END_TSO_QUEUE ||
1759 run_queue_hd != END_TSO_QUEUE);
1763 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
1766 SchedulerStatus stat;
1768 ACQUIRE_LOCK(&sched_mutex);
1770 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
1776 pthread_cond_init(&m->wakeup, NULL);
1779 m->link = main_threads;
1782 IF_DEBUG(scheduler, fprintf(stderr, "scheduler: new main thread (%d)\n",
1787 pthread_cond_wait(&m->wakeup, &sched_mutex);
1788 } while (m->stat == NoStatus);
1790 /* GranSim specific init */
1791 CurrentTSO = m->tso; // the TSO to run
1792 procStatus[MainProc] = Busy; // status of main PE
1793 CurrentProc = MainProc; // PE to run it on
1798 ASSERT(m->stat != NoStatus);
1804 pthread_cond_destroy(&m->wakeup);
1807 IF_DEBUG(scheduler, fprintf(stderr, "scheduler: main thread (%d) finished\n",
1811 RELEASE_LOCK(&sched_mutex);
1816 //@node Run queue code, Garbage Collection Routines, Suspend and Resume, Main scheduling code
1817 //@subsection Run queue code
1821 NB: In GranSim we have many run queues; run_queue_hd is actually a macro
1822 unfolding to run_queue_hds[CurrentProc], thus CurrentProc is an
1823 implicit global variable that has to be correct when calling these
1827 /* Put the new thread on the head of the runnable queue.
1828 * The caller of createThread better push an appropriate closure
1829 * on this thread's stack before the scheduler is invoked.
1831 static /* inline */ void
1832 add_to_run_queue(tso)
1835 ASSERT(tso!=run_queue_hd && tso!=run_queue_tl);
1836 tso->link = run_queue_hd;
1838 if (run_queue_tl == END_TSO_QUEUE) {
1843 /* Put the new thread at the end of the runnable queue. */
1844 static /* inline */ void
1845 push_on_run_queue(tso)
1848 ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
1849 ASSERT(run_queue_hd!=NULL && run_queue_tl!=NULL);
1850 ASSERT(tso!=run_queue_hd && tso!=run_queue_tl);
1851 if (run_queue_hd == END_TSO_QUEUE) {
1854 run_queue_tl->link = tso;
1860 Should be inlined because it's used very often in schedule. The tso
1861 argument is actually only needed in GranSim, where we want to have the
1862 possibility to schedule *any* TSO on the run queue, irrespective of the
1863 actual ordering. Therefore, if tso is not the nil TSO then we traverse
1864 the run queue and dequeue the tso, adjusting the links in the queue.
1866 //@cindex take_off_run_queue
1867 static /* inline */ StgTSO*
1868 take_off_run_queue(StgTSO *tso) {
1872 qetlaHbogh Qu' ngaSbogh ghomDaQ {tso} yIteq!
1874 if tso is specified, unlink that tso from the run_queue (doesn't have
1875 to be at the beginning of the queue); GranSim only
1877 if (tso!=END_TSO_QUEUE) {
1878 /* find tso in queue */
1879 for (t=run_queue_hd, prev=END_TSO_QUEUE;
1880 t!=END_TSO_QUEUE && t!=tso;
1884 /* now actually dequeue the tso */
1885 if (prev!=END_TSO_QUEUE) {
1886 ASSERT(run_queue_hd!=t);
1887 prev->link = t->link;
1889 /* t is at beginning of thread queue */
1890 ASSERT(run_queue_hd==t);
1891 run_queue_hd = t->link;
1893 /* t is at end of thread queue */
1894 if (t->link==END_TSO_QUEUE) {
1895 ASSERT(t==run_queue_tl);
1896 run_queue_tl = prev;
1898 ASSERT(run_queue_tl!=t);
1900 t->link = END_TSO_QUEUE;
1902 /* take tso from the beginning of the queue; std concurrent code */
1904 if (t != END_TSO_QUEUE) {
1905 run_queue_hd = t->link;
1906 t->link = END_TSO_QUEUE;
1907 if (run_queue_hd == END_TSO_QUEUE) {
1908 run_queue_tl = END_TSO_QUEUE;
1917 //@node Garbage Collection Routines, Blocking Queue Routines, Run queue code, Main scheduling code
1918 //@subsection Garbage Collection Routines
1920 /* ---------------------------------------------------------------------------
1921 Where are the roots that we know about?
1923 - all the threads on the runnable queue
1924 - all the threads on the blocked queue
1925 - all the threads currently executing a _ccall_GC
1926 - all the "main threads"
1928 ------------------------------------------------------------------------ */
1930 /* This has to be protected either by the scheduler monitor, or by the
1931 garbage collection monitor (probably the latter).
1935 static void GetRoots(void)
1942 for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
1943 if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL)))
1944 run_queue_hds[i] = (StgTSO *)MarkRoot((StgClosure *)run_queue_hds[i]);
1945 if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL)))
1946 run_queue_tls[i] = (StgTSO *)MarkRoot((StgClosure *)run_queue_tls[i]);
1948 if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL)))
1949 blocked_queue_hds[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hds[i]);
1950 if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL)))
1951 blocked_queue_tls[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tls[i]);
1952 if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL)))
1953 ccalling_threadss[i] = (StgTSO *)MarkRoot((StgClosure *)ccalling_threadss[i]);
1960 if (run_queue_hd != END_TSO_QUEUE) {
1961 ASSERT(run_queue_tl != END_TSO_QUEUE);
1962 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
1963 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
1966 if (blocked_queue_hd != END_TSO_QUEUE) {
1967 ASSERT(blocked_queue_tl != END_TSO_QUEUE);
1968 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
1969 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
1973 for (m = main_threads; m != NULL; m = m->link) {
1974 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
1976 if (suspended_ccalling_threads != END_TSO_QUEUE)
1977 suspended_ccalling_threads =
1978 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
1980 #if defined(SMP) || defined(PAR) || defined(GRAN)
1985 /* -----------------------------------------------------------------------------
1988 This is the interface to the garbage collector from Haskell land.
1989 We provide this so that external C code can allocate and garbage
1990 collect when called from Haskell via _ccall_GC.
1992 It might be useful to provide an interface whereby the programmer
1993 can specify more roots (ToDo).
1995 This needs to be protected by the GC condition variable above. KH.
1996 -------------------------------------------------------------------------- */
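/* A minimal sketch of the "extra roots" interface mentioned above; the
 * variable my_root and the function my_extra_roots are hypothetical names
 * made up for the example:
 *
 *   static StgClosure *my_root;
 *
 *   static void my_extra_roots(void)
 *   {
 *     my_root = MarkRoot(my_root);   // keep our private root alive
 *   }
 *
 *   ... performGCWithRoots(my_extra_roots); ...
 */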
1998 void (*extra_roots)(void);
2003 GarbageCollect(GetRoots,rtsFalse);
2007 performMajorGC(void)
2009 GarbageCollect(GetRoots,rtsTrue);
2015 GetRoots(); /* the scheduler's roots */
2016 extra_roots(); /* the user's roots */
2020 performGCWithRoots(void (*get_roots)(void))
2022 extra_roots = get_roots;
2024 GarbageCollect(AllRoots,rtsFalse);
2027 /* -----------------------------------------------------------------------------
2030 If the thread has reached its maximum stack size, then raise the
2031 StackOverflow exception in the offending thread. Otherwise
2032 relocate the TSO into a larger chunk of memory and adjust its stack
2034 -------------------------------------------------------------------------- */
2037 threadStackOverflow(StgTSO *tso)
2039 nat new_stack_size, new_tso_size, diff, stack_words;
2043 IF_DEBUG(sanity,checkTSO(tso));
2044 if (tso->stack_size >= tso->max_stack_size) {
2047 belch("@@ threadStackOverflow of TSO %d (%p): stack too large (now %ld; max is %ld)",
2048 tso->id, tso, tso->stack_size, tso->max_stack_size);
2049 /* If we're debugging, just print out the top of the stack */
2050 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2054 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
2057 /* Send this thread the StackOverflow exception */
2058 raiseAsync(tso, (StgClosure *)stackOverflow_closure);
2063 /* Try to double the current stack size. If that takes us over the
2064 * maximum stack size for this thread, then use the maximum instead.
2065 * Finally round up so the TSO ends up as a whole number of blocks.
2067 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
2068 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
2069 TSO_STRUCT_SIZE)/sizeof(W_);
2070 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
2071 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
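/* Worked example, ignoring the block/mblock rounding above: a thread with
 * a 1024-word stack and an 8192-word maximum grows to
 * min(2*1024, 8192) = 2048 words here; once stack_size has reached
 * max_stack_size, the test at the top of this function raises the
 * StackOverflow exception instead of growing the stack further.
 */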
2073 IF_DEBUG(scheduler, fprintf(stderr,"scheduler: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
2075 dest = (StgTSO *)allocate(new_tso_size);
2076 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
2078 /* copy the TSO block and the old stack into the new area */
2079 memcpy(dest,tso,TSO_STRUCT_SIZE);
2080 stack_words = tso->stack + tso->stack_size - tso->sp;
2081 new_sp = (P_)dest + new_tso_size - stack_words;
2082 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
2084 /* relocate the stack pointers... */
2085 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
2086 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
2088 dest->stack_size = new_stack_size;
2090 /* and relocate the update frame list */
2091 relocate_TSO(tso, dest);
2093 /* Mark the old TSO as relocated. We have to check for relocated
2094 * TSOs in the garbage collector and any primops that deal with TSOs.
2096 * It's important to set the sp and su values to just beyond the end
2097 * of the stack, so we don't attempt to scavenge any part of the
2100 tso->what_next = ThreadRelocated;
2102 tso->sp = (P_)&(tso->stack[tso->stack_size]);
2103 tso->su = (StgUpdateFrame *)tso->sp;
2104 tso->why_blocked = NotBlocked;
2105 dest->mut_link = NULL;
2107 IF_PAR_DEBUG(verbose,
2108 belch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld",
2109 tso->id, tso, tso->stack_size);
2110 /* If we're debugging, just print out the top of the stack */
2111 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
2114 IF_DEBUG(sanity,checkTSO(tso));
2116 IF_DEBUG(scheduler,printTSO(dest));
2122 //@node Blocking Queue Routines, Exception Handling Routines, Garbage Collection Routines, Main scheduling code
2123 //@subsection Blocking Queue Routines
2125 /* ---------------------------------------------------------------------------
2126 Wake up a queue that was blocked on some resource.
2127 ------------------------------------------------------------------------ */
2129 /* ToDo: check push_on_run_queue vs. PUSH_ON_RUN_QUEUE */
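/* A hedged sketch of how these routines get used; the actual call sites are
 * in the primops and update code rather than in this file. When a blocked-on
 * closure (an MVar, a black hole, ...) gets its value, the waiters go back
 * onto the run queue with, roughly,
 *
 *   awakenBlockedQueue(blocking_queue);   // wake the whole queue
 *
 * or, for a single waiter,
 *
 *   tso = unblockOne(tso);                // returns the next queue element
 */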
2133 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
2138 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
2140 /* write RESUME events to log file and
2141 update blocked and fetch time (depending on type of the orig closure) */
2142 if (RtsFlags.ParFlags.ParStats.Full) {
2143 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
2144 GR_RESUME, ((StgTSO *)bqe), ((StgTSO *)bqe)->block_info.closure,
2145 0, 0 /* spark_queue_len(ADVISORY_POOL) */);
2147 switch (get_itbl(node)->type) {
2149 ((StgTSO *)bqe)->par.fetchtime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
2154 ((StgTSO *)bqe)->par.blocktime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
2157 barf("{unblockOneLocked}Daq Qagh: unexpected closure in blocking queue");
2164 static StgBlockingQueueElement *
2165 unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
2168 PEs node_loc, tso_loc;
2170 node_loc = where_is(node); // should be lifted out of loop
2171 tso = (StgTSO *)bqe; // wastes an assignment to get the type right
2172 tso_loc = where_is((StgClosure *)tso);
2173 if (IS_LOCAL_TO(PROCS(node),tso_loc)) { // TSO is local
2174 /* !fake_fetch => the TSO being on CurrentProc is the same as IS_LOCAL_TO */
2175 ASSERT(CurrentProc!=node_loc || tso_loc==CurrentProc);
2176 CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.lunblocktime;
2177 // insertThread(tso, node_loc);
2178 new_event(tso_loc, tso_loc, CurrentTime[CurrentProc],
2180 tso, node, (rtsSpark*)NULL);
2181 tso->link = END_TSO_QUEUE; // overwrite link just to be sure
2184 } else { // TSO is remote (actually should be FMBQ)
2185 CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.mpacktime +
2186 RtsFlags.GranFlags.Costs.gunblocktime +
2187 RtsFlags.GranFlags.Costs.latency;
2188 new_event(tso_loc, CurrentProc, CurrentTime[CurrentProc],
2190 tso, node, (rtsSpark*)NULL);
2191 tso->link = END_TSO_QUEUE; // overwrite link just to be sure
2194 /* the thread-queue-overhead is accounted for in either Resume or UnblockThread */
2196 fprintf(stderr," %s TSO %d (%p) [PE %d] (block_info.closure=%p) (next=%p) ,",
2197 (node_loc==tso_loc ? "Local" : "Global"),
2198 tso->id, tso, CurrentProc, tso->block_info.closure, tso->link));
2199 tso->block_info.closure = NULL;
2200 IF_DEBUG(scheduler,belch("-- Waking up thread %ld (%p)",
2204 static StgBlockingQueueElement *
2205 unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
2207 StgBlockingQueueElement *next;
2209 switch (get_itbl(bqe)->type) {
2211 ASSERT(((StgTSO *)bqe)->why_blocked != NotBlocked);
2212 /* if it's a TSO just push it onto the run_queue */
2214 // ((StgTSO *)bqe)->link = END_TSO_QUEUE; // debugging?
2215 PUSH_ON_RUN_QUEUE((StgTSO *)bqe);
2217 unblockCount(bqe, node);
2218 /* reset blocking status after dumping event */
2219 ((StgTSO *)bqe)->why_blocked = NotBlocked;
2223 /* if it's a BLOCKED_FETCH put it on the PendingFetches list */
2225 bqe->link = PendingFetches;
2226 PendingFetches = bqe;
2230 /* can ignore this case in a non-debugging setup;
2231 see comments on RBHSave closures above */
2233 /* check that the closure is an RBHSave closure */
2234 ASSERT(get_itbl((StgClosure *)bqe) == &RBH_Save_0_info ||
2235 get_itbl((StgClosure *)bqe) == &RBH_Save_1_info ||
2236 get_itbl((StgClosure *)bqe) == &RBH_Save_2_info);
2240 barf("{unblockOneLocked}Daq Qagh: Unexpected IP (%#lx; %s) in blocking queue at %#lx\n",
2241 get_itbl((StgClosure *)bqe), info_type((StgClosure *)bqe),
2245 // IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
2249 #else /* !GRAN && !PAR */
2251 unblockOneLocked(StgTSO *tso)
2255 ASSERT(get_itbl(tso)->type == TSO);
2256 ASSERT(tso->why_blocked != NotBlocked);
2257 tso->why_blocked = NotBlocked;
2259 PUSH_ON_RUN_QUEUE(tso);
2261 IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
2266 #if defined(GRAN) || defined(PAR)
2267 inline StgBlockingQueueElement *
2268 unblockOne(StgBlockingQueueElement *bqe, StgClosure *node)
2270 ACQUIRE_LOCK(&sched_mutex);
2271 bqe = unblockOneLocked(bqe, node);
2272 RELEASE_LOCK(&sched_mutex);
2277 unblockOne(StgTSO *tso)
2279 ACQUIRE_LOCK(&sched_mutex);
2280 tso = unblockOneLocked(tso);
2281 RELEASE_LOCK(&sched_mutex);
2288 awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
2290 StgBlockingQueueElement *bqe;
2295 belch("## AwBQ for node %p on PE %d @ %ld by TSO %d (%p): ", \
2296 node, CurrentProc, CurrentTime[CurrentProc],
2297 CurrentTSO->id, CurrentTSO));
2299 node_loc = where_is(node);
2301 ASSERT(get_itbl(q)->type == TSO || // q is either a TSO or an RBHSave
2302 get_itbl(q)->type == CONSTR); // closure (type constructor)
2303 ASSERT(is_unique(node));
2305 /* FAKE FETCH: magically copy the node to the tso's proc;
2306 no Fetch necessary because in reality the node should not have been
2307 moved to the other PE in the first place */
2309 if (CurrentProc!=node_loc) {
2311 belch("## node %p is on PE %d but CurrentProc is %d (TSO %d); assuming fake fetch and adjusting bitmask (old: %#x)",
2312 node, node_loc, CurrentProc, CurrentTSO->id,
2313 // CurrentTSO, where_is(CurrentTSO),
2314 node->header.gran.procs));
2315 node->header.gran.procs = (node->header.gran.procs) | PE_NUMBER(CurrentProc);
2317 belch("## new bitmask of node %p is %#x",
2318 node, node->header.gran.procs));
2319 if (RtsFlags.GranFlags.GranSimStats.Global) {
2320 globalGranStats.tot_fake_fetches++;
2325 // ToDo: check: ASSERT(CurrentProc==node_loc);
2326 while (get_itbl(bqe)->type==TSO) { // q != END_TSO_QUEUE) {
2329 /* bqe points to the current element in the queue
2330 next points to the next element in the queue */
2332 //tso = (StgTSO *)bqe; // wastes an assignment to get the type right
2333 //tso_loc = where_is(tso);
2335 bqe = unblockOneLocked(bqe, node);
2338 /* if this is the BQ of an RBH, we have to put back the info ripped out of
2339 the closure to make room for the anchor of the BQ */
2340 if (bqe!=END_BQ_QUEUE) {
2341 ASSERT(get_itbl(node)->type == RBH && get_itbl(bqe)->type == CONSTR);
2343 ASSERT((info_ptr==&RBH_Save_0_info) ||
2344 (info_ptr==&RBH_Save_1_info) ||
2345 (info_ptr==&RBH_Save_2_info));
2347 /* cf. convertToRBH in RBH.c for writing the RBHSave closure */
2348 ((StgRBH *)node)->blocking_queue = (StgBlockingQueueElement *)((StgRBHSave *)bqe)->payload[0];
2349 ((StgRBH *)node)->mut_link = (StgMutClosure *)((StgRBHSave *)bqe)->payload[1];
2352 belch("## Filled in RBH_Save for %p (%s) at end of AwBQ",
2353 node, info_type(node)));
2356 /* statistics gathering */
2357 if (RtsFlags.GranFlags.GranSimStats.Global) {
2358 // globalGranStats.tot_bq_processing_time += bq_processing_time;
2359 globalGranStats.tot_bq_len += len; // total length of all bqs awakened
2360 // globalGranStats.tot_bq_len_local += len_local; // same for local TSOs only
2361 globalGranStats.tot_awbq++; // total no. of bqs awakened
2364 fprintf(stderr,"## BQ Stats of %p: [%d entries] %s\n",
2365 node, len, (bqe!=END_BQ_QUEUE) ? "RBH" : ""));
2369 awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
2371 StgBlockingQueueElement *bqe, *next;
2373 ACQUIRE_LOCK(&sched_mutex);
2375 IF_PAR_DEBUG(verbose,
2376 belch("## AwBQ for node %p on [%x]: ",
2379 ASSERT(get_itbl(q)->type == TSO ||
2380 get_itbl(q)->type == BLOCKED_FETCH ||
2381 get_itbl(q)->type == CONSTR);
2384 while (get_itbl(bqe)->type==TSO ||
2385 get_itbl(bqe)->type==BLOCKED_FETCH) {
2386 bqe = unblockOneLocked(bqe, node);
2388 RELEASE_LOCK(&sched_mutex);
2391 #else /* !GRAN && !PAR */
2393 awakenBlockedQueue(StgTSO *tso)
2395 ACQUIRE_LOCK(&sched_mutex);
2396 while (tso != END_TSO_QUEUE) {
2397 tso = unblockOneLocked(tso);
2399 RELEASE_LOCK(&sched_mutex);
2403 //@node Exception Handling Routines, Debugging Routines, Blocking Queue Routines, Main scheduling code
2404 //@subsection Exception Handling Routines
2406 /* ---------------------------------------------------------------------------
2408 interruptStgRts() - usually called inside a signal handler so it mustn't do anything fancy.
2409 ------------------------------------------------------------------------ */
2412 interruptStgRts(void)
{
    interrupted    = 1;
    context_switch = 1;
}
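/* Illustrative sketch, not part of the original source: a console interrupt
 * handler need only call interruptStgRts() and return, leaving the scheduler
 * to notice the flags at its next context-switch check.  The handler name is
 * hypothetical.
 */
#if 0
static void ctrlc_handler (int sig)
{
    interruptStgRts();
}
#endif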
2418 /* -----------------------------------------------------------------------------
2421 This is for use when we raise an exception in another thread, which may be blocked.
2423 This has nothing to do with the UnblockThread event in GranSim. -- HWL
2424 -------------------------------------------------------------------------- */
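/* Illustrative sketch, not part of the original source: every case of
 * unblockThread below uses the same splice-out pattern -- walk the queue
 * keeping a pointer to the previous link field, and overwrite that field
 * when the target TSO is found.  The helper below is hypothetical and shows
 * the pattern for the plain StgTSO queues of the standard build.
 */
#if 0
static void remove_tso_from_queue (StgTSO **head, StgTSO *tso)
{
    StgTSO *t, **last;
    for (last = head, t = *head; t != END_TSO_QUEUE;
         last = &t->link, t = t->link) {
        if (t == tso) {
            *last = t->link;          /* splice the TSO out of the queue */
            return;
        }
    }
    barf("remove_tso_from_queue: TSO not found");
}
#endif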
2426 #if defined(GRAN) || defined(PAR)
2428 NB: only the type of the blocking queue is different in GranSim and GUM
2429 the operations on the queue-elements are the same
2430 long live polymorphism!
2433 unblockThread(StgTSO *tso)
2435 StgBlockingQueueElement *t, **last;
2437 ACQUIRE_LOCK(&sched_mutex);
2438 switch (tso->why_blocked) {
2441 return; /* not blocked */
2444 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
2446 StgBlockingQueueElement *last_tso = END_BQ_QUEUE;
2447 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
2449 last = (StgBlockingQueueElement **)&mvar->head;
2450 for (t = (StgBlockingQueueElement *)mvar->head;
2452 last = &t->link, last_tso = t, t = t->link) {
2453 if (t == (StgBlockingQueueElement *)tso) {
2454 *last = (StgBlockingQueueElement *)tso->link;
2455 if (mvar->tail == tso) {
2456 mvar->tail = (StgTSO *)last_tso;
2461 barf("unblockThread (MVAR): TSO not found");
2464 case BlockedOnBlackHole:
2465 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
2467 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
2469 last = &bq->blocking_queue;
2470 for (t = bq->blocking_queue;
2472 last = &t->link, t = t->link) {
2473 if (t == (StgBlockingQueueElement *)tso) {
2474 *last = (StgBlockingQueueElement *)tso->link;
2478 barf("unblockThread (BLACKHOLE): TSO not found");
2481 case BlockedOnException:
2483 StgTSO *target = tso->block_info.tso;
2485 ASSERT(get_itbl(target)->type == TSO);
2486 ASSERT(target->blocked_exceptions != NULL);
2488 last = (StgBlockingQueueElement **)&target->blocked_exceptions;
2489 for (t = (StgBlockingQueueElement *)target->blocked_exceptions;
2491 last = &t->link, t = t->link) {
2492 ASSERT(get_itbl(t)->type == TSO);
2493 if (t == (StgBlockingQueueElement *)tso) {
2494 *last = (StgBlockingQueueElement *)tso->link;
2498 barf("unblockThread (Exception): TSO not found");
2501 case BlockedOnDelay:
  case BlockedOnRead:
2503 case BlockedOnWrite:
2505 StgBlockingQueueElement *prev = NULL;
2506 for (t = (StgBlockingQueueElement *)blocked_queue_hd; t != END_BQ_QUEUE;
2507 prev = t, t = t->link) {
2508 if (t == (StgBlockingQueueElement *)tso) {
2510 blocked_queue_hd = (StgTSO *)t->link;
2511 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
2512 blocked_queue_tl = END_TSO_QUEUE;
2515 prev->link = t->link;
2516 if ((StgBlockingQueueElement *)blocked_queue_tl == t) {
2517 blocked_queue_tl = (StgTSO *)prev;
2523 barf("unblockThread (I/O): TSO not found");
2527 barf("unblockThread");
2531 tso->link = END_TSO_QUEUE;
2532 tso->why_blocked = NotBlocked;
2533 tso->block_info.closure = NULL;
2534 PUSH_ON_RUN_QUEUE(tso);
2535 RELEASE_LOCK(&sched_mutex);
2539 unblockThread(StgTSO *tso)
2543 ACQUIRE_LOCK(&sched_mutex);
2544 switch (tso->why_blocked) {
2547 return; /* not blocked */
2550 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
2552 StgTSO *last_tso = END_TSO_QUEUE;
2553 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
2556 for (t = mvar->head; t != END_TSO_QUEUE;
2557 last = &t->link, last_tso = t, t = t->link) {
2560 if (mvar->tail == tso) {
2561 mvar->tail = last_tso;
2566 barf("unblockThread (MVAR): TSO not found");
2569 case BlockedOnBlackHole:
2570 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
2572 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
2574 last = &bq->blocking_queue;
2575 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
2576 last = &t->link, t = t->link) {
2582 barf("unblockThread (BLACKHOLE): TSO not found");
2585 case BlockedOnException:
2587 StgTSO *target = tso->block_info.tso;
2589 ASSERT(get_itbl(target)->type == TSO);
2590 ASSERT(target->blocked_exceptions != NULL);
2592 last = &target->blocked_exceptions;
2593 for (t = target->blocked_exceptions; t != END_TSO_QUEUE;
2594 last = &t->link, t = t->link) {
2595 ASSERT(get_itbl(t)->type == TSO);
2601 barf("unblockThread (Exception): TSO not found");
2604 case BlockedOnDelay:
  case BlockedOnRead:
2606 case BlockedOnWrite:
2608 StgTSO *prev = NULL;
2609 for (t = blocked_queue_hd; t != END_TSO_QUEUE;
2610 prev = t, t = t->link) {
2613 blocked_queue_hd = t->link;
2614 if (blocked_queue_tl == t) {
2615 blocked_queue_tl = END_TSO_QUEUE;
2618 prev->link = t->link;
2619 if (blocked_queue_tl == t) {
2620 blocked_queue_tl = prev;
2626 barf("unblockThread (I/O): TSO not found");
2630 barf("unblockThread");
2634 tso->link = END_TSO_QUEUE;
2635 tso->why_blocked = NotBlocked;
2636 tso->block_info.closure = NULL;
2637 PUSH_ON_RUN_QUEUE(tso);
2638 RELEASE_LOCK(&sched_mutex);
2642 /* -----------------------------------------------------------------------------
2645 * The following function implements the magic for raising an
2646 * asynchronous exception in an existing thread.
2648 * We first remove the thread from any queue on which it might be
2649 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
2651 * We strip the stack down to the innermost CATCH_FRAME, building
2652 * thunks in the heap for all the active computations, so they can
2653 * be restarted if necessary. When we reach a CATCH_FRAME, we build
2654 * an application of the handler to the exception, and push it on
2655 * the top of the stack.
2657 * How exactly do we save all the active computations? We create an
2658 * AP_UPD for every UpdateFrame on the stack. Entering one of these
2659 * AP_UPDs pushes everything from the corresponding update frame
2660 * upwards onto the stack. (Actually, it pushes everything up to the
2661 * next update frame plus a pointer to the next AP_UPD object.
2662 * Entering the next AP_UPD object pushes more onto the stack until we
2663 * reach the last AP_UPD object - at which point the stack should look
2664 * exactly as it did when we killed the TSO and we can continue
2665 * execution by entering the closure on top of the stack.)
2667 * We can also kill a thread entirely - this happens if either (a) the
2668 * exception passed to raiseAsync is NULL, or (b) there's no
2669 * CATCH_FRAME on the stack. In either case, we strip the entire
2670 * stack and replace the thread with a zombie.
2672 * -------------------------------------------------------------------------- */
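/* Illustrative sketch, not part of the original source: the two entry points
 * into this machinery, as seen from a hypothetical caller that already holds
 * a pointer to the target TSO.  some_exception_closure is a placeholder.
 */
#if 0
raiseAsync(tso, (StgClosure *)some_exception_closure); /* strip the stack to a CATCH_FRAME */
deleteThread(tso);                  /* raiseAsync(tso,NULL): no handler, kill the thread */
#endif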
2675 deleteThread(StgTSO *tso)
{
2677 raiseAsync(tso,NULL);
}
2681 raiseAsync(StgTSO *tso, StgClosure *exception)
2683 StgUpdateFrame* su = tso->su;
2684 StgPtr sp = tso->sp;
2686 /* Thread already dead? */
2687 if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
2691 IF_DEBUG(scheduler, sched_belch("raising exception in thread %ld.", tso->id));
2693 /* Remove it from any blocking queues */
2696 /* The stack freezing code assumes there's a closure pointer on
2697 * the top of the stack. This isn't always the case with compiled
2698 * code, so we have to push a dummy closure on the top which just
2699 * returns to the next return address on the stack.
2701 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
2702 *(--sp) = (W_)&dummy_ret_closure;
2706 int words = ((P_)su - (P_)sp) - 1;
2710 /* If we find a CATCH_FRAME, and we've got an exception to raise,
2711 * then build PAP(handler,exception,realworld#), and leave it on
2712 * top of the stack ready to enter.
2714 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
2715 StgCatchFrame *cf = (StgCatchFrame *)su;
2716 /* we've got an exception to raise, so let's pass it to the
2717 * handler in this frame.
2719 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 2);
2720 TICK_ALLOC_UPD_PAP(3,0);
2721 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
2724 ap->fun = cf->handler; /* :: Exception -> IO a */
2725 ap->payload[0] = exception;
2726 ap->payload[1] = ARG_TAG(0); /* realworld token */
2728 /* throw away the stack from Sp up to and including the
2731 sp = (P_)su + sizeofW(StgCatchFrame) - 1;
2734 /* Restore the blocked/unblocked state for asynchronous exceptions
2735 * at the CATCH_FRAME.
2737 * If exceptions were unblocked at the catch, arrange that they
2738 * are unblocked again after executing the handler by pushing an
2739 * unblockAsyncExceptions_ret stack frame.
2741 if (!cf->exceptions_blocked) {
2742 *(sp--) = (W_)&unblockAsyncExceptionszh_ret_info;
2745 /* Ensure that async exceptions are blocked when running the handler.
2747 if (tso->blocked_exceptions == NULL) {
2748 tso->blocked_exceptions = END_TSO_QUEUE;
2751 /* Put the newly-built PAP on top of the stack, ready to execute
2752 * when the thread restarts.
2756 tso->what_next = ThreadEnterGHC;
2757 IF_DEBUG(sanity, checkTSO(tso));
2761 /* First build an AP_UPD consisting of the stack chunk above the
2762 * current update frame, with the top word on the stack as the
2765 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
2770 ap->fun = (StgClosure *)sp[0];
2772 for(i=0; i < (nat)words; ++i) {
2773 ap->payload[i] = (StgClosure *)*sp++;
2776 switch (get_itbl(su)->type) {
2780 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
2781 TICK_ALLOC_UP_THK(words+1,0);
2784 fprintf(stderr, "scheduler: Updating ");
2785 printPtr((P_)su->updatee);
2786 fprintf(stderr, " with ");
2787 printObj((StgClosure *)ap);
2790 /* Replace the updatee with an indirection - happily
2791 * this will also wake up any threads currently
2792 * waiting on the result.
2794 UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
2796 sp += sizeofW(StgUpdateFrame) -1;
2797 sp[0] = (W_)ap; /* push onto stack */
2803 StgCatchFrame *cf = (StgCatchFrame *)su;
2806 /* We want a PAP, not an AP_UPD. Fortunately, the
2807 * layout's the same.
2809 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
2810 TICK_ALLOC_UPD_PAP(words+1,0);
2812 /* now build o = FUN(catch,ap,handler) */
2813 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
2814 TICK_ALLOC_FUN(2,0);
2815 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
2816 o->payload[0] = (StgClosure *)ap;
2817 o->payload[1] = cf->handler;
2820 fprintf(stderr, "scheduler: Built ");
2821 printObj((StgClosure *)o);
2824 /* pop the old handler and put o on the stack */
2826 sp += sizeofW(StgCatchFrame) - 1;
2833 StgSeqFrame *sf = (StgSeqFrame *)su;
2836 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
2837 TICK_ALLOC_UPD_PAP(words+1,0);
2839 /* now build o = FUN(seq,ap) */
2840 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
2841 TICK_ALLOC_SE_THK(1,0);
2842 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
2843 o->payload[0] = (StgClosure *)ap;
2846 fprintf(stderr, "scheduler: Built ");
2847 printObj((StgClosure *)o);
2850 /* pop the old handler and put o on the stack */
2852 sp += sizeofW(StgSeqFrame) - 1;
2858 /* We've stripped the entire stack, the thread is now dead. */
2859 sp += sizeofW(StgStopFrame) - 1;
2860 sp[0] = (W_)exception; /* save the exception */
2861 tso->what_next = ThreadKilled;
2862 tso->su = (StgUpdateFrame *)(sp+1);
2873 /* -----------------------------------------------------------------------------
2874 resurrectThreads is called after garbage collection on the list of
2875 threads found to be garbage. Each of these threads will be woken
2876 up and sent a signal: BlockedOnDeadMVar if the thread was blocked
2877 on an MVar, or NonTermination if the thread was blocked on a Black Hole.
2879 -------------------------------------------------------------------------- */
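/* Illustrative sketch, not part of the original source: the garbage collector
 * is the intended caller, handing over the list of threads it found to be
 * unreachable; the list name below is hypothetical.
 */
#if 0
resurrectThreads(garbage_threads);
#endif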
2882 resurrectThreads( StgTSO *threads )
2886 for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
2887 next = tso->global_link;
2888 tso->global_link = all_threads;
2890 IF_DEBUG(scheduler, sched_belch("resurrecting thread %d", tso->id));
2892 switch (tso->why_blocked) {
  case BlockedOnMVar:
2894 case BlockedOnException:
2895 raiseAsync(tso,(StgClosure *)BlockedOnDeadMVar_closure);
2897 case BlockedOnBlackHole:
2898 raiseAsync(tso,(StgClosure *)NonTermination_closure);
2901 /* This might happen if the thread was blocked on a black hole
2902 * belonging to a thread that we've just woken up (raiseAsync
2903 * can wake up threads, remember...).
2907 barf("resurrectThreads: thread blocked in a strange way");
2912 /* -----------------------------------------------------------------------------
2913 * Blackhole detection: if we reach a deadlock, test whether any
2914 * threads are blocked on themselves. Any threads which are found to
2915 * be self-blocked get sent a NonTermination exception.
2917 * This is only done in a deadlock situation in order to avoid
2918 * performance overhead in the normal case.
2919 * -------------------------------------------------------------------------- */
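/* Illustrative sketch, not part of the original source: only the deadlock
 * path in the scheduler is expected to call this, roughly as follows.
 */
#if 0
if (run_queue_hd == END_TSO_QUEUE && blocked_queue_hd == END_TSO_QUEUE) {
    /* nothing runnable and nothing waiting on I/O: look for threads
     * blocked on their own black holes */
    detectBlackHoles();
}
#endif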
2922 detectBlackHoles( void )
2924 StgTSO *t = all_threads;
2925 StgUpdateFrame *frame;
2926 StgClosure *blocked_on;
2928 for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
2930 if (t->why_blocked != BlockedOnBlackHole) {
2934 blocked_on = t->block_info.closure;
2936 for (frame = t->su; ; frame = frame->link) {
2937 switch (get_itbl(frame)->type) {
2940 if (frame->updatee == blocked_on) {
2941 /* We are blocking on one of our own computations, so
2942 * send this thread the NonTermination exception.
2945 sched_belch("thread %d is blocked on itself", t->id));
2946 raiseAsync(t, (StgClosure *)NonTermination_closure);
2967 //@node Debugging Routines, Index, Exception Handling Routines, Main scheduling code
2968 //@subsection Debugging Routines
2970 /* -----------------------------------------------------------------------------
2971 Debugging: why is a thread blocked
2972 -------------------------------------------------------------------------- */
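/* Illustrative usage note, not part of the original source: these routines
 * are intended to be called by hand from a debugger, e.g.
 *
 *     (gdb) call printAllThreads()
 *
 * which prints one line per thread via printThreadStatus and
 * printThreadBlockage.
 */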
2977 printThreadBlockage(StgTSO *tso)
2979 switch (tso->why_blocked) {
  case BlockedOnRead:
2981 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
2983 case BlockedOnWrite:
2984 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
2986 case BlockedOnDelay:
2987 #if defined(HAVE_SETITIMER) || defined(mingw32_TARGET_OS)
2988 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
2990 fprintf(stderr,"blocked on delay of %d ms",
2991 tso->block_info.target - getourtimeofday());
2995 fprintf(stderr,"blocked on an MVar");
2997 case BlockedOnException:
2998 fprintf(stderr,"blocked on delivering an exception to thread %d",
2999 tso->block_info.tso->id);
3001 case BlockedOnBlackHole:
3002 fprintf(stderr,"blocked on a black hole");
3005 fprintf(stderr,"not blocked");
3009 fprintf(stderr,"blocked on global address; local FM_BQ is %p (%s)",
3010 tso->block_info.closure, info_type(tso->block_info.closure));
3012 case BlockedOnGA_NoSend:
3013 fprintf(stderr,"blocked on global address (no send); local FM_BQ is %p (%s)",
3014 tso->block_info.closure, info_type(tso->block_info.closure));
3018 barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%d)",
3019 tso->why_blocked, tso->id, tso);
3024 printThreadStatus(StgTSO *tso)
3026 switch (tso->what_next) {
3028 fprintf(stderr,"has been killed");
3030 case ThreadComplete:
3031 fprintf(stderr,"has completed");
3034 printThreadBlockage(tso);
3039 printAllThreads(void)
3043 sched_belch("all threads:");
3044 for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
3045 fprintf(stderr, "\tthread %d is ", t->id);
3046 printThreadStatus(t);
3047 fprintf(stderr,"\n");
3052 /* Print a whole blocking queue attached to node (debugging only). */
3057 print_bq (StgClosure *node)
3059 StgBlockingQueueElement *bqe;
3063 fprintf(stderr,"## BQ of closure %p (%s): ",
3064 node, info_type(node));
3066 /* should cover all closures that may have a blocking queue */
3067 ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
3068 get_itbl(node)->type == FETCH_ME_BQ ||
3069 get_itbl(node)->type == RBH);
3071 ASSERT(node!=(StgClosure*)NULL); // sanity check
3073 /* NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure */
3075 for (bqe = ((StgBlockingQueue*)node)->blocking_queue, end = (bqe==END_BQ_QUEUE);
3076 !end; // iterate until bqe points to a CONSTR
3077 end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE), bqe = end ? END_BQ_QUEUE : bqe->link) {
3078 ASSERT(bqe != END_BQ_QUEUE); // sanity check
3079 ASSERT(bqe != (StgTSO*)NULL); // sanity check
3080 /* types of closures that may appear in a blocking queue */
3081 ASSERT(get_itbl(bqe)->type == TSO ||
3082 get_itbl(bqe)->type == BLOCKED_FETCH ||
3083 get_itbl(bqe)->type == CONSTR);
3084 /* only BQs of an RBH end with an RBH_Save closure */
3085 ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);
3087 switch (get_itbl(bqe)->type) {
3089 fprintf(stderr," TSO %d (%x),",
3090 ((StgTSO *)bqe)->id, ((StgTSO *)bqe));
3093 fprintf(stderr," BF (node=%p, ga=((%x, %d, %x)),",
3094 ((StgBlockedFetch *)bqe)->node,
3095 ((StgBlockedFetch *)bqe)->ga.payload.gc.gtid,
3096 ((StgBlockedFetch *)bqe)->ga.payload.gc.slot,
3097 ((StgBlockedFetch *)bqe)->ga.weight);
3100 fprintf(stderr," %s (IP %p),",
3101 (get_itbl(bqe) == &RBH_Save_0_info ? "RBH_Save_0" :
3102 get_itbl(bqe) == &RBH_Save_1_info ? "RBH_Save_1" :
3103 get_itbl(bqe) == &RBH_Save_2_info ? "RBH_Save_2" :
3104 "RBH_Save_?"), get_itbl(bqe));
3107 barf("Unexpected closure type %s in blocking queue of %p (%s)",
3108 info_type(bqe), node, info_type(node));
3112 fputc('\n', stderr);
3114 # elif defined(GRAN)
3116 print_bq (StgClosure *node)
3118 StgBlockingQueueElement *bqe;
3119 PEs node_loc, tso_loc;
3122 /* should cover all closures that may have a blocking queue */
3123 ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
3124 get_itbl(node)->type == FETCH_ME_BQ ||
3125 get_itbl(node)->type == RBH);
3127 ASSERT(node!=(StgClosure*)NULL); // sanity check
3128 node_loc = where_is(node);
3130 fprintf(stderr,"## BQ of closure %p (%s) on [PE %d]: ",
3131 node, info_type(node), node_loc);
3134 /* NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure */
3136 for (bqe = ((StgBlockingQueue*)node)->blocking_queue, end = (bqe==END_BQ_QUEUE);
3137 !end; // iterate until bqe points to a CONSTR
3138 end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE), bqe = end ? END_BQ_QUEUE : bqe->link) {
3139 ASSERT(bqe != END_BQ_QUEUE); // sanity check
3140 ASSERT(bqe != (StgBlockingQueueElement *)NULL); // sanity check
3141 /* types of closures that may appear in a blocking queue */
3142 ASSERT(get_itbl(bqe)->type == TSO ||
3143 get_itbl(bqe)->type == CONSTR);
3144 /* only BQs of an RBH end with an RBH_Save closure */
3145 ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);
3147 tso_loc = where_is((StgClosure *)bqe);
3148 switch (get_itbl(bqe)->type) {
3150 fprintf(stderr," TSO %d (%p) on [PE %d],",
3151 ((StgTSO *)bqe)->id, (StgTSO *)bqe, tso_loc);
3154 fprintf(stderr," %s (IP %p),",
3155 (get_itbl(bqe) == &RBH_Save_0_info ? "RBH_Save_0" :
3156 get_itbl(bqe) == &RBH_Save_1_info ? "RBH_Save_1" :
3157 get_itbl(bqe) == &RBH_Save_2_info ? "RBH_Save_2" :
3158 "RBH_Save_?"), get_itbl(bqe));
3161 barf("Unexpected closure type %s in blocking queue of %p (%s)",
3162 info_type((StgClosure *)bqe), node, info_type(node));
3166 fputc('\n', stderr);
3170 /* Nice and easy: only TSOs on the blocking queue */
3173 print_bq (StgClosure *node)
3177 ASSERT(node!=(StgClosure*)NULL); // sanity check
3178 for (tso = ((StgBlockingQueue*)node)->blocking_queue;
3179 tso != END_TSO_QUEUE;
3181 ASSERT(tso!=NULL && tso!=END_TSO_QUEUE); // sanity check
3182 ASSERT(get_itbl(tso)->type == TSO); // guess what, sanity check
3183 fprintf(stderr," TSO %d (%p),", tso->id, tso);
3185 fputc('\n', stderr);
3196 for (i=0, tso=run_queue_hd;
3197 tso != END_TSO_QUEUE;
3206 sched_belch(char *s, ...)
3211 fprintf(stderr, "scheduler (task %ld): ", pthread_self());
3213 fprintf(stderr, "scheduler: ");
3215 vfprintf(stderr, s, ap);
3216 fprintf(stderr, "\n");
3222 //@node Index, , Debugging Routines, Main scheduling code
3226 //* MainRegTable:: @cindex\s-+MainRegTable
3227 //* StgMainThread:: @cindex\s-+StgMainThread
3228 //* awaken_blocked_queue:: @cindex\s-+awaken_blocked_queue
3229 //* blocked_queue_hd:: @cindex\s-+blocked_queue_hd
3230 //* blocked_queue_tl:: @cindex\s-+blocked_queue_tl
3231 //* context_switch:: @cindex\s-+context_switch
3232 //* createThread:: @cindex\s-+createThread
3233 //* free_capabilities:: @cindex\s-+free_capabilities
3234 //* gc_pending_cond:: @cindex\s-+gc_pending_cond
3235 //* initScheduler:: @cindex\s-+initScheduler
3236 //* interrupted:: @cindex\s-+interrupted
3237 //* n_free_capabilities:: @cindex\s-+n_free_capabilities
3238 //* next_thread_id:: @cindex\s-+next_thread_id
3239 //* print_bq:: @cindex\s-+print_bq
3240 //* run_queue_hd:: @cindex\s-+run_queue_hd
3241 //* run_queue_tl:: @cindex\s-+run_queue_tl
3242 //* sched_mutex:: @cindex\s-+sched_mutex
3243 //* schedule:: @cindex\s-+schedule
3244 //* take_off_run_queue:: @cindex\s-+take_off_run_queue
3245 //* task_ids:: @cindex\s-+task_ids
3246 //* term_mutex:: @cindex\s-+term_mutex
3247 //* thread_ready_cond:: @cindex\s-+thread_ready_cond