1 /* ---------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.45 2000/01/22 18:00:03 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
8 * The main scheduling code in GranSim is quite different from that in std
9 * (concurrent) Haskell: while concurrent Haskell just iterates over the
10 * threads in the runnable queue, GranSim is event driven, i.e. it iterates
11 * over the events in the global event queue. -- HWL
12 * --------------------------------------------------------------------------*/
14 //@node Main scheduling code, , ,
15 //@section Main scheduling code
17 /* Version with scheduler monitor support for SMPs.
19 This design provides a high-level API to create and schedule threads etc.
20 as documented in the SMP design document.
22 It uses a monitor design controlled by a single mutex to exercise control
23 over accesses to shared data structures, and builds on the Posix threads
26 The majority of state is shared. In order to keep essential per-task state,
27 there is a Capability structure, which contains all the information
28 needed to run a thread: its STG registers, a pointer to its TSO, a
29 nursery etc. During STG execution, a pointer to the capability is
30 kept in a register (BaseReg).
32 In a non-SMP build, there is one global capability, namely MainRegTable.
39 //* Variables and Data structures::
41 //* Main scheduling loop::
42 //* Suspend and Resume::
44 //* Garbage Collection Routines::
45 //* Blocking Queue Routines::
46 //* Exception Handling Routines::
47 //* Debugging Routines::
51 //@node Includes, Variables and Data structures, Main scheduling code, Main scheduling code
52 //@subsection Includes
60 #include "StgStartup.h"
64 #include "StgMiscClosures.h"
66 #include "Evaluator.h"
67 #include "Exception.h"
71 #include "Profiling.h"
75 #if defined(GRAN) || defined(PAR)
76 # include "GranSimRts.h"
78 # include "ParallelRts.h"
79 # include "Parallel.h"
80 # include "ParallelDebug.h"
87 //@node Variables and Data structures, Prototypes, Includes, Main scheduling code
88 //@subsection Variables and Data structures
92 * These are the threads which clients have requested that we run.
94 * In an SMP build, we might have several concurrent clients all
95 * waiting for results, and each one will wait on a condition variable
96 * until the result is available.
98 * In non-SMP, clients are strictly nested: the first client calls
99 * into the RTS, which might call out again to C with a _ccall_GC, and
100 * eventually re-enter the RTS.
102 * Main threads information is kept in a linked list:
104 //@cindex StgMainThread
105 typedef struct StgMainThread_ {
107 SchedulerStatus stat;
110 pthread_cond_t wakeup;
112 struct StgMainThread_ *link;
115 /* Main thread queue.
116 * Locks required: sched_mutex.
118 static StgMainThread *main_threads;
121 * Locks required: sched_mutex.
126 StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */
127 /* rtsTime TimeOfNextEvent, EndOfTimeSlice; now in GranSim.c */
130 In GranSim we have a runnable and a blocked queue for each processor.
131 In order to minimise code changes new arrays run_queue_hds/tls
132 are created. run_queue_hd is then a short cut (macro) for
133 run_queue_hds[CurrentProc] (see GranSim.h).
136 StgTSO *run_queue_hds[MAX_PROC], *run_queue_tls[MAX_PROC];
137 StgTSO *blocked_queue_hds[MAX_PROC], *blocked_queue_tls[MAX_PROC];
138 StgTSO *ccalling_threadss[MAX_PROC];
142 //@cindex run_queue_hd
143 //@cindex run_queue_tl
144 //@cindex blocked_queue_hd
145 //@cindex blocked_queue_tl
146 StgTSO *run_queue_hd, *run_queue_tl;
147 StgTSO *blocked_queue_hd, *blocked_queue_tl;
149 /* Threads suspended in _ccall_GC.
150 * Locks required: sched_mutex.
152 static StgTSO *suspended_ccalling_threads;
154 static void GetRoots(void);
155 static StgTSO *threadStackOverflow(StgTSO *tso);
158 /* KH: The following two flags are shared memory locations. There is no need
159 to lock them, since they are only unset at the end of a scheduler
163 /* flag set by signal handler to precipitate a context switch */
164 //@cindex context_switch
167 /* if this flag is set as well, give up execution */
168 //@cindex interrupted
171 /* Next thread ID to allocate.
172 * Locks required: sched_mutex
174 //@cindex next_thread_id
175 StgThreadID next_thread_id = 1;
178 * Pointers to the state of the current thread.
179 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
180 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
183 /* The smallest stack size that makes any sense is:
184 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
185 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
186 * + 1 (the realworld token for an IO thread)
187 * + 1 (the closure to enter)
189 * A thread with this stack will bomb immediately with a stack
190 * overflow, which will increase its stack size.
193 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
195 /* Free capability list.
196 * Locks required: sched_mutex.
199 //@cindex free_capabilities
200 //@cindex n_free_capabilities
201 Capability *free_capabilities; /* Available capabilities for running threads */
202 nat n_free_capabilities; /* total number of available capabilities */
204 //@cindex MainRegTable
205 Capability MainRegTable; /* for non-SMP, we have one global capability */
209 StgTSO *CurrentTSOs[MAX_PROC];
216 /* All our current task ids, saved in case we need to kill them later.
223 void addToBlockedQueue ( StgTSO *tso );
225 static void schedule ( void );
226 void interruptStgRts ( void );
227 static StgTSO * createThread_ ( nat size, rtsBool have_lock );
230 static void sched_belch(char *s, ...);
234 //@cindex sched_mutex
236 //@cindex thread_ready_cond
237 //@cindex gc_pending_cond
238 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
239 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
240 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
241 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
248 rtsTime TimeOfLastYield;
252 * The thread state for the main thread.
253 // ToDo: check whether not needed any more
258 //@node Prototypes, Main scheduling loop, Variables and Data structures, Main scheduling code
259 //@subsection Prototypes
261 //@node Main scheduling loop, Suspend and Resume, Prototypes, Main scheduling code
262 //@subsection Main scheduling loop
264 /* ---------------------------------------------------------------------------
265 Main scheduling loop.
267 We use round-robin scheduling, each thread returning to the
268 scheduler loop when one of these conditions is detected:
271 * timer expires (thread yields)
276 Locking notes: we acquire the scheduler lock once at the beginning
277 of the scheduler loop, and release it when
279 * running a thread, or
280 * waiting for work, or
281 * waiting for a GC to complete.
283 ------------------------------------------------------------------------ */
290 StgThreadReturnCode ret;
299 ACQUIRE_LOCK(&sched_mutex);
302 # error ToDo: implement GranSim scheduler
304 while (!GlobalStopPending) { /* GlobalStopPending set in par_exit */
306 if (PendingFetches != END_BF_QUEUE) {
313 /* If we're interrupted (the user pressed ^C, or some other
314 * termination condition occurred), kill all the currently running
318 IF_DEBUG(scheduler, sched_belch("interrupted"));
319 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
322 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
325 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
326 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
329 /* Go through the list of main threads and wake up any
330 * clients whose computations have finished. ToDo: this
331 * should be done more efficiently without a linear scan
332 * of the main threads list, somehow...
336 StgMainThread *m, **prev;
337 prev = &main_threads;
338 for (m = main_threads; m != NULL; m = m->link) {
339 switch (m->tso->whatNext) {
342 *(m->ret) = (StgClosure *)m->tso->sp[0];
346 pthread_cond_broadcast(&m->wakeup);
351 pthread_cond_broadcast(&m->wakeup);
359 /* If our main thread has finished or been killed, return.
362 StgMainThread *m = main_threads;
363 if (m->tso->whatNext == ThreadComplete
364 || m->tso->whatNext == ThreadKilled) {
365 main_threads = main_threads->link;
366 if (m->tso->whatNext == ThreadComplete) {
367 /* we finished successfully, fill in the return value */
368 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
379 /* Top up the run queue from our spark pool. We try to make the
380 * number of threads in the run queue equal to the number of
385 nat n = n_free_capabilities;
386 StgTSO *tso = run_queue_hd;
388 /* Count the run queue */
389 while (n > 0 && tso != END_TSO_QUEUE) {
398 break; /* no more sparks in the pool */
400 /* I'd prefer this to be done in activateSpark -- HWL */
401 /* tricky - it needs to hold the scheduler lock and
402 * not try to re-acquire it -- SDM */
404 tso = createThread_(RtsFlags.GcFlags.initialStkSize, rtsTrue);
405 pushClosure(tso,spark);
406 PUSH_ON_RUN_QUEUE(tso);
408 advisory_thread_count++;
412 sched_belch("turning spark of closure %p into a thread",
413 (StgClosure *)spark));
416 /* We need to wake up the other tasks if we just created some
419 if (n_free_capabilities - n > 1) {
420 pthread_cond_signal(&thread_ready_cond);
425 /* Check whether any waiting threads need to be woken up. If the
426 * run queue is empty, and there are no other tasks running, we
427 * can wait indefinitely for something to happen.
428 * ToDo: what if another client comes along & requests another
431 if (blocked_queue_hd != END_TSO_QUEUE) {
433 (run_queue_hd == END_TSO_QUEUE)
435 && (n_free_capabilities == RtsFlags.ParFlags.nNodes)
440 /* check for signals each time around the scheduler */
442 if (signals_pending()) {
443 start_signal_handlers();
447 /* Detect deadlock: when we have no threads to run, there are
448 * no threads waiting on I/O or sleeping, and all the other
449 * tasks are waiting for work, we must have a deadlock. Inform
450 * all the main threads.
453 if (blocked_queue_hd == END_TSO_QUEUE
454 && run_queue_hd == END_TSO_QUEUE
455 && (n_free_capabilities == RtsFlags.ParFlags.nNodes)
458 for (m = main_threads; m != NULL; m = m->link) {
461 pthread_cond_broadcast(&m->wakeup);
466 if (blocked_queue_hd == END_TSO_QUEUE
467 && run_queue_hd == END_TSO_QUEUE) {
468 StgMainThread *m = main_threads;
471 main_threads = m->link;
477 /* If there's a GC pending, don't do anything until it has
481 IF_DEBUG(scheduler,sched_belch("waiting for GC"));
482 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
485 /* block until we've got a thread on the run queue and a free
488 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
489 IF_DEBUG(scheduler, sched_belch("waiting for work"));
490 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
491 IF_DEBUG(scheduler, sched_belch("work now available"));
496 # error ToDo: implement GranSim scheduler
498 /* ToDo: perhaps merge with spark activation above */
499 /* check whether we have local work and send requests if we have none */
500 if (run_queue_hd == END_TSO_QUEUE) { /* no runnable threads */
501 /* :-[ no local threads => look out for local sparks */
502 if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
503 (pending_sparks_hd[REQUIRED_POOL] < pending_sparks_tl[REQUIRED_POOL] ||
504 pending_sparks_hd[ADVISORY_POOL] < pending_sparks_tl[ADVISORY_POOL])) {
506 * ToDo: add GC code check that we really have enough heap afterwards!!
508 * If we're here (no runnable threads) and we have pending
509 * sparks, we must have a space problem. Get enough space
510 * to turn one of those pending sparks into a
514 spark = findSpark(); /* get a spark */
515 if (spark != (rtsSpark) NULL) {
516 tso = activateSpark(spark); /* turn the spark into a thread */
517 IF_PAR_DEBUG(verbose,
518 belch("== [%x] schedule: Created TSO %p (%d); %d threads active",
519 mytid, tso, tso->id, advisory_thread_count));
521 if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
522 belch("^^ failed to activate spark");
524 } /* otherwise fall through & pick-up new tso */
526 IF_PAR_DEBUG(verbose,
527 belch("^^ no local sparks (spark pool contains only NFs: %d)",
528 spark_queue_len(ADVISORY_POOL)));
532 /* =8-[ no local sparks => look for work on other PEs */
535 * We really have absolutely no work. Send out a fish
536 * (there may be some out there already), and wait for
537 * something to arrive. We clearly can't run any threads
538 * until a SCHEDULE or RESUME arrives, and so that's what
539 * we're hoping to see. (Of course, we still have to
540 * respond to other types of messages.)
543 outstandingFishes < RtsFlags.ParFlags.maxFishes ) { // &&
544 // (last_fish_arrived_at+FISH_DELAY < CURRENT_TIME)) {
545 /* fishing set in sendFish, processFish;
546 avoid flooding system with fishes via delay */
548 sendFish(pe, mytid, NEW_FISH_AGE, NEW_FISH_HISTORY,
556 } else if (PacketsWaiting()) { /* Look for incoming messages */
560 /* Now we are sure that we have some work available */
561 ASSERT(run_queue_hd != END_TSO_QUEUE);
562 /* Take a thread from the run queue, if we have work */
563 t = take_off_run_queue(END_TSO_QUEUE);
565 /* ToDo: write something to the log-file
566 if (RTSflags.ParFlags.granSimStats && !sameThread)
567 DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
572 IF_DEBUG(scheduler, belch("--^^ %d sparks on [%#x] (hd=%x; tl=%x; lim=%x)",
573 spark_queue_len(ADVISORY_POOL), CURRENT_PROC,
574 pending_sparks_hd[ADVISORY_POOL],
575 pending_sparks_tl[ADVISORY_POOL],
576 pending_sparks_lim[ADVISORY_POOL]));
578 IF_DEBUG(scheduler, belch("--== %d threads on [%#x] (hd=%x; tl=%x)",
579 run_queue_len(), CURRENT_PROC,
580 run_queue_hd, run_queue_tl));
584 we are running a different TSO, so write a schedule event to log file
585 NB: If we use fair scheduling we also have to write a deschedule
586 event for LastTSO; with unfair scheduling we know that the
587 previous tso has blocked whenever we switch to another tso, so
588 we don't need it in GUM for now
590 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
591 GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
595 #else /* !GRAN && !PAR */
597 /* grab a thread from the run queue
606 cap = free_capabilities;
607 free_capabilities = cap->link;
608 n_free_capabilities--;
613 cap->rCurrentTSO = t;
615 /* set the context_switch flag
617 if (run_queue_hd == END_TSO_QUEUE)
622 RELEASE_LOCK(&sched_mutex);
624 IF_DEBUG(scheduler,sched_belch("running thread %d", t->id));
626 /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
627 /* Run the current thread
629 switch (cap->rCurrentTSO->whatNext) {
632 /* Thread already finished, return to scheduler. */
633 ret = ThreadFinished;
636 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
639 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
641 case ThreadEnterHugs:
645 IF_DEBUG(scheduler,sched_belch("entering Hugs"));
646 c = (StgClosure *)(cap->rCurrentTSO->sp[0]);
647 cap->rCurrentTSO->sp += 1;
652 barf("Panic: entered a BCO but no bytecode interpreter in this build");
655 barf("schedule: invalid whatNext field");
657 /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
659 /* Costs for the scheduler are assigned to CCS_SYSTEM */
664 ACQUIRE_LOCK(&sched_mutex);
667 IF_DEBUG(scheduler,fprintf(stderr,"scheduler (task %ld): ", pthread_self()););
669 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: "););
671 t = cap->rCurrentTSO;
675 /* make all the running tasks block on a condition variable,
676 * maybe set context_switch and wait till they all pile in,
677 * then have them wait on a GC condition variable.
679 IF_DEBUG(scheduler,belch("thread %ld stopped: HeapOverflow", t->id));
682 ready_to_gc = rtsTrue;
683 context_switch = 1; /* stop other threads ASAP */
684 PUSH_ON_RUN_QUEUE(t);
688 /* just adjust the stack for this thread, then pop it back
691 IF_DEBUG(scheduler,belch("thread %ld stopped, StackOverflow", t->id));
695 /* enlarge the stack */
696 StgTSO *new_t = threadStackOverflow(t);
698 /* This TSO has moved, so update any pointers to it from the
699 * main thread stack. It better not be on any other queues...
702 for (m = main_threads; m != NULL; m = m->link) {
707 ready_to_gc = rtsTrue;
709 PUSH_ON_RUN_QUEUE(new_t);
716 DumpGranEvent(GR_DESCHEDULE, t));
717 globalGranStats.tot_yields++;
720 DumpGranEvent(GR_DESCHEDULE, t));
722 /* put the thread back on the run queue. Then, if we're ready to
723 * GC, check whether this is the last task to stop. If so, wake
724 * up the GC thread. getThread will block during a GC until the
728 if (t->whatNext == ThreadEnterHugs) {
729 /* ToDo: or maybe a timer expired when we were in Hugs?
730 * or maybe someone hit ctrl-C
732 belch("thread %ld stopped to switch to Hugs", t->id);
734 belch("thread %ld stopped, yielding", t->id);
738 APPEND_TO_RUN_QUEUE(t);
743 # error ToDo: implement GranSim scheduler
746 DumpGranEvent(GR_DESCHEDULE, t));
749 /* don't need to do anything. Either the thread is blocked on
750 * I/O, in which case we'll have called addToBlockedQueue
751 * previously, or it's blocked on an MVar or Blackhole, in which
752 * case it'll be on the relevant queue already.
755 fprintf(stderr, "thread %d stopped, ", t->id);
756 printThreadBlockage(t);
757 fprintf(stderr, "\n"));
762 /* Need to check whether this was a main thread, and if so, signal
763 * the task that started it with the return value. If we have no
764 * more main threads, we probably need to stop all the tasks until
767 IF_DEBUG(scheduler,belch("thread %ld finished", t->id));
768 t->whatNext = ThreadComplete;
770 // ToDo: endThread(t, CurrentProc); // clean-up the thread
772 advisory_thread_count--;
773 if (RtsFlags.ParFlags.ParStats.Full)
774 DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);
779 barf("doneThread: invalid thread return code");
783 cap->link = free_capabilities;
784 free_capabilities = cap;
785 n_free_capabilities++;
789 if (ready_to_gc && n_free_capabilities == RtsFlags.ParFlags.nNodes)
794 /* everybody back, start the GC.
795 * Could do it in this thread, or signal a condition var
796 * to do it in another thread. Either way, we need to
797 * broadcast on gc_pending_cond afterward.
800 IF_DEBUG(scheduler,sched_belch("doing GC"));
802 GarbageCollect(GetRoots);
803 ready_to_gc = rtsFalse;
805 pthread_cond_broadcast(&gc_pending_cond);
810 IF_GRAN_DEBUG(unused,
811 print_eventq(EventHd));
813 event = get_next_event();
817 /* ToDo: wait for next message to arrive rather than busy wait */
822 t = take_off_run_queue(END_TSO_QUEUE);
825 } /* end of while(1) */
828 /* A hack for Hugs concurrency support. Needs sanitisation (?) */
/* deleteAllThreads: Hugs-concurrency hack (see comment above).
 * Walks the run queue and the blocked queue in turn, then resets both
 * queues to empty.
 * NOTE(review): the loop bodies and the declaration of `t` fall on
 * lines elided from this excerpt -- presumably each iteration kills
 * the thread `t`; confirm against the full source. */
829 void deleteAllThreads ( void )
832 IF_DEBUG(scheduler,sched_belch("deleteAllThreads()"));
833 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
836 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
/* both queues are now logically empty; reset head and tail pointers */
839 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
840 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
843 /* startThread and insertThread are now in GranSim.c -- HWL */
845 //@node Suspend and Resume, Run queue code, Main scheduling loop, Main scheduling code
846 //@subsection Suspend and Resume
848 /* ---------------------------------------------------------------------------
849 * Suspending & resuming Haskell threads.
851 * When making a "safe" call to C (aka _ccall_GC), the task gives back
852 * its capability before calling the C function. This allows another
853 * task to pick up the capability and carry on running Haskell
854 * threads. It also means that if the C call blocks, it won't lock
857 * The Haskell thread making the C call is put to sleep for the
858 * duration of the call, on the suspended_ccalling_threads queue. We
859 * give out a token to the task, which it can use to resume the thread
860 * on return from the C function.
861 * ------------------------------------------------------------------------- */
/* suspendThread: called when a Haskell thread makes a "safe" C call
 * (_ccall_GC).  Under sched_mutex: pause the current TSO, push it
 * onto the suspended_ccalling_threads list, record its thread id as
 * the resume token, and return the capability to the free list so
 * another task can run Haskell threads while the C call proceeds.
 * NOTE(review): the declaration of `tok` and the `return tok;` fall
 * on lines elided from this excerpt. */
864 suspendThread( Capability *cap )
868 ACQUIRE_LOCK(&sched_mutex);
871 sched_belch("thread %d did a _ccall_gc\n", cap->rCurrentTSO->id));
873 threadPaused(cap->rCurrentTSO);
/* push the current TSO on the suspended-ccalling list */
874 cap->rCurrentTSO->link = suspended_ccalling_threads;
875 suspended_ccalling_threads = cap->rCurrentTSO;
877 /* Use the thread ID as the token; it should be unique */
878 tok = cap->rCurrentTSO->id;
/* hand the capability back to the free list */
881 cap->link = free_capabilities;
882 free_capabilities = cap;
883 n_free_capabilities++;
886 RELEASE_LOCK(&sched_mutex);
/* resumeThread: inverse of suspendThread.  Given the token handed out
 * by suspendThread (the suspended thread's id), find and unlink the
 * TSO from suspended_ccalling_threads, wait for a free capability,
 * and install the TSO as that capability's current thread.  barf()s
 * if no suspended thread matches the token. */
891 resumeThread( StgInt tok )
896 ACQUIRE_LOCK(&sched_mutex);
/* linear scan for the TSO whose id matches the token; `prev` tracks
 * the link slot to patch when unlinking (the unlink statement itself
 * is on a line elided from this excerpt) */
898 prev = &suspended_ccalling_threads;
899 for (tso = suspended_ccalling_threads;
900 tso != END_TSO_QUEUE;
901 prev = &tso->link, tso = tso->link) {
902 if (tso->id == (StgThreadID)tok) {
907 if (tso == END_TSO_QUEUE) {
908 barf("resumeThread: thread not found");
/* block until a capability is free; pthread_cond_wait releases
 * sched_mutex while sleeping and re-acquires it before returning */
912 while (free_capabilities == NULL) {
913 IF_DEBUG(scheduler, sched_belch("waiting to resume"));
914 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
915 IF_DEBUG(scheduler, sched_belch("resuming thread %d", tso->id));
/* pop a capability off the free list and give it this TSO */
917 cap = free_capabilities;
918 free_capabilities = cap->link;
919 n_free_capabilities--;
924 cap->rCurrentTSO = tso;
926 RELEASE_LOCK(&sched_mutex);
931 /* ---------------------------------------------------------------------------
933 * ------------------------------------------------------------------------ */
934 static void unblockThread(StgTSO *tso);
936 /* ---------------------------------------------------------------------------
937 * Comparing Thread ids.
939 * This is used from STG land in the implementation of the
940 * instances of Eq/Ord for ThreadIds.
941 * ------------------------------------------------------------------------ */
/* cmp_thread: three-way comparison of two thread ids, used from STG
 * land to implement the Eq/Ord instances for ThreadId (see comment
 * above).  Returns -1 if tso1's id is smaller, 1 if larger.
 * NOTE(review): the opening brace and the final `return 0;` for the
 * equal case fall on lines elided from this excerpt. */
943 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
945 StgThreadID id1 = tso1->id;
946 StgThreadID id2 = tso2->id;
948 if (id1 < id2) return (-1);
949 if (id1 > id2) return 1;
953 /* ---------------------------------------------------------------------------
956 The new thread starts with the given stack size. Before the
957 scheduler can run, however, this thread needs to have a closure
958 (and possibly some arguments) pushed on its stack. See
959 pushClosure() in Schedule.h.
961 createGenThread() and createIOThread() (in SchedAPI.h) are
962 convenient packaged versions of this function.
963 ------------------------------------------------------------------------ */
964 //@cindex createThread
966 /* currently pri (priority) is only used in a GRAN setup -- HWL */
968 createThread(nat stack_size, StgInt pri)
970 return createThread_(stack_size, rtsFalse, pri);
974 createThread_(nat size, rtsBool have_lock, StgInt pri)
978 createThread(nat stack_size)
980 return createThread_(stack_size, rtsFalse);
984 createThread_(nat size, rtsBool have_lock)
990 /* First check whether we should create a thread at all */
992 /* check that no more than RtsFlags.ParFlags.maxThreads threads are created */
993 if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads) {
995 belch("{createThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)",
996 RtsFlags.ParFlags.maxThreads, advisory_thread_count);
997 return END_TSO_QUEUE;
1003 ASSERT(!RtsFlags.GranFlags.Light || CurrentProc==0);
1006 // ToDo: check whether size = stack_size - TSO_STRUCT_SIZEW
1008 /* catch ridiculously small stack sizes */
1009 if (size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
1010 size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
1013 tso = (StgTSO *)allocate(size);
1014 TICK_ALLOC_TSO(size-sizeofW(StgTSO),0);
1016 stack_size = size - TSO_STRUCT_SIZEW;
1018 // Hmm, this CCS_MAIN is not protected by a PROFILING cpp var;
1019 SET_HDR(tso, &TSO_info, CCS_MAIN);
1021 SET_GRAN_HDR(tso, ThisPE);
1023 tso->whatNext = ThreadEnterGHC;
1025 /* tso->id needs to be unique. For now we use a heavyweight mutex to
1026 protect the increment operation on next_thread_id.
1027 In future, we could use an atomic increment instead.
1030 if (!have_lock) { ACQUIRE_LOCK(&sched_mutex); }
1031 tso->id = next_thread_id++;
1032 if (!have_lock) { RELEASE_LOCK(&sched_mutex); }
1034 tso->why_blocked = NotBlocked;
1035 tso->blocked_exceptions = NULL;
1037 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
1038 tso->stack_size = stack_size;
1039 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
1041 tso->sp = (P_)&(tso->stack) + stack_size;
1044 tso->prof.CCCS = CCS_MAIN;
1047 /* put a stop frame on the stack */
1048 tso->sp -= sizeofW(StgStopFrame);
1049 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
1050 tso->su = (StgUpdateFrame*)tso->sp;
1052 IF_DEBUG(scheduler,belch("---- Initialised TSO %ld (%p), stack size = %lx words",
1053 tso->id, tso, tso->stack_size));
1057 tso->link = END_TSO_QUEUE;
1058 /* uses more flexible routine in GranSim */
1059 insertThread(tso, CurrentProc);
1061 /* In a non-GranSim setup the pushing of a TSO onto the runq is separated
1067 tso->gran.pri = pri;
1068 tso->gran.magic = TSO_MAGIC; // debugging only
1069 tso->gran.sparkname = 0;
1070 tso->gran.startedat = CURRENT_TIME;
1071 tso->gran.exported = 0;
1072 tso->gran.basicblocks = 0;
1073 tso->gran.allocs = 0;
1074 tso->gran.exectime = 0;
1075 tso->gran.fetchtime = 0;
1076 tso->gran.fetchcount = 0;
1077 tso->gran.blocktime = 0;
1078 tso->gran.blockcount = 0;
1079 tso->gran.blockedat = 0;
1080 tso->gran.globalsparks = 0;
1081 tso->gran.localsparks = 0;
1082 if (RtsFlags.GranFlags.Light)
1083 tso->gran.clock = Now; /* local clock */
1085 tso->gran.clock = 0;
1087 IF_DEBUG(gran,printTSO(tso));
1089 tso->par.sparkname = 0;
1090 tso->par.startedat = CURRENT_TIME;
1091 tso->par.exported = 0;
1092 tso->par.basicblocks = 0;
1093 tso->par.allocs = 0;
1094 tso->par.exectime = 0;
1095 tso->par.fetchtime = 0;
1096 tso->par.fetchcount = 0;
1097 tso->par.blocktime = 0;
1098 tso->par.blockcount = 0;
1099 tso->par.blockedat = 0;
1100 tso->par.globalsparks = 0;
1101 tso->par.localsparks = 0;
1105 globalGranStats.tot_threads_created++;
1106 globalGranStats.threads_created_on_PE[CurrentProc]++;
1107 globalGranStats.tot_sq_len += spark_queue_len(CurrentProc);
1108 globalGranStats.tot_sq_probes++;
1111 IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words",
1112 tso->id, tso->stack_size));
1116 /* ---------------------------------------------------------------------------
1119 * scheduleThread puts a thread on the head of the runnable queue.
1120 * This will usually be done immediately after a thread is created.
1121 * The caller of scheduleThread must create the thread using e.g.
1122 * createThread and push an appropriate closure
1123 * on this thread's stack before the scheduler is invoked.
1124 * ------------------------------------------------------------------------ */
/* scheduleThread: make a freshly-created TSO runnable by pushing it
 * on the head of the run queue, under sched_mutex.  The caller must
 * already have pushed an appropriate closure on the thread's stack
 * (see the comment preserved below). */
1127 scheduleThread(StgTSO *tso)
1129 ACQUIRE_LOCK(&sched_mutex);
1131 /* Put the new thread on the head of the runnable queue. The caller
1132 * better push an appropriate closure on this thread's stack
1133 * beforehand. In the SMP case, the thread may start running as
1134 * soon as we release the scheduler lock below.
1136 PUSH_ON_RUN_QUEUE(tso);
1139 IF_DEBUG(scheduler,printTSO(tso));
1140 RELEASE_LOCK(&sched_mutex);
1143 /* ---------------------------------------------------------------------------
1146 * Start up Posix threads to run each of the scheduler tasks.
1147 * I believe the task ids are not needed in the system as defined.
1149 * ------------------------------------------------------------------------ */
1153 taskStart( void *arg STG_UNUSED )
1160 /* ---------------------------------------------------------------------------
1163 * Initialise the scheduler. This resets all the queues - if the
1164 * queues contained any threads, they'll be garbage collected at the
1167 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
1168 * ------------------------------------------------------------------------ */
1172 term_handler(int sig STG_UNUSED)
1175 ACQUIRE_LOCK(&term_mutex);
1177 RELEASE_LOCK(&term_mutex);
1182 //@cindex initScheduler
1189 for (i=0; i<=MAX_PROC; i++) {
1190 run_queue_hds[i] = END_TSO_QUEUE;
1191 run_queue_tls[i] = END_TSO_QUEUE;
1192 blocked_queue_hds[i] = END_TSO_QUEUE;
1193 blocked_queue_tls[i] = END_TSO_QUEUE;
1194 ccalling_threadss[i] = END_TSO_QUEUE;
1197 run_queue_hd = END_TSO_QUEUE;
1198 run_queue_tl = END_TSO_QUEUE;
1199 blocked_queue_hd = END_TSO_QUEUE;
1200 blocked_queue_tl = END_TSO_QUEUE;
1203 suspended_ccalling_threads = END_TSO_QUEUE;
1205 main_threads = NULL;
1210 enteredCAFs = END_CAF_LIST;
1212 /* Install the SIGHUP handler */
1215 struct sigaction action,oact;
1217 action.sa_handler = term_handler;
1218 sigemptyset(&action.sa_mask);
1219 action.sa_flags = 0;
1220 if (sigaction(SIGTERM, &action, &oact) != 0) {
1221 barf("can't install TERM handler");
1227 /* Allocate N Capabilities */
1230 Capability *cap, *prev;
1233 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1234 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
1238 free_capabilities = cap;
1239 n_free_capabilities = RtsFlags.ParFlags.nNodes;
1241 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Allocated %d capabilities\n",
1242 n_free_capabilities););
1245 #if defined(SMP) || defined(PAR)
1258 /* make some space for saving all the thread ids */
1259 task_ids = stgMallocBytes(RtsFlags.ParFlags.nNodes * sizeof(task_info),
1260 "initScheduler:task_ids");
1262 /* and create all the threads */
1263 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1264 r = pthread_create(&tid,NULL,taskStart,NULL);
1266 barf("startTasks: Can't create new Posix thread");
1268 task_ids[i].id = tid;
1269 task_ids[i].mut_time = 0.0;
1270 task_ids[i].mut_etime = 0.0;
1271 task_ids[i].gc_time = 0.0;
1272 task_ids[i].gc_etime = 0.0;
1273 task_ids[i].elapsedtimestart = elapsedtime();
1274 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Started task: %ld\n",tid););
1280 exitScheduler( void )
1285 /* Don't want to use pthread_cancel, since we'd have to install
1286 * these silly exception handlers (pthread_cleanup_{push,pop}) around
1290 /* Cancel all our tasks */
1291 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1292 pthread_cancel(task_ids[i].id);
1295 /* Wait for all the tasks to terminate */
1296 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1297 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: waiting for task %ld\n",
1299 pthread_join(task_ids[i].id, NULL);
1303 /* Send 'em all a SIGHUP. That should shut 'em up.
1305 await_death = RtsFlags.ParFlags.nNodes;
1306 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
1307 pthread_kill(task_ids[i].id,SIGTERM);
1309 while (await_death > 0) {
1315 /* -----------------------------------------------------------------------------
1316 Managing the per-task allocation areas.
1318 Each capability comes with an allocation area. These are
1319 fixed-length block lists into which allocation can be done.
1321 ToDo: no support for two-space collection at the moment???
1322 -------------------------------------------------------------------------- */
1324 /* -----------------------------------------------------------------------------
1325 * waitThread is the external interface for running a new computation
1326 * and waiting for the result.
1328 * In the non-SMP case, we create a new main thread, push it on the
1329 * main-thread stack, and invoke the scheduler to run it. The
1330 * scheduler will return when the top main thread on the stack has
1331 * completed or died, and fill in the necessary fields of the
1332 * main_thread structure.
1334 * In the SMP case, we create a main thread as before, but we then
1335 * create a new condition variable and sleep on it. When our new
1336 * main thread has completed, we'll be woken up and the status/result
1337 * will be in the main_thread struct.
1338 * -------------------------------------------------------------------------- */
/* waitThread: external interface -- run `tso` to completion and store
 * its result through `ret`.  Allocates an StgMainThread record, links
 * it onto main_threads, then (in the SMP build) sleeps on the record's
 * condition variable until the scheduler fills in m->stat.
 * NOTE(review): the initialisation of m->tso/m->ret/m->stat, the
 * scheduler invocation for the non-SMP path, and the final return of
 * `stat` fall on lines elided from this excerpt. */
1341 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
1344 SchedulerStatus stat;
1346 ACQUIRE_LOCK(&sched_mutex);
1348 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
1354 pthread_cond_init(&m->wakeup, NULL);
/* link the new record onto the main-threads list */
1357 m->link = main_threads;
1360 IF_DEBUG(scheduler, fprintf(stderr, "scheduler: new main thread (%d)\n",
/* sleep until the scheduler reports a status for this main thread;
 * sched_mutex is released while waiting */
1365 pthread_cond_wait(&m->wakeup, &sched_mutex);
1366 } while (m->stat == NoStatus);
1369 ASSERT(m->stat != NoStatus);
1375 pthread_cond_destroy(&m->wakeup);
1378 IF_DEBUG(scheduler, fprintf(stderr, "scheduler: main thread (%d) finished\n",
1382 RELEASE_LOCK(&sched_mutex);
1387 //@node Run queue code, Garbage Collextion Routines, Suspend and Resume, Main scheduling code
1388 //@subsection Run queue code
1392 NB: In GranSim we have many run queues; run_queue_hd is actually a macro
1393 unfolding to run_queue_hds[CurrentProc], thus CurrentProc is an
1394 implicit global variable that has to be correct when calling these
1398 /* Put the new thread on the head of the runnable queue.
1399 * The caller of createThread better push an appropriate closure
1400 * on this thread's stack before the scheduler is invoked.
1402 static /* inline */ void
1403 add_to_run_queue(tso)
1406 ASSERT(tso!=run_queue_hd && tso!=run_queue_tl);
1407 tso->link = run_queue_hd;
1409 if (run_queue_tl == END_TSO_QUEUE) {
1414 /* Put the new thread at the end of the runnable queue. */
1415 static /* inline */ void
1416 push_on_run_queue(tso)
1419 ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
1420 ASSERT(run_queue_hd!=NULL && run_queue_tl!=NULL);
1421 ASSERT(tso!=run_queue_hd && tso!=run_queue_tl);
1422 if (run_queue_hd == END_TSO_QUEUE) {
1425 run_queue_tl->link = tso;
1431 Should be inlined because it's used very often in schedule. The tso
1432 argument is actually only needed in GranSim, where we want to have the
1433 possibility to schedule *any* TSO on the run queue, irrespective of the
1434 actual ordering. Therefore, if tso is not the nil TSO then we traverse
1435 the run queue and dequeue the tso, adjusting the links in the queue.
1437 //@cindex take_off_run_queue
1438 static /* inline */ StgTSO*
1439 take_off_run_queue(StgTSO *tso) {
1443 qetlaHbogh Qu' ngaSbogh ghomDaQ {tso} yIteq!
1445 if tso is specified, unlink that tso from the run_queue (doesn't have
1446 to be at the beginning of the queue); GranSim only
1448 if (tso!=END_TSO_QUEUE) {
1449 /* find tso in queue */
1450 for (t=run_queue_hd, prev=END_TSO_QUEUE;
1451 t!=END_TSO_QUEUE && t!=tso;
1455 /* now actually dequeue the tso */
1456 if (prev!=END_TSO_QUEUE) {
1457 ASSERT(run_queue_hd!=t);
1458 prev->link = t->link;
1460 /* t is at beginning of thread queue */
1461 ASSERT(run_queue_hd==t);
1462 run_queue_hd = t->link;
1464 /* t is at end of thread queue */
1465 if (t->link==END_TSO_QUEUE) {
1466 ASSERT(t==run_queue_tl);
1467 run_queue_tl = prev;
1469 ASSERT(run_queue_tl!=t);
1471 t->link = END_TSO_QUEUE;
1473 /* take tso from the beginning of the queue; std concurrent code */
1475 if (t != END_TSO_QUEUE) {
1476 run_queue_hd = t->link;
1477 t->link = END_TSO_QUEUE;
1478 if (run_queue_hd == END_TSO_QUEUE) {
1479 run_queue_tl = END_TSO_QUEUE;
1488 //@node Garbage Collextion Routines, Blocking Queue Routines, Run queue code, Main scheduling code
1489 //@subsection Garbage Collextion Routines
1491 /* ---------------------------------------------------------------------------
1492 Where are the roots that we know about?
1494 - all the threads on the runnable queue
1495 - all the threads on the blocked queue
1496 - all the threads currently executing a _ccall_GC
1497 - all the "main threads"
1499 ------------------------------------------------------------------------ */
1501 /* This has to be protected either by the scheduler monitor, or by the
1502 garbage collection monitor (probably the latter).
1506 static void GetRoots(void)
1513 for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
1514 if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL)))
1515 run_queue_hds[i] = (StgTSO *)MarkRoot((StgClosure *)run_queue_hds[i]);
1516 if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL)))
1517 run_queue_tls[i] = (StgTSO *)MarkRoot((StgClosure *)run_queue_tls[i]);
1519 if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL)))
1520 blocked_queue_hds[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hds[i]);
1521 if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL)))
1522 blocked_queue_tls[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tls[i]);
1523 if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL)))
1524 ccalling_threadss[i] = (StgTSO *)MarkRoot((StgClosure *)ccalling_threadss[i]);
1531 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
1532 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
1534 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
1535 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
1538 for (m = main_threads; m != NULL; m = m->link) {
1539 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
1541 suspended_ccalling_threads =
1542 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
1544 #if defined(SMP) || defined(PAR) || defined(GRAN)
1549 /* -----------------------------------------------------------------------------
1552 This is the interface to the garbage collector from Haskell land.
1553 We provide this so that external C code can allocate and garbage
1554 collect when called from Haskell via _ccall_GC.
1556 It might be useful to provide an interface whereby the programmer
1557 can specify more roots (ToDo).
1559 This needs to be protected by the GC condition variable above. KH.
1560 -------------------------------------------------------------------------- */
1562 void (*extra_roots)(void);
1567 GarbageCollect(GetRoots);
1573 GetRoots(); /* the scheduler's roots */
1574 extra_roots(); /* the user's roots */
1578 performGCWithRoots(void (*get_roots)(void))
1580 extra_roots = get_roots;
1582 GarbageCollect(AllRoots);
1585 /* -----------------------------------------------------------------------------
1588 If the thread has reached its maximum stack size, then raise the
1589 StackOverflow exception in the offending thread. Otherwise
1590 relocate the TSO into a larger chunk of memory and adjust its stack
1592 -------------------------------------------------------------------------- */
1595 threadStackOverflow(StgTSO *tso)
1597 nat new_stack_size, new_tso_size, diff, stack_words;
1601 if (tso->stack_size >= tso->max_stack_size) {
1603 /* If we're debugging, just print out the top of the stack */
1604 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
1608 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
1611 /* Send this thread the StackOverflow exception */
1612 raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
1617 /* Try to double the current stack size. If that takes us over the
1618 * maximum stack size for this thread, then use the maximum instead.
1619 * Finally round up so the TSO ends up as a whole number of blocks.
1621 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
1622 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
1623 TSO_STRUCT_SIZE)/sizeof(W_);
1624 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
1625 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
1627 IF_DEBUG(scheduler, fprintf(stderr,"scheduler: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
1629 dest = (StgTSO *)allocate(new_tso_size);
1630 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
1632 /* copy the TSO block and the old stack into the new area */
1633 memcpy(dest,tso,TSO_STRUCT_SIZE);
1634 stack_words = tso->stack + tso->stack_size - tso->sp;
1635 new_sp = (P_)dest + new_tso_size - stack_words;
1636 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
1638 /* relocate the stack pointers... */
1639 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
1640 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1642 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
1643 dest->stack_size = new_stack_size;
1645 /* and relocate the update frame list */
1646 relocate_TSO(tso, dest);
1648 /* Mark the old TSO as relocated. We have to check for relocated
1649 * TSOs in the garbage collector and any primops that deal with TSOs.
1651 * It's important to set the sp and su values to just beyond the end
1652 * of the stack, so we don't attempt to scavenge any part of the
1655 tso->whatNext = ThreadRelocated;
1657 tso->sp = (P_)&(tso->stack[tso->stack_size]);
1658 tso->su = (StgUpdateFrame *)tso->sp;
1659 tso->why_blocked = NotBlocked;
1660 dest->mut_link = NULL;
1662 IF_DEBUG(sanity,checkTSO(tso));
1664 IF_DEBUG(scheduler,printTSO(dest));
1670 //@node Blocking Queue Routines, Exception Handling Routines, Garbage Collextion Routines, Main scheduling code
1671 //@subsection Blocking Queue Routines
1673 /* ---------------------------------------------------------------------------
1674 Wake up a queue that was blocked on some resource.
1675 ------------------------------------------------------------------------ */
1677 /* ToDo: check push_on_run_queue vs. PUSH_ON_RUN_QUEUE */
1681 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
1686 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
1688 /* write RESUME events to log file and
1689 update blocked and fetch time (depending on type of the orig closure) */
1690 if (RtsFlags.ParFlags.ParStats.Full) {
1691 DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
1692 GR_RESUMEQ, ((StgTSO *)bqe), ((StgTSO *)bqe)->block_info.closure,
1693 0, 0 /* spark_queue_len(ADVISORY_POOL) */);
1695 switch (get_itbl(node)->type) {
1697 ((StgTSO *)bqe)->par.fetchtime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
1702 ((StgTSO *)bqe)->par.blocktime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
1705 barf("{unblockOneLocked}Daq Qagh: unexpected closure in blocking queue");
1712 static StgBlockingQueueElement *
1713 unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
1715 StgBlockingQueueElement *next;
1716 PEs node_loc, tso_loc;
1718 node_loc = where_is(node); // should be lifted out of loop
1719 tso = (StgTSO *)bqe; // wastes an assignment to get the type right
1720 tso_loc = where_is(tso);
1721 if (IS_LOCAL_TO(PROCS(node),tso_loc)) { // TSO is local
1722 /* !fake_fetch => TSO is on CurrentProc is same as IS_LOCAL_TO */
1723 ASSERT(CurrentProc!=node_loc || tso_loc==CurrentProc);
1724 bq_processing_time += RtsFlags.GranFlags.Costs.lunblocktime;
1725 // insertThread(tso, node_loc);
1726 new_event(tso_loc, tso_loc,
1727 CurrentTime[CurrentProc]+bq_processing_time,
1729 tso, node, (rtsSpark*)NULL);
1730 tso->link = END_TSO_QUEUE; // overwrite link just to be sure
1733 } else { // TSO is remote (actually should be FMBQ)
1734 bq_processing_time += RtsFlags.GranFlags.Costs.mpacktime;
1735 bq_processing_time += RtsFlags.GranFlags.Costs.gunblocktime;
1736 new_event(tso_loc, CurrentProc,
1737 CurrentTime[CurrentProc]+bq_processing_time+
1738 RtsFlags.GranFlags.Costs.latency,
1740 tso, node, (rtsSpark*)NULL);
1741 tso->link = END_TSO_QUEUE; // overwrite link just to be sure
1742 bq_processing_time += RtsFlags.GranFlags.Costs.mtidytime;
1745 /* the thread-queue-overhead is accounted for in either Resume or UnblockThread */
1747 fprintf(stderr," %s TSO %d (%p) [PE %d] (blocked_on=%p) (next=%p) ,",
1748 (node_loc==tso_loc ? "Local" : "Global"),
1749 tso->id, tso, CurrentProc, tso->blocked_on, tso->link))
1750 tso->blocked_on = NULL;
1751 IF_DEBUG(scheduler,belch("-- Waking up thread %ld (%p)",
1755 /* if this is the BQ of an RBH, we have to put back the info ripped out of
1756 the closure to make room for the anchor of the BQ */
1757 if (next!=END_BQ_QUEUE) {
1758 ASSERT(get_itbl(node)->type == RBH && get_itbl(next)->type == CONSTR);
1760 ASSERT((info_ptr==&RBH_Save_0_info) ||
1761 (info_ptr==&RBH_Save_1_info) ||
1762 (info_ptr==&RBH_Save_2_info));
1764 /* cf. convertToRBH in RBH.c for writing the RBHSave closure */
1765 ((StgRBH *)node)->blocking_queue = ((StgRBHSave *)next)->payload[0];
1766 ((StgRBH *)node)->mut_link = ((StgRBHSave *)next)->payload[1];
1769 belch("## Filled in RBH_Save for %p (%s) at end of AwBQ",
1770 node, info_type(node)));
1774 static StgBlockingQueueElement *
1775 unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
1777 StgBlockingQueueElement *next;
1779 switch (get_itbl(bqe)->type) {
1781 ASSERT(((StgTSO *)bqe)->why_blocked != NotBlocked);
1782 /* if it's a TSO just push it onto the run_queue */
1784 // ((StgTSO *)bqe)->link = END_TSO_QUEUE; // debugging?
1785 PUSH_ON_RUN_QUEUE((StgTSO *)bqe);
1787 unblockCount(bqe, node);
1788 /* reset blocking status after dumping event */
1789 ((StgTSO *)bqe)->why_blocked = NotBlocked;
1793 /* if it's a BLOCKED_FETCH put it on the PendingFetches list */
1795 bqe->link = PendingFetches;
1796 PendingFetches = bqe;
1800 /* can ignore this case in a non-debugging setup;
1801 see comments on RBHSave closures above */
1803 /* check that the closure is an RBHSave closure */
1804 ASSERT(get_itbl((StgClosure *)bqe) == &RBH_Save_0_info ||
1805 get_itbl((StgClosure *)bqe) == &RBH_Save_1_info ||
1806 get_itbl((StgClosure *)bqe) == &RBH_Save_2_info);
1810 barf("{unblockOneLocked}Daq Qagh: Unexpected IP (%#lx; %s) in blocking queue at %#lx\n",
1811 get_itbl((StgClosure *)bqe), info_type((StgClosure *)bqe),
1815 // IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
1819 #else /* !GRAN && !PAR */
1821 unblockOneLocked(StgTSO *tso)
1825 ASSERT(get_itbl(tso)->type == TSO);
1826 ASSERT(tso->why_blocked != NotBlocked);
1827 tso->why_blocked = NotBlocked;
1829 PUSH_ON_RUN_QUEUE(tso);
1831 IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
1838 unblockOne(StgTSO *tso, StgClosure *node)
1840 ACQUIRE_LOCK(&sched_mutex);
1841 tso = unblockOneLocked(tso, node);
1842 RELEASE_LOCK(&sched_mutex);
1847 unblockOne(StgTSO *tso, StgClosure *node)
1849 ACQUIRE_LOCK(&sched_mutex);
1850 tso = unblockOneLocked(tso, node);
1851 RELEASE_LOCK(&sched_mutex);
1856 unblockOne(StgTSO *tso)
1858 ACQUIRE_LOCK(&sched_mutex);
1859 tso = unblockOneLocked(tso);
1860 RELEASE_LOCK(&sched_mutex);
1867 awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
1869 StgBlockingQueueElement *bqe, *next;
1871 PEs node_loc, tso_loc;
1872 rtsTime bq_processing_time = 0;
1873 nat len = 0, len_local = 0;
1876 belch("## AwBQ for node %p on PE %d @ %ld by TSO %d (%p): ", \
1877 node, CurrentProc, CurrentTime[CurrentProc],
1878 CurrentTSO->id, CurrentTSO));
1880 node_loc = where_is(node);
1882 ASSERT(get_itbl(q)->type == TSO || // q is either a TSO or an RBHSave
1883 get_itbl(q)->type == CONSTR); // closure (type constructor)
1884 ASSERT(is_unique(node));
1886 /* FAKE FETCH: magically copy the node to the tso's proc;
1887 no Fetch necessary because in reality the node should not have been
1888 moved to the other PE in the first place
1890 if (CurrentProc!=node_loc) {
1892 belch("## node %p is on PE %d but CurrentProc is %d (TSO %d); assuming fake fetch and adjusting bitmask (old: %#x)",
1893 node, node_loc, CurrentProc, CurrentTSO->id,
1894 // CurrentTSO, where_is(CurrentTSO),
1895 node->header.gran.procs));
1896 node->header.gran.procs = (node->header.gran.procs) | PE_NUMBER(CurrentProc);
1898 belch("## new bitmask of node %p is %#x",
1899 node, node->header.gran.procs));
1900 if (RtsFlags.GranFlags.GranSimStats.Global) {
1901 globalGranStats.tot_fake_fetches++;
1906 // ToDo: check: ASSERT(CurrentProc==node_loc);
1907 while (get_itbl(bqe)->type==TSO) { // q != END_TSO_QUEUE) {
1910 bqe points to the current element in the queue
1911 next points to the next element in the queue
1913 //tso = (StgTSO *)bqe; // wastes an assignment to get the type right
1914 //tso_loc = where_is(tso);
1915 bqe = unblockOneLocked(bqe, node);
1918 /* statistics gathering */
1919 /* ToDo: fix counters
1920 if (RtsFlags.GranFlags.GranSimStats.Global) {
1921 globalGranStats.tot_bq_processing_time += bq_processing_time;
1922 globalGranStats.tot_bq_len += len; // total length of all bqs awakened
1923 globalGranStats.tot_bq_len_local += len_local; // same for local TSOs only
1924 globalGranStats.tot_awbq++; // total no. of bqs awakened
1927 fprintf(stderr,"## BQ Stats of %p: [%d entries, %d local] %s\n",
1928 node, len, len_local, (next!=END_TSO_QUEUE) ? "RBH" : ""));
1933 awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
1935 StgBlockingQueueElement *bqe, *next;
1937 ACQUIRE_LOCK(&sched_mutex);
1939 IF_PAR_DEBUG(verbose,
1940 belch("## AwBQ for node %p on [%x]: ",
1943 ASSERT(get_itbl(q)->type == TSO ||
1944 get_itbl(q)->type == BLOCKED_FETCH ||
1945 get_itbl(q)->type == CONSTR);
1948 while (get_itbl(bqe)->type==TSO ||
1949 get_itbl(bqe)->type==BLOCKED_FETCH) {
1950 bqe = unblockOneLocked(bqe, node);
1952 RELEASE_LOCK(&sched_mutex);
1955 #else /* !GRAN && !PAR */
1957 awakenBlockedQueue(StgTSO *tso)
1959 ACQUIRE_LOCK(&sched_mutex);
1960 while (tso != END_TSO_QUEUE) {
1961 tso = unblockOneLocked(tso);
1963 RELEASE_LOCK(&sched_mutex);
1967 //@node Exception Handling Routines, Debugging Routines, Blocking Queue Routines, Main scheduling code
1968 //@subsection Exception Handling Routines
1970 /* ---------------------------------------------------------------------------
1972 - usually called inside a signal handler so it mustn't do anything fancy.
1973 ------------------------------------------------------------------------ */
1976 interruptStgRts(void)
1982 /* -----------------------------------------------------------------------------
1985 This is for use when we raise an exception in another thread, which
1987 This has nothing to do with the UnblockThread event in GranSim. -- HWL
1988 -------------------------------------------------------------------------- */
1991 unblockThread(StgTSO *tso)
1995 ACQUIRE_LOCK(&sched_mutex);
1996 switch (tso->why_blocked) {
1999 return; /* not blocked */
2002 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
2004 StgTSO *last_tso = END_TSO_QUEUE;
2005 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
2008 for (t = mvar->head; t != END_TSO_QUEUE;
2009 last = &t->link, last_tso = t, t = t->link) {
2012 if (mvar->tail == tso) {
2013 mvar->tail = last_tso;
2018 barf("unblockThread (MVAR): TSO not found");
2021 case BlockedOnBlackHole:
2022 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
2024 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
2026 last = &bq->blocking_queue;
2027 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
2028 last = &t->link, t = t->link) {
2034 barf("unblockThread (BLACKHOLE): TSO not found");
2037 case BlockedOnException:
2039 StgTSO *target = tso->block_info.tso;
2041 ASSERT(get_itbl(target)->type == TSO);
2042 ASSERT(target->blocked_exceptions != NULL);
2044 last = &target->blocked_exceptions;
2045 for (t = target->blocked_exceptions; t != END_TSO_QUEUE;
2046 last = &t->link, t = t->link) {
2047 ASSERT(get_itbl(t)->type == TSO);
2053 barf("unblockThread (Exception): TSO not found");
2056 case BlockedOnDelay:
2058 case BlockedOnWrite:
2060 StgTSO *prev = NULL;
2061 for (t = blocked_queue_hd; t != END_TSO_QUEUE;
2062 prev = t, t = t->link) {
2065 blocked_queue_hd = t->link;
2066 if (blocked_queue_tl == t) {
2067 blocked_queue_tl = END_TSO_QUEUE;
2070 prev->link = t->link;
2071 if (blocked_queue_tl == t) {
2072 blocked_queue_tl = prev;
2078 barf("unblockThread (I/O): TSO not found");
2082 barf("unblockThread");
2086 tso->link = END_TSO_QUEUE;
2087 tso->why_blocked = NotBlocked;
2088 tso->block_info.closure = NULL;
2089 PUSH_ON_RUN_QUEUE(tso);
2090 RELEASE_LOCK(&sched_mutex);
2093 /* -----------------------------------------------------------------------------
2096 * The following function implements the magic for raising an
2097 * asynchronous exception in an existing thread.
2099 * We first remove the thread from any queue on which it might be
2100 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
2102 * We strip the stack down to the innermost CATCH_FRAME, building
2103 * thunks in the heap for all the active computations, so they can
2104 * be restarted if necessary. When we reach a CATCH_FRAME, we build
2105 * an application of the handler to the exception, and push it on
2106 * the top of the stack.
2108 * How exactly do we save all the active computations? We create an
2109 * AP_UPD for every UpdateFrame on the stack. Entering one of these
2110 * AP_UPDs pushes everything from the corresponding update frame
2111 * upwards onto the stack. (Actually, it pushes everything up to the
2112 * next update frame plus a pointer to the next AP_UPD object.
2113 * Entering the next AP_UPD object pushes more onto the stack until we
2114 * reach the last AP_UPD object - at which point the stack should look
2115 * exactly as it did when we killed the TSO and we can continue
2116 * execution by entering the closure on top of the stack.
2118 * We can also kill a thread entirely - this happens if either (a) the
2119 * exception passed to raiseAsync is NULL, or (b) there's no
2120 * CATCH_FRAME on the stack. In either case, we strip the entire
2121 * stack and replace the thread with a zombie.
2123 * -------------------------------------------------------------------------- */
2126 deleteThread(StgTSO *tso)
2128 raiseAsync(tso,NULL);
2132 raiseAsync(StgTSO *tso, StgClosure *exception)
2134 StgUpdateFrame* su = tso->su;
2135 StgPtr sp = tso->sp;
2137 /* Thread already dead? */
2138 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
2142 IF_DEBUG(scheduler, sched_belch("raising exception in thread %ld.", tso->id));
2144 /* Remove it from any blocking queues */
2147 /* The stack freezing code assumes there's a closure pointer on
2148 * the top of the stack. This isn't always the case with compiled
2149 * code, so we have to push a dummy closure on the top which just
2150 * returns to the next return address on the stack.
2152 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
2153 *(--sp) = (W_)&dummy_ret_closure;
2157 int words = ((P_)su - (P_)sp) - 1;
2161 /* If we find a CATCH_FRAME, and we've got an exception to raise,
2162 * then build PAP(handler,exception), and leave it on top of
2163 * the stack ready to enter.
2165 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
2166 StgCatchFrame *cf = (StgCatchFrame *)su;
2167 /* we've got an exception to raise, so let's pass it to the
2168 * handler in this frame.
2170 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
2171 TICK_ALLOC_UPD_PAP(2,0);
2172 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
2175 ap->fun = cf->handler;
2176 ap->payload[0] = (P_)exception;
2178 /* sp currently points to the word above the CATCH_FRAME on the stack.
2180 sp += sizeofW(StgCatchFrame);
2183 /* Restore the blocked/unblocked state for asynchronous exceptions
2184 * at the CATCH_FRAME.
2186 * If exceptions were unblocked at the catch, arrange that they
2187 * are unblocked again after executing the handler by pushing an
2188 * unblockAsyncExceptions_ret stack frame.
2190 if (!cf->exceptions_blocked) {
2191 *(sp--) = (W_)&unblockAsyncExceptionszh_ret_info;
2194 /* Ensure that async exceptions are blocked when running the handler.
2196 if (tso->blocked_exceptions == NULL) {
2197 tso->blocked_exceptions = END_TSO_QUEUE;
2200 /* Put the newly-built PAP on top of the stack, ready to execute
2201 * when the thread restarts.
2205 tso->whatNext = ThreadEnterGHC;
2209 /* First build an AP_UPD consisting of the stack chunk above the
2210 * current update frame, with the top word on the stack as the
2213 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
2218 ap->fun = (StgClosure *)sp[0];
2220 for(i=0; i < (nat)words; ++i) {
2221 ap->payload[i] = (P_)*sp++;
2224 switch (get_itbl(su)->type) {
2228 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
2229 TICK_ALLOC_UP_THK(words+1,0);
2232 fprintf(stderr, "scheduler: Updating ");
2233 printPtr((P_)su->updatee);
2234 fprintf(stderr, " with ");
2235 printObj((StgClosure *)ap);
2238 /* Replace the updatee with an indirection - happily
2239 * this will also wake up any threads currently
2240 * waiting on the result.
2242 UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
2244 sp += sizeofW(StgUpdateFrame) -1;
2245 sp[0] = (W_)ap; /* push onto stack */
2251 StgCatchFrame *cf = (StgCatchFrame *)su;
2254 /* We want a PAP, not an AP_UPD. Fortunately, the
2255 * layout's the same.
2257 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
2258 TICK_ALLOC_UPD_PAP(words+1,0);
2260 /* now build o = FUN(catch,ap,handler) */
2261 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
2262 TICK_ALLOC_FUN(2,0);
2263 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
2264 o->payload[0] = (StgClosure *)ap;
2265 o->payload[1] = cf->handler;
2268 fprintf(stderr, "scheduler: Built ");
2269 printObj((StgClosure *)o);
2272 /* pop the old handler and put o on the stack */
2274 sp += sizeofW(StgCatchFrame) - 1;
2281 StgSeqFrame *sf = (StgSeqFrame *)su;
2284 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
2285 TICK_ALLOC_UPD_PAP(words+1,0);
2287 /* now build o = FUN(seq,ap) */
2288 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
2289 TICK_ALLOC_SE_THK(1,0);
2290 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
2291 payloadCPtr(o,0) = (StgClosure *)ap;
2294 fprintf(stderr, "scheduler: Built ");
2295 printObj((StgClosure *)o);
2298 /* pop the old handler and put o on the stack */
2300 sp += sizeofW(StgSeqFrame) - 1;
2306 /* We've stripped the entire stack, the thread is now dead. */
2307 sp += sizeofW(StgStopFrame) - 1;
2308 sp[0] = (W_)exception; /* save the exception */
2309 tso->whatNext = ThreadKilled;
2310 tso->su = (StgUpdateFrame *)(sp+1);
2321 //@node Debugging Routines, Index, Exception Handling Routines, Main scheduling code
2322 //@subsection Debugging Routines
2324 /* -----------------------------------------------------------------------------
2325 Debugging: why is a thread blocked
2326 -------------------------------------------------------------------------- */
2330 void printThreadBlockage(StgTSO *tso)
2332 switch (tso->why_blocked) {
2334 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
2336 case BlockedOnWrite:
2337 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
2339 case BlockedOnDelay:
2340 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
2343 fprintf(stderr,"blocked on an MVar");
2345 case BlockedOnException:
2346 fprintf(stderr,"blocked on delivering an exception to thread %d",
2347 tso->block_info.tso->id);
2349 case BlockedOnBlackHole:
2350 fprintf(stderr,"blocked on a black hole");
2353 fprintf(stderr,"not blocked");
2357 fprintf(stderr,"blocked on global address");
2364 Print a whole blocking queue attached to node (debugging only).
2369 print_bq (StgClosure *node)
2371 StgBlockingQueueElement *bqe;
2375 fprintf(stderr,"## BQ of closure %p (%s): ",
2376 node, info_type(node));
2378 /* should cover all closures that may have a blocking queue */
2379 ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
2380 get_itbl(node)->type == FETCH_ME_BQ ||
2381 get_itbl(node)->type == RBH);
2383 ASSERT(node!=(StgClosure*)NULL); // sanity check
2385 NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure;
2387 for (bqe = ((StgBlockingQueue*)node)->blocking_queue, end = (bqe==END_BQ_QUEUE);
2388 !end; // iterate until bqe points to a CONSTR
2389 end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE), bqe = end ? END_BQ_QUEUE : bqe->link) {
2390 ASSERT(bqe != END_BQ_QUEUE); // sanity check
2391 ASSERT(bqe != (StgTSO*)NULL); // sanity check
2392 /* types of closures that may appear in a blocking queue */
2393 ASSERT(get_itbl(bqe)->type == TSO ||
2394 get_itbl(bqe)->type == BLOCKED_FETCH ||
2395 get_itbl(bqe)->type == CONSTR);
2396 /* only BQs of an RBH end with an RBH_Save closure */
2397 ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);
2399 switch (get_itbl(bqe)->type) {
2401 fprintf(stderr," TSO %d (%x),",
2402 ((StgTSO *)bqe)->id, ((StgTSO *)bqe));
2405 fprintf(stderr," BF (node=%p, ga=((%x, %d, %x)),",
2406 ((StgBlockedFetch *)bqe)->node,
2407 ((StgBlockedFetch *)bqe)->ga.payload.gc.gtid,
2408 ((StgBlockedFetch *)bqe)->ga.payload.gc.slot,
2409 ((StgBlockedFetch *)bqe)->ga.weight);
2412 fprintf(stderr," %s (IP %p),",
2413 (get_itbl(bqe) == &RBH_Save_0_info ? "RBH_Save_0" :
2414 get_itbl(bqe) == &RBH_Save_1_info ? "RBH_Save_1" :
2415 get_itbl(bqe) == &RBH_Save_2_info ? "RBH_Save_2" :
2416 "RBH_Save_?"), get_itbl(bqe));
2419 barf("Unexpected closure type %s in blocking queue of %p (%s)",
2420 info_type(bqe), node, info_type(node));
2424 fputc('\n', stderr);
2426 # elif defined(GRAN)
2428 print_bq (StgClosure *node)
2430 StgBlockingQueueElement *bqe;
2432 PEs node_loc, tso_loc;
2435 /* should cover all closures that may have a blocking queue */
2436 ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
2437 get_itbl(node)->type == FETCH_ME_BQ ||
2438 get_itbl(node)->type == RBH);
2440 ASSERT(node!=(StgClosure*)NULL); // sanity check
2441 node_loc = where_is(node);
2443 fprintf(stderr,"## BQ of closure %p (%s) on [PE %d]: ",
2444 node, info_type(node), node_loc);
2447 NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure;
2449 for (bqe = ((StgBlockingQueue*)node)->blocking_queue, end = (bqe==END_BQ_QUEUE);
2450 !end; // iterate until bqe points to a CONSTR
2451 end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE), bqe = end ? END_BQ_QUEUE : bqe->link) {
2452 ASSERT(bqe != END_BQ_QUEUE); // sanity check
2453 ASSERT(bqe != (StgTSO*)NULL); // sanity check
2454 /* types of closures that may appear in a blocking queue */
2455 ASSERT(get_itbl(bqe)->type == TSO ||
2456 get_itbl(bqe)->type == CONSTR);
2457 /* only BQs of an RBH end with an RBH_Save closure */
2458 ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);
2460 tso_loc = where_is((StgClosure *)bqe);
2461 switch (get_itbl(bqe)->type) {
2463 fprintf(stderr," TSO %d (%x) on [PE %d],",
2464 ((StgTSO *)bqe)->id, ((StgTSO *)bqe), tso_loc);
2467 fprintf(stderr," %s (IP %p),",
2468 (get_itbl(bqe) == &RBH_Save_0_info ? "RBH_Save_0" :
2469 get_itbl(bqe) == &RBH_Save_1_info ? "RBH_Save_1" :
2470 get_itbl(bqe) == &RBH_Save_2_info ? "RBH_Save_2" :
2471 "RBH_Save_?"), get_itbl(bqe));
2474 barf("Unexpected closure type %s in blocking queue of %p (%s)",
2475 info_type(bqe), node, info_type(node));
2479 fputc('\n', stderr);
2483 Nice and easy: only TSOs on the blocking queue
2486 print_bq (StgClosure *node)
2490 ASSERT(node!=(StgClosure*)NULL); // sanity check
2491 for (tso = ((StgBlockingQueue*)node)->blocking_queue;
2492 tso != END_TSO_QUEUE;
2494 ASSERT(tso!=NULL && tso!=END_TSO_QUEUE); // sanity check
2495 ASSERT(get_itbl(tso)->type == TSO); // guess what, sanity check
2496 fprintf(stderr," TSO %d (%p),", tso->id, tso);
2498 fputc('\n', stderr);
2509 for (i=0, tso=run_queue_hd;
2510 tso != END_TSO_QUEUE;
2519 sched_belch(char *s, ...)
2524 fprintf(stderr, "scheduler (task %ld): ", pthread_self());
2526 fprintf(stderr, "scheduler: ");
2528 vfprintf(stderr, s, ap);
2529 fprintf(stderr, "\n");
2534 //@node Index, , Debugging Routines, Main scheduling code
2538 //* MainRegTable:: @cindex\s-+MainRegTable
2539 //* StgMainThread:: @cindex\s-+StgMainThread
2540 //* awaken_blocked_queue:: @cindex\s-+awaken_blocked_queue
2541 //* blocked_queue_hd:: @cindex\s-+blocked_queue_hd
2542 //* blocked_queue_tl:: @cindex\s-+blocked_queue_tl
2543 //* context_switch:: @cindex\s-+context_switch
2544 //* createThread:: @cindex\s-+createThread
2545 //* free_capabilities:: @cindex\s-+free_capabilities
2546 //* gc_pending_cond:: @cindex\s-+gc_pending_cond
2547 //* initScheduler:: @cindex\s-+initScheduler
2548 //* interrupted:: @cindex\s-+interrupted
2549 //* n_free_capabilities:: @cindex\s-+n_free_capabilities
2550 //* next_thread_id:: @cindex\s-+next_thread_id
2551 //* print_bq:: @cindex\s-+print_bq
2552 //* run_queue_hd:: @cindex\s-+run_queue_hd
2553 //* run_queue_tl:: @cindex\s-+run_queue_tl
2554 //* sched_mutex:: @cindex\s-+sched_mutex
2555 //* schedule:: @cindex\s-+schedule
2556 //* take_off_run_queue:: @cindex\s-+take_off_run_queue
2557 //* task_ids:: @cindex\s-+task_ids
2558 //* term_mutex:: @cindex\s-+term_mutex
2559 //* thread_ready_cond:: @cindex\s-+thread_ready_cond