/* -----------------------------------------------------------------------------
 * $Id: Schedule.c,v 1.39 2000/01/12 15:15:17 simonmar Exp $
 *
 * (c) The GHC Team, 1998-1999
 *
 * ---------------------------------------------------------------------------*/

/* Version with scheduler monitor support for SMPs.

   This design provides a high-level API to create and schedule threads etc.
   as documented in the SMP design document.

   It uses a monitor design controlled by a single mutex to exercise control
   over accesses to shared data structures, and builds on the Posix threads
   library.

   The majority of state is shared.  In order to keep essential per-task state,
   there is a Capability structure, which contains all the information
   needed to run a thread: its STG registers, a pointer to its TSO, a
   nursery etc.  During STG execution, a pointer to the capability is
   kept in a register (BaseReg).

   In a non-SMP build, there is one global capability, namely MainRegTable.

   -------------------------------------------------------------------------- */

#include "Rts.h"
#include "Schedule.h"
#include "StgStartup.h"
#include "StgMiscClosures.h"
#include "Evaluator.h"
#include "Exception.h"
#include "Profiling.h"

/* Main threads:
 *
 * These are the threads which clients have requested that we run.
 *
 * In an SMP build, we might have several concurrent clients all
 * waiting for results, and each one will wait on a condition variable
 * until the result is available.
 *
 * In non-SMP, clients are strictly nested: the first client calls
 * into the RTS, which might call out again to C with a _ccall_GC, and
 * eventually re-enter the RTS.
 *
 * Information about each main thread is kept in a linked list:
 */
typedef struct StgMainThread_ {
    StgTSO *          tso;       /* the thread we are waiting on       */
    SchedulerStatus   stat;      /* filled in when tso finishes        */
    StgClosure **     ret;       /* where to put the result, if any    */
    pthread_cond_t    wakeup;    /* signalled when stat is filled in   */
    struct StgMainThread_ *link;
} StgMainThread;

/* Main thread queue.
 * Locks required: sched_mutex.
 */
static StgMainThread *main_threads;

/* Thread queues.
 * Locks required: sched_mutex.
 */
StgTSO *run_queue_hd, *run_queue_tl;
StgTSO *blocked_queue_hd, *blocked_queue_tl;

/* Threads suspended in _ccall_GC.
 * Locks required: sched_mutex.
 */
static StgTSO *suspended_ccalling_threads;

static void GetRoots(void);
static StgTSO *threadStackOverflow(StgTSO *tso);

/* KH: The following two flags are shared memory locations.  There is no need
   to lock them, since they are only unset at the end of a scheduler
   operation. */

/* flag set by signal handler to precipitate a context switch */
nat context_switch;

/* if this flag is set as well, give up execution */
rtsBool interrupted;

/* Next thread ID to allocate.
 * Locks required: sched_mutex
 */
StgThreadID next_thread_id = 1;

/*
 * Pointers to the state of the current thread.
 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
 * thread.  If CurrentTSO == NULL, then we're at the scheduler level.
 */

/* The smallest stack size that makes any sense is:
 *    RESERVED_STACK_WORDS    (so we can get back from the stack overflow)
 *  + sizeofW(StgStopFrame)   (the stg_stop_thread_info frame)
 *  + 1                       (the realworld token for an IO thread)
 *  + 1                       (the closure to enter)
 *
 * A thread with this stack will bomb immediately with a stack
 * overflow, which will increase its stack size.
 */
#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)

/* Free capability list.
 * Locks required: sched_mutex.
 */
Capability *free_capabilities;  /* Available capabilities for running threads */
nat n_free_capabilities;        /* total number of available capabilities */

Capability MainRegTable;        /* for non-SMP, we have one global capability */

/* All our current task ids, saved in case we need to kill them later.
 */
task_info *task_ids;

void        addToBlockedQueue ( StgTSO *tso );

static void schedule          ( void );
void        interruptStgRts   ( void );
static StgTSO *createThread_  ( nat size, rtsBool have_lock );

static void sched_belch       ( char *s, ... );

pthread_mutex_t sched_mutex       = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t term_mutex        = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t  thread_ready_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t  gc_pending_cond   = PTHREAD_COND_INITIALIZER;

/* -----------------------------------------------------------------------------
   Main scheduling loop.

   We use round-robin scheduling, each thread returning to the
   scheduler loop when one of these conditions is detected:

      * out of heap space
      * stack overflow
      * timer expires (thread yields)
      * thread blocks
      * thread ends

   Locking notes: we acquire the scheduler lock once at the beginning
   of the scheduler loop, and release it when

      * running a thread, or
      * waiting for work, or
      * waiting for a GC to complete.

   -------------------------------------------------------------------------- */
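
/* In outline, each pass of the loop below does roughly this (a sketch
 * only, eliding the SMP/PAR conditionals and error cases):
 *
 *    ACQUIRE_LOCK(&sched_mutex);
 *    while (1) {
 *        -- kill everything if interrupted; wake finished main threads
 *        -- top up the run queue from the spark pool
 *        -- wait until a thread and a capability are available
 *        cap->rCurrentTSO = t;
 *        RELEASE_LOCK(&sched_mutex);
 *        ret = StgRun(...);             -- run t until it yields/blocks/ends
 *        ACQUIRE_LOCK(&sched_mutex);
 *        -- dispatch on ret: requeue, grow the stack, retire, or start a GC
 *    }
 */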

static void
schedule( void )
{
    StgTSO *t;
    Capability *cap;
    StgThreadReturnCode ret;

    ACQUIRE_LOCK(&sched_mutex);

    while (1) {

        /* If we're interrupted (the user pressed ^C, or some other
         * termination condition occurred), kill all the currently running
         * threads.
         */
        if (interrupted) {
            IF_DEBUG(scheduler, sched_belch("interrupted"));
            for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
                deleteThread(t);
            }
            for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
                deleteThread(t);
            }
            run_queue_hd = run_queue_tl = END_TSO_QUEUE;
            blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
        }

        /* Go through the list of main threads and wake up any
         * clients whose computations have finished.  ToDo: this
         * should be done more efficiently without a linear scan
         * of the main threads list, somehow...
         */
        {
            StgMainThread *m, **prev;
            prev = &main_threads;
            for (m = main_threads; m != NULL; m = m->link) {
                switch (m->tso->whatNext) {
                case ThreadComplete:
                    if (m->ret) {
                        *(m->ret) = (StgClosure *)m->tso->sp[0];
                    }
                    *prev = m->link;
                    m->stat = Success;
                    pthread_cond_broadcast(&m->wakeup);
                    break;
                case ThreadKilled:
                    *prev = m->link;
                    m->stat = Killed;
                    pthread_cond_broadcast(&m->wakeup);
                    break;
                default:
                    break;
                }
            }
        }

        /* If our main thread has finished or been killed, return.
         */
        {
            StgMainThread *m = main_threads;
            if (m->tso->whatNext == ThreadComplete
                || m->tso->whatNext == ThreadKilled) {
                main_threads = main_threads->link;
                if (m->tso->whatNext == ThreadComplete) {
                    /* we finished successfully, fill in the return value */
                    if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
                    m->stat = Success;
                    return;
                } else {
                    m->stat = Killed;
                    return;
                }
            }
        }

        /* Top up the run queue from our spark pool.  We try to make the
         * number of threads in the run queue equal to the number of
         * free capabilities.
         */
#if defined(SMP) || defined(PAR)
        {
            nat n = n_free_capabilities;
            StgClosure *spark;
            StgTSO *tso = run_queue_hd;

            /* Count the run queue */
            while (n > 0 && tso != END_TSO_QUEUE) {
                tso = tso->link;
                n--;
            }

            for (; n > 0; n--) {
                spark = findSpark();
                if (spark == NULL) {
                    break; /* no more sparks in the pool */
                } else {
                    tso = createThread_(RtsFlags.GcFlags.initialStkSize, rtsTrue);
                    pushClosure(tso,spark);
                    PUSH_ON_RUN_QUEUE(tso);
                    advisory_thread_count++;

                    IF_DEBUG(scheduler,
                             sched_belch("turning spark of closure %p into a thread",
                                         (StgClosure *)spark));
                }
            }

            /* We need to wake up the other tasks if we just created some
             * work for them.
             */
            if (n_free_capabilities - n > 1) {
                pthread_cond_signal(&thread_ready_cond);
            }
        }
#endif /* SMP || PAR */

        /* Check whether any waiting threads need to be woken up.  If the
         * run queue is empty, and there are no other tasks running, we
         * can wait indefinitely for something to happen.
         * ToDo: what if another client comes along & requests another
         * main thread?
         */
        if (blocked_queue_hd != END_TSO_QUEUE) {
            awaitEvent(
                 (run_queue_hd == END_TSO_QUEUE)
              && (n_free_capabilities == RtsFlags.ParFlags.nNodes)
                );
        }

        /* check for signals each time around the scheduler */
        if (signals_pending()) {
            start_signal_handlers();
        }

        /* Detect deadlock: when we have no threads to run, there are
         * no threads waiting on I/O or sleeping, and all the other
         * tasks are waiting for work, we must have a deadlock.  Inform
         * all the main threads.
         */
#ifdef SMP
        if (blocked_queue_hd == END_TSO_QUEUE
            && run_queue_hd == END_TSO_QUEUE
            && (n_free_capabilities == RtsFlags.ParFlags.nNodes)
            ) {
            StgMainThread *m;
            for (m = main_threads; m != NULL; m = m->link) {
                m->ret  = NULL;
                m->stat = Deadlock;
                pthread_cond_broadcast(&m->wakeup);
            }
            main_threads = NULL;
        }
#else /* ! SMP */
        if (blocked_queue_hd == END_TSO_QUEUE
            && run_queue_hd == END_TSO_QUEUE) {
            StgMainThread *m = main_threads;
            m->ret  = NULL;
            m->stat = Deadlock;
            main_threads = m->link;
            return;
        }
#endif

        /* If there's a GC pending, don't do anything until it has
         * completed.
         */
        if (ready_to_gc) {
            IF_DEBUG(scheduler, sched_belch("waiting for GC"));
            pthread_cond_wait(&gc_pending_cond, &sched_mutex);
        }

        /* block until we've got a thread on the run queue and a free
         * capability.
         */
        while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
            IF_DEBUG(scheduler, sched_belch("waiting for work"));
            pthread_cond_wait(&thread_ready_cond, &sched_mutex);
            IF_DEBUG(scheduler, sched_belch("work now available"));
        }

        /* grab a thread from the run queue
         */
        t = POP_RUN_QUEUE();

        /* grab a capability
         */
        cap = free_capabilities;
        free_capabilities = cap->link;
        n_free_capabilities--;

        cap->rCurrentTSO = t;

        /* set the context_switch flag
         */
        if (run_queue_hd == END_TSO_QUEUE)
            context_switch = 0;
        else
            context_switch = 1;

        RELEASE_LOCK(&sched_mutex);

        IF_DEBUG(scheduler, sched_belch("running thread %d", t->id));

        /* Run the current thread
         */
        switch (cap->rCurrentTSO->whatNext) {

        case ThreadKilled:
        case ThreadComplete:
            /* Thread already finished, return to scheduler. */
            ret = ThreadFinished;
            break;

        case ThreadEnterGHC:
            ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
            break;

        case ThreadRunGHC:
            ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
            break;

        case ThreadEnterHugs:
#ifdef INTERPRETER
            {
                StgClosure *c;
                IF_DEBUG(scheduler,sched_belch("entering Hugs"));
                c = (StgClosure *)(cap->rCurrentTSO->sp[0]);
                cap->rCurrentTSO->sp += 1;
                ret = enter(cap,c);
                break;
            }
#else
            barf("Panic: entered a BCO but no bytecode interpreter in this build");
#endif

        default:
            barf("schedule: invalid whatNext field");
        }

        /* Costs for the scheduler are assigned to CCS_SYSTEM */
#ifdef PROFILING
        CCCS = CCS_SYSTEM;
#endif

        ACQUIRE_LOCK(&sched_mutex);

#ifdef SMP
        IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): ", pthread_self()););
#else
        IF_DEBUG(scheduler,fprintf(stderr,"schedule: "););
#endif

        t = cap->rCurrentTSO;

        switch (ret) {

        case HeapOverflow:
            /* make all the running tasks block on a condition variable,
             * maybe set context_switch and wait till they all pile in,
             * then have them wait on a GC condition variable.
             */
            IF_DEBUG(scheduler,belch("thread %ld stopped: HeapOverflow", t->id));
            threadPaused(t);
            ready_to_gc = rtsTrue;
            context_switch = 1; /* stop other threads ASAP */
            PUSH_ON_RUN_QUEUE(t);
            break;

        case StackOverflow:
            /* just adjust the stack for this thread, then pop it back
             * on the run queue.
             */
            IF_DEBUG(scheduler,belch("thread %ld stopped, StackOverflow", t->id));
            threadPaused(t);
            {
                StgMainThread *m;
                /* enlarge the stack */
                StgTSO *new_t = threadStackOverflow(t);

                /* This TSO has moved, so update any pointers to it from the
                 * main thread stack.  It better not be on any other queues...
                 */
                for (m = main_threads; m != NULL; m = m->link) {
                    if (m->tso == t) {
                        m->tso = new_t;
                    }
                }
                PUSH_ON_RUN_QUEUE(new_t);
            }
            break;

        case ThreadYielding:
            /* put the thread back on the run queue.  Then, if we're ready to
             * GC, check whether this is the last task to stop.  If so, wake
             * up the GC thread.  getThread will block during a GC until the
             * GC is finished.
             */
            IF_DEBUG(scheduler,
                     if (t->whatNext == ThreadEnterHugs) {
                         /* ToDo: or maybe a timer expired when we were in Hugs?
                          * or maybe someone hit ctrl-C
                          */
                         belch("thread %ld stopped to switch to Hugs", t->id);
                     } else {
                         belch("thread %ld stopped, yielding", t->id);
                     }
                     );
            threadPaused(t);
            APPEND_TO_RUN_QUEUE(t);
            break;

        case ThreadBlocked:
            /* don't need to do anything.  Either the thread is blocked on
             * I/O, in which case we'll have called addToBlockedQueue
             * previously, or it's blocked on an MVar or Blackhole, in which
             * case it'll be on the relevant queue already.
             */
            IF_DEBUG(scheduler,
                     fprintf(stderr, "thread %d stopped, ", t->id);
                     printThreadBlockage(t);
                     fprintf(stderr, "\n"));
            threadPaused(t);
            break;

        case ThreadFinished:
            /* Need to check whether this was a main thread, and if so, signal
             * the task that started it with the return value.  If we have no
             * more main threads, we probably need to stop all the tasks until
             * we get a new one.
             */
            IF_DEBUG(scheduler,belch("thread %ld finished", t->id));
            t->whatNext = ThreadComplete;
            break;

        default:
            barf("doneThread: invalid thread return code");
        }

        cap->link = free_capabilities;
        free_capabilities = cap;
        n_free_capabilities++;

        if (ready_to_gc && n_free_capabilities == RtsFlags.ParFlags.nNodes) {
            /* everybody back, start the GC.
             * Could do it in this thread, or signal a condition var
             * to do it in another thread.  Either way, we need to
             * broadcast on gc_pending_cond afterward.
             */
            IF_DEBUG(scheduler,sched_belch("doing GC"));
            GarbageCollect(GetRoots);
            ready_to_gc = rtsFalse;
            pthread_cond_broadcast(&gc_pending_cond);
        }
    } /* end of while(1) */
}

/* A hack for Hugs concurrency support.  Needs sanitisation (?) */
void deleteAllThreads ( void )
{
    StgTSO *t;

    IF_DEBUG(scheduler,sched_belch("deleteAllThreads()"));
    for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
        deleteThread(t);
    }
    for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
        deleteThread(t);
    }
    run_queue_hd = run_queue_tl = END_TSO_QUEUE;
    blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
}

/* -----------------------------------------------------------------------------
 * Suspending & resuming Haskell threads.
 *
 * When making a "safe" call to C (aka _ccall_GC), the task gives back
 * its capability before calling the C function.  This allows another
 * task to pick up the capability and carry on running Haskell
 * threads.  It also means that if the C call blocks, it won't lock
 * the whole system.
 *
 * The Haskell thread making the C call is put to sleep for the
 * duration of the call, on the suspended_ccalling_threads queue.  We
 * give out a token to the task, which it can use to resume the thread
 * on return from the C function.
 * -------------------------------------------------------------------------- */
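
/* A sketch of the resulting calling convention (fn is a hypothetical
 * blocking C function; the token is just the suspended thread's id):
 *
 *    StgInt tok = suspendThread(cap);   -- give up our capability
 *    r = fn(...);                       -- may block; other tasks keep going
 *    cap = resumeThread(tok);           -- get a capability back, wake the TSO
 */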

StgInt
suspendThread( Capability *cap )
{
    nat tok;

    ACQUIRE_LOCK(&sched_mutex);

    IF_DEBUG(scheduler,
             sched_belch("thread %d did a _ccall_gc", cap->rCurrentTSO->id));

    threadPaused(cap->rCurrentTSO);
    cap->rCurrentTSO->link = suspended_ccalling_threads;
    suspended_ccalling_threads = cap->rCurrentTSO;

    /* Use the thread ID as the token; it should be unique */
    tok = cap->rCurrentTSO->id;

    /* Hand back our capability */
    cap->link = free_capabilities;
    free_capabilities = cap;
    n_free_capabilities++;

    RELEASE_LOCK(&sched_mutex);
    return tok;
}

Capability *
resumeThread( StgInt tok )
{
    StgTSO *tso, **prev;
    Capability *cap;

    ACQUIRE_LOCK(&sched_mutex);

    prev = &suspended_ccalling_threads;
    for (tso = suspended_ccalling_threads;
         tso != END_TSO_QUEUE;
         prev = &tso->link, tso = tso->link) {
        if (tso->id == (StgThreadID)tok) {
            *prev = tso->link;
            break;
        }
    }
    if (tso == END_TSO_QUEUE) {
        barf("resumeThread: thread not found");
    }

    while (free_capabilities == NULL) {
        IF_DEBUG(scheduler, sched_belch("waiting to resume"));
        pthread_cond_wait(&thread_ready_cond, &sched_mutex);
        IF_DEBUG(scheduler, sched_belch("resuming thread %d", tso->id));
    }

    cap = free_capabilities;
    free_capabilities = cap->link;
    n_free_capabilities--;

    cap->rCurrentTSO = tso;

    RELEASE_LOCK(&sched_mutex);
    return cap;
}

/* --------------------------------------------------------------------------- */

static void unblockThread(StgTSO *tso);

/* -----------------------------------------------------------------------------
 * Comparing Thread ids.
 *
 * This is used from STG land in the implementation of the
 * instances of Eq/Ord for ThreadIds.
 * -------------------------------------------------------------------------- */

int
cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
{
    StgThreadID id1 = tso1->id;
    StgThreadID id2 = tso2->id;

    if (id1 < id2) return (-1);
    if (id1 > id2) return 1;
    return 0;
}

/* -----------------------------------------------------------------------------
   Create a new thread.

   The new thread starts with the given stack size.  Before the
   scheduler can run, however, this thread needs to have a closure
   (and possibly some arguments) pushed on its stack.  See
   pushClosure() in Schedule.h.

   createGenThread() and createIOThread() (in SchedAPI.h) are
   convenient packaged versions of this function.
   -------------------------------------------------------------------------- */

StgTSO *
createThread(nat size)
{
    return createThread_(size, rtsFalse);
}

static StgTSO *
createThread_(nat size, rtsBool have_lock)
{
    StgTSO *tso;
    nat stack_size;

    /* catch ridiculously small stack sizes */
    if (size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
        size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
    }

    tso = (StgTSO *)allocate(size);
    TICK_ALLOC_TSO(size-sizeofW(StgTSO),0);

    stack_size = size - TSO_STRUCT_SIZEW;

    SET_HDR(tso, &TSO_info, CCS_MAIN);
    tso->whatNext = ThreadEnterGHC;

    /* tso->id needs to be unique.  For now we use a heavyweight mutex to
     * protect the increment operation on next_thread_id.
     * In future, we could use an atomic increment instead.
     */
    if (!have_lock) { ACQUIRE_LOCK(&sched_mutex); }
    tso->id = next_thread_id++;
    if (!have_lock) { RELEASE_LOCK(&sched_mutex); }

    tso->why_blocked = NotBlocked;
    tso->blocked_exceptions = NULL;

    tso->splim          = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
    tso->stack_size     = stack_size;
    tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
                          - TSO_STRUCT_SIZEW;
    tso->sp             = (P_)&(tso->stack) + stack_size;

#ifdef PROFILING
    tso->prof.CCCS = CCS_MAIN;
#endif

    /* put a stop frame on the stack */
    tso->sp -= sizeofW(StgStopFrame);
    SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
    tso->su = (StgUpdateFrame*)tso->sp;

    IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words",
                                   tso->id, tso->stack_size));

    return tso;
}

/* -----------------------------------------------------------------------------
 * scheduleThread puts a thread on the head of the runnable queue.
 * This will usually be done immediately after a thread is created.
 * The caller of scheduleThread must create the thread using e.g.
 * createThread and push an appropriate closure
 * on this thread's stack before the scheduler is invoked.
 * -------------------------------------------------------------------------- */

void
scheduleThread(StgTSO *tso)
{
    ACQUIRE_LOCK(&sched_mutex);

    /* Put the new thread on the head of the runnable queue.  The caller
     * better push an appropriate closure on this thread's stack
     * beforehand.  In the SMP case, the thread may start running as
     * soon as we release the scheduler lock below.
     */
    PUSH_ON_RUN_QUEUE(tso);

    IF_DEBUG(scheduler,printTSO(tso));
    RELEASE_LOCK(&sched_mutex);
}
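
/* Typical use, from thread creation to scheduling (a sketch; "closure"
 * is whatever StgClosure the client wants evaluated -- createGenThread
 * and createIOThread in SchedAPI.h package this up):
 *
 *    StgTSO *tso = createThread(RtsFlags.GcFlags.initialStkSize);
 *    pushClosure(tso, closure);
 *    scheduleThread(tso);
 */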

/* -----------------------------------------------------------------------------
 * Start up Posix threads to run each of the scheduler tasks.
 * I believe the task ids are not needed in the system as defined.
 * -------------------------------------------------------------------------- */

static void *
taskStart( void *arg STG_UNUSED )
{
    schedule();
    return NULL;
}

/* -----------------------------------------------------------------------------
 * Initialise the scheduler.  This resets all the queues - if the
 * queues contained any threads, they'll be garbage collected at the
 * next GC.
 *
 * This now calls startTasks(), so should only be called once!  KH @ 25/10/99
 * -------------------------------------------------------------------------- */

static nat await_death;

static void
term_handler(int sig STG_UNUSED)
{
    ACQUIRE_LOCK(&term_mutex);
    await_death--;
    RELEASE_LOCK(&term_mutex);
    pthread_exit(NULL);
}

void initScheduler(void)
{
    run_queue_hd      = END_TSO_QUEUE;
    run_queue_tl      = END_TSO_QUEUE;
    blocked_queue_hd  = END_TSO_QUEUE;
    blocked_queue_tl  = END_TSO_QUEUE;

    suspended_ccalling_threads = END_TSO_QUEUE;

    context_switch = 0;
    interrupted    = 0;

    enteredCAFs = END_CAF_LIST;

    /* Install the SIGTERM handler */
    {
        struct sigaction action,oact;

        action.sa_handler = term_handler;
        sigemptyset(&action.sa_mask);
        action.sa_flags = 0;
        if (sigaction(SIGTERM, &action, &oact) != 0) {
            barf("can't install TERM handler");
        }
    }

    /* Allocate N Capabilities */
    {
        nat i;
        Capability *cap, *prev;
        cap  = NULL;
        prev = NULL;
        for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
            cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
            cap->link = prev;
            prev = cap;
        }
        free_capabilities = cap;
        n_free_capabilities = RtsFlags.ParFlags.nNodes;
    }
    IF_DEBUG(scheduler,fprintf(stderr,"schedule: Allocated %d capabilities\n",
                               n_free_capabilities););

    /* make some space for saving all the thread ids */
    task_ids = stgMallocBytes(RtsFlags.ParFlags.nNodes * sizeof(task_info),
                              "initScheduler:task_ids");

    /* and create all the threads */
    {
        nat i;
        pthread_t tid;
        int r;

        for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
            r = pthread_create(&tid,NULL,taskStart,NULL);
            if (r != 0) {
                barf("startTasks: Can't create new Posix thread");
            }
            task_ids[i].id = tid;
            task_ids[i].mut_time = 0.0;
            task_ids[i].mut_etime = 0.0;
            task_ids[i].gc_time = 0.0;
            task_ids[i].gc_etime = 0.0;
            task_ids[i].elapsedtimestart = elapsedtime();
            IF_DEBUG(scheduler,fprintf(stderr,"schedule: Started task: %ld\n",tid););
        }
    }
}

void
exitScheduler( void )
{
    nat i;

    /* Don't want to use pthread_cancel, since we'd have to install
     * these silly exception handlers (pthread_cleanup_{push,pop}) around
     * all our locks.
     */
#if 0
    /* Cancel all our tasks */
    for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
        pthread_cancel(task_ids[i].id);
    }

    /* Wait for all the tasks to terminate */
    for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
        IF_DEBUG(scheduler,fprintf(stderr,"schedule: waiting for task %ld\n",
                                   task_ids[i].id));
        pthread_join(task_ids[i].id, NULL);
    }
#endif

    /* Send 'em all a SIGTERM.  That should shut 'em up.
     */
    await_death = RtsFlags.ParFlags.nNodes;
    for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
        pthread_kill(task_ids[i].id,SIGTERM);
    }
    while (await_death > 0) {
        sched_yield();
    }
}

/* -----------------------------------------------------------------------------
   Managing the per-task allocation areas.

   Each capability comes with an allocation area.  These are
   fixed-length block lists into which allocation can be done.

   ToDo: no support for two-space collection at the moment???
   -------------------------------------------------------------------------- */

/* -----------------------------------------------------------------------------
 * waitThread is the external interface for running a new computation
 * and waiting for the result.
 *
 * In the non-SMP case, we create a new main thread, push it on the
 * main-thread stack, and invoke the scheduler to run it.  The
 * scheduler will return when the top main thread on the stack has
 * completed or died, and fill in the necessary fields of the
 * main_thread structure.
 *
 * In the SMP case, we create a main thread as before, but we then
 * create a new condition variable and sleep on it.  When our new
 * main thread has completed, we'll be woken up and the status/result
 * will be in the main_thread struct.
 * -------------------------------------------------------------------------- */

SchedulerStatus
waitThread(StgTSO *tso, /*out*/StgClosure **ret)
{
    StgMainThread *m;
    SchedulerStatus stat;

    ACQUIRE_LOCK(&sched_mutex);

    m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
    m->tso  = tso;
    m->ret  = ret;
    m->stat = NoStatus;
    pthread_cond_init(&m->wakeup, NULL);

    m->link = main_threads;
    main_threads = m;

    IF_DEBUG(scheduler, fprintf(stderr, "schedule: new main thread (%d)\n",
                                m->tso->id));

    do {
        pthread_cond_wait(&m->wakeup, &sched_mutex);
    } while (m->stat == NoStatus);

    stat = m->stat;
    ASSERT(m->stat != NoStatus);

    pthread_cond_destroy(&m->wakeup);

    IF_DEBUG(scheduler, fprintf(stderr, "schedule: main thread (%d) finished\n",
                                m->tso->id));
    free(m);

    RELEASE_LOCK(&sched_mutex);
    return stat;
}
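
/* A complete client therefore looks something like this (a sketch;
 * "closure" and "result" are hypothetical, and createIOThread is the
 * packaged helper from SchedAPI.h, here assumed to take a stack size
 * and the closure to run):
 *
 *    StgClosure *result;
 *    SchedulerStatus stat;
 *    StgTSO *tso = createIOThread(RtsFlags.GcFlags.initialStkSize, closure);
 *    scheduleThread(tso);
 *    stat = waitThread(tso, &result);
 *    if (stat == Success) { ... result is valid ... }
 */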

/* -----------------------------------------------------------------------------
   Debugging: why is a thread blocked
   -------------------------------------------------------------------------- */

void printThreadBlockage(StgTSO *tso)
{
    switch (tso->why_blocked) {
    case BlockedOnRead:
        fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
        break;
    case BlockedOnWrite:
        fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
        break;
    case BlockedOnDelay:
        fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
        break;
    case BlockedOnMVar:
        fprintf(stderr,"blocked on an MVar");
        break;
    case BlockedOnException:
        fprintf(stderr,"blocked on delivering an exception to thread %d",
                tso->block_info.tso->id);
        break;
    case BlockedOnBlackHole:
        fprintf(stderr,"blocked on a black hole");
        break;
    case NotBlocked:
        fprintf(stderr,"not blocked");
        break;
    }
}

/* -----------------------------------------------------------------------------
   Where are the roots that we know about?

        - all the threads on the runnable queue
        - all the threads on the blocked queue
        - all the threads currently executing a _ccall_GC
        - all the "main threads"

   -------------------------------------------------------------------------- */

/* This has to be protected either by the scheduler monitor, or by the
   garbage collection monitor (probably the latter).
*/

static void GetRoots(void)
{
    StgMainThread *m;

    run_queue_hd      = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
    run_queue_tl      = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);

    blocked_queue_hd  = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
    blocked_queue_tl  = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);

    for (m = main_threads; m != NULL; m = m->link) {
        m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
    }
    suspended_ccalling_threads =
        (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);

#if defined(SMP) || defined(PAR) || defined(GRAN)
    markSparkQueue();
#endif
}

/* -----------------------------------------------------------------------------
   performGC

   This is the interface to the garbage collector from Haskell land.
   We provide this so that external C code can allocate and garbage
   collect when called from Haskell via _ccall_GC.

   It might be useful to provide an interface whereby the programmer
   can specify more roots (ToDo).

   This needs to be protected by the GC condition variable above.  KH.
   -------------------------------------------------------------------------- */

void (*extra_roots)(void);

void
performGC(void)
{
    GarbageCollect(GetRoots);
}

static void
AllRoots(void)
{
    GetRoots();     /* the scheduler's roots */
    extra_roots();  /* the user's roots */
}

void
performGCWithRoots(void (*get_roots)(void))
{
    extra_roots = get_roots;

    GarbageCollect(AllRoots);
}
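
/* A sketch of how external C code might keep its own roots alive across
 * such a collection (my_global and my_roots are hypothetical):
 *
 *    static StgClosure *my_global;
 *
 *    static void my_roots(void)
 *    {
 *        my_global = MarkRoot(my_global);
 *    }
 *
 *    ...
 *    performGCWithRoots(my_roots);
 */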

/* -----------------------------------------------------------------------------
   Stack overflow

   If the thread has reached its maximum stack size,
   then bomb out.  Otherwise relocate the TSO into a larger chunk of
   memory and adjust its stack size appropriately.
   -------------------------------------------------------------------------- */

static StgTSO *
threadStackOverflow(StgTSO *tso)
{
    nat new_stack_size, new_tso_size, diff, stack_words;
    StgPtr new_sp;
    StgTSO *dest;

    if (tso->stack_size >= tso->max_stack_size) {
#if 0
        /* If we're debugging, just print out the top of the stack */
        printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
                                         tso->sp+64));
#endif
#ifdef INTERPRETER
        fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
        exit(1);
#else
        /* Send this thread the StackOverflow exception */
        raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
#endif
        return tso;
    }

    /* Try to double the current stack size.  If that takes us over the
     * maximum stack size for this thread, then use the maximum instead.
     * Finally round up so the TSO ends up as a whole number of blocks.
     */
    new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
    new_tso_size   = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
                                         TSO_STRUCT_SIZE)/sizeof(W_);
    new_tso_size   = round_to_mblocks(new_tso_size);  /* Be MBLOCK-friendly */
    new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;

    IF_DEBUG(scheduler, fprintf(stderr,"schedule: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));

    dest = (StgTSO *)allocate(new_tso_size);
    TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);

    /* copy the TSO block and the old stack into the new area */
    memcpy(dest,tso,TSO_STRUCT_SIZE);
    stack_words = tso->stack + tso->stack_size - tso->sp;
    new_sp = (P_)dest + new_tso_size - stack_words;
    memcpy(new_sp, tso->sp, stack_words * sizeof(W_));

    /* relocate the stack pointers... */
    diff     = (P_)new_sp - (P_)tso->sp; /* In *words* */
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
    dest->sp = new_sp;
    dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
    dest->stack_size = new_stack_size;

    /* and relocate the update frame list */
    relocate_TSO(tso, dest);

    /* Mark the old one as dead so we don't try to scavenge it during
     * garbage collection (the TSO will likely be on a mutables list in
     * some generation, but it'll get collected soon enough).  It's
     * important to set the sp and su values to just beyond the end of
     * the stack, so we don't attempt to scavenge any part of the dead
     * TSO's stack.
     */
    tso->whatNext = ThreadKilled;
    tso->sp = (P_)&(tso->stack[tso->stack_size]);
    tso->su = (StgUpdateFrame *)tso->sp;
    tso->why_blocked = NotBlocked;
    dest->mut_link = NULL;

    IF_DEBUG(sanity,checkTSO(tso));

    IF_DEBUG(scheduler,printTSO(dest));

#if 0
    /* This will no longer work: KH */
    if (tso == MainTSO) { /* hack */
        MainTSO = dest;
    }
#endif
    return dest;
}

/* -----------------------------------------------------------------------------
   Wake up a queue that was blocked on some resource.
   -------------------------------------------------------------------------- */

static StgTSO *
unblockOneLocked(StgTSO *tso)
{
    StgTSO *next;

    ASSERT(get_itbl(tso)->type == TSO);
    ASSERT(tso->why_blocked != NotBlocked);
    tso->why_blocked = NotBlocked;
    next = tso->link;
    PUSH_ON_RUN_QUEUE(tso);
    IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
    return next;
}

StgTSO *
unblockOne(StgTSO *tso)
{
    ACQUIRE_LOCK(&sched_mutex);
    tso = unblockOneLocked(tso);
    RELEASE_LOCK(&sched_mutex);
    return tso;
}

void
awakenBlockedQueue(StgTSO *tso)
{
    ACQUIRE_LOCK(&sched_mutex);
    while (tso != END_TSO_QUEUE) {
        tso = unblockOneLocked(tso);
    }
    RELEASE_LOCK(&sched_mutex);
}
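
/* For instance, filling an MVar releases every thread blocked on it
 * (a sketch; mvar is a StgMVar*, with head/tail as in unblockThread below):
 *
 *    awakenBlockedQueue(mvar->head);
 *    mvar->head = mvar->tail = END_TSO_QUEUE;
 */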

/* -----------------------------------------------------------------------------
   Interrupt execution
   - usually called inside a signal handler so it mustn't do anything fancy.
   -------------------------------------------------------------------------- */

void
interruptStgRts(void)
{
    interrupted    = 1;
    context_switch = 1;
}

/* -----------------------------------------------------------------------------
   Unblock a thread

   This is for use when we raise an exception in another thread, which
   may be blocked.
   -------------------------------------------------------------------------- */

static void
unblockThread(StgTSO *tso)
{
    StgTSO *t, **last;

    ACQUIRE_LOCK(&sched_mutex);
    switch (tso->why_blocked) {

    case NotBlocked:
        return;  /* not blocked */

    case BlockedOnMVar:
        ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
        {
            StgTSO *last_tso = END_TSO_QUEUE;
            StgMVar *mvar = (StgMVar *)(tso->block_info.closure);

            last = &mvar->head;
            for (t = mvar->head; t != END_TSO_QUEUE;
                 last = &t->link, last_tso = t, t = t->link) {
                if (t == tso) {
                    *last = tso->link;
                    if (mvar->tail == tso) {
                        mvar->tail = last_tso;
                    }
                    goto done;
                }
            }
            barf("unblockThread (MVAR): TSO not found");
        }

    case BlockedOnBlackHole:
        ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
        {
            StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);

            last = &bq->blocking_queue;
            for (t = bq->blocking_queue; t != END_TSO_QUEUE;
                 last = &t->link, t = t->link) {
                if (t == tso) {
                    *last = tso->link;
                    goto done;
                }
            }
            barf("unblockThread (BLACKHOLE): TSO not found");
        }

    case BlockedOnException:
        {
            StgTSO *target = tso->block_info.tso;

            ASSERT(get_itbl(target)->type == TSO);
            ASSERT(target->blocked_exceptions != NULL);

            last = &target->blocked_exceptions;
            for (t = target->blocked_exceptions; t != END_TSO_QUEUE;
                 last = &t->link, t = t->link) {
                ASSERT(get_itbl(t)->type == TSO);
                if (t == tso) {
                    *last = tso->link;
                    goto done;
                }
            }
            barf("unblockThread (Exception): TSO not found");
        }

    case BlockedOnDelay:
    case BlockedOnRead:
    case BlockedOnWrite:
        {
            StgTSO *prev = NULL;
            for (t = blocked_queue_hd; t != END_TSO_QUEUE;
                 prev = t, t = t->link) {
                if (t == tso) {
                    if (prev == NULL) {
                        blocked_queue_hd = t->link;
                        if (blocked_queue_tl == t) {
                            blocked_queue_tl = END_TSO_QUEUE;
                        }
                    } else {
                        prev->link = t->link;
                        if (blocked_queue_tl == t) {
                            blocked_queue_tl = prev;
                        }
                    }
                    goto done;
                }
            }
            barf("unblockThread (I/O): TSO not found");
        }

    default:
        barf("unblockThread");
    }

 done:
    tso->link = END_TSO_QUEUE;
    tso->why_blocked = NotBlocked;
    tso->block_info.closure = NULL;
    PUSH_ON_RUN_QUEUE(tso);
    RELEASE_LOCK(&sched_mutex);
}

/* -----------------------------------------------------------------------------
 * raiseAsync()
 *
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked.  The possible blockages are MVARs and BLACKHOLE_BQs.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary.  When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations?  We create an
 * AP_UPD for every UpdateFrame on the stack.  Entering one of these
 * AP_UPDs pushes everything from the corresponding update frame
 * upwards onto the stack.  (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_UPD object.)
 * Entering the next AP_UPD object pushes more onto the stack until we
 * reach the last AP_UPD object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack.  In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * -------------------------------------------------------------------------- */

void
deleteThread(StgTSO *tso)
{
    raiseAsync(tso,NULL);
}

void
raiseAsync(StgTSO *tso, StgClosure *exception)
{
    StgUpdateFrame* su = tso->su;
    StgPtr          sp = tso->sp;

    /* Thread already dead? */
    if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
        return;
    }

    IF_DEBUG(scheduler, sched_belch("raising exception in thread %ld.", tso->id));

    /* Remove it from any blocking queues */
    unblockThread(tso);

    /* The stack freezing code assumes there's a closure pointer on
     * the top of the stack.  This isn't always the case with compiled
     * code, so we have to push a dummy closure on the top which just
     * returns to the next return address on the stack.
     */
    if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
        *(--sp) = (W_)&dummy_ret_closure;
    }

    while (1) {
        int words = ((P_)su - (P_)sp) - 1;
        nat i;
        StgAP_UPD * ap;

        /* If we find a CATCH_FRAME, and we've got an exception to raise,
         * then build PAP(handler,exception), and leave it on top of
         * the stack ready to enter.
         */
        if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
            StgCatchFrame *cf = (StgCatchFrame *)su;
            /* we've got an exception to raise, so let's pass it to the
             * handler in this frame.
             */
            ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
            TICK_ALLOC_UPD_PAP(2,0);
            SET_HDR(ap,&PAP_info,cf->header.prof.ccs);

            ap->n_args = 1;
            ap->fun = cf->handler;
            ap->payload[0] = (P_)exception;

            /* sp currently points to the word above the CATCH_FRAME on the stack.
             */
            sp += sizeofW(StgCatchFrame);

            /* Restore the blocked/unblocked state for asynchronous exceptions
             * at the CATCH_FRAME.
             *
             * If exceptions were unblocked at the catch, arrange that they
             * are unblocked again after executing the handler by pushing an
             * unblockAsyncExceptions_ret stack frame.
             */
            if (!cf->exceptions_blocked) {
                *(sp--) = (W_)&unblockAsyncExceptionszh_ret_info;
            }

            /* Ensure that async exceptions are blocked when running the handler.
             */
            if (tso->blocked_exceptions == NULL) {
                tso->blocked_exceptions = END_TSO_QUEUE;
            }

            /* Put the newly-built PAP on top of the stack, ready to execute
             * when the thread restarts.
             */
            sp[0] = (W_)ap;
            tso->sp = sp;
            tso->su = cf->link;
            tso->whatNext = ThreadEnterGHC;
            return;
        }

        /* First build an AP_UPD consisting of the stack chunk above the
         * current update frame, with the top word on the stack as the
         * fun field.
         */
        ap = (StgAP_UPD *)allocate(AP_sizeW(words));
        ap->n_args = words;
        ap->fun    = (StgClosure *)sp[0];
        sp++;
        for(i=0; i < (nat)words; ++i) {
            ap->payload[i] = (P_)*sp++;
        }

        switch (get_itbl(su)->type) {

        case UPDATE_FRAME:
            SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
            TICK_ALLOC_UP_THK(words+1,0);
            IF_DEBUG(scheduler,
                     fprintf(stderr, "schedule: Updating ");
                     printPtr((P_)su->updatee);
                     fprintf(stderr, " with ");
                     printObj((StgClosure *)ap);
                     );

            /* Replace the updatee with an indirection - happily
             * this will also wake up any threads currently
             * waiting on the result.
             */
            UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
            su = su->link;
            sp += sizeofW(StgUpdateFrame) - 1;
            sp[0] = (W_)ap; /* push onto stack */
            break;

        case CATCH_FRAME:
            {
                StgCatchFrame *cf = (StgCatchFrame *)su;
                StgClosure* o;

                /* We want a PAP, not an AP_UPD.  Fortunately, the
                 * layout's the same.
                 */
                SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
                TICK_ALLOC_UPD_PAP(words+1,0);

                /* now build o = FUN(catch,ap,handler) */
                o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
                TICK_ALLOC_FUN(2,0);
                SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
                o->payload[0] = (StgClosure *)ap;
                o->payload[1] = cf->handler;

                IF_DEBUG(scheduler,
                         fprintf(stderr, "schedule: Built ");
                         printObj((StgClosure *)o);
                         );

                /* pop the old handler and put o on the stack */
                su = cf->link;
                sp += sizeofW(StgCatchFrame) - 1;
                sp[0] = (W_)o;
                break;
            }

        case SEQ_FRAME:
            {
                StgSeqFrame *sf = (StgSeqFrame *)su;
                StgClosure* o;

                SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
                TICK_ALLOC_UPD_PAP(words+1,0);

                /* now build o = FUN(seq,ap) */
                o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
                TICK_ALLOC_SE_THK(1,0);
                SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
                payloadCPtr(o,0) = (StgClosure *)ap;

                IF_DEBUG(scheduler,
                         fprintf(stderr, "schedule: Built ");
                         printObj((StgClosure *)o);
                         );

                /* pop the old handler and put o on the stack */
                su = sf->link;
                sp += sizeofW(StgSeqFrame) - 1;
                sp[0] = (W_)o;
                break;
            }

        case STOP_FRAME:
            /* We've stripped the entire stack, the thread is now dead. */
            sp += sizeofW(StgStopFrame) - 1;
            sp[0] = (W_)exception; /* save the exception */
            tso->whatNext = ThreadKilled;
            tso->su = (StgUpdateFrame *)(sp+1);
            tso->sp = sp;
            return;

        default:
            barf("raiseAsync");
        }
    }
    barf("raiseAsync");
}

/* -----------------------------------------------------------------------------
   Debugging output: sched_belch
   -------------------------------------------------------------------------- */

static void
sched_belch(char *s, ...)
{
    va_list ap;
    va_start(ap,s);
#ifdef SMP
    fprintf(stderr, "scheduler (task %ld): ", pthread_self());
#else
    fprintf(stderr, "scheduler: ");
#endif
    vfprintf(stderr, s, ap);
    fprintf(stderr, "\n");
    va_end(ap);
}