1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.40 2000/01/13 10:37:31 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
10 /* Version with scheduler monitor support for SMPs.
12 This design provides a high-level API to create and schedule threads etc.
13 as documented in the SMP design document.
15 It uses a monitor design controlled by a single mutex to exercise control
16 over accesses to shared data structures, and builds on the Posix threads
19 The majority of state is shared. In order to keep essential per-task state,
20 there is a Capability structure, which contains all the information
21 needed to run a thread: its STG registers, a pointer to its TSO, a
22 nursery etc. During STG execution, a pointer to the capability is
23 kept in a register (BaseReg).
25 In a non-SMP build, there is one global capability, namely MainRegTable.
36 #include "StgStartup.h"
40 #include "StgMiscClosures.h"
42 #include "Evaluator.h"
43 #include "Exception.h"
47 #include "Profiling.h"
56 * These are the threads which clients have requested that we run.
58 * In an SMP build, we might have several concurrent clients all
59 * waiting for results, and each one will wait on a condition variable
60 * until the result is available.
62 * In non-SMP, clients are strictly nested: the first client calls
63 * into the RTS, which might call out again to C with a _ccall_GC, and
64 * eventually re-enter the RTS.
66 * Main threads information is kept in a linked list:
/* One record per client computation the RTS is running; the owning
 * task sleeps on 'wakeup' until the scheduler fills in the result.
 * NOTE(review): fields between the listed lines (e.g. the TSO /
 * return-slot / status fields used elsewhere in this file as m->tso,
 * m->ret, m->stat) are elided in this chunk -- confirm in full source. */
68 typedef struct StgMainThread_ {
73 pthread_cond_t wakeup;          /* signalled when this thread's result is ready */
75 struct StgMainThread_ *link;    /* next entry in the main_threads list */
79 * Locks required: sched_mutex.
81 static StgMainThread *main_threads;
84 * Locks required: sched_mutex.
86 StgTSO *run_queue_hd, *run_queue_tl;
87 StgTSO *blocked_queue_hd, *blocked_queue_tl;
89 /* Threads suspended in _ccall_GC.
90 * Locks required: sched_mutex.
92 static StgTSO *suspended_ccalling_threads;
94 static void GetRoots(void);
95 static StgTSO *threadStackOverflow(StgTSO *tso);
97 /* KH: The following two flags are shared memory locations. There is no need
98 to lock them, since they are only unset at the end of a scheduler
102 /* flag set by signal handler to precipitate a context switch */
105 /* if this flag is set as well, give up execution */
108 /* Next thread ID to allocate.
109 * Locks required: sched_mutex
111 StgThreadID next_thread_id = 1;
114 * Pointers to the state of the current thread.
115 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
116 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
119 /* The smallest stack size that makes any sense is:
120 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
121 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
122 * + 1 (the realworld token for an IO thread)
123 * + 1 (the closure to enter)
125 * A thread with this stack will bomb immediately with a stack
126 * overflow, which will increase its stack size.
129 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
131 /* Free capability list.
132 * Locks required: sched_mutex.
135 Capability *free_capabilities; /* Available capabilities for running threads */
136 nat n_free_capabilities; /* total number of available capabilities */
138 Capability MainRegTable; /* for non-SMP, we have one global capability */
143 /* All our current task ids, saved in case we need to kill them later.
149 void addToBlockedQueue ( StgTSO *tso );
151 static void schedule ( void );
152 void interruptStgRts ( void );
153 static StgTSO * createThread_ ( nat size, rtsBool have_lock );
156 static void sched_belch(char *s, ...);
160 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
161 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
162 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
163 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
168 /* -----------------------------------------------------------------------------
169 Main scheduling loop.
171 We use round-robin scheduling, each thread returning to the
172 scheduler loop when one of these conditions is detected:
175 * timer expires (thread yields)
180 Locking notes: we acquire the scheduler lock once at the beginning
181 of the scheduler loop, and release it when
183 * running a thread, or
184 * waiting for work, or
185 * waiting for a GC to complete.
187 -------------------------------------------------------------------------- */
194 StgThreadReturnCode ret;
196 ACQUIRE_LOCK(&sched_mutex);
200 /* If we're interrupted (the user pressed ^C, or some other
201 * termination condition occurred), kill all the currently running
205 IF_DEBUG(scheduler, sched_belch("interrupted"));
206 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
209 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
212 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
213 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
216 /* Go through the list of main threads and wake up any
217 * clients whose computations have finished. ToDo: this
218 * should be done more efficiently without a linear scan
219 * of the main threads list, somehow...
223 StgMainThread *m, **prev;
224 prev = &main_threads;
225 for (m = main_threads; m != NULL; m = m->link) {
226 switch (m->tso->whatNext) {
229 *(m->ret) = (StgClosure *)m->tso->sp[0];
233 pthread_cond_broadcast(&m->wakeup);
238 pthread_cond_broadcast(&m->wakeup);
246 /* If our main thread has finished or been killed, return.
249 StgMainThread *m = main_threads;
250 if (m->tso->whatNext == ThreadComplete
251 || m->tso->whatNext == ThreadKilled) {
252 main_threads = main_threads->link;
253 if (m->tso->whatNext == ThreadComplete) {
254 /* we finished successfully, fill in the return value */
255 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
266 /* Top up the run queue from our spark pool. We try to make the
267 * number of threads in the run queue equal to the number of
270 #if defined(SMP) || defined(PAR)
272 nat n = n_free_capabilities;
273 StgTSO *tso = run_queue_hd;
275 /* Count the run queue */
276 while (n > 0 && tso != END_TSO_QUEUE) {
285 break; /* no more sparks in the pool */
288 tso = createThread_(RtsFlags.GcFlags.initialStkSize, rtsTrue);
289 pushClosure(tso,spark);
290 PUSH_ON_RUN_QUEUE(tso);
292 advisory_thread_count++;
296 sched_belch("turning spark of closure %p into a thread",
297 (StgClosure *)spark));
300 /* We need to wake up the other tasks if we just created some
303 if (n_free_capabilities - n > 1) {
304 pthread_cond_signal(&thread_ready_cond);
307 #endif /* SMP || PAR */
309 /* Check whether any waiting threads need to be woken up. If the
310 * run queue is empty, and there are no other tasks running, we
311 * can wait indefinitely for something to happen.
312 * ToDo: what if another client comes along & requests another
315 if (blocked_queue_hd != END_TSO_QUEUE) {
317 (run_queue_hd == END_TSO_QUEUE)
319 && (n_free_capabilities == RtsFlags.ParFlags.nNodes)
324 /* check for signals each time around the scheduler */
326 if (signals_pending()) {
327 start_signal_handlers();
331 /* Detect deadlock: when we have no threads to run, there are
332 * no threads waiting on I/O or sleeping, and all the other
333 * tasks are waiting for work, we must have a deadlock. Inform
334 * all the main threads.
337 if (blocked_queue_hd == END_TSO_QUEUE
338 && run_queue_hd == END_TSO_QUEUE
339 && (n_free_capabilities == RtsFlags.ParFlags.nNodes)
342 for (m = main_threads; m != NULL; m = m->link) {
345 pthread_cond_broadcast(&m->wakeup);
350 if (blocked_queue_hd == END_TSO_QUEUE
351 && run_queue_hd == END_TSO_QUEUE) {
352 StgMainThread *m = main_threads;
355 main_threads = m->link;
361 /* If there's a GC pending, don't do anything until it has
365 IF_DEBUG(scheduler,sched_belch("waiting for GC"));
366 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
369 /* block until we've got a thread on the run queue and a free
372 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
373 IF_DEBUG(scheduler, sched_belch("waiting for work"));
374 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
375 IF_DEBUG(scheduler, sched_belch("work now available"));
379 /* grab a thread from the run queue
386 cap = free_capabilities;
387 free_capabilities = cap->link;
388 n_free_capabilities--;
393 cap->rCurrentTSO = t;
395 /* set the context_switch flag
397 if (run_queue_hd == END_TSO_QUEUE)
402 RELEASE_LOCK(&sched_mutex);
404 IF_DEBUG(scheduler,sched_belch("running thread %d", t->id));
406 /* Run the current thread
408 switch (cap->rCurrentTSO->whatNext) {
411 /* Thread already finished, return to scheduler. */
412 ret = ThreadFinished;
415 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
418 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
420 case ThreadEnterHugs:
424 IF_DEBUG(scheduler,sched_belch("entering Hugs"));
425 c = (StgClosure *)(cap->rCurrentTSO->sp[0]);
426 cap->rCurrentTSO->sp += 1;
431 barf("Panic: entered a BCO but no bytecode interpreter in this build");
434 barf("schedule: invalid whatNext field");
437 /* Costs for the scheduler are assigned to CCS_SYSTEM */
442 ACQUIRE_LOCK(&sched_mutex);
445 IF_DEBUG(scheduler,fprintf(stderr,"scheduler (task %ld): ", pthread_self()););
447 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: "););
449 t = cap->rCurrentTSO;
453 /* make all the running tasks block on a condition variable,
454 * maybe set context_switch and wait till they all pile in,
455 * then have them wait on a GC condition variable.
457 IF_DEBUG(scheduler,belch("thread %ld stopped: HeapOverflow", t->id));
460 ready_to_gc = rtsTrue;
461 context_switch = 1; /* stop other threads ASAP */
462 PUSH_ON_RUN_QUEUE(t);
466 /* just adjust the stack for this thread, then pop it back
469 IF_DEBUG(scheduler,belch("thread %ld stopped, StackOverflow", t->id));
473 /* enlarge the stack */
474 StgTSO *new_t = threadStackOverflow(t);
476 /* This TSO has moved, so update any pointers to it from the
477 * main thread stack. It better not be on any other queues...
480 for (m = main_threads; m != NULL; m = m->link) {
485 PUSH_ON_RUN_QUEUE(new_t);
490 /* put the thread back on the run queue. Then, if we're ready to
491 * GC, check whether this is the last task to stop. If so, wake
492 * up the GC thread. getThread will block during a GC until the
496 if (t->whatNext == ThreadEnterHugs) {
497 /* ToDo: or maybe a timer expired when we were in Hugs?
498 * or maybe someone hit ctrl-C
500 belch("thread %ld stopped to switch to Hugs", t->id);
502 belch("thread %ld stopped, yielding", t->id);
506 APPEND_TO_RUN_QUEUE(t);
510 /* don't need to do anything. Either the thread is blocked on
511 * I/O, in which case we'll have called addToBlockedQueue
512 * previously, or it's blocked on an MVar or Blackhole, in which
513 * case it'll be on the relevant queue already.
516 fprintf(stderr, "thread %d stopped, ", t->id);
517 printThreadBlockage(t);
518 fprintf(stderr, "\n"));
523 /* Need to check whether this was a main thread, and if so, signal
524 * the task that started it with the return value. If we have no
525 * more main threads, we probably need to stop all the tasks until
528 IF_DEBUG(scheduler,belch("thread %ld finished", t->id));
529 t->whatNext = ThreadComplete;
533 barf("doneThread: invalid thread return code");
537 cap->link = free_capabilities;
538 free_capabilities = cap;
539 n_free_capabilities++;
543 if (ready_to_gc && n_free_capabilities == RtsFlags.ParFlags.nNodes) {
547 /* everybody back, start the GC.
548 * Could do it in this thread, or signal a condition var
549 * to do it in another thread. Either way, we need to
550 * broadcast on gc_pending_cond afterward.
553 IF_DEBUG(scheduler,sched_belch("doing GC"));
555 GarbageCollect(GetRoots);
556 ready_to_gc = rtsFalse;
558 pthread_cond_broadcast(&gc_pending_cond);
561 } /* end of while(1) */
565 /* A hack for Hugs concurrency support. Needs sanitisation (?) */
566 void deleteAllThreads ( void )
/* Kill every thread on the run and blocked queues, then reset both
 * queues to empty.  Hugs concurrency support hack (see note above).
 * NOTE(review): the per-thread deletion calls inside the two loops
 * are elided in this chunk -- confirm against the full source. */
569 IF_DEBUG(scheduler,sched_belch("deleteAllThreads()"));
570 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
573 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
576 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
577 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
581 /* -----------------------------------------------------------------------------
582 * Suspending & resuming Haskell threads.
584 * When making a "safe" call to C (aka _ccall_GC), the task gives back
585 * its capability before calling the C function. This allows another
586 * task to pick up the capability and carry on running Haskell
587 * threads. It also means that if the C call blocks, it won't lock
590 * The Haskell thread making the C call is put to sleep for the
591 * duration of the call, on the suspended_ccalling_threads queue. We
592 * give out a token to the task, which it can use to resume the thread
593 * on return from the C function.
594 * -------------------------------------------------------------------------- */
597 suspendThread( Capability *cap )
/* Called when a Haskell thread makes a "safe" C call (_ccall_GC).
 * Parks the current TSO on suspended_ccalling_threads, returns the
 * task's capability to the free list, and hands back a token (the
 * thread id) which resumeThread() uses to find the TSO again.
 * Locks: takes and releases sched_mutex. */
601 ACQUIRE_LOCK(&sched_mutex);
604 sched_belch("thread %d did a _ccall_gc\n", cap->rCurrentTSO->id));
606 threadPaused(cap->rCurrentTSO);
/* push the TSO on the front of the suspended-ccall list */
607 cap->rCurrentTSO->link = suspended_ccalling_threads;
608 suspended_ccalling_threads = cap->rCurrentTSO;
610 /* Use the thread ID as the token; it should be unique */
611 tok = cap->rCurrentTSO->id;
/* give the capability back so another task can run Haskell threads */
614 cap->link = free_capabilities;
615 free_capabilities = cap;
616 n_free_capabilities++;
619 RELEASE_LOCK(&sched_mutex);
624 resumeThread( StgInt tok )
/* Inverse of suspendThread(): find the TSO whose id matches 'tok' on
 * the suspended_ccalling_threads list, wait for a free capability,
 * and install the TSO as that capability's current thread.
 * barf()s if the token matches no suspended thread.
 * Locks: takes and releases sched_mutex. */
629 ACQUIRE_LOCK(&sched_mutex);
/* unlink the matching TSO from the suspended-ccall list */
631 prev = &suspended_ccalling_threads;
632 for (tso = suspended_ccalling_threads;
633 tso != END_TSO_QUEUE;
634 prev = &tso->link, tso = tso->link) {
635 if (tso->id == (StgThreadID)tok) {
640 if (tso == END_TSO_QUEUE) {
641 barf("resumeThread: thread not found");
/* block until a capability becomes available */
645 while (free_capabilities == NULL) {
646 IF_DEBUG(scheduler, sched_belch("waiting to resume"));
647 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
648 IF_DEBUG(scheduler, sched_belch("resuming thread %d", tso->id));
650 cap = free_capabilities;
651 free_capabilities = cap->link;
652 n_free_capabilities--;
657 cap->rCurrentTSO = tso;
659 RELEASE_LOCK(&sched_mutex);
663 /* -----------------------------------------------------------------------------
665 * -------------------------------------------------------------------------- */
666 static void unblockThread(StgTSO *tso);
668 /* -----------------------------------------------------------------------------
669 * Comparing Thread ids.
671 * This is used from STG land in the implementation of the
672 * instances of Eq/Ord for ThreadIds.
673 * -------------------------------------------------------------------------- */
675 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
/* Three-way comparison of thread ids (<0, 0, >0) for the Eq/Ord
 * ThreadId instances in STG land.  NOTE(review): the equal case
 * ("return 0") is elided in this chunk. */
677 StgThreadID id1 = tso1->id;
678 StgThreadID id2 = tso2->id;
680 if (id1 < id2) return (-1);
681 if (id1 > id2) return 1;
685 /* -----------------------------------------------------------------------------
688 The new thread starts with the given stack size. Before the
689 scheduler can run, however, this thread needs to have a closure
690 (and possibly some arguments) pushed on its stack. See
691 pushClosure() in Schedule.h.
693 createGenThread() and createIOThread() (in SchedAPI.h) are
694 convenient packaged versions of this function.
695 -------------------------------------------------------------------------- */
698 createThread(nat size)
/* Public entry point: create a thread without already holding
 * sched_mutex (createThread_ will take it to allocate the id). */
700 return createThread_(size, rtsFalse);
704 createThread_(nat size, rtsBool have_lock)
/* Allocate and initialise a new TSO with a stack of 'size' words
 * (clamped to a sane minimum).  'have_lock' says whether the caller
 * already holds sched_mutex (needed to bump next_thread_id).  The
 * caller must push a closure on the new thread's stack before it can
 * be scheduled -- see pushClosure() in Schedule.h. */
709 /* catch ridiculously small stack sizes */
710 if (size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
711 size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
714 tso = (StgTSO *)allocate(size);
715 TICK_ALLOC_TSO(size-sizeofW(StgTSO),0);
/* the stack lives after the TSO struct within the same allocation */
717 stack_size = size - TSO_STRUCT_SIZEW;
719 SET_HDR(tso, &TSO_info, CCS_MAIN);
720 tso->whatNext = ThreadEnterGHC;
722 /* tso->id needs to be unique. For now we use a heavyweight mutex to
723 protect the increment operation on next_thread_id.
724 In future, we could use an atomic increment instead.
727 if (!have_lock) { ACQUIRE_LOCK(&sched_mutex); }
728 tso->id = next_thread_id++;
729 if (!have_lock) { RELEASE_LOCK(&sched_mutex); }
731 tso->why_blocked = NotBlocked;
732 tso->blocked_exceptions = NULL;
/* stack limit leaves RESERVED_STACK_WORDS headroom for overflow handling */
734 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
735 tso->stack_size = stack_size;
736 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
738 tso->sp = (P_)&(tso->stack) + stack_size;
741 tso->prof.CCCS = CCS_MAIN;
744 /* put a stop frame on the stack */
745 tso->sp -= sizeofW(StgStopFrame);
746 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
747 tso->su = (StgUpdateFrame*)tso->sp;
749 IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words",
750 tso->id, tso->stack_size));
755 /* -----------------------------------------------------------------------------
758 * scheduleThread puts a thread on the head of the runnable queue.
759 * This will usually be done immediately after a thread is created.
760 * The caller of scheduleThread must create the thread using e.g.
761 * createThread and push an appropriate closure
762 * on this thread's stack before the scheduler is invoked.
763 * -------------------------------------------------------------------------- */
766 scheduleThread(StgTSO *tso)
/* Make a freshly created thread runnable, under sched_mutex. */
768 ACQUIRE_LOCK(&sched_mutex);
770 /* Put the new thread on the head of the runnable queue. The caller
771 * better push an appropriate closure on this thread's stack
772 * beforehand. In the SMP case, the thread may start running as
773 * soon as we release the scheduler lock below.
775 PUSH_ON_RUN_QUEUE(tso);
778 IF_DEBUG(scheduler,printTSO(tso));
779 RELEASE_LOCK(&sched_mutex);
783 /* -----------------------------------------------------------------------------
786 * Start up Posix threads to run each of the scheduler tasks.
787 * I believe the task ids are not needed in the system as defined.
789 * -------------------------------------------------------------------------- */
793 taskStart( void *arg STG_UNUSED )
800 /* -----------------------------------------------------------------------------
803 * Initialise the scheduler. This resets all the queues - if the
804 * queues contained any threads, they'll be garbage collected at the
807 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
808 * -------------------------------------------------------------------------- */
812 term_handler(int sig STG_UNUSED)
/* SIGTERM handler for worker tasks.  Takes term_mutex around some
 * shared update -- NOTE(review): the body between the lock/unlock
 * (presumably decrementing await_death, which exitScheduler polls)
 * is elided in this chunk; confirm against the full source. */
815 ACQUIRE_LOCK(&term_mutex);
817 RELEASE_LOCK(&term_mutex);
822 void initScheduler(void)
/* Reset all scheduler queues, install the SIGTERM handler, allocate
 * the Capabilities, and (SMP/PAR builds) start one Posix task per
 * capability.  Starts the tasks, so should only be called once. */
824 run_queue_hd = END_TSO_QUEUE;
825 run_queue_tl = END_TSO_QUEUE;
826 blocked_queue_hd = END_TSO_QUEUE;
827 blocked_queue_tl = END_TSO_QUEUE;
829 suspended_ccalling_threads = END_TSO_QUEUE;
836 enteredCAFs = END_CAF_LIST;
838 /* Install the SIGTERM handler (the sigaction below registers
 * SIGTERM, not SIGHUP as this comment previously claimed) */
841 struct sigaction action,oact;
843 action.sa_handler = term_handler;
844 sigemptyset(&action.sa_mask);
846 if (sigaction(SIGTERM, &action, &oact) != 0) {
847 barf("can't install TERM handler");
853 /* Allocate N Capabilities */
856 Capability *cap, *prev;
859 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
860 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
864 free_capabilities = cap;
865 n_free_capabilities = RtsFlags.ParFlags.nNodes;
867 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Allocated %d capabilities\n",
868 n_free_capabilities););
871 #if defined(SMP) || defined(PAR)
884 /* make some space for saving all the thread ids */
885 task_ids = stgMallocBytes(RtsFlags.ParFlags.nNodes * sizeof(task_info),
886 "initScheduler:task_ids");
888 /* and create all the threads */
889 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
890 r = pthread_create(&tid,NULL,taskStart,NULL);
892 barf("startTasks: Can't create new Posix thread");
/* record the task id plus zeroed mutator/GC time accounting */
894 task_ids[i].id = tid;
895 task_ids[i].mut_time = 0.0;
896 task_ids[i].mut_etime = 0.0;
897 task_ids[i].gc_time = 0.0;
898 task_ids[i].gc_etime = 0.0;
899 task_ids[i].elapsedtimestart = elapsedtime();
900 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Started task: %ld\n",tid););
906 exitScheduler( void )
/* Shut down the worker tasks.  Two strategies appear below:
 * pthread_cancel + pthread_join, and SIGTERM + waiting on the
 * await_death counter.  NOTE(review): the #if structure selecting
 * between them is elided in this chunk. */
911 /* Don't want to use pthread_cancel, since we'd have to install
912 * these silly exception handlers (pthread_cleanup_{push,pop}) around
916 /* Cancel all our tasks */
917 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
918 pthread_cancel(task_ids[i].id);
921 /* Wait for all the tasks to terminate */
922 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
923 IF_DEBUG(scheduler,fprintf(stderr,"scheduler: waiting for task %ld\n",
925 pthread_join(task_ids[i].id, NULL);
929 /* Send 'em all a SIGHUP. That should shut 'em up.
/* NOTE(review): the comment above says SIGHUP but the code sends
 * SIGTERM (matching the handler installed in initScheduler). */
931 await_death = RtsFlags.ParFlags.nNodes;
932 for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
933 pthread_kill(task_ids[i].id,SIGTERM);
935 while (await_death > 0) {
941 /* -----------------------------------------------------------------------------
942 Managing the per-task allocation areas.
944 Each capability comes with an allocation area. These are
945 fixed-length block lists into which allocation can be done.
947 ToDo: no support for two-space collection at the moment???
948 -------------------------------------------------------------------------- */
950 /* -----------------------------------------------------------------------------
951 * waitThread is the external interface for running a new computation
952 * and waiting for the result.
954 * In the non-SMP case, we create a new main thread, push it on the
955 * main-thread stack, and invoke the scheduler to run it. The
956 * scheduler will return when the top main thread on the stack has
957 * completed or died, and fill in the necessary fields of the
958 * main_thread structure.
960 * In the SMP case, we create a main thread as before, but we then
961 * create a new condition variable and sleep on it. When our new
962 * main thread has completed, we'll be woken up and the status/result
963 * will be in the main_thread struct.
964 * -------------------------------------------------------------------------- */
967 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
/* Run 'tso' to completion and return its SchedulerStatus; *ret
 * receives the result closure.  A new StgMainThread record is linked
 * onto main_threads; in the SMP case this task then sleeps on the
 * record's condition variable until the scheduler fills in m->stat.
 * Locks: takes and releases sched_mutex. */
970 SchedulerStatus stat;
972 ACQUIRE_LOCK(&sched_mutex);
974 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
980 pthread_cond_init(&m->wakeup, NULL);
983 m->link = main_threads;
986 IF_DEBUG(scheduler, fprintf(stderr, "scheduler: new main thread (%d)\n",
/* sleep until the scheduler broadcasts completion of our thread */
991 pthread_cond_wait(&m->wakeup, &sched_mutex);
992 } while (m->stat == NoStatus);
995 ASSERT(m->stat != NoStatus);
1001 pthread_cond_destroy(&m->wakeup);
1004 IF_DEBUG(scheduler, fprintf(stderr, "scheduler: main thread (%d) finished\n",
1008 RELEASE_LOCK(&sched_mutex);
1013 /* -----------------------------------------------------------------------------
1014 Debugging: why is a thread blocked
1015 -------------------------------------------------------------------------- */
1018 void printThreadBlockage(StgTSO *tso)
/* Debugging aid: print to stderr (no trailing newline) why 'tso' is
 * blocked, dispatching on tso->why_blocked. */
1020 switch (tso->why_blocked) {
1022 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
1024 case BlockedOnWrite:
1025 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
1027 case BlockedOnDelay:
1028 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
1031 fprintf(stderr,"blocked on an MVar");
1033 case BlockedOnException:
1034 fprintf(stderr,"blocked on delivering an exception to thread %d",
1035 tso->block_info.tso->id);
1037 case BlockedOnBlackHole:
1038 fprintf(stderr,"blocked on a black hole");
1041 fprintf(stderr,"not blocked");
1047 /* -----------------------------------------------------------------------------
1048 Where are the roots that we know about?
1050 - all the threads on the runnable queue
1051 - all the threads on the blocked queue
1052 - all the thread currently executing a _ccall_GC
1053 - all the "main threads"
1055 -------------------------------------------------------------------------- */
1057 /* This has to be protected either by the scheduler monitor, or by the
1058 garbage collection monitor (probably the latter).
1062 static void GetRoots(void)
/* GC root enumeration: mark the run queue, the blocked queue, each
 * main thread's TSO, and the suspended-ccall list, writing back the
 * pointers MarkRoot returns (objects may have moved). */
1066 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
1067 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
1069 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
1070 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
1072 for (m = main_threads; m != NULL; m = m->link) {
1073 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
1075 suspended_ccalling_threads =
1076 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
/* parallel builds also have spark-pool roots (body elided here) */
1078 #if defined(SMP) || defined(PAR) || defined(GRAN)
1083 /* -----------------------------------------------------------------------------
1086 This is the interface to the garbage collector from Haskell land.
1087 We provide this so that external C code can allocate and garbage
1088 collect when called from Haskell via _ccall_GC.
1090 It might be useful to provide an interface whereby the programmer
1091 can specify more roots (ToDo).
1093 This needs to be protected by the GC condition variable above. KH.
1094 -------------------------------------------------------------------------- */
1096 void (*extra_roots)(void);
1101 GarbageCollect(GetRoots);
1107 GetRoots(); /* the scheduler's roots */
1108 extra_roots(); /* the user's roots */
1112 performGCWithRoots(void (*get_roots)(void))
1114 extra_roots = get_roots;
1116 GarbageCollect(AllRoots);
1119 /* -----------------------------------------------------------------------------
1122 If the thread has reached its maximum stack size,
1123 then bomb out. Otherwise relocate the TSO into a larger chunk of
1124 memory and adjust its stack size appropriately.
1125 -------------------------------------------------------------------------- */
1128 threadStackOverflow(StgTSO *tso)
/* Grow a thread's stack by relocating the whole TSO into a larger
 * allocation (roughly doubling the stack, capped at max_stack_size
 * and rounded to whole blocks).  If the thread is already at its
 * maximum, raise the StackOverflow exception in it instead.  Returns
 * the (possibly new) TSO; the old one is marked ThreadKilled with its
 * sp/su pushed past the stack end so the GC won't scavenge it. */
1130 nat new_stack_size, new_tso_size, diff, stack_words;
1134 if (tso->stack_size >= tso->max_stack_size) {
1136 /* If we're debugging, just print out the top of the stack */
1137 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
1141 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
1144 /* Send this thread the StackOverflow exception */
1145 raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
1150 /* Try to double the current stack size. If that takes us over the
1151 * maximum stack size for this thread, then use the maximum instead.
1152 * Finally round up so the TSO ends up as a whole number of blocks.
1154 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
1155 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
1156 TSO_STRUCT_SIZE)/sizeof(W_);
1157 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
1158 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
1160 IF_DEBUG(scheduler, fprintf(stderr,"scheduler: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
1162 dest = (StgTSO *)allocate(new_tso_size);
1163 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
1165 /* copy the TSO block and the old stack into the new area */
1166 memcpy(dest,tso,TSO_STRUCT_SIZE);
1167 stack_words = tso->stack + tso->stack_size - tso->sp;
1168 new_sp = (P_)dest + new_tso_size - stack_words;
1169 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
1171 /* relocate the stack pointers... */
1172 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
1173 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1175 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
1176 dest->stack_size = new_stack_size;
1178 /* and relocate the update frame list */
1179 relocate_TSO(tso, dest);
1181 /* Mark the old one as dead so we don't try to scavenge it during
1182 * garbage collection (the TSO will likely be on a mutables list in
1183 * some generation, but it'll get collected soon enough). It's
1184 * important to set the sp and su values to just beyond the end of
1185 * the stack, so we don't attempt to scavenge any part of the dead
1188 tso->whatNext = ThreadKilled;
1189 tso->sp = (P_)&(tso->stack[tso->stack_size]);
1190 tso->su = (StgUpdateFrame *)tso->sp;
1191 tso->why_blocked = NotBlocked;
1192 dest->mut_link = NULL;
1194 IF_DEBUG(sanity,checkTSO(tso));
1196 IF_DEBUG(scheduler,printTSO(dest));
1200 /* This will no longer work: KH */
1201 if (tso == MainTSO) { /* hack */
1208 /* -----------------------------------------------------------------------------
1209 Wake up a queue that was blocked on some resource.
1210 -------------------------------------------------------------------------- */
1213 unblockOneLocked(StgTSO *tso)
/* Caller must hold sched_mutex.  Move one blocked TSO onto the run
 * queue and mark it NotBlocked.  NOTE(review): the return statement
 * is elided here; from its use in awakenBlockedQueue (tso =
 * unblockOneLocked(tso)) it evidently returns the next TSO on the
 * blocked chain. */
1217 ASSERT(get_itbl(tso)->type == TSO);
1218 ASSERT(tso->why_blocked != NotBlocked);
1219 tso->why_blocked = NotBlocked;
1221 PUSH_ON_RUN_QUEUE(tso);
1223 IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
1228 unblockOne(StgTSO *tso)
/* Locking wrapper: wake a single blocked TSO under sched_mutex. */
1230 ACQUIRE_LOCK(&sched_mutex);
1231 tso = unblockOneLocked(tso);
1232 RELEASE_LOCK(&sched_mutex);
1237 awakenBlockedQueue(StgTSO *tso)
/* Wake an entire chain of blocked TSOs (e.g. an MVar or black-hole
 * queue), holding sched_mutex for the whole traversal. */
1239 ACQUIRE_LOCK(&sched_mutex);
1240 while (tso != END_TSO_QUEUE) {
1241 tso = unblockOneLocked(tso);
1243 RELEASE_LOCK(&sched_mutex);
1246 /* -----------------------------------------------------------------------------
1248 - usually called inside a signal handler so it mustn't do anything fancy.
1249 -------------------------------------------------------------------------- */
1252 interruptStgRts(void)
1258 /* -----------------------------------------------------------------------------
1261 This is for use when we raise an exception in another thread, which
1263 -------------------------------------------------------------------------- */
1266 unblockThread(StgTSO *tso)
/* Remove 'tso' from whatever queue it is blocked on -- an MVar's
 * wait queue, a black hole's blocking queue, another thread's
 * blocked_exceptions list, or the global I/O/delay blocked queue --
 * then put it on the run queue.  Used when raising an asynchronous
 * exception in another thread.  Locks: takes and releases sched_mutex. */
1270 ACQUIRE_LOCK(&sched_mutex);
1271 switch (tso->why_blocked) {
/* NOTE(review): this early return appears to leave sched_mutex held;
 * confirm against the full source whether a release is elided here. */
1274 return; /* not blocked */
1277 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
1279 StgTSO *last_tso = END_TSO_QUEUE;
1280 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
1283 for (t = mvar->head; t != END_TSO_QUEUE;
1284 last = &t->link, last_tso = t, t = t->link) {
/* if tso was the tail, the new tail is its predecessor */
1287 if (mvar->tail == tso) {
1288 mvar->tail = last_tso;
1293 barf("unblockThread (MVAR): TSO not found");
1296 case BlockedOnBlackHole:
1297 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
1299 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
1301 last = &bq->blocking_queue;
1302 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
1303 last = &t->link, t = t->link) {
1309 barf("unblockThread (BLACKHOLE): TSO not found");
1312 case BlockedOnException:
1314 StgTSO *target = tso->block_info.tso;
1316 ASSERT(get_itbl(target)->type == TSO);
1317 ASSERT(target->blocked_exceptions != NULL);
1319 last = &target->blocked_exceptions;
1320 for (t = target->blocked_exceptions; t != END_TSO_QUEUE;
1321 last = &t->link, t = t->link) {
1322 ASSERT(get_itbl(t)->type == TSO);
1328 barf("unblockThread (Exception): TSO not found");
1331 case BlockedOnDelay:
1333 case BlockedOnWrite:
/* I/O and delay blockers all live on the global blocked queue */
1335 StgTSO *prev = NULL;
1336 for (t = blocked_queue_hd; t != END_TSO_QUEUE;
1337 prev = t, t = t->link) {
1340 blocked_queue_hd = t->link;
1341 if (blocked_queue_tl == t) {
1342 blocked_queue_tl = END_TSO_QUEUE;
1345 prev->link = t->link;
1346 if (blocked_queue_tl == t) {
1347 blocked_queue_tl = prev;
1353 barf("unblockThread (I/O): TSO not found");
1357 barf("unblockThread");
/* finally: mark unblocked and make it runnable */
1361 tso->link = END_TSO_QUEUE;
1362 tso->why_blocked = NotBlocked;
1363 tso->block_info.closure = NULL;
1364 PUSH_ON_RUN_QUEUE(tso);
1365 RELEASE_LOCK(&sched_mutex);
1368 /* -----------------------------------------------------------------------------
1371 * The following function implements the magic for raising an
1372 * asynchronous exception in an existing thread.
1374 * We first remove the thread from any queue on which it might be
1375 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
1377 * We strip the stack down to the innermost CATCH_FRAME, building
1378 * thunks in the heap for all the active computations, so they can
1379 * be restarted if necessary. When we reach a CATCH_FRAME, we build
1380 * an application of the handler to the exception, and push it on
1381 * the top of the stack.
1383 * How exactly do we save all the active computations? We create an
1384 * AP_UPD for every UpdateFrame on the stack. Entering one of these
1385 * AP_UPDs pushes everything from the corresponding update frame
1386 * upwards onto the stack. (Actually, it pushes everything up to the
1387 * next update frame plus a pointer to the next AP_UPD object.
1388 * Entering the next AP_UPD object pushes more onto the stack until we
1389 * reach the last AP_UPD object - at which point the stack should look
1390 * exactly as it did when we killed the TSO and we can continue
1391 * execution by entering the closure on top of the stack.
1393 * We can also kill a thread entirely - this happens if either (a) the
1394 * exception passed to raiseAsync is NULL, or (b) there's no
1395 * CATCH_FRAME on the stack. In either case, we strip the entire
1396 * stack and replace the thread with a zombie.
1398 * -------------------------------------------------------------------------- */
/* deleteThread: kill 'tso' entirely.  Passing a NULL exception to
 * raiseAsync strips the whole stack and replaces the thread with a
 * zombie (see the design comment above raiseAsync).
 * NOTE(review): this listing is elided — the return type and braces
 * of this definition are not shown here.
 */
1401 deleteThread(StgTSO *tso)
1403 raiseAsync(tso,NULL);
/* raiseAsync: raise the asynchronous exception 'exception' in thread
 * 'tso', following the strategy in the large comment above: unblock
 * the thread, then strip its stack frame by frame, freezing each
 * chunk into an AP_UPD/PAP closure, until a CATCH_FRAME is found (or
 * the stack is exhausted).  If 'exception' is NULL, or no CATCH_FRAME
 * exists, the whole stack is stripped and the thread becomes a zombie.
 * NOTE(review): this listing is elided — some original lines (braces,
 * declarations, case labels, the enclosing frame-walking loop) are
 * not shown here.
 */
1407 raiseAsync(StgTSO *tso, StgClosure *exception)
1409 StgUpdateFrame* su = tso->su;
1410 StgPtr sp = tso->sp;
1412 /* Thread already dead? */
1413 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
1417 IF_DEBUG(scheduler, sched_belch("raising exception in thread %ld.", tso->id));
1419 /* Remove it from any blocking queues */
1422 /* The stack freezing code assumes there's a closure pointer on
1423 * the top of the stack. This isn't always the case with compiled
1424 * code, so we have to push a dummy closure on the top which just
1425 * returns to the next return address on the stack.
1427 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
1428 *(--sp) = (W_)&dummy_ret_closure;
/* Size (in words) of the stack chunk between sp and the current
 * frame 'su'; this chunk gets frozen into a heap closure below. */
1432 int words = ((P_)su - (P_)sp) - 1;
1436 /* If we find a CATCH_FRAME, and we've got an exception to raise,
1437 * then build PAP(handler,exception), and leave it on top of
1438 * the stack ready to enter.
1440 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
1441 StgCatchFrame *cf = (StgCatchFrame *)su;
1442 /* we've got an exception to raise, so let's pass it to the
1443 * handler in this frame.
1445 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
1446 TICK_ALLOC_UPD_PAP(2,0);
1447 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
1450 ap->fun = cf->handler;
1451 ap->payload[0] = (P_)exception;
1453 /* sp currently points to the word above the CATCH_FRAME on the stack.
1455 sp += sizeofW(StgCatchFrame);
1458 /* Restore the blocked/unblocked state for asynchronous exceptions
1459 * at the CATCH_FRAME.
1461 * If exceptions were unblocked at the catch, arrange that they
1462 * are unblocked again after executing the handler by pushing an
1463 * unblockAsyncExceptions_ret stack frame.
1465 if (!cf->exceptions_blocked) {
1466 *(sp--) = (W_)&unblockAsyncExceptionszh_ret_info;
1469 /* Ensure that async exceptions are blocked when running the handler.
1471 if (tso->blocked_exceptions == NULL) {
1472 tso->blocked_exceptions = END_TSO_QUEUE;
1475 /* Put the newly-built PAP on top of the stack, ready to execute
1476 * when the thread restarts.
1480 tso->whatNext = ThreadEnterGHC;
1484 /* First build an AP_UPD consisting of the stack chunk above the
1485 * current update frame, with the top word on the stack as the
1488 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
1493 ap->fun = (StgClosure *)sp[0];
/* Copy the chunk's remaining words into the AP_UPD's payload. */
1495 for(i=0; i < (nat)words; ++i) {
1496 ap->payload[i] = (P_)*sp++;
/* Dispatch on the kind of frame we have just frozen past. */
1499 switch (get_itbl(su)->type) {
1503 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
1504 TICK_ALLOC_UP_THK(words+1,0);
1507 fprintf(stderr, "scheduler: Updating ");
1508 printPtr((P_)su->updatee);
1509 fprintf(stderr, " with ");
1510 printObj((StgClosure *)ap);
1513 /* Replace the updatee with an indirection - happily
1514 * this will also wake up any threads currently
1515 * waiting on the result.
1517 UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
1519 sp += sizeofW(StgUpdateFrame) -1;
1520 sp[0] = (W_)ap; /* push onto stack */
/* CATCH_FRAME case (reached when exception == NULL): re-wrap the
 * frozen chunk together with the handler so the catch survives. */
1526 StgCatchFrame *cf = (StgCatchFrame *)su;
1529 /* We want a PAP, not an AP_UPD. Fortunately, the
1530 * layout's the same.
1532 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1533 TICK_ALLOC_UPD_PAP(words+1,0);
1535 /* now build o = FUN(catch,ap,handler) */
1536 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
1537 TICK_ALLOC_FUN(2,0);
1538 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
1539 o->payload[0] = (StgClosure *)ap;
1540 o->payload[1] = cf->handler;
1543 fprintf(stderr, "scheduler: Built ");
1544 printObj((StgClosure *)o);
1547 /* pop the old handler and put o on the stack */
1549 sp += sizeofW(StgCatchFrame) - 1;
/* SEQ_FRAME case (case label elided in this listing): wrap the
 * frozen chunk in a seq application so the seq is preserved. */
1556 StgSeqFrame *sf = (StgSeqFrame *)su;
1559 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1560 TICK_ALLOC_UPD_PAP(words+1,0);
1562 /* now build o = FUN(seq,ap) */
1563 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
1564 TICK_ALLOC_SE_THK(1,0);
1565 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
1566 payloadCPtr(o,0) = (StgClosure *)ap;
1569 fprintf(stderr, "scheduler: Built ");
1570 printObj((StgClosure *)o);
1573 /* pop the old handler and put o on the stack */
1575 sp += sizeofW(StgSeqFrame) - 1;
1581 /* We've stripped the entire stack, the thread is now dead. */
1582 sp += sizeofW(StgStopFrame) - 1;
1583 sp[0] = (W_)exception; /* save the exception */
1584 tso->whatNext = ThreadKilled;
1585 tso->su = (StgUpdateFrame *)(sp+1);
1596 /* -----------------------------------------------------------------------------
1598 -------------------------------------------------------------------------- */
1602 sched_belch(char *s, ...)
1607 fprintf(stderr, "scheduler (task %ld): ", pthread_self());
1609 fprintf(stderr, "scheduler: ");
1611 vfprintf(stderr, s, ap);
1612 fprintf(stderr, "\n");