1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.30 1999/11/08 15:30:39 sewardj Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
10 /* Version with scheduler monitor support for SMPs.
12 This design provides a high-level API to create and schedule threads etc.
13 as documented in the SMP design document.
15 It uses a monitor design controlled by a single mutex to exercise control
16 over accesses to shared data structures, and builds on the Posix threads
19 The majority of state is shared. In order to keep essential per-task state,
20 there is a Capability structure, which contains all the information
21 needed to run a thread: its STG registers, a pointer to its TSO, a
22 nursery etc. During STG execution, a pointer to the capability is
23 kept in a register (BaseReg).
25 In a non-SMP build, there is one global capability, namely MainRegTable.
36 #include "StgStartup.h"
40 #include "StgMiscClosures.h"
42 #include "Evaluator.h"
46 #include "Profiling.h"
52 * These are the threads which clients have requested that we run.
54 * In an SMP build, we might have several concurrent clients all
55 * waiting for results, and each one will wait on a condition variable
56 * until the result is available.
58 * In non-SMP, clients are strictly nested: the first client calls
59 * into the RTS, which might call out again to C with a _ccall_GC, and
60 * eventually re-enter the RTS.
62 * Main threads information is kept in a linked list:
64 typedef struct StgMainThread_ {
69 pthread_cond_t wakeup;
71 struct StgMainThread_ *link;
75 * Locks required: sched_mutex.
77 static StgMainThread *main_threads;
80 * Locks required: sched_mutex.
82 StgTSO *run_queue_hd, *run_queue_tl;
83 StgTSO *blocked_queue_hd, *blocked_queue_tl;
85 /* Threads suspended in _ccall_GC.
86 * Locks required: sched_mutex.
88 static StgTSO *suspended_ccalling_threads;
91 static rtsBool in_ccall_gc;
94 static void GetRoots(void);
95 static StgTSO *threadStackOverflow(StgTSO *tso);
97 /* KH: The following two flags are shared memory locations. There is no need
98 to lock them, since they are only unset at the end of a scheduler
102 /* flag set by signal handler to precipitate a context switch */
104 /* if this flag is set as well, give up execution */
105 static nat interrupted;
107 /* Next thread ID to allocate.
108 * Locks required: sched_mutex
110 StgThreadID next_thread_id = 1;
113 * Pointers to the state of the current thread.
114 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
115 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
118 /* The smallest stack size that makes any sense is:
119 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
120 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
121 * + 1 (the realworld token for an IO thread)
122 * + 1 (the closure to enter)
124 * A thread with this stack will bomb immediately with a stack
125 * overflow, which will increase its stack size.
128 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
130 /* Free capability list.
131 * Locks required: sched_mutex.
134 Capability *free_capabilities; /* Available capabilities for running threads */
135 nat n_free_capabilities; /* total number of available capabilities */
137 Capability MainRegTable; /* for non-SMP, we have one global capability */
142 /* All our current task ids, saved in case we need to kill them later.
148 void addToBlockedQueue ( StgTSO *tso );
150 static void schedule ( void );
151 static void initThread ( StgTSO *tso, nat stack_size );
152 void interruptStgRts ( void );
155 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
156 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
157 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
158 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
163 /* -----------------------------------------------------------------------------
164 Main scheduling loop.
166 We use round-robin scheduling, each thread returning to the
167 scheduler loop when one of these conditions is detected:
170 * timer expires (thread yields)
175 Locking notes: we acquire the scheduler lock once at the beginning
176 of the scheduler loop, and release it when
178 * running a thread, or
179 * waiting for work, or
180 * waiting for a GC to complete.
182 -------------------------------------------------------------------------- */
/* Main SMP scheduler loop: each Posix task loops here, grabbing a thread
 * and a free Capability, running the thread, then handling its return
 * code.  The sched_mutex is held except while actually running a thread,
 * waiting for work, or waiting for a GC.
 * NOTE(review): this excerpt elides many original lines (braces, case
 * labels, #ifdefs); comments below describe only what is visible. */
189 StgThreadReturnCode ret;
191 ACQUIRE_LOCK(&sched_mutex);
195 /* Check whether any waiting threads need to be woken up.
196 * If the run queue is empty, we can wait indefinitely for
197 * something to happen.
199 if (blocked_queue_hd != END_TSO_QUEUE) {
200 awaitEvent(run_queue_hd == END_TSO_QUEUE);
203 /* check for signals each time around the scheduler */
205 if (signals_pending()) {
206 start_signal_handlers();
211 /* If there's a GC pending, don't do anything until it has
215 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): waiting for GC\n",
217 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
220 /* block until we've got a thread on the run queue and a free
223 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
225 fprintf(stderr, "schedule (task %ld): waiting for work\n",
227 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
229 fprintf(stderr, "schedule (task %ld): work now available\n",
234 /* grab a thread from the run queue
/* claim a free capability (caller holds sched_mutex) */
241 cap = free_capabilities;
242 free_capabilities = cap->link;
243 n_free_capabilities--;
248 cap->rCurrentTSO = t;
250 /* set the context_switch flag
252 if (run_queue_hd == END_TSO_QUEUE)
/* drop the lock while actually running Haskell code */
257 RELEASE_LOCK(&sched_mutex);
260 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): running thread %d\n", pthread_self(),t->id));
262 IF_DEBUG(scheduler,fprintf(stderr,"schedule: running thread %d\n",t->id));
265 /* Run the current thread
267 switch (cap->rCurrentTSO->whatNext) {
270 /* Thread already finished, return to scheduler. */
271 ret = ThreadFinished;
274 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
277 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
279 case ThreadEnterHugs:
283 IF_DEBUG(scheduler,belch("schedule: entering Hugs"));
284 c = (StgClosure *)(cap->rCurrentTSO->sp[0]);
285 cap->rCurrentTSO->sp += 1;
290 barf("Panic: entered a BCO but no bytecode interpreter in this build");
293 barf("schedule: invalid whatNext field");
296 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* re-acquire the lock to process the thread's return code */
301 ACQUIRE_LOCK(&sched_mutex);
304 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): ", pthread_self()););
306 IF_DEBUG(scheduler,fprintf(stderr,"schedule: "););
308 t = cap->rCurrentTSO;
312 /* make all the running tasks block on a condition variable,
313 * maybe set context_switch and wait till they all pile in,
314 * then have them wait on a GC condition variable.
316 IF_DEBUG(scheduler,belch("thread %ld stopped: HeapOverflow", t->id));
319 ready_to_gc = rtsTrue;
320 context_switch = 1; /* stop other threads ASAP */
321 PUSH_ON_RUN_QUEUE(t);
325 /* just adjust the stack for this thread, then pop it back
328 IF_DEBUG(scheduler,belch("thread %ld stopped, StackOverflow", t->id));
332 /* enlarge the stack */
333 StgTSO *new_t = threadStackOverflow(t);
335 /* This TSO has moved, so update any pointers to it from the
336 * main thread stack. It better not be on any other queues...
339 for (m = main_threads; m != NULL; m = m->link) {
344 PUSH_ON_RUN_QUEUE(new_t);
349 /* put the thread back on the run queue. Then, if we're ready to
350 * GC, check whether this is the last task to stop. If so, wake
351 * up the GC thread. getThread will block during a GC until the
355 if (t->whatNext == ThreadEnterHugs) {
356 /* ToDo: or maybe a timer expired when we were in Hugs?
357 * or maybe someone hit ctrl-C
359 belch("thread %ld stopped to switch to Hugs", t->id);
361 belch("thread %ld stopped, yielding", t->id);
365 APPEND_TO_RUN_QUEUE(t);
369 /* don't need to do anything. Either the thread is blocked on
370 * I/O, in which case we'll have called addToBlockedQueue
371 * previously, or it's blocked on an MVar or Blackhole, in which
372 * case it'll be on the relevant queue already.
375 fprintf(stderr, "thread %d stopped, ", t->id);
376 printThreadBlockage(t);
377 fprintf(stderr, "\n"));
382 /* Need to check whether this was a main thread, and if so, signal
383 * the task that started it with the return value. If we have no
384 * more main threads, we probably need to stop all the tasks until
387 IF_DEBUG(scheduler,belch("thread %ld finished", t->id));
388 t->whatNext = ThreadComplete;
392 barf("doneThread: invalid thread return code");
/* return the capability to the free list */
396 cap->link = free_capabilities;
397 free_capabilities = cap;
398 n_free_capabilities++;
/* last task to stop (all capabilities free) performs the GC */
402 if (ready_to_gc && n_free_capabilities == RtsFlags.ConcFlags.nNodes) {
406 /* everybody back, start the GC.
407 * Could do it in this thread, or signal a condition var
408 * to do it in another thread. Either way, we need to
409 * broadcast on gc_pending_cond afterward.
412 IF_DEBUG(scheduler,belch("schedule (task %ld): doing GC", pthread_self()));
414 GarbageCollect(GetRoots);
415 ready_to_gc = rtsFalse;
417 pthread_cond_broadcast(&gc_pending_cond);
421 /* Go through the list of main threads and wake up any
422 * clients whose computations have finished. ToDo: this
423 * should be done more efficiently without a linear scan
424 * of the main threads list, somehow...
428 StgMainThread *m, **prev;
429 prev = &main_threads;
430 for (m = main_threads; m != NULL; m = m->link) {
431 if (m->tso->whatNext == ThreadComplete) {
/* NOTE(review): m->ret is dereferenced without a NULL check here,
 * unlike line 459 below which tests `if (m->ret)` first — a guard
 * may exist on an elided line; verify against the full source. */
433 *(m->ret) = (StgClosure *)m->tso->sp[0];
437 pthread_cond_broadcast(&m->wakeup);
439 if (m->tso->whatNext == ThreadKilled) {
442 pthread_cond_broadcast(&m->wakeup);
447 /* If our main thread has finished or been killed, return.
448 * If we were re-entered as a result of a _ccall_gc, then
449 * pop the blocked thread off the ccalling_threads stack back
453 StgMainThread *m = main_threads;
454 if (m->tso->whatNext == ThreadComplete
455 || m->tso->whatNext == ThreadKilled) {
456 main_threads = main_threads->link;
457 if (m->tso->whatNext == ThreadComplete) {
458 /* we finished successfully, fill in the return value */
459 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
470 } /* end of while(1) */
473 /* -----------------------------------------------------------------------------
474 * Suspending & resuming Haskell threads.
476 * When making a "safe" call to C (aka _ccall_GC), the task gives back
477 * its capability before calling the C function. This allows another
478 * task to pick up the capability and carry on running Haskell
479 * threads. It also means that if the C call blocks, it won't lock
482 * The Haskell thread making the C call is put to sleep for the
483 * duration of the call, on the suspended_ccalling_threads queue. We
484 * give out a token to the task, which it can use to resume the thread
485 * on return from the C function.
486 * -------------------------------------------------------------------------- */
/* Suspend the Haskell thread that is about to make a "safe" C call
 * (_ccall_GC): park its TSO on suspended_ccalling_threads, give the
 * capability back so other tasks can run Haskell threads, and return
 * the thread id as a token for a later resumeThread(). */
489 suspendThread( Capability *cap )
493 ACQUIRE_LOCK(&sched_mutex);
497 fprintf(stderr, "schedule (task %ld): thread %d did a _ccall_gc\n",
498 pthread_self(), cap->rCurrentTSO->id));
501 fprintf(stderr, "schedule: thread %d did a _ccall_gc\n",
502 cap->rCurrentTSO->id));
/* save the thread's state and push it onto the suspended-ccall list */
505 threadPaused(cap->rCurrentTSO);
506 cap->rCurrentTSO->link = suspended_ccalling_threads;
507 suspended_ccalling_threads = cap->rCurrentTSO;
509 /* Use the thread ID as the token; it should be unique */
510 tok = cap->rCurrentTSO->id;
/* hand the capability back to the free pool */
513 cap->link = free_capabilities;
514 free_capabilities = cap;
515 n_free_capabilities++;
518 RELEASE_LOCK(&sched_mutex);
/* Resume a Haskell thread previously parked by suspendThread().  The
 * token is the thread id handed out there: find the matching TSO on
 * suspended_ccalling_threads, unlink it, wait for a free capability,
 * and install the TSO as that capability's current thread. */
523 resumeThread( StgInt tok )
528 ACQUIRE_LOCK(&sched_mutex);
/* linear scan for the TSO whose id matches the token */
530 prev = &suspended_ccalling_threads;
531 for (tso = suspended_ccalling_threads;
532 tso != END_TSO_QUEUE;
533 prev = &tso->link, tso = tso->link) {
534 if (tso->id == (StgThreadID)tok) {
539 if (tso == END_TSO_QUEUE) {
540 barf("resumeThread: thread not found");
/* block on thread_ready_cond until a capability comes free */
544 while (free_capabilities == NULL) {
546 fprintf(stderr,"schedule (task %ld): waiting to resume\n",
548 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
549 IF_DEBUG(scheduler,fprintf(stderr,
550 "schedule (task %ld): resuming thread %d\n",
551 pthread_self(), tso->id));
553 cap = free_capabilities;
554 free_capabilities = cap->link;
555 n_free_capabilities--;
560 cap->rCurrentTSO = tso;
562 RELEASE_LOCK(&sched_mutex);
566 /* -----------------------------------------------------------------------------
568 * -------------------------------------------------------------------------- */
569 static void unblockThread(StgTSO *tso);
571 /* -----------------------------------------------------------------------------
572 * Comparing Thread ids.
574 * This is used from STG land in the implementation of the
575 * instances of Eq/Ord for ThreadIds.
576 * -------------------------------------------------------------------------- */
/* Three-way comparison of two thread ids, used by the Eq/Ord ThreadId
 * instances: negative if tso1's id is smaller, positive if larger.
 * (The equal-ids return and closing brace are on elided lines.) */
578 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
580 StgThreadID id1 = tso1->id;
581 StgThreadID id2 = tso2->id;
583 if (id1 < id2) return (-1);
584 if (id1 > id2) return 1;
588 /* -----------------------------------------------------------------------------
591 The new thread starts with the given stack size. Before the
592 scheduler can run, however, this thread needs to have a closure
593 (and possibly some arguments) pushed on its stack. See
594 pushClosure() in Schedule.h.
596 createGenThread() and createIOThread() (in SchedAPI.h) are
597 convenient packaged versions of this function.
598 -------------------------------------------------------------------------- */
/* Allocate and initialise a new TSO.  stack_size is a word count
 * (it is compared against MIN_STACK_WORDS + TSO_STRUCT_SIZEW) and is
 * clamped up to the minimum viable size; the caller must still push a
 * closure on the new thread's stack before it can be scheduled. */
601 createThread(nat stack_size)
605 /* catch ridiculously small stack sizes */
606 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
607 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
610 tso = (StgTSO *)allocate(stack_size);
611 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
/* stack_size passed on to initThread excludes the TSO header */
613 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* Fill in a freshly-allocated TSO: info table, a unique thread id,
 * stack limits/pointers, and a stop frame at the top of the stack so
 * the thread terminates cleanly when it falls off the end. */
618 initThread(StgTSO *tso, nat stack_size)
620 SET_INFO(tso,&TSO_info);
621 tso->whatNext = ThreadEnterGHC;
623 /* tso->id needs to be unique. For now we use a heavyweight mutex to
624 protect the increment operation on next_thread_id.
625 In future, we could use an atomic increment instead.
628 ACQUIRE_LOCK(&sched_mutex);
629 tso->id = next_thread_id++;
630 RELEASE_LOCK(&sched_mutex);
632 tso->why_blocked = NotBlocked;
/* splim leaves RESERVED_STACK_WORDS of headroom for overflow recovery */
634 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
635 tso->stack_size = stack_size;
636 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
/* stack grows downward from the top of the allocated area */
638 tso->sp = (P_)&(tso->stack) + stack_size;
641 tso->prof.CCCS = CCS_MAIN;
644 /* put a stop frame on the stack */
645 tso->sp -= sizeofW(StgStopFrame);
646 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
647 tso->su = (StgUpdateFrame*)tso->sp;
649 IF_DEBUG(scheduler,belch("schedule: Initialised thread %ld, stack size = %lx words",
650 tso->id, tso->stack_size));
655 /* -----------------------------------------------------------------------------
658 * scheduleThread puts a thread on the head of the runnable queue.
659 * This will usually be done immediately after a thread is created.
660 * The caller of scheduleThread must create the thread using e.g.
661 * createThread and push an appropriate closure
662 * on this thread's stack before the scheduler is invoked.
663 * -------------------------------------------------------------------------- */
/* Make a created thread runnable by pushing it on the head of the run
 * queue, under sched_mutex.  The thread must already have a closure
 * pushed on its stack (see createThread/pushClosure). */
666 scheduleThread(StgTSO *tso)
668 ACQUIRE_LOCK(&sched_mutex);
670 /* Put the new thread on the head of the runnable queue. The caller
671 * better push an appropriate closure on this thread's stack
672 * beforehand. In the SMP case, the thread may start running as
673 * soon as we release the scheduler lock below.
675 PUSH_ON_RUN_QUEUE(tso);
678 IF_DEBUG(scheduler,printTSO(tso));
679 RELEASE_LOCK(&sched_mutex);
683 /* -----------------------------------------------------------------------------
686 * Start up Posix threads to run each of the scheduler tasks.
687 * I believe the task ids are not needed in the system as defined.
689 * -------------------------------------------------------------------------- */
693 taskStart( void *arg STG_UNUSED )
700 /* -----------------------------------------------------------------------------
703 * Initialise the scheduler. This resets all the queues - if the
704 * queues contained any threads, they'll be garbage collected at the
707 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
708 * -------------------------------------------------------------------------- */
/* SIGTERM handler run by each task during shutdown (installed in
 * initScheduler, sent by exitScheduler): record this task's mutator
 * time, then update shared termination state under term_mutex.
 * NOTE(review): the statements between the lock/unlock (presumably
 * decrementing await_death) are on elided lines — verify. */
712 term_handler(int sig STG_UNUSED)
715 pthread_t me = pthread_self();
/* find our own entry in task_ids and finalise its mut_time */
717 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
718 if (task_ids[i].id == me) {
719 task_ids[i].mut_time = usertime() - task_ids[i].gc_time;
720 if (task_ids[i].mut_time < 0.0) {
721 task_ids[i].mut_time = 0.0;
725 ACQUIRE_LOCK(&term_mutex);
727 RELEASE_LOCK(&term_mutex);
/* Initialise the scheduler: reset all thread queues, install the
 * SIGTERM termination handler, allocate the per-task Capabilities,
 * and start the Posix worker tasks.  Should only be called once
 * (it now starts the tasks itself — see comment above). */
732 void initScheduler(void)
734 run_queue_hd = END_TSO_QUEUE;
735 run_queue_tl = END_TSO_QUEUE;
736 blocked_queue_hd = END_TSO_QUEUE;
737 blocked_queue_tl = END_TSO_QUEUE;
739 suspended_ccalling_threads = END_TSO_QUEUE;
746 enteredCAFs = END_CAF_LIST;
748 /* Install the SIGTERM handler (term_handler above) */
751 struct sigaction action,oact;
753 action.sa_handler = term_handler;
754 sigemptyset(&action.sa_mask);
/* NOTE(review): action.sa_flags is not set in the visible lines —
 * likely initialised on an elided line; verify (uninitialised
 * sa_flags passed to sigaction is undefined behaviour). */
756 if (sigaction(SIGTERM, &action, &oact) != 0) {
757 barf("can't install TERM handler");
763 /* Allocate N Capabilities */
766 Capability *cap, *prev;
769 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
770 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
774 free_capabilities = cap;
775 n_free_capabilities = RtsFlags.ConcFlags.nNodes;
777 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Allocated %d capabilities\n",
778 n_free_capabilities););
790 /* make some space for saving all the thread ids */
791 task_ids = stgMallocBytes(RtsFlags.ConcFlags.nNodes * sizeof(task_info),
792 "initScheduler:task_ids");
794 /* and create all the threads */
795 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
796 r = pthread_create(&tid,NULL,taskStart,NULL);
798 barf("startTasks: Can't create new Posix thread");
800 task_ids[i].id = tid;
801 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Started task: %ld\n",tid););
/* Shut down all the scheduler tasks.  Two strategies are visible
 * (presumably selected by elided #ifdefs): pthread_cancel + join,
 * or sending each task SIGTERM (handled by term_handler) and spinning
 * until await_death reaches zero. */
807 exitScheduler( void )
812 /* Don't want to use pthread_cancel, since we'd have to install
813 * these silly exception handlers (pthread_cleanup_{push,pop}) around
817 /* Cancel all our tasks */
818 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
819 pthread_cancel(task_ids[i].id);
822 /* Wait for all the tasks to terminate */
823 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
824 IF_DEBUG(scheduler,fprintf(stderr,"schedule: waiting for task %ld\n",
826 pthread_join(task_ids[i].id, NULL);
830 /* Send 'em all a SIGTERM. That should shut 'em up.
832 await_death = RtsFlags.ConcFlags.nNodes;
833 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
834 pthread_kill(task_ids[i].id,SIGTERM);
/* wait for every task's term_handler to report in */
836 while (await_death > 0) {
842 /* -----------------------------------------------------------------------------
843 Managing the per-task allocation areas.
845 Each capability comes with an allocation area. These are
846 fixed-length block lists into which allocation can be done.
848 ToDo: no support for two-space collection at the moment???
849 -------------------------------------------------------------------------- */
851 /* -----------------------------------------------------------------------------
852 * waitThread is the external interface for running a new computation
853 * and waiting for the result.
855 * In the non-SMP case, we create a new main thread, push it on the
856 * main-thread stack, and invoke the scheduler to run it. The
857 * scheduler will return when the top main thread on the stack has
858 * completed or died, and fill in the necessary fields of the
859 * main_thread structure.
861 * In the SMP case, we create a main thread as before, but we then
862 * create a new condition variable and sleep on it. When our new
863 * main thread has completed, we'll be woken up and the status/result
864 * will be in the main_thread struct.
865 * -------------------------------------------------------------------------- */
/* External interface: run a computation and wait for its result.
 * Allocates an StgMainThread record with a per-client condition
 * variable, links it onto main_threads, and sleeps on m->wakeup until
 * the scheduler fills in the status/result (SMP path).
 * NOTE(review): pthread_cond_wait should sit inside a predicate loop;
 * the loop test is presumably on the elided lines around it — verify. */
868 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
871 SchedulerStatus stat;
873 ACQUIRE_LOCK(&sched_mutex);
875 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
881 pthread_cond_init(&m->wakeup, NULL);
884 m->link = main_threads;
888 pthread_cond_wait(&m->wakeup, &sched_mutex);
894 ASSERT(stat != NoStatus);
897 pthread_cond_destroy(&m->wakeup);
901 RELEASE_LOCK(&sched_mutex);
/* Non-SMP scheduler: run threads round-robin from the run queue until
 * the given main thread completes or is killed, filling in *ret_val on
 * success.  Supports re-entry from a _ccall_gc via the ccalling_threads
 * stack.  NOTE(review): this excerpt elides many original lines
 * (braces, case labels, #ifdefs); comments describe only what is
 * visible. */
907 SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)
910 StgThreadReturnCode ret;
914 /* Return value is NULL by default, it is only filled in if the
915 * main thread completes successfully.
917 if (ret_val) { *ret_val = NULL; }
919 /* Save away a pointer to the main thread so that we can keep track
920 * of it should a garbage collection happen. We keep a stack of
921 * main threads in order to support scheduler re-entry. We can't
922 * use the normal TSO linkage for this stack, because the main TSO
923 * may need to be linked onto other queues.
925 main_threads[next_main_thread] = main;
926 MainTSO = &main_threads[next_main_thread];
929 fprintf(stderr, "Scheduler entered: nesting = %d\n",
932 /* Are we being re-entered?
934 if (CurrentTSO != NULL) {
935 /* This happens when a _ccall_gc from Haskell ends up re-entering
938 * Block the current thread (put it on the ccalling_queue) and
939 * continue executing. The calling thread better have stashed
940 * away its state properly and left its stack with a proper stack
943 threadPaused(CurrentTSO);
944 CurrentTSO->link = ccalling_threads;
945 ccalling_threads = CurrentTSO;
946 in_ccall_gc = rtsTrue;
948 fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",
951 in_ccall_gc = rtsFalse;
954 /* Take a thread from the run queue.
958 while (t != END_TSO_QUEUE) {
961 /* If we have more threads on the run queue, set up a context
962 * switch at some point in the future.
964 if (run_queue_hd != END_TSO_QUEUE || blocked_queue_hd != END_TSO_QUEUE) {
969 IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));
971 /* Be friendly to the storage manager: we're about to *run* this
972 * thread, so we better make sure the TSO is mutable.
974 if (t->mut_link == NULL) {
975 recordMutable((StgMutClosure *)t);
978 /* Run the current thread */
979 switch (t->whatNext) {
982 /* thread already killed. Drop it and carry on. */
985 ret = StgRun((StgFunPtr) stg_enterStackTop);
988 ret = StgRun((StgFunPtr) stg_returnToStackTop);
990 case ThreadEnterHugs:
993 IF_DEBUG(scheduler,belch("entering Hugs"));
995 /* CHECK_SENSIBLE_REGS(); */
997 StgClosure* c = (StgClosure *)Sp[0];
1005 barf("Panic: entered a BCO but no bytecode interpreter in this build");
1008 barf("schedule: invalid whatNext field");
1011 /* We may have garbage collected while running the thread
1012 * (eg. something nefarious like _ccall_GC_ performGC), and hence
1013 * CurrentTSO may have moved. Update t to reflect this.
1018 /* Costs for the scheduler are assigned to CCS_SYSTEM */
1026 IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
1028 PUSH_ON_RUN_QUEUE(t);
1029 GarbageCollect(GetRoots);
1033 IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
1036 /* enlarge the stack */
1037 StgTSO *new_t = threadStackOverflow(t);
1039 /* This TSO has moved, so update any pointers to it from the
1040 * main thread stack. It better not be on any other queues...
1043 for (i = 0; i < next_main_thread; i++) {
1044 if (main_threads[i] == t) {
1045 main_threads[i] = new_t;
/* NOTE(review): this pushes the OLD TSO `t`, but threadStackOverflow
 * relocated the thread into `new_t` and marks the old one
 * ThreadKilled; the SMP path above pushes new_t instead
 * (PUSH_ON_RUN_QUEUE(new_t)).  Looks like a bug — verify against
 * the full source. */
1050 PUSH_ON_RUN_QUEUE(t);
1053 case ThreadYielding:
1055 if (t->whatNext == ThreadEnterHugs) {
1056 /* ToDo: or maybe a timer expired when we were in Hugs?
1057 * or maybe someone hit ctrl-C
1059 belch("Thread %ld stopped to switch to Hugs\n", t->id);
1061 belch("Thread %ld stopped, timer expired\n", t->id);
/* interrupted: drain the run queue and bail out */
1066 IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
1068 while (run_queue_hd != END_TSO_QUEUE) {
1069 run_queue_hd = t->link;
1072 run_queue_tl = END_TSO_QUEUE;
1073 /* ToDo: should I do the same with blocked queues? */
1077 /* Put the thread back on the run queue, at the end.
1078 * t->link is already set to END_TSO_QUEUE.
1080 APPEND_TO_RUN_QUEUE(t);
1085 fprintf(stderr, "Thread %d stopped, ", t->id);
1086 printThreadBlockage(t);
1087 fprintf(stderr, "\n"));
1089 /* assume the thread has put itself on some blocked queue
1094 case ThreadFinished:
1095 IF_DEBUG(scheduler,fprintf(stderr,"thread %ld finished\n", t->id));
1096 t->whatNext = ThreadComplete;
1100 barf("schedule: invalid thread return code");
1103 /* check for signals each time around the scheduler */
1105 if (signals_pending()) {
1106 start_signal_handlers();
1109 /* If our main thread has finished or been killed, return.
1110 * If we were re-entered as a result of a _ccall_gc, then
1111 * pop the blocked thread off the ccalling_threads stack back
1114 if ((*MainTSO)->whatNext == ThreadComplete
1115 || (*MainTSO)->whatNext == ThreadKilled) {
1118 CurrentTSO = ccalling_threads;
1119 ccalling_threads = ccalling_threads->link;
1120 /* remember to stub the link field of CurrentTSO */
1121 CurrentTSO->link = END_TSO_QUEUE;
1123 if ((*MainTSO)->whatNext == ThreadComplete) {
1124 /* we finished successfully, fill in the return value */
1125 if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };
1133 /* Checked whether any waiting threads need to be woken up.
1134 * If the run queue is empty, we can wait indefinitely for
1135 * something to happen.
1137 if (blocked_queue_hd != END_TSO_QUEUE) {
1138 awaitEvent(run_queue_hd == END_TSO_QUEUE);
1141 t = POP_RUN_QUEUE();
1144 /* If we got to here, then we ran out of threads to run, but the
1145 * main thread hasn't finished yet. It must be blocked on an MVar
1146 * or a black hole somewhere, so we return deadlock.
1152 /* -----------------------------------------------------------------------------
1153 Debugging: why is a thread blocked
1154 -------------------------------------------------------------------------- */
/* Debugging aid: print (to stderr, no trailing newline) a human-
 * readable description of why the given thread is blocked, switching
 * on tso->why_blocked. */
1157 void printThreadBlockage(StgTSO *tso)
1159 switch (tso->why_blocked) {
1161 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
1163 case BlockedOnWrite:
1164 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
1166 case BlockedOnDelay:
1167 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
1170 fprintf(stderr,"blocked on an MVar");
1172 case BlockedOnBlackHole:
1173 fprintf(stderr,"blocked on a black hole");
1176 fprintf(stderr,"not blocked");
1182 /* -----------------------------------------------------------------------------
1183 Where are the roots that we know about?
1185 - all the threads on the runnable queue
1186 - all the threads on the blocked queue
1187 - all the thread currently executing a _ccall_GC
1188 - all the "main threads"
1190 -------------------------------------------------------------------------- */
1192 /* This has to be protected either by the scheduler monitor, or by the
1193 garbage collection monitor (probably the latter).
/* Report the scheduler's GC roots: both ends of the run and blocked
 * queues, every main thread's TSO, and the suspended-ccall list.
 * MarkRoot may move each object, so the pointers are written back.
 * Must be called under the scheduler or GC monitor (see above). */
1197 static void GetRoots(void)
1201 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
1202 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
1204 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
1205 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
1207 for (m = main_threads; m != NULL; m = m->link) {
1208 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
1210 suspended_ccalling_threads =
1211 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
1214 /* -----------------------------------------------------------------------------
1217 This is the interface to the garbage collector from Haskell land.
1218 We provide this so that external C code can allocate and garbage
1219 collect when called from Haskell via _ccall_GC.
1221 It might be useful to provide an interface whereby the programmer
1222 can specify more roots (ToDo).
1224 This needs to be protected by the GC condition variable above. KH.
1225 -------------------------------------------------------------------------- */
1227 void (*extra_roots)(void);
1232 GarbageCollect(GetRoots);
1238 GetRoots(); /* the scheduler's roots */
1239 extra_roots(); /* the user's roots */
1243 performGCWithRoots(void (*get_roots)(void))
1245 extra_roots = get_roots;
1247 GarbageCollect(AllRoots);
1250 /* -----------------------------------------------------------------------------
1253 If the thread has reached its maximum stack size,
1254 then bomb out. Otherwise relocate the TSO into a larger chunk of
1255 memory and adjust its stack size appropriately.
1256 -------------------------------------------------------------------------- */
/* Handle a thread's stack overflow.  If the thread is already at its
 * maximum stack size, raise the StackOverflow exception in it;
 * otherwise relocate the TSO into a larger allocation (roughly double,
 * capped at the maximum, rounded to whole blocks), fix up the stack
 * pointers and update-frame chain, and mark the old TSO dead.
 * Returns the new TSO (return statements are on elided lines). */
1259 threadStackOverflow(StgTSO *tso)
1261 nat new_stack_size, new_tso_size, diff, stack_words;
1265 if (tso->stack_size >= tso->max_stack_size) {
1267 /* If we're debugging, just print out the top of the stack */
1268 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
1272 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
1275 /* Send this thread the StackOverflow exception */
1276 raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
1281 /* Try to double the current stack size. If that takes us over the
1282 * maximum stack size for this thread, then use the maximum instead.
1283 * Finally round up so the TSO ends up as a whole number of blocks.
1285 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
1286 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
1287 TSO_STRUCT_SIZE)/sizeof(W_);
1288 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
1289 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
1291 IF_DEBUG(scheduler, fprintf(stderr,"schedule: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
1293 dest = (StgTSO *)allocate(new_tso_size);
1294 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
1296 /* copy the TSO block and the old stack into the new area */
1297 memcpy(dest,tso,TSO_STRUCT_SIZE);
/* live stack occupies the top of the old area; copy it to the top
 * of the new area so the extra space appears below sp */
1298 stack_words = tso->stack + tso->stack_size - tso->sp;
1299 new_sp = (P_)dest + new_tso_size - stack_words;
1300 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
1302 /* relocate the stack pointers... */
1303 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
1304 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1306 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
1307 dest->stack_size = new_stack_size;
1309 /* and relocate the update frame list */
1310 relocate_TSO(tso, dest);
1312 /* Mark the old one as dead so we don't try to scavenge it during
1313 * garbage collection (the TSO will likely be on a mutables list in
1314 * some generation, but it'll get collected soon enough). It's
1315 * important to set the sp and su values to just beyond the end of
1316 * the stack, so we don't attempt to scavenge any part of the dead
1319 tso->whatNext = ThreadKilled;
1320 tso->sp = (P_)&(tso->stack[tso->stack_size]);
1321 tso->su = (StgUpdateFrame *)tso->sp;
1322 tso->why_blocked = NotBlocked;
1323 dest->mut_link = NULL;
1325 IF_DEBUG(sanity,checkTSO(tso));
1327 IF_DEBUG(scheduler,printTSO(dest));
1331 /* This will no longer work: KH */
1332 if (tso == MainTSO) { /* hack */
1339 /* -----------------------------------------------------------------------------
1340 Wake up a queue that was blocked on some resource.
1341 -------------------------------------------------------------------------- */
/* Wake one blocked thread: clear its blocked status and push it on the
 * run queue.  Caller must hold sched_mutex (hence "Locked"); the
 * unlocked wrapper is unblockOne() below. */
1344 unblockOneLocked(StgTSO *tso)
1348 ASSERT(get_itbl(tso)->type == TSO);
1349 ASSERT(tso->why_blocked != NotBlocked);
1350 tso->why_blocked = NotBlocked;
1352 PUSH_ON_RUN_QUEUE(tso);
1355 IF_DEBUG(scheduler,belch("schedule (task %ld): waking up thread %ld",
1356 pthread_self(), tso->id));
1358 IF_DEBUG(scheduler,belch("schedule: waking up thread %ld", tso->id));
/* Locking wrapper around unblockOneLocked(): wake a single blocked
 * thread, taking sched_mutex for the duration.
 *
 * NOTE(review): the return-type line, braces, and (presumably) the
 * trailing "return tso;" are elided from this excerpt -- the result of
 * unblockOneLocked() is captured in tso but the return statement is not
 * visible; confirm against the full file.
 */
1364 unblockOne(StgTSO *tso)
1366 ACQUIRE_LOCK(&sched_mutex);
1367 tso = unblockOneLocked(tso);
1368 RELEASE_LOCK(&sched_mutex);
/* Wake up an entire queue of blocked threads (e.g. everything waiting
 * on a resource that has just become available).  Walks the TSO chain,
 * relying on unblockOneLocked() to return the next thread on the queue,
 * until it reaches the END_TSO_QUEUE sentinel.
 *
 * Holds sched_mutex across the whole traversal.
 *
 * NOTE(review): the return-type line and braces are elided from this
 * excerpt.
 */
1373 awakenBlockedQueue(StgTSO *tso)
1375 ACQUIRE_LOCK(&sched_mutex);
1376 while (tso != END_TSO_QUEUE) {
1377 tso = unblockOneLocked(tso);
1379 RELEASE_LOCK(&sched_mutex);
1382 /* -----------------------------------------------------------------------------
1384 - usually called inside a signal handler so it mustn't do anything fancy.
1385 -------------------------------------------------------------------------- */
1388 interruptStgRts(void)
1394 /* -----------------------------------------------------------------------------
1397 This is for use when we raise an exception in another thread, which
1399 -------------------------------------------------------------------------- */
/* Remove tso from whatever blocking queue it currently sits on -- an
 * MVar's wait queue, a black hole's blocking queue, or the global
 * I/O/delay blocked_queue -- then push it onto the run queue.  Used
 * when raising an asynchronous exception in another thread (see the
 * banner comment above and raiseAsync below).
 *
 * Holds sched_mutex for the duration.
 *
 * NOTE(review): the NotBlocked case "return"s while sched_mutex is
 * still held (ACQUIRE_LOCK precedes the switch) -- in an SMP build that
 * leaks the mutex.  Confirm whether an unlock is elided from this
 * excerpt; if not, this is a real bug.
 *
 * NOTE(review): this excerpt elides many lines: the case labels for
 * NotBlocked and BlockedOnMVar, the declarations of t and last, the
 * queue-unlink statements inside the loop bodies, the "goto done"
 * jumps, the "done:" label, and various closing braces.
 */
1402 unblockThread(StgTSO *tso)
1406 ACQUIRE_LOCK(&sched_mutex);
1407 switch (tso->why_blocked) {
     /* (NotBlocked case -- label elided above) nothing to do */
1410 return; /* not blocked */
     /* (BlockedOnMVar case -- label elided) unlink tso from the MVar's
      * wait queue.  last walks a pointer-to-link so the unlink can be
      * done in place; last_tso trails one node behind for tail fixup. */
1413 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
1415 StgTSO *last_tso = END_TSO_QUEUE;
1416 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
1419 for (t = mvar->head; t != END_TSO_QUEUE;
1420 last = &t->link, last_tso = t, t = t->link) {
     /* if tso was the tail of the MVar queue, retreat the tail pointer */
1423 if (mvar->tail == tso) {
1424 mvar->tail = last_tso;
     /* reaching here means tso claimed to be blocked on this MVar but
      * was not on its queue -- internal invariant violated */
1429 barf("unblockThread (MVAR): TSO not found");
1432 case BlockedOnBlackHole:
     /* unlink tso from the black hole's blocking queue, same
      * pointer-to-link technique as the MVar case */
1433 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
1435 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
1437 last = &bq->blocking_queue;
1438 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
1439 last = &t->link, t = t->link) {
1445 barf("unblockThread (BLACKHOLE): TSO not found");
1448 case BlockedOnDelay:
     /* (BlockedOnRead case label apparently elided here) */
1450 case BlockedOnWrite:
     /* delay and I/O waiters all live on the one global blocked_queue */
1452 last = &blocked_queue_hd;
1453 for (t = blocked_queue_hd; t != END_TSO_QUEUE;
1454 last = &t->link, t = t->link) {
     /* keep blocked_queue_tl valid when removing the last element */
1457 if (blocked_queue_tl == t) {
1458 blocked_queue_tl = tso->link;
1463 barf("unblockThread (I/O): TSO not found");
     /* default: unknown why_blocked value */
1466 barf("unblockThread");
     /* (done: label elided) common exit path: reset the thread's
      * blocking state and make it runnable */
1471 tso->link = END_TSO_QUEUE;
1472 tso->why_blocked = NotBlocked;
1473 tso->block_info.closure = NULL;
1474 PUSH_ON_RUN_QUEUE(tso);
1475 RELEASE_LOCK(&sched_mutex);
1478 /* -----------------------------------------------------------------------------
1481 * The following function implements the magic for raising an
1482 * asynchronous exception in an existing thread.
1484 * We first remove the thread from any queue on which it might be
1485 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
1487 * We strip the stack down to the innermost CATCH_FRAME, building
1488 * thunks in the heap for all the active computations, so they can
1489 * be restarted if necessary. When we reach a CATCH_FRAME, we build
1490 * an application of the handler to the exception, and push it on
1491 * the top of the stack.
1493 * How exactly do we save all the active computations? We create an
1494 * AP_UPD for every UpdateFrame on the stack. Entering one of these
1495 * AP_UPDs pushes everything from the corresponding update frame
1496 * upwards onto the stack. (Actually, it pushes everything up to the
1497 * next update frame plus a pointer to the next AP_UPD object.
1498 * Entering the next AP_UPD object pushes more onto the stack until we
1499 * reach the last AP_UPD object - at which point the stack should look
1500 * exactly as it did when we killed the TSO and we can continue
1501 * execution by entering the closure on top of the stack.)
1503 * We can also kill a thread entirely - this happens if either (a) the
1504 * exception passed to raiseAsync is NULL, or (b) there's no
1505 * CATCH_FRAME on the stack. In either case, we strip the entire
1506 * stack and replace the thread with a zombie.
1508 * -------------------------------------------------------------------------- */
/* Kill a thread outright: per the banner comment above, passing a NULL
 * exception to raiseAsync strips the entire stack and replaces the
 * thread with a zombie (no CATCH_FRAME gets a chance to handle it).
 *
 * NOTE(review): the return-type line and braces are elided from this
 * excerpt.
 */
1511 deleteThread(StgTSO *tso)
1513 raiseAsync(tso,NULL);
1517 raiseAsync(StgTSO *tso, StgClosure *exception)
1519 StgUpdateFrame* su = tso->su;
1520 StgPtr sp = tso->sp;
1522 /* Thread already dead? */
1523 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
1527 IF_DEBUG(scheduler, belch("schedule: Raising exception in thread %ld.", tso->id));
1529 /* Remove it from any blocking queues */
1532 /* The stack freezing code assumes there's a closure pointer on
1533 * the top of the stack. This isn't always the case with compiled
1534 * code, so we have to push a dummy closure on the top which just
1535 * returns to the next return address on the stack.
1537 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
1538 *(--sp) = (W_)&dummy_ret_closure;
1542 int words = ((P_)su - (P_)sp) - 1;
1546 /* If we find a CATCH_FRAME, and we've got an exception to raise,
1547 * then build PAP(handler,exception), and leave it on top of
1548 * the stack ready to enter.
1550 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
1551 StgCatchFrame *cf = (StgCatchFrame *)su;
1552 /* we've got an exception to raise, so let's pass it to the
1553 * handler in this frame.
1555 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
1556 TICK_ALLOC_UPD_PAP(2,0);
1557 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
1560 ap->fun = cf->handler;
1561 ap->payload[0] = (P_)exception;
1563 /* sp currently points to the word above the CATCH_FRAME on the
1564 * stack. Replace the CATCH_FRAME with a pointer to the new handler
1567 sp += sizeofW(StgCatchFrame);
1571 tso->whatNext = ThreadEnterGHC;
1575 /* First build an AP_UPD consisting of the stack chunk above the
1576 * current update frame, with the top word on the stack as the
1579 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
1584 ap->fun = (StgClosure *)sp[0];
1586 for(i=0; i < (nat)words; ++i) {
1587 ap->payload[i] = (P_)*sp++;
1590 switch (get_itbl(su)->type) {
1594 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
1595 TICK_ALLOC_UP_THK(words+1,0);
1598 fprintf(stderr, "schedule: Updating ");
1599 printPtr((P_)su->updatee);
1600 fprintf(stderr, " with ");
1601 printObj((StgClosure *)ap);
1604 /* Replace the updatee with an indirection - happily
1605 * this will also wake up any threads currently
1606 * waiting on the result.
1608 UPD_IND(su->updatee,ap); /* revert the black hole */
1610 sp += sizeofW(StgUpdateFrame) -1;
1611 sp[0] = (W_)ap; /* push onto stack */
1617 StgCatchFrame *cf = (StgCatchFrame *)su;
1620 /* We want a PAP, not an AP_UPD. Fortunately, the
1621 * layout's the same.
1623 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1624 TICK_ALLOC_UPD_PAP(words+1,0);
1626 /* now build o = FUN(catch,ap,handler) */
1627 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
1628 TICK_ALLOC_FUN(2,0);
1629 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
1630 o->payload[0] = (StgClosure *)ap;
1631 o->payload[1] = cf->handler;
1634 fprintf(stderr, "schedule: Built ");
1635 printObj((StgClosure *)o);
1638 /* pop the old handler and put o on the stack */
1640 sp += sizeofW(StgCatchFrame) - 1;
1647 StgSeqFrame *sf = (StgSeqFrame *)su;
1650 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1651 TICK_ALLOC_UPD_PAP(words+1,0);
1653 /* now build o = FUN(seq,ap) */
1654 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
1655 TICK_ALLOC_SE_THK(1,0);
1656 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
1657 payloadCPtr(o,0) = (StgClosure *)ap;
1660 fprintf(stderr, "schedule: Built ");
1661 printObj((StgClosure *)o);
1664 /* pop the old handler and put o on the stack */
1666 sp += sizeofW(StgSeqFrame) - 1;
1672 /* We've stripped the entire stack, the thread is now dead. */
1673 sp += sizeofW(StgStopFrame) - 1;
1674 sp[0] = (W_)exception; /* save the exception */
1675 tso->whatNext = ThreadKilled;
1676 tso->su = (StgUpdateFrame *)(sp+1);