1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.29 1999/11/02 17:19:16 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
10 /* Version with scheduler monitor support for SMPs.
12 This design provides a high-level API to create and schedule threads etc.
13 as documented in the SMP design document.
15 It uses a monitor design controlled by a single mutex to exercise control
16 over accesses to shared data structures, and builds on the Posix threads
19 The majority of state is shared. In order to keep essential per-task state,
20 there is a Capability structure, which contains all the information
21 needed to run a thread: its STG registers, a pointer to its TSO, a
22 nursery etc. During STG execution, a pointer to the capability is
23 kept in a register (BaseReg).
25 In a non-SMP build, there is one global capability, namely MainRegTable.
36 #include "StgStartup.h"
40 #include "StgMiscClosures.h"
42 #include "Evaluator.h"
46 #include "Profiling.h"
52 * These are the threads which clients have requested that we run.
54 * In an SMP build, we might have several concurrent clients all
55 * waiting for results, and each one will wait on a condition variable
56 * until the result is available.
58 * In non-SMP, clients are strictly nested: the first client calls
59 * into the RTS, which might call out again to C with a _ccall_GC, and
60 * eventually re-enter the RTS.
62 * Main threads information is kept in a linked list:
/* One record per external client ("main thread") waiting for a result.
 * NOTE(review): this listing is elided — the struct's remaining fields
 * (apparently a TSO pointer and a result slot, judging from the m->tso
 * and m->ret uses later in this file) and the closing brace are not
 * visible here. */
64 typedef struct StgMainThread_ {
69 pthread_cond_t wakeup;
71 struct StgMainThread_ *link;
75 * Locks required: sched_mutex.
77 static StgMainThread *main_threads;
80 * Locks required: sched_mutex.
/* Runnable and I/O-blocked thread queues (head/tail pairs). */
82 StgTSO *run_queue_hd, *run_queue_tl;
83 StgTSO *blocked_queue_hd, *blocked_queue_tl;
85 /* Threads suspended in _ccall_GC.
86 * Locks required: sched_mutex.
88 static StgTSO *suspended_ccalling_threads;
91 static rtsBool in_ccall_gc;
/* Forward declarations for helpers defined later in this file. */
94 static void GetRoots(void);
95 static StgTSO *threadStackOverflow(StgTSO *tso);
97 /* KH: The following two flags are shared memory locations. There is no need
98 to lock them, since they are only unset at the end of a scheduler
102 /* flag set by signal handler to precipitate a context switch */
104 /* if this flag is set as well, give up execution */
105 static nat interrupted;
107 /* Next thread ID to allocate.
108 * Locks required: sched_mutex
110 StgThreadID next_thread_id = 1;
113 * Pointers to the state of the current thread.
114 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
115 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
118 /* The smallest stack size that makes any sense is:
119 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
120 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
121 * + 1 (the realworld token for an IO thread)
122 * + 1 (the closure to enter)
124 * A thread with this stack will bomb immediately with a stack
125 * overflow, which will increase its stack size.
/* "+ 2" covers the realworld token and the closure to enter (above). */
128 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
130 /* Free capability list.
131 * Locks required: sched_mutex.
134 Capability *free_capabilities; /* Available capabilities for running threads */
135 nat n_free_capabilities; /* total number of available capabilities */
137 Capability MainRegTable; /* for non-SMP, we have one global capability */
142 /* All our current task ids, saved in case we need to kill them later.
148 void addToBlockedQueue ( StgTSO *tso );
150 static void schedule ( void );
151 static void initThread ( StgTSO *tso, nat stack_size );
152 static void interruptStgRts ( void );
/* Monitor-style locking: one mutex guards all scheduler shared state;
 * condition variables signal "work available" and "GC pending". */
155 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
156 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
157 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
158 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
163 /* -----------------------------------------------------------------------------
164 Main scheduling loop.
166 We use round-robin scheduling, each thread returning to the
167 scheduler loop when one of these conditions is detected:
170 * timer expires (thread yields)
175 Locking notes: we acquire the scheduler lock once at the beginning
176 of the scheduler loop, and release it when
178 * running a thread, or
179 * waiting for work, or
180 * waiting for a GC to complete.
182 -------------------------------------------------------------------------- */
/* NOTE(review): interior of the SMP schedule() loop. This listing is
 * elided (the embedded source line numbers jump), so the enclosing
 * function header, several statements and most braces are missing from
 * view. Code below is kept byte-for-byte; comments only. */
189 StgThreadReturnCode ret;
191 ACQUIRE_LOCK(&sched_mutex);
195 /* Check whether any waiting threads need to be woken up.
196 * If the run queue is empty, we can wait indefinitely for
197 * something to happen.
199 if (blocked_queue_hd != END_TSO_QUEUE) {
200 awaitEvent(run_queue_hd == END_TSO_QUEUE);
203 /* check for signals each time around the scheduler */
205 if (signals_pending()) {
206 start_signal_handlers();
211 /* If there's a GC pending, don't do anything until it has
215 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): waiting for GC\n",
217 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
220 /* block until we've got a thread on the run queue and a free
223 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
225 fprintf(stderr, "schedule (task %ld): waiting for work\n",
227 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
229 fprintf(stderr, "schedule (task %ld): work now available\n",
234 /* grab a thread from the run queue
/* Pop a capability off the free list for the chosen thread. */
241 cap = free_capabilities;
242 free_capabilities = cap->link;
243 n_free_capabilities--;
248 cap->rCurrentTSO = t;
250 /* set the context_switch flag
252 if (run_queue_hd == END_TSO_QUEUE)
/* Drop the monitor lock while actually running Haskell code. */
257 RELEASE_LOCK(&sched_mutex);
260 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): running thread %d\n", pthread_self(),t->id));
262 IF_DEBUG(scheduler,fprintf(stderr,"schedule: running thread %d\n",t->id));
265 /* Run the current thread
267 switch (cap->rCurrentTSO->whatNext) {
270 /* Thread already finished, return to scheduler. */
271 ret = ThreadFinished;
274 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
277 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
279 case ThreadEnterHugs:
282 IF_DEBUG(scheduler,belch("schedule: entering Hugs"));
284 /* CHECK_SENSIBLE_REGS(); */
286 StgClosure* c = (StgClosure *)Sp[0];
294 barf("Panic: entered a BCO but no bytecode interpreter in this build");
297 barf("schedule: invalid whatNext field");
300 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* Thread has returned to the scheduler: reacquire the monitor lock
 * before touching shared queues again. */
305 ACQUIRE_LOCK(&sched_mutex);
308 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): ", pthread_self()););
310 IF_DEBUG(scheduler,fprintf(stderr,"schedule: "););
312 t = cap->rCurrentTSO;
316 /* make all the running tasks block on a condition variable,
317 * maybe set context_switch and wait till they all pile in,
318 * then have them wait on a GC condition variable.
320 IF_DEBUG(scheduler,belch("thread %ld stopped: HeapOverflow", t->id));
323 ready_to_gc = rtsTrue;
324 context_switch = 1; /* stop other threads ASAP */
325 PUSH_ON_RUN_QUEUE(t);
329 /* just adjust the stack for this thread, then pop it back
332 IF_DEBUG(scheduler,belch("thread %ld stopped, StackOverflow", t->id));
336 /* enlarge the stack */
337 StgTSO *new_t = threadStackOverflow(t);
339 /* This TSO has moved, so update any pointers to it from the
340 * main thread stack. It better not be on any other queues...
343 for (m = main_threads; m != NULL; m = m->link) {
348 PUSH_ON_RUN_QUEUE(new_t);
353 /* put the thread back on the run queue. Then, if we're ready to
354 * GC, check whether this is the last task to stop. If so, wake
355 * up the GC thread. getThread will block during a GC until the
359 if (t->whatNext == ThreadEnterHugs) {
360 /* ToDo: or maybe a timer expired when we were in Hugs?
361 * or maybe someone hit ctrl-C
363 belch("thread %ld stopped to switch to Hugs", t->id);
365 belch("thread %ld stopped, yielding", t->id);
369 APPEND_TO_RUN_QUEUE(t);
373 /* don't need to do anything. Either the thread is blocked on
374 * I/O, in which case we'll have called addToBlockedQueue
375 * previously, or it's blocked on an MVar or Blackhole, in which
376 * case it'll be on the relevant queue already.
379 fprintf(stderr, "thread %d stopped, ", t->id);
380 printThreadBlockage(t);
381 fprintf(stderr, "\n"));
386 /* Need to check whether this was a main thread, and if so, signal
387 * the task that started it with the return value. If we have no
388 * more main threads, we probably need to stop all the tasks until
391 IF_DEBUG(scheduler,belch("thread %ld finished", t->id));
392 t->whatNext = ThreadComplete;
396 barf("doneThread: invalid thread return code");
/* Return our capability to the free list. */
400 cap->link = free_capabilities;
401 free_capabilities = cap;
402 n_free_capabilities++;
/* All capabilities free again => every task has stopped: safe to GC. */
406 if (ready_to_gc && n_free_capabilities == RtsFlags.ConcFlags.nNodes) {
410 /* everybody back, start the GC.
411 * Could do it in this thread, or signal a condition var
412 * to do it in another thread. Either way, we need to
413 * broadcast on gc_pending_cond afterward.
416 IF_DEBUG(scheduler,belch("schedule (task %ld): doing GC", pthread_self()));
418 GarbageCollect(GetRoots);
419 ready_to_gc = rtsFalse;
421 pthread_cond_broadcast(&gc_pending_cond);
425 /* Go through the list of main threads and wake up any
426 * clients whose computations have finished. ToDo: this
427 * should be done more efficiently without a linear scan
428 * of the main threads list, somehow...
432 StgMainThread *m, **prev;
433 prev = &main_threads;
434 for (m = main_threads; m != NULL; m = m->link) {
435 if (m->tso->whatNext == ThreadComplete) {
437 *(m->ret) = (StgClosure *)m->tso->sp[0];
441 pthread_cond_broadcast(&m->wakeup);
443 if (m->tso->whatNext == ThreadKilled) {
446 pthread_cond_broadcast(&m->wakeup);
451 /* If our main thread has finished or been killed, return.
452 * If we were re-entered as a result of a _ccall_gc, then
453 * pop the blocked thread off the ccalling_threads stack back
457 StgMainThread *m = main_threads;
458 if (m->tso->whatNext == ThreadComplete
459 || m->tso->whatNext == ThreadKilled) {
460 main_threads = main_threads->link;
461 if (m->tso->whatNext == ThreadComplete) {
462 /* we finished successfully, fill in the return value */
463 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
474 } /* end of while(1) */
477 /* -----------------------------------------------------------------------------
478 * Suspending & resuming Haskell threads.
480 * When making a "safe" call to C (aka _ccall_GC), the task gives back
481 * its capability before calling the C function. This allows another
482 * task to pick up the capability and carry on running Haskell
483 * threads. It also means that if the C call blocks, it won't lock
486 * The Haskell thread making the C call is put to sleep for the
487 * duration of the call, on the suspended_ccalling_threads queue. We
488 * give out a token to the task, which it can use to resume the thread
489 * on return from the C function.
490 * -------------------------------------------------------------------------- */
/* Suspend the Haskell thread in `cap` around a "safe" C call (_ccall_GC):
 * park the TSO on suspended_ccalling_threads, give the capability back to
 * the free pool, and hand the caller a token (the thread id) with which
 * resumeThread() can find the thread again. NOTE(review): elided listing —
 * return type, braces and the final return of `tok` are not visible here. */
493 suspendThread( Capability *cap )
497 ACQUIRE_LOCK(&sched_mutex);
501 fprintf(stderr, "schedule (task %ld): thread %d did a _ccall_gc\n",
502 pthread_self(), cap->rCurrentTSO->id));
505 fprintf(stderr, "schedule: thread %d did a _ccall_gc\n",
506 cap->rCurrentTSO->id));
509 threadPaused(cap->rCurrentTSO);
510 cap->rCurrentTSO->link = suspended_ccalling_threads;
511 suspended_ccalling_threads = cap->rCurrentTSO;
513 /* Use the thread ID as the token; it should be unique */
514 tok = cap->rCurrentTSO->id;
/* Return the capability so another task can run Haskell threads. */
517 cap->link = free_capabilities;
518 free_capabilities = cap;
519 n_free_capabilities++;
522 RELEASE_LOCK(&sched_mutex);
/* Resume the thread identified by `tok` (a thread id handed out by
 * suspendThread): unlink it from suspended_ccalling_threads, wait for a
 * free capability, and install the TSO on it. Barfs if the token matches
 * no suspended thread. NOTE(review): elided listing — return type, braces
 * and the unlink statement inside the search loop are not visible here. */
527 resumeThread( StgInt tok )
532 ACQUIRE_LOCK(&sched_mutex);
/* Linear search of the suspended list for the matching thread id. */
534 prev = &suspended_ccalling_threads;
535 for (tso = suspended_ccalling_threads;
536 tso != END_TSO_QUEUE;
537 prev = &tso->link, tso = tso->link) {
538 if (tso->id == (StgThreadID)tok) {
543 if (tso == END_TSO_QUEUE) {
544 barf("resumeThread: thread not found");
/* Block until some capability is returned to the free pool. */
548 while (free_capabilities == NULL) {
550 fprintf(stderr,"schedule (task %ld): waiting to resume\n",
552 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
553 IF_DEBUG(scheduler,fprintf(stderr,
554 "schedule (task %ld): resuming thread %d\n",
555 pthread_self(), tso->id));
557 cap = free_capabilities;
558 free_capabilities = cap->link;
559 n_free_capabilities--;
564 cap->rCurrentTSO = tso;
566 RELEASE_LOCK(&sched_mutex);
570 /* -----------------------------------------------------------------------------
572 * -------------------------------------------------------------------------- */
573 static void unblockThread(StgTSO *tso);
575 /* -----------------------------------------------------------------------------
576 * Comparing Thread ids.
578 * This is used from STG land in the implementation of the
579 * instances of Eq/Ord for ThreadIds.
580 * -------------------------------------------------------------------------- */
/* Three-way comparison of two thread ids: <0, 0, or >0 (strcmp-style).
 * NOTE(review): elided listing — braces and the final "equal" return
 * (presumably `return 0;`) are not visible here. */
582 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
584 StgThreadID id1 = tso1->id;
585 StgThreadID id2 = tso2->id;
587 if (id1 < id2) return (-1);
588 if (id1 > id2) return 1;
592 /* -----------------------------------------------------------------------------
595 The new thread starts with the given stack size. Before the
596 scheduler can run, however, this thread needs to have a closure
597 (and possibly some arguments) pushed on its stack. See
598 pushClosure() in Schedule.h.
600 createGenThread() and createIOThread() (in SchedAPI.h) are
601 convenient packaged versions of this function.
602 -------------------------------------------------------------------------- */
/* Allocate a new TSO with (at least) the requested stack size, in words,
 * and initialise it via initThread(). The size is clamped from below so
 * the thread can always push its stop frame and bounce through a stack
 * overflow. NOTE(review): elided listing — return type, braces and the
 * return of `tso` are not visible here. */
605 createThread(nat stack_size)
609 /* catch ridiculously small stack sizes */
610 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
611 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
/* stack_size includes the TSO header; initThread wants stack words only. */
614 tso = (StgTSO *)allocate(stack_size);
615 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
617 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* Fill in a freshly allocated TSO: info table, whatNext, a unique id,
 * stack limits/pointers, cost centre, and a stop frame at the top of the
 * stack. NOTE(review): elided listing — braces and a few assignments
 * (e.g. the continuation of the max_stack_size expression) are missing. */
622 initThread(StgTSO *tso, nat stack_size)
624 SET_INFO(tso,&TSO_info);
625 tso->whatNext = ThreadEnterGHC;
627 /* tso->id needs to be unique. For now we use a heavyweight mutex to
628 protect the increment operation on next_thread_id.
629 In future, we could use an atomic increment instead.
632 ACQUIRE_LOCK(&sched_mutex);
633 tso->id = next_thread_id++;
634 RELEASE_LOCK(&sched_mutex);
636 tso->why_blocked = NotBlocked;
/* splim leaves RESERVED_STACK_WORDS of headroom; sp starts at the top
 * of the (downward-growing) stack. */
638 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
639 tso->stack_size = stack_size;
640 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
642 tso->sp = (P_)&(tso->stack) + stack_size;
645 tso->prof.CCCS = CCS_MAIN;
648 /* put a stop frame on the stack */
649 tso->sp -= sizeofW(StgStopFrame);
650 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
651 tso->su = (StgUpdateFrame*)tso->sp;
653 IF_DEBUG(scheduler,belch("schedule: Initialised thread %ld, stack size = %lx words",
654 tso->id, tso->stack_size));
659 /* -----------------------------------------------------------------------------
662 * scheduleThread puts a thread on the head of the runnable queue.
663 * This will usually be done immediately after a thread is created.
664 * The caller of scheduleThread must create the thread using e.g.
665 * createThread and push an appropriate closure
666 * on this thread's stack before the scheduler is invoked.
667 * -------------------------------------------------------------------------- */
/* Make a created thread runnable by pushing it on the run queue, under
 * sched_mutex. NOTE(review): elided listing — return type and braces are
 * not visible here. */
670 scheduleThread(StgTSO *tso)
672 ACQUIRE_LOCK(&sched_mutex);
674 /* Put the new thread on the head of the runnable queue. The caller
675 * better push an appropriate closure on this thread's stack
676 * beforehand. In the SMP case, the thread may start running as
677 * soon as we release the scheduler lock below.
679 PUSH_ON_RUN_QUEUE(tso);
682 IF_DEBUG(scheduler,printTSO(tso));
683 RELEASE_LOCK(&sched_mutex);
687 /* -----------------------------------------------------------------------------
690 * Start up Posix threads to run each of the scheduler tasks.
691 * I believe the task ids are not needed in the system as defined.
693 * -------------------------------------------------------------------------- */
697 taskStart( void *arg STG_UNUSED )
704 /* -----------------------------------------------------------------------------
707 * Initialise the scheduler. This resets all the queues - if the
708 * queues contained any threads, they'll be garbage collected at the
711 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
712 * -------------------------------------------------------------------------- */
/* SIGTERM handler for worker tasks (installed by initScheduler): records
 * this task's mutator time, then decrements the shared death count under
 * term_mutex (exitScheduler waits for it to reach zero). NOTE(review):
 * elided listing — braces, the await_death decrement and the thread-exit
 * call are not visible here. */
716 term_handler(int sig STG_UNUSED)
719 pthread_t me = pthread_self();
721 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
722 if (task_ids[i].id == me) {
723 task_ids[i].mut_time = usertime() - task_ids[i].gc_time;
724 if (task_ids[i].mut_time < 0.0) {
725 task_ids[i].mut_time = 0.0;
729 ACQUIRE_LOCK(&term_mutex);
731 RELEASE_LOCK(&term_mutex);
/* Initialise the scheduler: empty all queues, install the SIGTERM
 * handler, allocate one Capability per node, and spawn the worker tasks.
 * NOTE(review): elided listing — braces, capability list linking and
 * error-check conditionals are partly missing. */
736 void initScheduler(void)
738 run_queue_hd = END_TSO_QUEUE;
739 run_queue_tl = END_TSO_QUEUE;
740 blocked_queue_hd = END_TSO_QUEUE;
741 blocked_queue_tl = END_TSO_QUEUE;
743 suspended_ccalling_threads = END_TSO_QUEUE;
750 enteredCAFs = END_CAF_LIST;
752 /* Install the SIGTERM handler (used by exitScheduler to stop tasks) */
755 struct sigaction action,oact;
757 action.sa_handler = term_handler;
758 sigemptyset(&action.sa_mask);
760 if (sigaction(SIGTERM, &action, &oact) != 0) {
761 barf("can't install TERM handler");
767 /* Allocate N Capabilities */
770 Capability *cap, *prev;
773 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
774 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
778 free_capabilities = cap;
779 n_free_capabilities = RtsFlags.ConcFlags.nNodes;
781 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Allocated %d capabilities\n",
782 n_free_capabilities););
794 /* make some space for saving all the thread ids */
795 task_ids = stgMallocBytes(RtsFlags.ConcFlags.nNodes * sizeof(task_info),
796 "initScheduler:task_ids");
798 /* and create all the threads */
799 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
800 r = pthread_create(&tid,NULL,taskStart,NULL);
802 barf("startTasks: Can't create new Posix thread");
804 task_ids[i].id = tid;
805 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Started task: %ld\n",tid););
/* Shut the scheduler down: stop every worker task, then wait for all of
 * them to acknowledge death. Two mechanisms appear in this (elided)
 * listing: pthread_cancel + pthread_join, and pthread_kill(SIGTERM) with
 * an await_death countdown (decremented in term_handler). NOTE(review):
 * presumably one of the two is conditionally compiled — confirm against
 * the full source. */
811 exitScheduler( void )
816 /* Don't want to use pthread_cancel, since we'd have to install
817 * these silly exception handlers (pthread_cleanup_{push,pop}) around
821 /* Cancel all our tasks */
822 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
823 pthread_cancel(task_ids[i].id);
826 /* Wait for all the tasks to terminate */
827 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
828 IF_DEBUG(scheduler,fprintf(stderr,"schedule: waiting for task %ld\n",
830 pthread_join(task_ids[i].id, NULL);
834 /* Send 'em all a SIGTERM. That should shut 'em up.
836 await_death = RtsFlags.ConcFlags.nNodes;
837 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
838 pthread_kill(task_ids[i].id,SIGTERM);
840 while (await_death > 0) {
846 /* -----------------------------------------------------------------------------
847 Managing the per-task allocation areas.
849 Each capability comes with an allocation area. These are
850 fixed-length block lists into which allocation can be done.
852 ToDo: no support for two-space collection at the moment???
853 -------------------------------------------------------------------------- */
855 /* -----------------------------------------------------------------------------
856 * waitThread is the external interface for running a new computation
857 * and waiting for the result.
859 * In the non-SMP case, we create a new main thread, push it on the
860 * main-thread stack, and invoke the scheduler to run it. The
861 * scheduler will return when the top main thread on the stack has
862 * completed or died, and fill in the necessary fields of the
863 * main_thread structure.
865 * In the SMP case, we create a main thread as before, but we then
866 * create a new condition variable and sleep on it. When our new
867 * main thread has completed, we'll be woken up and the status/result
868 * will be in the main_thread struct.
869 * -------------------------------------------------------------------------- */
/* External interface: run `tso` to completion and deliver its result via
 * `ret`. Registers an StgMainThread record, sleeps on its condition
 * variable until the scheduler signals completion, then tears the record
 * down. NOTE(review): elided listing — braces, the m->tso/m->ret/m->stat
 * assignments, the wait loop condition and the final return of `stat`
 * are not visible here. */
872 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
875 SchedulerStatus stat;
877 ACQUIRE_LOCK(&sched_mutex);
879 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
885 pthread_cond_init(&m->wakeup, NULL);
888 m->link = main_threads;
892 pthread_cond_wait(&m->wakeup, &sched_mutex);
898 ASSERT(stat != NoStatus);
901 pthread_cond_destroy(&m->wakeup);
905 RELEASE_LOCK(&sched_mutex);
/* Non-SMP scheduler: runs threads round-robin from the run queue until
 * the given main thread completes, is killed, or the system deadlocks.
 * Supports re-entry via _ccall_gc by stacking the current TSO on
 * ccalling_threads. NOTE(review): elided listing — many statements,
 * braces and case labels are missing from view; code kept byte-for-byte,
 * comments only. */
911 SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)
914 StgThreadReturnCode ret;
918 /* Return value is NULL by default, it is only filled in if the
919 * main thread completes successfully.
921 if (ret_val) { *ret_val = NULL; }
923 /* Save away a pointer to the main thread so that we can keep track
924 * of it should a garbage collection happen. We keep a stack of
925 * main threads in order to support scheduler re-entry. We can't
926 * use the normal TSO linkage for this stack, because the main TSO
927 * may need to be linked onto other queues.
929 main_threads[next_main_thread] = main;
930 MainTSO = &main_threads[next_main_thread];
933 fprintf(stderr, "Scheduler entered: nesting = %d\n",
936 /* Are we being re-entered?
938 if (CurrentTSO != NULL) {
939 /* This happens when a _ccall_gc from Haskell ends up re-entering
942 * Block the current thread (put it on the ccalling_queue) and
943 * continue executing. The calling thread better have stashed
944 * away its state properly and left its stack with a proper stack
947 threadPaused(CurrentTSO);
948 CurrentTSO->link = ccalling_threads;
949 ccalling_threads = CurrentTSO;
950 in_ccall_gc = rtsTrue;
952 fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",
955 in_ccall_gc = rtsFalse;
958 /* Take a thread from the run queue.
/* Main loop: run threads until the run queue drains. */
962 while (t != END_TSO_QUEUE) {
965 /* If we have more threads on the run queue, set up a context
966 * switch at some point in the future.
968 if (run_queue_hd != END_TSO_QUEUE || blocked_queue_hd != END_TSO_QUEUE) {
973 IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));
975 /* Be friendly to the storage manager: we're about to *run* this
976 * thread, so we better make sure the TSO is mutable.
978 if (t->mut_link == NULL) {
979 recordMutable((StgMutClosure *)t);
982 /* Run the current thread */
983 switch (t->whatNext) {
986 /* thread already killed. Drop it and carry on. */
989 ret = StgRun((StgFunPtr) stg_enterStackTop);
992 ret = StgRun((StgFunPtr) stg_returnToStackTop);
994 case ThreadEnterHugs:
997 IF_DEBUG(scheduler,belch("entering Hugs"));
999 /* CHECK_SENSIBLE_REGS(); */
1001 StgClosure* c = (StgClosure *)Sp[0];
1009 barf("Panic: entered a BCO but no bytecode interpreter in this build");
1012 barf("schedule: invalid whatNext field");
1015 /* We may have garbage collected while running the thread
1016 * (eg. something nefarious like _ccall_GC_ performGC), and hence
1017 * CurrentTSO may have moved. Update t to reflect this.
1022 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* Dispatch on why the thread came back to the scheduler. */
1030 IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
1032 PUSH_ON_RUN_QUEUE(t);
1033 GarbageCollect(GetRoots);
1037 IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
1040 /* enlarge the stack */
1041 StgTSO *new_t = threadStackOverflow(t);
1043 /* This TSO has moved, so update any pointers to it from the
1044 * main thread stack. It better not be on any other queues...
1047 for (i = 0; i < next_main_thread; i++) {
1048 if (main_threads[i] == t) {
1049 main_threads[i] = new_t;
1054 PUSH_ON_RUN_QUEUE(t);
1057 case ThreadYielding:
1059 if (t->whatNext == ThreadEnterHugs) {
1060 /* ToDo: or maybe a timer expired when we were in Hugs?
1061 * or maybe someone hit ctrl-C
1063 belch("Thread %ld stopped to switch to Hugs\n", t->id);
1065 belch("Thread %ld stopped, timer expired\n", t->id);
/* Interrupted: drain the run queue and return to the caller. */
1070 IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
1072 while (run_queue_hd != END_TSO_QUEUE) {
1073 run_queue_hd = t->link;
1076 run_queue_tl = END_TSO_QUEUE;
1077 /* ToDo: should I do the same with blocked queues? */
1081 /* Put the thread back on the run queue, at the end.
1082 * t->link is already set to END_TSO_QUEUE.
1084 APPEND_TO_RUN_QUEUE(t);
1089 fprintf(stderr, "Thread %d stopped, ", t->id);
1090 printThreadBlockage(t);
1091 fprintf(stderr, "\n"));
1093 /* assume the thread has put itself on some blocked queue
1098 case ThreadFinished:
1099 IF_DEBUG(scheduler,fprintf(stderr,"thread %ld finished\n", t->id));
1100 t->whatNext = ThreadComplete;
1104 barf("schedule: invalid thread return code");
1107 /* check for signals each time around the scheduler */
1109 if (signals_pending()) {
1110 start_signal_handlers();
1113 /* If our main thread has finished or been killed, return.
1114 * If we were re-entered as a result of a _ccall_gc, then
1115 * pop the blocked thread off the ccalling_threads stack back
1118 if ((*MainTSO)->whatNext == ThreadComplete
1119 || (*MainTSO)->whatNext == ThreadKilled) {
1122 CurrentTSO = ccalling_threads;
1123 ccalling_threads = ccalling_threads->link;
1124 /* remember to stub the link field of CurrentTSO */
1125 CurrentTSO->link = END_TSO_QUEUE;
1127 if ((*MainTSO)->whatNext == ThreadComplete) {
1128 /* we finished successfully, fill in the return value */
1129 if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };
1137 /* Check whether any waiting threads need to be woken up.
1138 * If the run queue is empty, we can wait indefinitely for
1139 * something to happen.
1141 if (blocked_queue_hd != END_TSO_QUEUE) {
1142 awaitEvent(run_queue_hd == END_TSO_QUEUE);
1145 t = POP_RUN_QUEUE();
1148 /* If we got to here, then we ran out of threads to run, but the
1149 * main thread hasn't finished yet. It must be blocked on an MVar
1150 * or a black hole somewhere, so we return deadlock.
1156 /* -----------------------------------------------------------------------------
1157 Debugging: why is a thread blocked
1158 -------------------------------------------------------------------------- */
/* Debugging aid: print (to stderr, no newline) a human-readable reason
 * for why `tso` is blocked, keyed on tso->why_blocked. NOTE(review):
 * elided listing — braces, `break`s and some case labels (BlockedOnRead,
 * BlockedOnMVar, NotBlocked, default) are not visible here. */
1161 void printThreadBlockage(StgTSO *tso)
1163 switch (tso->why_blocked) {
1165 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
1167 case BlockedOnWrite:
1168 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
1170 case BlockedOnDelay:
1171 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
1174 fprintf(stderr,"blocked on an MVar");
1176 case BlockedOnBlackHole:
1177 fprintf(stderr,"blocked on a black hole");
1180 fprintf(stderr,"not blocked");
1186 /* -----------------------------------------------------------------------------
1187 Where are the roots that we know about?
1189 - all the threads on the runnable queue
1190 - all the threads on the blocked queue
1191 - all the thread currently executing a _ccall_GC
1192 - all the "main threads"
1194 -------------------------------------------------------------------------- */
1196 /* This has to be protected either by the scheduler monitor, or by the
1197 garbage collection monitor (probably the latter).
/* GC root-marking callback: marks the run queue, the blocked queue, every
 * registered main thread's TSO, and the suspended-ccall list, updating
 * each pointer with the object's new location. NOTE(review): elided
 * listing — braces and the declaration of the loop variable `m`
 * (presumably StgMainThread *) are not visible here. */
1201 static void GetRoots(void)
1205 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
1206 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
1208 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
1209 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
1211 for (m = main_threads; m != NULL; m = m->link) {
1212 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
1214 suspended_ccalling_threads =
1215 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
1218 /* -----------------------------------------------------------------------------
1221 This is the interface to the garbage collector from Haskell land.
1222 We provide this so that external C code can allocate and garbage
1223 collect when called from Haskell via _ccall_GC.
1225 It might be useful to provide an interface whereby the programmer
1226 can specify more roots (ToDo).
1228 This needs to be protected by the GC condition variable above. KH.
1229 -------------------------------------------------------------------------- */
/* Fragments of the _ccall_GC interface: performGC (scheduler roots only),
 * an AllRoots helper that adds user-supplied roots, and
 * performGCWithRoots which stashes the user's callback in `extra_roots`
 * before collecting. NOTE(review): elided listing — the enclosing
 * function headers and braces for performGC and AllRoots are missing. */
1231 void (*extra_roots)(void);
1236 GarbageCollect(GetRoots);
1242 GetRoots(); /* the scheduler's roots */
1243 extra_roots(); /* the user's roots */
1247 performGCWithRoots(void (*get_roots)(void))
1249 extra_roots = get_roots;
1251 GarbageCollect(AllRoots);
1254 /* -----------------------------------------------------------------------------
1257 If the thread has reached its maximum stack size,
1258 then bomb out. Otherwise relocate the TSO into a larger chunk of
1259 memory and adjust its stack size appropriately.
1260 -------------------------------------------------------------------------- */
/* Grow a thread's stack after overflow: if already at max_stack_size the
 * thread gets the StackOverflow exception instead; otherwise allocate a
 * roughly doubled TSO, copy header + live stack across, relocate sp/su/
 * splim and the update-frame chain, and mark the old TSO dead. Returns
 * the new TSO (per the forward declaration at the top of the file).
 * NOTE(review): elided listing — braces, some declarations (dest, new_sp)
 * and the final return are not visible here. */
1263 threadStackOverflow(StgTSO *tso)
1265 nat new_stack_size, new_tso_size, diff, stack_words;
1269 if (tso->stack_size >= tso->max_stack_size) {
1271 /* If we're debugging, just print out the top of the stack */
1272 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
1276 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
1279 /* Send this thread the StackOverflow exception */
1280 raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
1285 /* Try to double the current stack size. If that takes us over the
1286 * maximum stack size for this thread, then use the maximum instead.
1287 * Finally round up so the TSO ends up as a whole number of blocks.
1289 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
1290 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
1291 TSO_STRUCT_SIZE)/sizeof(W_);
1292 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
1293 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
1295 IF_DEBUG(scheduler, fprintf(stderr,"schedule: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
1297 dest = (StgTSO *)allocate(new_tso_size);
1298 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
1300 /* copy the TSO block and the old stack into the new area */
1301 memcpy(dest,tso,TSO_STRUCT_SIZE);
/* Live stack occupies the top of the old area; copy it to the top of
 * the new (larger) area so it stays flush with the stack's end. */
1302 stack_words = tso->stack + tso->stack_size - tso->sp;
1303 new_sp = (P_)dest + new_tso_size - stack_words;
1304 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
1306 /* relocate the stack pointers... */
1307 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
1308 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1310 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
1311 dest->stack_size = new_stack_size;
1313 /* and relocate the update frame list */
1314 relocate_TSO(tso, dest);
1316 /* Mark the old one as dead so we don't try to scavenge it during
1317 * garbage collection (the TSO will likely be on a mutables list in
1318 * some generation, but it'll get collected soon enough). It's
1319 * important to set the sp and su values to just beyond the end of
1320 * the stack, so we don't attempt to scavenge any part of the dead
1323 tso->whatNext = ThreadKilled;
1324 tso->sp = (P_)&(tso->stack[tso->stack_size]);
1325 tso->su = (StgUpdateFrame *)tso->sp;
1326 tso->why_blocked = NotBlocked;
1327 dest->mut_link = NULL;
1329 IF_DEBUG(sanity,checkTSO(tso));
1331 IF_DEBUG(scheduler,printTSO(dest));
1335 /* This will no longer work: KH */
1336 if (tso == MainTSO) { /* hack */
1343 /* -----------------------------------------------------------------------------
1344 Wake up a queue that was blocked on some resource.
1345 -------------------------------------------------------------------------- */
/* Wake a single blocked thread: clear its blocked status and push it on
 * the run queue. Caller must hold sched_mutex (hence "Locked" suffix;
 * see the unblockOne wrapper below). NOTE(review): elided listing —
 * return type, braces and the return value (presumably the next thread
 * in the blocked chain) are not visible here. */
1348 unblockOneLocked(StgTSO *tso)
1352 ASSERT(get_itbl(tso)->type == TSO);
1353 ASSERT(tso->why_blocked != NotBlocked);
1354 tso->why_blocked = NotBlocked;
1356 PUSH_ON_RUN_QUEUE(tso);
1359 IF_DEBUG(scheduler,belch("schedule (task %ld): waking up thread %ld",
1360 pthread_self(), tso->id));
1362 IF_DEBUG(scheduler,belch("schedule: waking up thread %ld", tso->id));
/* Public wrapper around unblockOneLocked: acquires sched_mutex for the
 * duration, so it is safe to call without the scheduler lock held.
 * NOTE(review): 'tso' is reassigned from the helper's return value, but
 * the function's own return statement is in elided lines -- presumably
 * it returns the next thread in the queue; confirm against the full file.
 */
1368 unblockOne(StgTSO *tso)
1370   ACQUIRE_LOCK(&sched_mutex);
1371   tso = unblockOneLocked(tso);
1372   RELEASE_LOCK(&sched_mutex);
/* Wake every thread on a blocking queue (a TSO list linked through
 * tso->link, terminated by END_TSO_QUEUE), moving each onto the run
 * queue.  Takes sched_mutex once for the whole traversal rather than
 * per-thread.  unblockOneLocked evidently returns the next TSO in the
 * chain, which is what advances this loop.
 */
1377 awakenBlockedQueue(StgTSO *tso)
1379   ACQUIRE_LOCK(&sched_mutex);
1380   while (tso != END_TSO_QUEUE) {
1381     tso = unblockOneLocked(tso);
1383   RELEASE_LOCK(&sched_mutex);
1386 /* -----------------------------------------------------------------------------
1388 - usually called inside a signal handler so it mustn't do anything fancy.
1389 -------------------------------------------------------------------------- */
/* Request that the running Haskell computation be interrupted.  The
 * body is entirely in elided lines (1393-1397); given the comment above
 * about signal-handler safety, it presumably just sets a flag that the
 * scheduler polls -- TODO confirm against the full file.
 */
1392 interruptStgRts(void)
1398 /* -----------------------------------------------------------------------------
1401    This is for use when we raise an exception in another thread, which may be blocked.
1403 -------------------------------------------------------------------------- */
/* Remove 'tso' from whatever queue it is blocked on -- an MVar's wait
 * queue, a black hole's blocking queue, or the global I/O/delay
 * blocked_queue -- then mark it runnable and push it on the run queue.
 * Takes sched_mutex for the duration.
 * NOTE(review): gaps in the embedded line numbers show elided lines
 * (the actual '*last = tso->link' unlink assignments, case labels,
 * braces and gotos); the comments below describe only the visible code.
 */
1406 unblockThread(StgTSO *tso)
1410   ACQUIRE_LOCK(&sched_mutex);
1411   switch (tso->why_blocked) {
  /* NOTE(review): this return appears to leave sched_mutex held (no
   * visible RELEASE_LOCK before it) -- possible lock leak; confirm
   * whether an elided line releases the lock. */
1414     return;  /* not blocked */
1417     ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
1419       StgTSO *last_tso = END_TSO_QUEUE;
1420       StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
  /* walk the MVar's wait queue, tracking the previous link ('last') and
   * previous TSO ('last_tso') so the match can be spliced out */
1423       for (t = mvar->head; t != END_TSO_QUEUE; 
1424 	   last = &t->link, last_tso = t, t = t->link) {
  /* if the thread being removed was the tail, retreat the tail pointer */
1427 if (mvar->tail == tso) {
1428 mvar->tail = last_tso;
  /* a blocked-on-MVar thread must be on its MVar's queue */
1433 barf("unblockThread (MVAR): TSO not found");
1436 case BlockedOnBlackHole:
1437 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
1439 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
  /* same splice-out scan over the black hole's blocking queue */
1441 last = &bq->blocking_queue;
1442 for (t = bq->blocking_queue; t != END_TSO_QUEUE; 
1443 last = &t->link, t = t->link) {
1449 barf("unblockThread (BLACKHOLE): TSO not found");
1452 case BlockedOnDelay:
1454 case BlockedOnWrite:
  /* delay and I/O waiters share the single global blocked_queue */
1456 last = &blocked_queue_hd;
1457 for (t = blocked_queue_hd; t != END_TSO_QUEUE; 
1458 last = &t->link, t = t->link) {
1461 if (blocked_queue_tl == t) {
1462 blocked_queue_tl = tso->link;
1467 barf("unblockThread (I/O): TSO not found");
  /* unrecognised why_blocked value */
1471 barf("unblockThread");
  /* common exit: scrub the blocking state and make the thread runnable */
1475 tso->link = END_TSO_QUEUE;
1476 tso->why_blocked = NotBlocked;
1477 tso->block_info.closure = NULL;
1478 PUSH_ON_RUN_QUEUE(tso);
1479 RELEASE_LOCK(&sched_mutex);
1482 /* -----------------------------------------------------------------------------
1485 * The following function implements the magic for raising an
1486 * asynchronous exception in an existing thread.
1488 * We first remove the thread from any queue on which it might be
1489 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
1491 * We strip the stack down to the innermost CATCH_FRAME, building
1492 * thunks in the heap for all the active computations, so they can
1493 * be restarted if necessary. When we reach a CATCH_FRAME, we build
1494 * an application of the handler to the exception, and push it on
1495 * the top of the stack.
1497 * How exactly do we save all the active computations? We create an
1498 * AP_UPD for every UpdateFrame on the stack. Entering one of these
1499 * AP_UPDs pushes everything from the corresponding update frame
1500 * upwards onto the stack. (Actually, it pushes everything up to the
1501 * next update frame plus a pointer to the next AP_UPD object.)
1502 * Entering the next AP_UPD object pushes more onto the stack until we
1503 * reach the last AP_UPD object - at which point the stack should look
1504 * exactly as it did when we killed the TSO and we can continue
1505 * execution by entering the closure on top of the stack.
1507 * We can also kill a thread entirely - this happens if either (a) the
1508 * exception passed to raiseAsync is NULL, or (b) there's no
1509 * CATCH_FRAME on the stack. In either case, we strip the entire
1510 * stack and replace the thread with a zombie.
1512 * -------------------------------------------------------------------------- */
/* Kill a thread outright.  Raising a NULL "exception" tells raiseAsync
 * to strip the entire stack and leave the thread as a zombie (see the
 * comment block preceding raiseAsync).
 */
1515 deleteThread(StgTSO *tso)
1517   raiseAsync(tso,NULL);
1521 raiseAsync(StgTSO *tso, StgClosure *exception)
1523 StgUpdateFrame* su = tso->su;
1524 StgPtr sp = tso->sp;
1526 /* Thread already dead? */
1527 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
1531 IF_DEBUG(scheduler, belch("schedule: Raising exception in thread %ld.", tso->id));
1533 /* Remove it from any blocking queues */
1536 /* The stack freezing code assumes there's a closure pointer on
1537 * the top of the stack. This isn't always the case with compiled
1538 * code, so we have to push a dummy closure on the top which just
1539 * returns to the next return address on the stack.
1541 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
1542 *(--sp) = (W_)&dummy_ret_closure;
1546 int words = ((P_)su - (P_)sp) - 1;
1550 /* If we find a CATCH_FRAME, and we've got an exception to raise,
1551 * then build PAP(handler,exception), and leave it on top of
1552 * the stack ready to enter.
1554 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
1555 StgCatchFrame *cf = (StgCatchFrame *)su;
1556 /* we've got an exception to raise, so let's pass it to the
1557 * handler in this frame.
1559 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
1560 TICK_ALLOC_UPD_PAP(2,0);
1561 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
1564 ap->fun = cf->handler;
1565 ap->payload[0] = (P_)exception;
1567 /* sp currently points to the word above the CATCH_FRAME on the
1568 * stack. Replace the CATCH_FRAME with a pointer to the new handler
1571 sp += sizeofW(StgCatchFrame);
1575 tso->whatNext = ThreadEnterGHC;
1579 /* First build an AP_UPD consisting of the stack chunk above the
1580 * current update frame, with the top word on the stack as the
1583 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
1588 ap->fun = (StgClosure *)sp[0];
1590 for(i=0; i < (nat)words; ++i) {
1591 ap->payload[i] = (P_)*sp++;
1594 switch (get_itbl(su)->type) {
1598 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
1599 TICK_ALLOC_UP_THK(words+1,0);
1602 fprintf(stderr, "schedule: Updating ");
1603 printPtr((P_)su->updatee);
1604 fprintf(stderr, " with ");
1605 printObj((StgClosure *)ap);
1608 /* Replace the updatee with an indirection - happily
1609 * this will also wake up any threads currently
1610 * waiting on the result.
1612 UPD_IND(su->updatee,ap); /* revert the black hole */
1614 sp += sizeofW(StgUpdateFrame) -1;
1615 sp[0] = (W_)ap; /* push onto stack */
1621 StgCatchFrame *cf = (StgCatchFrame *)su;
1624 /* We want a PAP, not an AP_UPD. Fortunately, the
1625 * layout's the same.
1627 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1628 TICK_ALLOC_UPD_PAP(words+1,0);
1630 /* now build o = FUN(catch,ap,handler) */
1631 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
1632 TICK_ALLOC_FUN(2,0);
1633 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
1634 o->payload[0] = (StgClosure *)ap;
1635 o->payload[1] = cf->handler;
1638 fprintf(stderr, "schedule: Built ");
1639 printObj((StgClosure *)o);
1642 /* pop the old handler and put o on the stack */
1644 sp += sizeofW(StgCatchFrame) - 1;
1651 StgSeqFrame *sf = (StgSeqFrame *)su;
1654 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1655 TICK_ALLOC_UPD_PAP(words+1,0);
1657 /* now build o = FUN(seq,ap) */
1658 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
1659 TICK_ALLOC_SE_THK(1,0);
1660 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
1661 payloadCPtr(o,0) = (StgClosure *)ap;
1664 fprintf(stderr, "schedule: Built ");
1665 printObj((StgClosure *)o);
1668 /* pop the old handler and put o on the stack */
1670 sp += sizeofW(StgSeqFrame) - 1;
1676 /* We've stripped the entire stack, the thread is now dead. */
1677 sp += sizeofW(StgStopFrame) - 1;
1678 sp[0] = (W_)exception; /* save the exception */
1679 tso->whatNext = ThreadKilled;
1680 tso->su = (StgUpdateFrame *)(sp+1);