1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.28 1999/11/02 15:06:01 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
10 /* Version with scheduler monitor support for SMPs.
12 This design provides a high-level API to create and schedule threads etc.
13 as documented in the SMP design document.
15 It uses a monitor design controlled by a single mutex to exercise control
16 over accesses to shared data structures, and builds on the Posix threads
19 The majority of state is shared. In order to keep essential per-task state,
20 there is a Capability structure, which contains all the information
21 needed to run a thread: its STG registers, a pointer to its TSO, a
22 nursery etc. During STG execution, a pointer to the capability is
23 kept in a register (BaseReg).
25 In a non-SMP build, there is one global capability, namely MainRegTable.
36 #include "StgStartup.h"
40 #include "StgMiscClosures.h"
42 #include "Evaluator.h"
46 #include "Profiling.h"
52 * These are the threads which clients have requested that we run.
54 * In an SMP build, we might have several concurrent clients all
55 * waiting for results, and each one will wait on a condition variable
56 * until the result is available.
58 * In non-SMP, clients are strictly nested: the first client calls
59 * into the RTS, which might call out again to C with a _ccall_GC, and
60 * eventually re-enter the RTS.
62 * Main threads information is kept in a linked list:
64 typedef struct StgMainThread_ {
69 pthread_cond_t wakeup;
71 struct StgMainThread_ *link;
75 * Locks required: sched_mutex.
77 static StgMainThread *main_threads;
80 * Locks required: sched_mutex.
82 StgTSO *run_queue_hd, *run_queue_tl;
83 StgTSO *blocked_queue_hd, *blocked_queue_tl;
85 /* Threads suspended in _ccall_GC.
86 * Locks required: sched_mutex.
88 static StgTSO *suspended_ccalling_threads;
91 static rtsBool in_ccall_gc;
94 static void GetRoots(void);
95 static StgTSO *threadStackOverflow(StgTSO *tso);
97 /* KH: The following two flags are shared memory locations. There is no need
98 to lock them, since they are only unset at the end of a scheduler
102 /* flag set by signal handler to precipitate a context switch */
104 /* if this flag is set as well, give up execution */
105 static nat interrupted;
107 /* Next thread ID to allocate.
108 * Locks required: sched_mutex
110 StgThreadID next_thread_id = 1;
113 * Pointers to the state of the current thread.
114 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
115 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
118 /* The smallest stack size that makes any sense is:
119 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
120 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
121 * + 1 (the realworld token for an IO thread)
122 * + 1 (the closure to enter)
124 * A thread with this stack will bomb immediately with a stack
125 * overflow, which will increase its stack size.
128 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
130 /* Free capability list.
131 * Locks required: sched_mutex.
134 Capability *free_capabilities; /* Available capabilities for running threads */
135 nat n_free_capabilities; /* total number of available capabilities */
137 Capability MainRegTable; /* for non-SMP, we have one global capability */
142 /* All our current task ids, saved in case we need to kill them later.
148 void addToBlockedQueue ( StgTSO *tso );
150 static void schedule ( void );
151 static void initThread ( StgTSO *tso, nat stack_size );
152 static void interruptStgRts ( void );
155 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
156 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
157 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
158 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
163 /* -----------------------------------------------------------------------------
164 Main scheduling loop.
166 We use round-robin scheduling, each thread returning to the
167 scheduler loop when one of these conditions is detected:
170 * timer expires (thread yields)
175 Locking notes: we acquire the scheduler lock once at the beginning
176 of the scheduler loop, and release it when
178 * running a thread, or
179 * waiting for work, or
180 * waiting for a GC to complete.
182 -------------------------------------------------------------------------- */
/* NOTE(review): the function header and several statements are elided from
 * this listing (original line numbers jump); the body below is the interior
 * of the SMP scheduler loop.
 */
189 StgThreadReturnCode ret;
191 ACQUIRE_LOCK(&sched_mutex);
195 /* Check whether any waiting threads need to be woken up.
196 * If the run queue is empty, we can wait indefinitely for
197 * something to happen.
199 if (blocked_queue_hd != END_TSO_QUEUE) {
200 awaitEvent(run_queue_hd == END_TSO_QUEUE);
203 /* check for signals each time around the scheduler */
205 if (signals_pending()) {
206 start_signal_handlers();
211 /* If there's a GC pending, don't do anything until it has
215 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): waiting for GC\n",
217 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
220 /* block until we've got a thread on the run queue and a free
223 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
225 fprintf(stderr, "schedule (task %ld): waiting for work\n",
227 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
229 fprintf(stderr, "schedule (task %ld): work now available\n",
234 /* grab a thread from the run queue
/* Pop a capability off the free list; the count mirrors the list length. */
241 cap = free_capabilities;
242 free_capabilities = cap->link;
243 n_free_capabilities--;
248 cap->rCurrentTSO = t;
250 /* set the context_switch flag
252 if (run_queue_hd == END_TSO_QUEUE)
/* Drop the monitor lock while actually running Haskell code. */
257 RELEASE_LOCK(&sched_mutex);
259 /* Run the current thread
261 switch (cap->rCurrentTSO->whatNext) {
264 /* Thread already finished, return to scheduler. */
265 ret = ThreadFinished;
268 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
271 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
273 case ThreadEnterHugs:
276 IF_DEBUG(scheduler,belch("schedule: entering Hugs"));
278 /* CHECK_SENSIBLE_REGS(); */
280 StgClosure* c = (StgClosure *)Sp[0];
288 barf("Panic: entered a BCO but no bytecode interpreter in this build");
291 barf("schedule: invalid whatNext field");
294 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* Back from Haskell: re-acquire the monitor before touching shared state. */
299 ACQUIRE_LOCK(&sched_mutex);
302 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): ", pthread_self()););
304 IF_DEBUG(scheduler,fprintf(stderr,"schedule: "););
306 t = cap->rCurrentTSO;
/* Dispatch on why the thread returned to the scheduler (ret). */
310 /* make all the running tasks block on a condition variable,
311 * maybe set context_switch and wait till they all pile in,
312 * then have them wait on a GC condition variable.
314 IF_DEBUG(scheduler,belch("thread %ld stopped: HeapOverflow", t->id));
317 ready_to_gc = rtsTrue;
318 context_switch = 1; /* stop other threads ASAP */
319 PUSH_ON_RUN_QUEUE(t);
323 /* just adjust the stack for this thread, then pop it back
326 IF_DEBUG(scheduler,belch("thread %ld stopped, StackOverflow", t->id));
330 /* enlarge the stack */
331 StgTSO *new_t = threadStackOverflow(t);
333 /* This TSO has moved, so update any pointers to it from the
334 * main thread stack. It better not be on any other queues...
337 for (m = main_threads; m != NULL; m = m->link) {
342 PUSH_ON_RUN_QUEUE(new_t);
347 /* put the thread back on the run queue. Then, if we're ready to
348 * GC, check whether this is the last task to stop. If so, wake
349 * up the GC thread. getThread will block during a GC until the
353 if (t->whatNext == ThreadEnterHugs) {
354 /* ToDo: or maybe a timer expired when we were in Hugs?
355 * or maybe someone hit ctrl-C
357 belch("thread %ld stopped to switch to Hugs", t->id);
359 belch("thread %ld stopped, yielding", t->id);
363 APPEND_TO_RUN_QUEUE(t);
367 /* don't need to do anything. Either the thread is blocked on
368 * I/O, in which case we'll have called addToBlockedQueue
369 * previously, or it's blocked on an MVar or Blackhole, in which
370 * case it'll be on the relevant queue already.
373 fprintf(stderr, "thread %d stopped, ", t->id);
374 printThreadBlockage(t);
375 fprintf(stderr, "\n"));
380 /* Need to check whether this was a main thread, and if so, signal
381 * the task that started it with the return value. If we have no
382 * more main threads, we probably need to stop all the tasks until
385 IF_DEBUG(scheduler,belch("thread %ld finished", t->id));
386 t->whatNext = ThreadComplete;
390 barf("doneThread: invalid thread return code");
/* Return our capability to the free list. */
394 cap->link = free_capabilities;
395 free_capabilities = cap;
396 n_free_capabilities++;
/* All capabilities free <=> every task has stopped, so GC can proceed. */
400 if (ready_to_gc && n_free_capabilities == RtsFlags.ConcFlags.nNodes) {
404 /* everybody back, start the GC.
405 * Could do it in this thread, or signal a condition var
406 * to do it in another thread. Either way, we need to
407 * broadcast on gc_pending_cond afterward.
410 IF_DEBUG(scheduler,belch("schedule (task %ld): doing GC", pthread_self()));
412 GarbageCollect(GetRoots);
413 ready_to_gc = rtsFalse;
415 pthread_cond_broadcast(&gc_pending_cond);
419 /* Go through the list of main threads and wake up any
420 * clients whose computations have finished. ToDo: this
421 * should be done more efficiently without a linear scan
422 * of the main threads list, somehow...
426 StgMainThread *m, **prev;
427 prev = &main_threads;
428 for (m = main_threads; m != NULL; m = m->link) {
429 if (m->tso->whatNext == ThreadComplete) {
431 *(m->ret) = (StgClosure *)m->tso->sp[0];
435 pthread_cond_broadcast(&m->wakeup);
437 if (m->tso->whatNext == ThreadKilled) {
440 pthread_cond_broadcast(&m->wakeup);
445 /* If our main thread has finished or been killed, return.
446 * If we were re-entered as a result of a _ccall_gc, then
447 * pop the blocked thread off the ccalling_threads stack back
451 StgMainThread *m = main_threads;
452 if (m->tso->whatNext == ThreadComplete
453 || m->tso->whatNext == ThreadKilled) {
454 main_threads = main_threads->link;
455 if (m->tso->whatNext == ThreadComplete) {
456 /* we finished successfully, fill in the return value */
457 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
468 } /* end of while(1) */
471 /* -----------------------------------------------------------------------------
472 * Suspending & resuming Haskell threads.
474 * When making a "safe" call to C (aka _ccall_GC), the task gives back
475 * its capability before calling the C function. This allows another
476 * task to pick up the capability and carry on running Haskell
477 * threads. It also means that if the C call blocks, it won't lock
480 * The Haskell thread making the C call is put to sleep for the
481 * duration of the call, on the suspended_ccalling_threads queue. We
482 * give out a token to the task, which it can use to resume the thread
483 * on return from the C function.
484 * -------------------------------------------------------------------------- */
/* Park cap's current TSO on suspended_ccalling_threads, return the
 * capability to the free list, and hand back a token (the thread id)
 * for the matching resumeThread call.
 */
487 suspendThread( Capability *cap )
491 ACQUIRE_LOCK(&sched_mutex);
495 fprintf(stderr, "schedule (task %ld): thread %d did a _ccall_gc\n",
496 pthread_self(), cap->rCurrentTSO->id));
499 fprintf(stderr, "schedule: thread %d did a _ccall_gc\n",
500 cap->rCurrentTSO->id));
/* Save the thread's state and push it onto the suspended stack. */
503 threadPaused(cap->rCurrentTSO);
504 cap->rCurrentTSO->link = suspended_ccalling_threads;
505 suspended_ccalling_threads = cap->rCurrentTSO;
507 /* Use the thread ID as the token; it should be unique */
508 tok = cap->rCurrentTSO->id;
/* Give the capability back so another task can run Haskell threads. */
511 cap->link = free_capabilities;
512 free_capabilities = cap;
513 n_free_capabilities++;
516 RELEASE_LOCK(&sched_mutex);
/* Inverse of suspendThread: locate the suspended TSO by its token
 * (thread id), wait for a free capability, and reattach the thread
 * to it so the caller can continue running it.
 */
521 resumeThread( StgInt tok )
526 ACQUIRE_LOCK(&sched_mutex);
/* Linear search of the suspended stack; 'prev' tracks the link to patch
 * when unchaining the found TSO (unlink code elided from this listing).
 */
528 prev = &suspended_ccalling_threads;
529 for (tso = suspended_ccalling_threads;
530 tso != END_TSO_QUEUE;
531 prev = &tso->link, tso = tso->link) {
532 if (tso->id == (StgThreadID)tok) {
537 if (tso == END_TSO_QUEUE) {
538 barf("resumeThread: thread not found");
/* No capability free yet: sleep on thread_ready_cond until one appears. */
542 while (free_capabilities == NULL) {
544 fprintf(stderr,"schedule (task %ld): waiting to resume\n",
546 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
547 IF_DEBUG(scheduler,fprintf(stderr,
548 "schedule (task %ld): resuming thread %d\n",
549 pthread_self(), tso->id));
551 cap = free_capabilities;
552 free_capabilities = cap->link;
553 n_free_capabilities--;
558 cap->rCurrentTSO = tso;
560 RELEASE_LOCK(&sched_mutex);
564 /* -----------------------------------------------------------------------------
566 * -------------------------------------------------------------------------- */
567 static void unblockThread(StgTSO *tso);
569 /* -----------------------------------------------------------------------------
570 * Comparing Thread ids.
572 * This is used from STG land in the implementation of the
573 * instances of Eq/Ord for ThreadIds.
574 * -------------------------------------------------------------------------- */
/* Three-way compare on thread ids: <0, 0, >0 like strcmp.
 * (The equal-ids return path is elided from this listing.)
 */
576 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
578 StgThreadID id1 = tso1->id;
579 StgThreadID id2 = tso2->id;
581 if (id1 < id2) return (-1);
582 if (id1 > id2) return 1;
586 /* -----------------------------------------------------------------------------
589 The new thread starts with the given stack size. Before the
590 scheduler can run, however, this thread needs to have a closure
591 (and possibly some arguments) pushed on its stack. See
592 pushClosure() in Schedule.h.
594 createGenThread() and createIOThread() (in SchedAPI.h) are
595 convenient packaged versions of this function.
596 -------------------------------------------------------------------------- */
/* Allocate a TSO of stack_size words (clamped to a sane minimum) and
 * initialise it via initThread. stack_size includes the TSO header;
 * initThread receives only the usable stack portion.
 */
599 createThread(nat stack_size)
603 /* catch ridiculously small stack sizes */
604 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
605 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
608 tso = (StgTSO *)allocate(stack_size);
609 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
611 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* Fill in a freshly-allocated TSO: info table, unique id, stack limits,
 * initial sp, and the stop frame that terminates the stack.
 */
616 initThread(StgTSO *tso, nat stack_size)
618 SET_INFO(tso,&TSO_info);
619 tso->whatNext = ThreadEnterGHC;
621 /* tso->id needs to be unique. For now we use a heavyweight mutex to
622 protect the increment operation on next_thread_id.
623 In future, we could use an atomic increment instead.
626 ACQUIRE_LOCK(&sched_mutex);
627 tso->id = next_thread_id++;
628 RELEASE_LOCK(&sched_mutex);
630 tso->why_blocked = NotBlocked;
/* splim guards the reserved words at the bottom; sp starts at the top
 * of the stack and grows downward.
 */
632 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
633 tso->stack_size = stack_size;
634 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
636 tso->sp = (P_)&(tso->stack) + stack_size;
639 tso->prof.CCCS = CCS_MAIN;
642 /* put a stop frame on the stack */
643 tso->sp -= sizeofW(StgStopFrame);
644 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
645 tso->su = (StgUpdateFrame*)tso->sp;
647 IF_DEBUG(scheduler,belch("schedule: Initialised thread %ld, stack size = %lx words",
648 tso->id, tso->stack_size));
653 /* -----------------------------------------------------------------------------
656 * scheduleThread puts a thread on the head of the runnable queue.
657 * This will usually be done immediately after a thread is created.
658 * The caller of scheduleThread must create the thread using e.g.
659 * createThread and push an appropriate closure
660 * on this thread's stack before the scheduler is invoked.
661 * -------------------------------------------------------------------------- */
/* Make tso runnable. Takes sched_mutex; safe to call from any task. */
664 scheduleThread(StgTSO *tso)
666 ACQUIRE_LOCK(&sched_mutex);
668 /* Put the new thread on the head of the runnable queue. The caller
669 * better push an appropriate closure on this thread's stack
670 * beforehand. In the SMP case, the thread may start running as
671 * soon as we release the scheduler lock below.
673 PUSH_ON_RUN_QUEUE(tso);
676 IF_DEBUG(scheduler,printTSO(tso));
677 RELEASE_LOCK(&sched_mutex);
681 /* -----------------------------------------------------------------------------
684 * Start up Posix threads to run each of the scheduler tasks.
685 * I believe the task ids are not needed in the system as defined.
687 * -------------------------------------------------------------------------- */
/* Entry point handed to pthread_create for each scheduler task.
 * (Body elided from this listing -- presumably it calls schedule();
 * verify against the full source.)
 */
691 taskStart( void *arg STG_UNUSED )
698 /* -----------------------------------------------------------------------------
701 * Initialise the scheduler. This resets all the queues - if the
702 * queues contained any threads, they'll be garbage collected at the
705 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
706 * -------------------------------------------------------------------------- */
/* Signal handler run when a task receives SIGTERM during shutdown
 * (see exitScheduler). Finalises this task's mutator-time accounting,
 * then decrements the shared await_death counter under term_mutex
 * (decrement itself elided from this listing).
 */
710 term_handler(int sig STG_UNUSED)
713 pthread_t me = pthread_self();
/* Find our own entry in task_ids by pthread identity. */
715 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
716 if (task_ids[i].id == me) {
717 task_ids[i].mut_time = usertime() - task_ids[i].gc_time;
718 if (task_ids[i].mut_time < 0.0) {
719 task_ids[i].mut_time = 0.0;
723 ACQUIRE_LOCK(&term_mutex);
725 RELEASE_LOCK(&term_mutex);
/* One-time scheduler initialisation: reset all queues, install the
 * termination signal handler, allocate the capabilities, and spawn the
 * worker tasks. Must only be called once (it starts the tasks).
 */
730 void initScheduler(void)
732 run_queue_hd = END_TSO_QUEUE;
733 run_queue_tl = END_TSO_QUEUE;
734 blocked_queue_hd = END_TSO_QUEUE;
735 blocked_queue_tl = END_TSO_QUEUE;
737 suspended_ccalling_threads = END_TSO_QUEUE;
744 enteredCAFs = END_CAF_LIST;
746 /* Install the SIGTERM handler (term_handler above) */
749 struct sigaction action,oact;
751 action.sa_handler = term_handler;
752 sigemptyset(&action.sa_mask);
754 if (sigaction(SIGTERM, &action, &oact) != 0) {
755 barf("can't install TERM handler");
761 /* Allocate N Capabilities */
764 Capability *cap, *prev;
767 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
768 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
772 free_capabilities = cap;
773 n_free_capabilities = RtsFlags.ConcFlags.nNodes;
775 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Allocated %d capabilities\n",
776 n_free_capabilities););
788 /* make some space for saving all the thread ids */
789 task_ids = stgMallocBytes(RtsFlags.ConcFlags.nNodes * sizeof(task_info),
790 "initScheduler:task_ids");
792 /* and create all the threads */
793 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
794 r = pthread_create(&tid,NULL,taskStart,NULL);
796 barf("startTasks: Can't create new Posix thread");
798 task_ids[i].id = tid;
799 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Started task: %ld\n",tid););
/* Shut the worker tasks down. Two strategies appear here (one is
 * conditionally compiled -- the #if lines are elided from this listing):
 * pthread_cancel + pthread_join, or signalling each task with SIGTERM
 * and waiting for the await_death count to drain (see term_handler).
 */
805 exitScheduler( void )
810 /* Don't want to use pthread_cancel, since we'd have to install
811 * these silly exception handlers (pthread_cleanup_{push,pop}) around
815 /* Cancel all our tasks */
816 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
817 pthread_cancel(task_ids[i].id);
820 /* Wait for all the tasks to terminate */
821 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
822 IF_DEBUG(scheduler,fprintf(stderr,"schedule: waiting for task %ld\n",
824 pthread_join(task_ids[i].id, NULL);
828 /* Send 'em all a SIGTERM. That should shut 'em up.
830 await_death = RtsFlags.ConcFlags.nNodes;
831 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
832 pthread_kill(task_ids[i].id,SIGTERM);
834 while (await_death > 0) {
840 /* -----------------------------------------------------------------------------
841 Managing the per-task allocation areas.
843 Each capability comes with an allocation area. These are
844 fixed-length block lists into which allocation can be done.
846 ToDo: no support for two-space collection at the moment???
847 -------------------------------------------------------------------------- */
849 /* -----------------------------------------------------------------------------
850 * waitThread is the external interface for running a new computation
851 * and waiting for the result.
853 * In the non-SMP case, we create a new main thread, push it on the
854 * main-thread stack, and invoke the scheduler to run it. The
855 * scheduler will return when the top main thread on the stack has
856 * completed or died, and fill in the necessary fields of the
857 * main_thread structure.
859 * In the SMP case, we create a main thread as before, but we then
860 * create a new condition variable and sleep on it. When our new
861 * main thread has completed, we'll be woken up and the status/result
862 * will be in the main_thread struct.
863 * -------------------------------------------------------------------------- */
/* Register tso as a main thread, block until it finishes, and deliver
 * its result through *ret. Returns the SchedulerStatus (assembly of the
 * return value is elided from this listing).
 */
866 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
869 SchedulerStatus stat;
871 ACQUIRE_LOCK(&sched_mutex);
873 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
879 pthread_cond_init(&m->wakeup, NULL);
/* Push the new record onto the main_threads list. */
882 m->link = main_threads;
/* Sleep until the scheduler broadcasts m->wakeup (loop/predicate elided). */
886 pthread_cond_wait(&m->wakeup, &sched_mutex);
892 ASSERT(stat != NoStatus);
895 pthread_cond_destroy(&m->wakeup);
899 RELEASE_LOCK(&sched_mutex);
/* Non-SMP scheduler entry point: run 'main' (and any other runnable
 * threads) until it completes or is killed; on success store the result
 * in *ret_val. Supports re-entry from _ccall_gc via the ccalling_threads
 * stack. NOTE(review): many statements are elided from this listing
 * (the embedded original line numbers jump).
 */
905 SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)
908 StgThreadReturnCode ret;
912 /* Return value is NULL by default, it is only filled in if the
913 * main thread completes successfully.
915 if (ret_val) { *ret_val = NULL; }
917 /* Save away a pointer to the main thread so that we can keep track
918 * of it should a garbage collection happen. We keep a stack of
919 * main threads in order to support scheduler re-entry. We can't
920 * use the normal TSO linkage for this stack, because the main TSO
921 * may need to be linked onto other queues.
923 main_threads[next_main_thread] = main;
924 MainTSO = &main_threads[next_main_thread];
927 fprintf(stderr, "Scheduler entered: nesting = %d\n",
930 /* Are we being re-entered?
932 if (CurrentTSO != NULL) {
933 /* This happens when a _ccall_gc from Haskell ends up re-entering
936 * Block the current thread (put it on the ccalling_queue) and
937 * continue executing. The calling thread better have stashed
938 * away its state properly and left its stack with a proper stack
941 threadPaused(CurrentTSO);
942 CurrentTSO->link = ccalling_threads;
943 ccalling_threads = CurrentTSO;
944 in_ccall_gc = rtsTrue;
946 fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",
949 in_ccall_gc = rtsFalse;
952 /* Take a thread from the run queue.
/* Main loop: run threads until the run queue drains or main finishes. */
956 while (t != END_TSO_QUEUE) {
959 /* If we have more threads on the run queue, set up a context
960 * switch at some point in the future.
962 if (run_queue_hd != END_TSO_QUEUE || blocked_queue_hd != END_TSO_QUEUE) {
967 IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));
969 /* Be friendly to the storage manager: we're about to *run* this
970 * thread, so we better make sure the TSO is mutable.
972 if (t->mut_link == NULL) {
973 recordMutable((StgMutClosure *)t);
976 /* Run the current thread */
977 switch (t->whatNext) {
980 /* thread already killed. Drop it and carry on. */
983 ret = StgRun((StgFunPtr) stg_enterStackTop);
986 ret = StgRun((StgFunPtr) stg_returnToStackTop);
988 case ThreadEnterHugs:
991 IF_DEBUG(scheduler,belch("entering Hugs"));
993 /* CHECK_SENSIBLE_REGS(); */
995 StgClosure* c = (StgClosure *)Sp[0];
1003 barf("Panic: entered a BCO but no bytecode interpreter in this build");
1006 barf("schedule: invalid whatNext field");
1009 /* We may have garbage collected while running the thread
1010 * (eg. something nefarious like _ccall_GC_ performGC), and hence
1011 * CurrentTSO may have moved. Update t to reflect this.
1016 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* Dispatch on the thread's return code (ret). */
1024 IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
1026 PUSH_ON_RUN_QUEUE(t);
1027 GarbageCollect(GetRoots);
1031 IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
1034 /* enlarge the stack */
1035 StgTSO *new_t = threadStackOverflow(t);
1037 /* This TSO has moved, so update any pointers to it from the
1038 * main thread stack. It better not be on any other queues...
1041 for (i = 0; i < next_main_thread; i++) {
1042 if (main_threads[i] == t) {
1043 main_threads[i] = new_t;
1048 PUSH_ON_RUN_QUEUE(t);
1051 case ThreadYielding:
1053 if (t->whatNext == ThreadEnterHugs) {
1054 /* ToDo: or maybe a timer expired when we were in Hugs?
1055 * or maybe someone hit ctrl-C
1057 belch("Thread %ld stopped to switch to Hugs\n", t->id);
1059 belch("Thread %ld stopped, timer expired\n", t->id);
1064 IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
/* Interrupted: drain the run queue. NOTE(review): the advance uses
 * t->link, and the lines between are elided -- presumably t is set to
 * run_queue_hd inside this loop; verify against the full source.
 */
1066 while (run_queue_hd != END_TSO_QUEUE) {
1067 run_queue_hd = t->link;
1070 run_queue_tl = END_TSO_QUEUE;
1071 /* ToDo: should I do the same with blocked queues? */
1075 /* Put the thread back on the run queue, at the end.
1076 * t->link is already set to END_TSO_QUEUE.
1078 APPEND_TO_RUN_QUEUE(t);
1083 fprintf(stderr, "Thread %d stopped, ", t->id);
1084 printThreadBlockage(t);
1085 fprintf(stderr, "\n"));
1087 /* assume the thread has put itself on some blocked queue
1092 case ThreadFinished:
1093 IF_DEBUG(scheduler,fprintf(stderr,"thread %ld finished\n", t->id));
1094 t->whatNext = ThreadComplete;
1098 barf("schedule: invalid thread return code");
1101 /* check for signals each time around the scheduler */
1103 if (signals_pending()) {
1104 start_signal_handlers();
1107 /* If our main thread has finished or been killed, return.
1108 * If we were re-entered as a result of a _ccall_gc, then
1109 * pop the blocked thread off the ccalling_threads stack back
1112 if ((*MainTSO)->whatNext == ThreadComplete
1113 || (*MainTSO)->whatNext == ThreadKilled) {
/* Restore the thread that was blocked for the _ccall_gc re-entry. */
1116 CurrentTSO = ccalling_threads;
1117 ccalling_threads = ccalling_threads->link;
1118 /* remember to stub the link field of CurrentTSO */
1119 CurrentTSO->link = END_TSO_QUEUE;
1121 if ((*MainTSO)->whatNext == ThreadComplete) {
1122 /* we finished successfully, fill in the return value */
1123 if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };
1131 /* Checked whether any waiting threads need to be woken up.
1132 * If the run queue is empty, we can wait indefinitely for
1133 * something to happen.
1135 if (blocked_queue_hd != END_TSO_QUEUE) {
1136 awaitEvent(run_queue_hd == END_TSO_QUEUE);
1139 t = POP_RUN_QUEUE();
1142 /* If we got to here, then we ran out of threads to run, but the
1143 * main thread hasn't finished yet. It must be blocked on an MVar
1144 * or a black hole somewhere, so we return deadlock.
1150 /* -----------------------------------------------------------------------------
1151 Debugging: why is a thread blocked
1152 -------------------------------------------------------------------------- */
/* Print a one-line (no trailing newline) description of tso's blocking
 * reason to stderr, keyed on why_blocked.
 */
1155 void printThreadBlockage(StgTSO *tso)
1157 switch (tso->why_blocked) {
1159 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
1161 case BlockedOnWrite:
1162 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
1164 case BlockedOnDelay:
1165 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
1168 fprintf(stderr,"blocked on an MVar");
1170 case BlockedOnBlackHole:
1171 fprintf(stderr,"blocked on a black hole");
1174 fprintf(stderr,"not blocked");
1180 /* -----------------------------------------------------------------------------
1181 Where are the roots that we know about?
1183 - all the threads on the runnable queue
1184 - all the threads on the blocked queue
1185 - all the thread currently executing a _ccall_GC
1186 - all the "main threads"
1188 -------------------------------------------------------------------------- */
1190 /* This has to be protected either by the scheduler monitor, or by the
1191 garbage collection monitor (probably the latter).
/* GC root callback: mark every scheduler-owned thread pointer and
 * update each to its possibly-relocated address.
 */
1195 static void GetRoots(void)
1199 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
1200 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
1202 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
1203 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
1205 for (m = main_threads; m != NULL; m = m->link) {
1206 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
1208 suspended_ccalling_threads =
1209 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
1212 /* -----------------------------------------------------------------------------
1215 This is the interface to the garbage collector from Haskell land.
1216 We provide this so that external C code can allocate and garbage
1217 collect when called from Haskell via _ccall_GC.
1219 It might be useful to provide an interface whereby the programmer
1220 can specify more roots (ToDo).
1222 This needs to be protected by the GC condition variable above. KH.
1223 -------------------------------------------------------------------------- */
/* Stashes the user-supplied root function so AllRoots (below, partially
 * elided) can call it after the scheduler's own roots.
 */
1225 void (*extra_roots)(void);
1230 GarbageCollect(GetRoots);
1236 GetRoots(); /* the scheduler's roots */
1237 extra_roots(); /* the user's roots */
/* Run a GC with both the scheduler's roots and the caller's extra roots. */
1241 performGCWithRoots(void (*get_roots)(void))
1243 extra_roots = get_roots;
1245 GarbageCollect(AllRoots);
1248 /* -----------------------------------------------------------------------------
1251 If the thread has reached its maximum stack size,
1252 then bomb out. Otherwise relocate the TSO into a larger chunk of
1253 memory and adjust its stack size appropriately.
1254 -------------------------------------------------------------------------- */
/* Grow tso's stack by copying the whole TSO into a larger allocation.
 * At the max size, raises the StackOverflow exception instead (or aborts
 * under Hugs). Returns the new TSO; the old one is marked ThreadKilled.
 */
1257 threadStackOverflow(StgTSO *tso)
1259 nat new_stack_size, new_tso_size, diff, stack_words;
1263 if (tso->stack_size >= tso->max_stack_size) {
1265 /* If we're debugging, just print out the top of the stack */
1266 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
1270 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
1273 /* Send this thread the StackOverflow exception */
1274 raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
1279 /* Try to double the current stack size. If that takes us over the
1280 * maximum stack size for this thread, then use the maximum instead.
1281 * Finally round up so the TSO ends up as a whole number of blocks.
1283 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
1284 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
1285 TSO_STRUCT_SIZE)/sizeof(W_);
1286 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
1287 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
1289 IF_DEBUG(scheduler, fprintf(stderr,"schedule: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
1291 dest = (StgTSO *)allocate(new_tso_size);
1292 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
1294 /* copy the TSO block and the old stack into the new area */
1295 memcpy(dest,tso,TSO_STRUCT_SIZE);
/* The live stack sits at the TOP of the area, so it lands at the top
 * of the (larger) new area too.
 */
1296 stack_words = tso->stack + tso->stack_size - tso->sp;
1297 new_sp = (P_)dest + new_tso_size - stack_words;
1298 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
1300 /* relocate the stack pointers... */
1301 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
1302 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1304 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
1305 dest->stack_size = new_stack_size;
1307 /* and relocate the update frame list */
1308 relocate_TSO(tso, dest);
1310 /* Mark the old one as dead so we don't try to scavenge it during
1311 * garbage collection (the TSO will likely be on a mutables list in
1312 * some generation, but it'll get collected soon enough). It's
1313 * important to set the sp and su values to just beyond the end of
1314 * the stack, so we don't attempt to scavenge any part of the dead
1317 tso->whatNext = ThreadKilled;
1318 tso->sp = (P_)&(tso->stack[tso->stack_size]);
1319 tso->su = (StgUpdateFrame *)tso->sp;
1320 tso->why_blocked = NotBlocked;
1321 dest->mut_link = NULL;
1323 IF_DEBUG(sanity,checkTSO(tso));
1325 IF_DEBUG(scheduler,printTSO(dest));
1329 /* This will no longer work: KH */
1330 if (tso == MainTSO) { /* hack */
1337 /* -----------------------------------------------------------------------------
1338 Wake up a queue that was blocked on some resource.
1339 -------------------------------------------------------------------------- */
/* Wake a single blocked TSO and make it runnable.
 * Caller must hold sched_mutex (hence the "Locked" suffix); the lock-free
 * wrapper is unblockOne below.
 */
1342 unblockOneLocked(StgTSO *tso)
1346 ASSERT(get_itbl(tso)->type == TSO);
1347 ASSERT(tso->why_blocked != NotBlocked);
1348 tso->why_blocked = NotBlocked;
1350 PUSH_ON_RUN_QUEUE(tso);
1353 IF_DEBUG(scheduler,belch("schedule (task %ld): waking up thread %ld",
1354 pthread_self(), tso->id));
1356 IF_DEBUG(scheduler,belch("schedule: waking up thread %ld", tso->id));
/* Locking wrapper around unblockOneLocked. */
1362 unblockOne(StgTSO *tso)
1364 ACQUIRE_LOCK(&sched_mutex);
1365 tso = unblockOneLocked(tso);
1366 RELEASE_LOCK(&sched_mutex);
/* Wake an entire chain of blocked TSOs (linked through tso->link),
 * holding sched_mutex for the duration.
 */
1371 awakenBlockedQueue(StgTSO *tso)
1373 ACQUIRE_LOCK(&sched_mutex);
1374 while (tso != END_TSO_QUEUE) {
1375 tso = unblockOneLocked(tso);
1377 RELEASE_LOCK(&sched_mutex);
1380 /* -----------------------------------------------------------------------------
1382 - usually called inside a signal handler so it mustn't do anything fancy.
1383 -------------------------------------------------------------------------- */
/* Interrupt the RTS.  Per the comment above, this is usually invoked
 * from a signal handler, so it must restrict itself to async-signal-
 * safe operations.  NOTE(review): the body is not visible in this
 * listing -- no claims can be made about what it sets. */
1386 interruptStgRts(void)
1392 /* -----------------------------------------------------------------------------
1395 This is for use when we raise an exception in another thread, which
1397 -------------------------------------------------------------------------- */
/* Remove tso from whatever blocking queue it is sitting on, in
 * preparation for raising an asynchronous exception in it (see the
 * comment above raiseAsync).  Dispatches on tso->why_blocked:
 * MVar queues, black-hole blocking queues, and the global I/O/delay
 * blocked queue are each searched for the TSO and unlinked.  On
 * success the TSO is reset to NotBlocked and pushed on the run queue.
 *
 * NOTE(review): several interior lines (case labels, loop bodies that
 * perform the actual *last = t->link unlinking, braces) are missing
 * from this listing; comments below describe only what is visible. */
1400 unblockThread(StgTSO *tso)
1404 ACQUIRE_LOCK(&sched_mutex);
1405 switch (tso->why_blocked) {
/* NotBlocked case (label not visible): nothing to do. */
1408 return; /* not blocked */
/* BlockedOnMVar (label not visible): unlink from the MVar's
 * head/tail queue.  last_tso trails t so the tail pointer can be
 * patched when the removed TSO was the last element. */
1411 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
1413 StgTSO *last_tso = END_TSO_QUEUE;
1414 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
1417 for (t = mvar->head; t != END_TSO_QUEUE;
1418 last = &t->link, last_tso = t, t = t->link) {
1421 if (mvar->tail == tso) {
1422 mvar->tail = last_tso;
/* Reaching the end of the queue without finding tso is an RTS
 * invariant violation -- abort loudly. */
1427 barf("unblockThread (MVAR): TSO not found");
1430 case BlockedOnBlackHole:
/* Unlink from a black hole's blocking queue. */
1431 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
1433 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
1435 last = &bq->blocking_queue;
1436 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
1437 last = &t->link, t = t->link) {
1443 barf("unblockThread (BLACKHOLE): TSO not found");
1446 case BlockedOnDelay:
/* BlockedOnRead presumably falls through here too -- its case label
 * is not visible in this listing (TODO confirm). */
1448 case BlockedOnWrite:
/* Unlink from the global blocked_queue (I/O and delays), fixing up
 * the tail pointer when tso was last. */
1450 last = &blocked_queue_hd;
1451 for (t = blocked_queue_hd; t != END_TSO_QUEUE;
1452 last = &t->link, t = t->link) {
1455 if (blocked_queue_tl == t) {
1456 blocked_queue_tl = tso->link;
1461 barf("unblockThread (I/O): TSO not found");
/* default (label not visible): unknown why_blocked value. */
1465 barf("unblockThread");
/* Common exit: detach the TSO, mark it runnable, and queue it. */
1469 tso->link = END_TSO_QUEUE;
1470 tso->why_blocked = NotBlocked;
1471 tso->block_info.closure = NULL;
1472 PUSH_ON_RUN_QUEUE(tso);
1473 RELEASE_LOCK(&sched_mutex);
1476 /* -----------------------------------------------------------------------------
1479 * The following function implements the magic for raising an
1480 * asynchronous exception in an existing thread.
1482 * We first remove the thread from any queue on which it might be
1483 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
1485 * We strip the stack down to the innermost CATCH_FRAME, building
1486 * thunks in the heap for all the active computations, so they can
1487 * be restarted if necessary. When we reach a CATCH_FRAME, we build
1488 * an application of the handler to the exception, and push it on
1489 * the top of the stack.
1491 * How exactly do we save all the active computations? We create an
1492 * AP_UPD for every UpdateFrame on the stack. Entering one of these
1493 * AP_UPDs pushes everything from the corresponding update frame
1494 * upwards onto the stack. (Actually, it pushes everything up to the
1495 * next update frame plus a pointer to the next AP_UPD object.
1496 * Entering the next AP_UPD object pushes more onto the stack until we
1497 * reach the last AP_UPD object - at which point the stack should look
1498 * exactly as it did when we killed the TSO and we can continue
1499 * execution by entering the closure on top of the stack.)
1501 * We can also kill a thread entirely - this happens if either (a) the
1502 * exception passed to raiseAsync is NULL, or (b) there's no
1503 * CATCH_FRAME on the stack. In either case, we strip the entire
1504 * stack and replace the thread with a zombie.
1506 * -------------------------------------------------------------------------- */
/* Kill a thread outright.  Per the comment block above, raising a
 * NULL exception via raiseAsync strips the entire stack and leaves
 * the thread a zombie. */
1509 deleteThread(StgTSO *tso)
1511 raiseAsync(tso,NULL);
1515 raiseAsync(StgTSO *tso, StgClosure *exception)
1517 StgUpdateFrame* su = tso->su;
1518 StgPtr sp = tso->sp;
1520 /* Thread already dead? */
1521 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
1525 IF_DEBUG(scheduler, belch("schedule: Raising exception in thread %ld.", tso->id));
1527 /* Remove it from any blocking queues */
1530 /* The stack freezing code assumes there's a closure pointer on
1531 * the top of the stack. This isn't always the case with compiled
1532 * code, so we have to push a dummy closure on the top which just
1533 * returns to the next return address on the stack.
1535 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
1536 *(--sp) = (W_)&dummy_ret_closure;
1540 int words = ((P_)su - (P_)sp) - 1;
1544 /* If we find a CATCH_FRAME, and we've got an exception to raise,
1545 * then build PAP(handler,exception), and leave it on top of
1546 * the stack ready to enter.
1548 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
1549 StgCatchFrame *cf = (StgCatchFrame *)su;
1550 /* we've got an exception to raise, so let's pass it to the
1551 * handler in this frame.
1553 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
1554 TICK_ALLOC_UPD_PAP(2,0);
1555 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
1558 ap->fun = cf->handler;
1559 ap->payload[0] = (P_)exception;
1561 /* sp currently points to the word above the CATCH_FRAME on the
1562 * stack. Replace the CATCH_FRAME with a pointer to the new handler
1565 sp += sizeofW(StgCatchFrame);
1569 tso->whatNext = ThreadEnterGHC;
1573 /* First build an AP_UPD consisting of the stack chunk above the
1574 * current update frame, with the top word on the stack as the
1577 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
1582 ap->fun = (StgClosure *)sp[0];
1584 for(i=0; i < (nat)words; ++i) {
1585 ap->payload[i] = (P_)*sp++;
1588 switch (get_itbl(su)->type) {
1592 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
1593 TICK_ALLOC_UP_THK(words+1,0);
1596 fprintf(stderr, "schedule: Updating ");
1597 printPtr((P_)su->updatee);
1598 fprintf(stderr, " with ");
1599 printObj((StgClosure *)ap);
1602 /* Replace the updatee with an indirection - happily
1603 * this will also wake up any threads currently
1604 * waiting on the result.
1606 UPD_IND(su->updatee,ap); /* revert the black hole */
1608 sp += sizeofW(StgUpdateFrame) -1;
1609 sp[0] = (W_)ap; /* push onto stack */
1615 StgCatchFrame *cf = (StgCatchFrame *)su;
1618 /* We want a PAP, not an AP_UPD. Fortunately, the
1619 * layout's the same.
1621 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1622 TICK_ALLOC_UPD_PAP(words+1,0);
1624 /* now build o = FUN(catch,ap,handler) */
1625 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
1626 TICK_ALLOC_FUN(2,0);
1627 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
1628 o->payload[0] = (StgClosure *)ap;
1629 o->payload[1] = cf->handler;
1632 fprintf(stderr, "schedule: Built ");
1633 printObj((StgClosure *)o);
1636 /* pop the old handler and put o on the stack */
1638 sp += sizeofW(StgCatchFrame) - 1;
1645 StgSeqFrame *sf = (StgSeqFrame *)su;
1648 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1649 TICK_ALLOC_UPD_PAP(words+1,0);
1651 /* now build o = FUN(seq,ap) */
1652 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
1653 TICK_ALLOC_SE_THK(1,0);
1654 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
1655 payloadCPtr(o,0) = (StgClosure *)ap;
1658 fprintf(stderr, "schedule: Built ");
1659 printObj((StgClosure *)o);
1662 /* pop the old handler and put o on the stack */
1664 sp += sizeofW(StgSeqFrame) - 1;
1670 /* We've stripped the entire stack, the thread is now dead. */
1671 sp += sizeofW(StgStopFrame) - 1;
1672 sp[0] = (W_)exception; /* save the exception */
1673 tso->whatNext = ThreadKilled;
1674 tso->su = (StgUpdateFrame *)(sp+1);