1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.37 1999/12/01 14:58:09 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
10 /* Version with scheduler monitor support for SMPs.
12 This design provides a high-level API to create and schedule threads etc.
13 as documented in the SMP design document.
15 It uses a monitor design controlled by a single mutex to exercise control
16 over accesses to shared data structures, and builds on the Posix threads library.
19 The majority of state is shared. In order to keep essential per-task state,
20 there is a Capability structure, which contains all the information
21 needed to run a thread: its STG registers, a pointer to its TSO, a
22 nursery etc. During STG execution, a pointer to the capability is
23 kept in a register (BaseReg).
25 In a non-SMP build, there is one global capability, namely MainRegTable.
36 #include "StgStartup.h"
40 #include "StgMiscClosures.h"
42 #include "Evaluator.h"
43 #include "Exception.h"
47 #include "Profiling.h"
53 * These are the threads which clients have requested that we run.
55 * In an SMP build, we might have several concurrent clients all
56 * waiting for results, and each one will wait on a condition variable
57 * until the result is available.
59 * In non-SMP, clients are strictly nested: the first client calls
60 * into the RTS, which might call out again to C with a _ccall_GC, and
61 * eventually re-enter the RTS.
63 * Main threads information is kept in a linked list:
/* A "main thread": one record per client computation submitted to the RTS.
 * NOTE(review): this listing is elided -- fields such as the TSO pointer,
 * status and return slot are missing here; only the SMP wakeup condition
 * variable and the list link are visible.
 */
65 typedef struct StgMainThread_ {
/* condition variable the requesting task sleeps on until its result is ready */
70 pthread_cond_t wakeup;
/* next entry in the main_threads list */
72 struct StgMainThread_ *link;
76 * Locks required: sched_mutex.
/* head of the list of main threads (clients waiting for results) */
78 static StgMainThread *main_threads;
81 * Locks required: sched_mutex.
/* runnable threads, kept as a singly linked queue with head and tail */
83 StgTSO *run_queue_hd, *run_queue_tl;
/* threads blocked on I/O or delays, also a head/tail queue */
84 StgTSO *blocked_queue_hd, *blocked_queue_tl;
86 /* Threads suspended in _ccall_GC.
87 * Locks required: sched_mutex.
89 static StgTSO *suspended_ccalling_threads;
/* forward declarations for GC root marking and stack growth */
91 static void GetRoots(void);
92 static StgTSO *threadStackOverflow(StgTSO *tso);
94 /* KH: The following two flags are shared memory locations. There is no need
95 to lock them, since they are only unset at the end of a scheduler
99 /* flag set by signal handler to precipitate a context switch */
101 /* if this flag is set as well, give up execution */
102 static nat interrupted;
104 /* Next thread ID to allocate.
105 * Locks required: sched_mutex
107 StgThreadID next_thread_id = 1;
110 * Pointers to the state of the current thread.
111 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
112 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
115 /* The smallest stack size that makes any sense is:
116 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
117 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
118 * + 1 (the realworld token for an IO thread)
119 * + 1 (the closure to enter)
121 * A thread with this stack will bomb immediately with a stack
122 * overflow, which will increase its stack size.
/* Smallest viable stack: reserved words + a stop frame + 2 (realworld token
 * and the closure to enter); see the comment block above. */
125 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
127 /* Free capability list.
128 * Locks required: sched_mutex.
131 Capability *free_capabilities; /* Available capabilities for running threads */
132 nat n_free_capabilities; /* total number of available capabilities */
/* NOTE(review): in the non-SMP build a single global capability is used. */
134 Capability MainRegTable; /* for non-SMP, we have one global capability */
139 /* All our current task ids, saved in case we need to kill them later.
/* Forward declarations for the scheduler entry points below. */
145 void addToBlockedQueue ( StgTSO *tso );
147 static void schedule ( void );
148 static void initThread ( StgTSO *tso, nat stack_size );
149 void interruptStgRts ( void );
/* The single monitor lock guarding all scheduler state, plus a lock used
 * during termination, and the two condition variables: "a thread is ready
 * to run" and "GC has completed".  All statically initialised. */
152 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
153 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
154 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
155 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
160 /* -----------------------------------------------------------------------------
161 Main scheduling loop.
163 We use round-robin scheduling, each thread returning to the
164 scheduler loop when one of these conditions is detected:
167 * timer expires (thread yields)
172 Locking notes: we acquire the scheduler lock once at the beginning
173 of the scheduler loop, and release it when
175 * running a thread, or
176 * waiting for work, or
177 * waiting for a GC to complete.
179 -------------------------------------------------------------------------- */
/* Core scheduler loop (SMP build).  Round-robin: grab a thread and a free
 * capability, run it via StgRun, then dispatch on its return code.
 * NOTE(review): this listing is heavily elided -- the function header, most
 * braces and a number of statements are missing, so the comments below only
 * annotate what is visible.
 */
186 StgThreadReturnCode ret;
/* the monitor lock is held except while running a thread / waiting */
188 ACQUIRE_LOCK(&sched_mutex);
192 /* If we're interrupted (the user pressed ^C, or some other
193 * termination condition occurred), kill all the currently running
197 IF_DEBUG(scheduler,belch("schedule: interrupted"));
198 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
201 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
/* both queues are emptied after the threads have been killed */
204 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
205 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
208 /* Go through the list of main threads and wake up any
209 * clients whose computations have finished. ToDo: this
210 * should be done more efficiently without a linear scan
211 * of the main threads list, somehow...
215 StgMainThread *m, **prev;
216 prev = &main_threads;
217 for (m = main_threads; m != NULL; m = m->link) {
218 if (m->tso->whatNext == ThreadComplete) {
/* result convention: top word of the finished thread's stack */
220 *(m->ret) = (StgClosure *)m->tso->sp[0];
224 pthread_cond_broadcast(&m->wakeup);
226 if (m->tso->whatNext == ThreadKilled) {
229 pthread_cond_broadcast(&m->wakeup);
234 /* If our main thread has finished or been killed, return.
237 StgMainThread *m = main_threads;
238 if (m->tso->whatNext == ThreadComplete
239 || m->tso->whatNext == ThreadKilled) {
240 main_threads = main_threads->link;
241 if (m->tso->whatNext == ThreadComplete) {
242 /* we finished successfully, fill in the return value */
243 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
254 /* Check whether any waiting threads need to be woken up. If the
255 * run queue is empty, and there are no other tasks running, we
256 * can wait indefinitely for something to happen.
257 * ToDo: what if another client comes along & requests another
260 if (blocked_queue_hd != END_TSO_QUEUE) {
262 (run_queue_hd == END_TSO_QUEUE)
264 && (n_free_capabilities == RtsFlags.ConcFlags.nNodes)
269 /* check for signals each time around the scheduler */
271 if (signals_pending()) {
272 start_signal_handlers();
276 /* Detect deadlock: when we have no threads to run, there are
277 * no threads waiting on I/O or sleeping, and all the other
278 * tasks are waiting for work, we must have a deadlock. Inform
279 * all the main threads.
282 if (blocked_queue_hd == END_TSO_QUEUE
283 && run_queue_hd == END_TSO_QUEUE
284 && (n_free_capabilities == RtsFlags.ConcFlags.nNodes)
287 for (m = main_threads; m != NULL; m = m->link) {
290 pthread_cond_broadcast(&m->wakeup);
295 if (blocked_queue_hd == END_TSO_QUEUE
296 && run_queue_hd == END_TSO_QUEUE) {
297 StgMainThread *m = main_threads;
300 main_threads = m->link;
306 /* If there's a GC pending, don't do anything until it has
310 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): waiting for GC\n",
/* sleep on the GC condvar; sched_mutex is atomically released/reacquired */
312 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
315 /* block until we've got a thread on the run queue and a free
318 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
320 fprintf(stderr, "schedule (task %ld): waiting for work\n",
322 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
324 fprintf(stderr, "schedule (task %ld): work now available\n",
329 /* grab a thread from the run queue
/* pop a capability off the free list (sched_mutex held) */
336 cap = free_capabilities;
337 free_capabilities = cap->link;
338 n_free_capabilities--;
343 cap->rCurrentTSO = t;
345 /* set the context_switch flag
347 if (run_queue_hd == END_TSO_QUEUE)
/* drop the monitor lock while the thread actually runs */
352 RELEASE_LOCK(&sched_mutex);
355 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): running thread %d\n", pthread_self(),t->id));
357 IF_DEBUG(scheduler,fprintf(stderr,"schedule: running thread %d\n",t->id));
360 /* Run the current thread
362 switch (cap->rCurrentTSO->whatNext) {
365 /* Thread already finished, return to scheduler. */
366 ret = ThreadFinished;
/* enter/return to compiled (GHC) code via the STG mini-interpreter */
369 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
372 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
374 case ThreadEnterHugs:
378 IF_DEBUG(scheduler,belch("schedule: entering Hugs"));
379 c = (StgClosure *)(cap->rCurrentTSO->sp[0]);
380 cap->rCurrentTSO->sp += 1;
385 barf("Panic: entered a BCO but no bytecode interpreter in this build");
388 barf("schedule: invalid whatNext field");
391 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* re-take the monitor lock before touching shared queues again */
396 ACQUIRE_LOCK(&sched_mutex);
399 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): ", pthread_self()););
401 IF_DEBUG(scheduler,fprintf(stderr,"schedule: "););
403 t = cap->rCurrentTSO;
407 /* make all the running tasks block on a condition variable,
408 * maybe set context_switch and wait till they all pile in,
409 * then have them wait on a GC condition variable.
411 IF_DEBUG(scheduler,belch("thread %ld stopped: HeapOverflow", t->id));
/* HeapOverflow: request a GC and push the thread back for later */
414 ready_to_gc = rtsTrue;
415 context_switch = 1; /* stop other threads ASAP */
416 PUSH_ON_RUN_QUEUE(t);
420 /* just adjust the stack for this thread, then pop it back
423 IF_DEBUG(scheduler,belch("thread %ld stopped, StackOverflow", t->id));
427 /* enlarge the stack */
428 StgTSO *new_t = threadStackOverflow(t);
430 /* This TSO has moved, so update any pointers to it from the
431 * main thread stack. It better not be on any other queues...
434 for (m = main_threads; m != NULL; m = m->link) {
439 PUSH_ON_RUN_QUEUE(new_t);
444 /* put the thread back on the run queue. Then, if we're ready to
445 * GC, check whether this is the last task to stop. If so, wake
446 * up the GC thread. getThread will block during a GC until the
450 if (t->whatNext == ThreadEnterHugs) {
451 /* ToDo: or maybe a timer expired when we were in Hugs?
452 * or maybe someone hit ctrl-C
454 belch("thread %ld stopped to switch to Hugs", t->id);
456 belch("thread %ld stopped, yielding", t->id);
/* yielding thread goes to the back of the queue (round robin) */
460 APPEND_TO_RUN_QUEUE(t);
464 /* don't need to do anything. Either the thread is blocked on
465 * I/O, in which case we'll have called addToBlockedQueue
466 * previously, or it's blocked on an MVar or Blackhole, in which
467 * case it'll be on the relevant queue already.
470 fprintf(stderr, "thread %d stopped, ", t->id);
471 printThreadBlockage(t);
472 fprintf(stderr, "\n"));
477 /* Need to check whether this was a main thread, and if so, signal
478 * the task that started it with the return value. If we have no
479 * more main threads, we probably need to stop all the tasks until
482 IF_DEBUG(scheduler,belch("thread %ld finished", t->id));
483 t->whatNext = ThreadComplete;
487 barf("doneThread: invalid thread return code");
/* return the capability to the free list */
491 cap->link = free_capabilities;
492 free_capabilities = cap;
493 n_free_capabilities++;
/* all capabilities free => every task has stopped, safe to GC */
497 if (ready_to_gc && n_free_capabilities == RtsFlags.ConcFlags.nNodes) {
501 /* everybody back, start the GC.
502 * Could do it in this thread, or signal a condition var
503 * to do it in another thread. Either way, we need to
504 * broadcast on gc_pending_cond afterward.
507 IF_DEBUG(scheduler,belch("schedule (task %ld): doing GC", pthread_self()));
509 GarbageCollect(GetRoots);
510 ready_to_gc = rtsFalse;
/* wake every task that was parked waiting for the GC to finish */
512 pthread_cond_broadcast(&gc_pending_cond);
515 } /* end of while(1) */
519 /* A hack for Hugs concurrency support. Needs sanitisation (?) */
/* Kill every thread on the run and blocked queues, then empty both queues.
 * NOTE(review): the loop bodies (the actual kill calls) are elided in this
 * listing.  No locking is visible here -- presumably only called when the
 * caller already owns the scheduler; verify before use. */
520 void deleteAllThreads ( void )
523 IF_DEBUG(scheduler,belch("deleteAllThreads()"));
524 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
527 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
530 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
531 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
535 /* -----------------------------------------------------------------------------
536 * Suspending & resuming Haskell threads.
538 * When making a "safe" call to C (aka _ccall_GC), the task gives back
539 * its capability before calling the C function. This allows another
540 * task to pick up the capability and carry on running Haskell
541 * threads. It also means that if the C call blocks, it won't lock
544 * The Haskell thread making the C call is put to sleep for the
545 duration of the call, on the suspended_ccalling_threads queue. We
546 * give out a token to the task, which it can use to resume the thread
547 * on return from the C function.
548 * -------------------------------------------------------------------------- */
/* Give back this task's capability before a "safe" C call (_ccall_GC).
 * The current TSO is parked on suspended_ccalling_threads and the thread id
 * is returned as a token for resumeThread().  NOTE(review): the return type
 * and variable declarations are elided in this listing.
 */
551 suspendThread( Capability *cap )
555 ACQUIRE_LOCK(&sched_mutex);
559 fprintf(stderr, "schedule (task %ld): thread %d did a _ccall_gc\n",
560 pthread_self(), cap->rCurrentTSO->id));
563 fprintf(stderr, "schedule: thread %d did a _ccall_gc\n",
564 cap->rCurrentTSO->id));
/* freeze the thread's stack, then push it onto the suspended list */
567 threadPaused(cap->rCurrentTSO);
568 cap->rCurrentTSO->link = suspended_ccalling_threads;
569 suspended_ccalling_threads = cap->rCurrentTSO;
571 /* Use the thread ID as the token; it should be unique */
572 tok = cap->rCurrentTSO->id;
/* hand the capability back so another task can run Haskell threads */
575 cap->link = free_capabilities;
576 free_capabilities = cap;
577 n_free_capabilities++;
580 RELEASE_LOCK(&sched_mutex);
/* Resume a thread previously suspended by suspendThread(), identified by
 * its token (the thread id).  Unlinks the TSO from the suspended list,
 * waits for a free capability, and installs the TSO in it.
 * NOTE(review): return type, declarations and list-unlink statement are
 * elided in this listing.
 */
585 resumeThread( StgInt tok )
590 ACQUIRE_LOCK(&sched_mutex);
/* linear search of the suspended list, keeping a back-pointer for unlink */
592 prev = &suspended_ccalling_threads;
593 for (tso = suspended_ccalling_threads;
594 tso != END_TSO_QUEUE;
595 prev = &tso->link, tso = tso->link) {
596 if (tso->id == (StgThreadID)tok) {
601 if (tso == END_TSO_QUEUE) {
602 barf("resumeThread: thread not found");
/* block until a capability is free; condvar releases sched_mutex while waiting */
606 while (free_capabilities == NULL) {
608 fprintf(stderr,"schedule (task %ld): waiting to resume\n",
610 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
611 IF_DEBUG(scheduler,fprintf(stderr,
612 "schedule (task %ld): resuming thread %d\n",
613 pthread_self(), tso->id));
615 cap = free_capabilities;
616 free_capabilities = cap->link;
617 n_free_capabilities--;
622 cap->rCurrentTSO = tso;
624 RELEASE_LOCK(&sched_mutex);
628 /* -----------------------------------------------------------------------------
630 * -------------------------------------------------------------------------- */
631 static void unblockThread(StgTSO *tso);
633 /* -----------------------------------------------------------------------------
634 * Comparing Thread ids.
636 * This is used from STG land in the implementation of the
637 * instances of Eq/Ord for ThreadIds.
638 * -------------------------------------------------------------------------- */
640 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
642 StgThreadID id1 = tso1->id;
643 StgThreadID id2 = tso2->id;
645 if (id1 < id2) return (-1);
646 if (id1 > id2) return 1;
650 /* -----------------------------------------------------------------------------
653 The new thread starts with the given stack size. Before the
654 scheduler can run, however, this thread needs to have a closure
655 (and possibly some arguments) pushed on its stack. See
656 pushClosure() in Schedule.h.
658 createGenThread() and createIOThread() (in SchedAPI.h) are
659 convenient packaged versions of this function.
660 -------------------------------------------------------------------------- */
/* Allocate and initialise a new TSO with (at least) the requested stack
 * size in words.  NOTE(review): return type, declarations and the final
 * return statement are elided in this listing.
 */
663 createThread(nat stack_size)
667 /* catch ridiculously small stack sizes */
668 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
669 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
/* the TSO header and its stack live in one contiguous allocation */
672 tso = (StgTSO *)allocate(stack_size);
673 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
/* stack_size passed on is the stack portion only, minus the TSO header */
675 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* Fill in a freshly allocated TSO: header, unique id, stack pointers and
 * the initial stop frame.  stack_size is the usable stack in words.
 * NOTE(review): several lines (braces, a few field assignments) are elided
 * in this listing.
 */
680 initThread(StgTSO *tso, nat stack_size)
682 SET_HDR(tso, &TSO_info, CCS_MAIN);
683 tso->whatNext = ThreadEnterGHC;
685 /* tso->id needs to be unique. For now we use a heavyweight mutex to
686 protect the increment operation on next_thread_id.
687 In future, we could use an atomic increment instead.
690 ACQUIRE_LOCK(&sched_mutex);
691 tso->id = next_thread_id++;
692 RELEASE_LOCK(&sched_mutex);
694 tso->why_blocked = NotBlocked;
/* splim leaves RESERVED_STACK_WORDS headroom for overflow handling */
696 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
697 tso->stack_size = stack_size;
698 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
/* stack grows downward: sp starts at the top of the stack area */
700 tso->sp = (P_)&(tso->stack) + stack_size;
703 tso->prof.CCCS = CCS_MAIN;
706 /* put a stop frame on the stack */
707 tso->sp -= sizeofW(StgStopFrame);
708 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
709 tso->su = (StgUpdateFrame*)tso->sp;
711 IF_DEBUG(scheduler,belch("schedule: Initialised thread %ld, stack size = %lx words",
712 tso->id, tso->stack_size));
717 /* -----------------------------------------------------------------------------
720 * scheduleThread puts a thread on the head of the runnable queue.
721 * This will usually be done immediately after a thread is created.
722 * The caller of scheduleThread must create the thread using e.g.
723 * createThread and push an appropriate closure
724 * on this thread's stack before the scheduler is invoked.
725 * -------------------------------------------------------------------------- */
/* Make a freshly created thread runnable by pushing it on the run queue.
 * The caller must already have pushed a closure on the thread's stack (see
 * createThread).  NOTE(review): return type, braces and the condvar signal
 * that presumably wakes waiting tasks are elided in this listing.
 */
728 scheduleThread(StgTSO *tso)
730 ACQUIRE_LOCK(&sched_mutex);
732 /* Put the new thread on the head of the runnable queue. The caller
733 * better push an appropriate closure on this thread's stack
734 * beforehand. In the SMP case, the thread may start running as
735 * soon as we release the scheduler lock below.
737 PUSH_ON_RUN_QUEUE(tso);
740 IF_DEBUG(scheduler,printTSO(tso));
741 RELEASE_LOCK(&sched_mutex);
745 /* -----------------------------------------------------------------------------
748 * Start up Posix threads to run each of the scheduler tasks.
749 * I believe the task ids are not needed in the system as defined.
751 * -------------------------------------------------------------------------- */
/* Entry point for each worker Posix thread; the body (elided here)
 * presumably enters the scheduler loop. */
755 taskStart( void *arg STG_UNUSED )
762 /* -----------------------------------------------------------------------------
765 * Initialise the scheduler. This resets all the queues - if the
766 * queues contained any threads, they'll be garbage collected at the
769 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
770 * -------------------------------------------------------------------------- */
/* SIGTERM handler used during shutdown (see exitScheduler): the work done
 * between lock/unlock -- presumably decrementing await_death -- is elided
 * in this listing. */
774 term_handler(int sig STG_UNUSED)
777 ACQUIRE_LOCK(&term_mutex);
779 RELEASE_LOCK(&term_mutex);
/* Initialise the scheduler: reset all queues, install the termination
 * signal handler, allocate one Capability per node and start the worker
 * tasks.  Should only be called once.  NOTE(review): braces, error checks
 * and some declarations are elided in this listing.
 */
784 void initScheduler(void)
786 run_queue_hd = END_TSO_QUEUE;
787 run_queue_tl = END_TSO_QUEUE;
788 blocked_queue_hd = END_TSO_QUEUE;
789 blocked_queue_tl = END_TSO_QUEUE;
791 suspended_ccalling_threads = END_TSO_QUEUE;
798 enteredCAFs = END_CAF_LIST;
800 /* Install the SIGTERM handler (the old comment said SIGHUP, but the
   sigaction call below registers term_handler for SIGTERM) */
803 struct sigaction action,oact;
805 action.sa_handler = term_handler;
806 sigemptyset(&action.sa_mask);
808 if (sigaction(SIGTERM, &action, &oact) != 0) {
809 barf("can't install TERM handler");
815 /* Allocate N Capabilities */
818 Capability *cap, *prev;
821 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
822 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
826 free_capabilities = cap;
827 n_free_capabilities = RtsFlags.ConcFlags.nNodes;
829 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Allocated %d capabilities\n",
830 n_free_capabilities););
842 /* make some space for saving all the thread ids */
843 task_ids = stgMallocBytes(RtsFlags.ConcFlags.nNodes * sizeof(task_info),
844 "initScheduler:task_ids");
846 /* and create all the threads */
847 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
848 r = pthread_create(&tid,NULL,taskStart,NULL);
850 barf("startTasks: Can't create new Posix thread");
/* record per-task timing stats alongside the pthread id */
852 task_ids[i].id = tid;
853 task_ids[i].mut_time = 0.0;
854 task_ids[i].mut_etime = 0.0;
855 task_ids[i].gc_time = 0.0;
856 task_ids[i].gc_etime = 0.0;
857 task_ids[i].elapsedtimestart = elapsedtime();
858 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Started task: %ld\n",tid););
/* Shut the scheduler down.  NOTE(review): this listing shows two
 * alternative shutdown strategies (pthread_cancel/join vs. pthread_kill
 * with SIGTERM); the preprocessor conditionals selecting between them are
 * elided.
 */
864 exitScheduler( void )
869 /* Don't want to use pthread_cancel, since we'd have to install
870 * these silly exception handlers (pthread_cleanup_{push,pop}) around
874 /* Cancel all our tasks */
875 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
876 pthread_cancel(task_ids[i].id);
879 /* Wait for all the tasks to terminate */
880 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
881 IF_DEBUG(scheduler,fprintf(stderr,"schedule: waiting for task %ld\n",
883 pthread_join(task_ids[i].id, NULL);
887 /* Send 'em all a SIGTERM. That should shut 'em up.  (The old comment
   said SIGHUP, but pthread_kill below delivers SIGTERM.)
889 await_death = RtsFlags.ConcFlags.nNodes;
890 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
891 pthread_kill(task_ids[i].id,SIGTERM);
/* spin/wait until every task's term_handler has run */
893 while (await_death > 0) {
899 /* -----------------------------------------------------------------------------
900 Managing the per-task allocation areas.
902 Each capability comes with an allocation area. These are
903 fixed-length block lists into which allocation can be done.
905 ToDo: no support for two-space collection at the moment???
906 -------------------------------------------------------------------------- */
908 /* -----------------------------------------------------------------------------
909 * waitThread is the external interface for running a new computation
910 * and waiting for the result.
912 * In the non-SMP case, we create a new main thread, push it on the
913 * main-thread stack, and invoke the scheduler to run it. The
914 * scheduler will return when the top main thread on the stack has
915 * completed or died, and fill in the necessary fields of the
916 * main_thread structure.
918 * In the SMP case, we create a main thread as before, but we then
919 * create a new condition variable and sleep on it. When our new
920 * main thread has completed, we'll be woken up and the status/result
921 * will be in the main_thread struct.
922 * -------------------------------------------------------------------------- */
/* External interface: run a computation and wait for its result.  Creates
 * a main-thread record with a private wakeup condvar, links it onto
 * main_threads, and sleeps until the scheduler fills in the status/result.
 * NOTE(review): return type, several field initialisations and the final
 * status-to-return-code mapping are elided in this listing.
 */
925 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
928 SchedulerStatus stat;
930 ACQUIRE_LOCK(&sched_mutex);
932 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
938 pthread_cond_init(&m->wakeup, NULL);
941 m->link = main_threads;
944 IF_DEBUG(scheduler, fprintf(stderr, "schedule: new main thread (%d)\n",
/* sleep until the scheduler broadcasts on our wakeup condvar */
949 pthread_cond_wait(&m->wakeup, &sched_mutex);
950 } while (m->stat == NoStatus);
953 ASSERT(m->stat != NoStatus);
959 pthread_cond_destroy(&m->wakeup);
963 RELEASE_LOCK(&sched_mutex);
967 /* -----------------------------------------------------------------------------
968 Debugging: why is a thread blocked
969 -------------------------------------------------------------------------- */
/* Debugging aid: print (to stderr, no newline) why a thread is blocked.
 * NOTE(review): most case labels and break statements are elided in this
 * listing; only the bodies are visible.
 */
972 void printThreadBlockage(StgTSO *tso)
974 switch (tso->why_blocked) {
976 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
979 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
982 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
985 fprintf(stderr,"blocked on an MVar");
987 case BlockedOnException:
988 fprintf(stderr,"blocked on delivering an exception to thread %d",
989 tso->block_info.tso->id);
991 case BlockedOnBlackHole:
992 fprintf(stderr,"blocked on a black hole");
995 fprintf(stderr,"not blocked");
1001 /* -----------------------------------------------------------------------------
1002 Where are the roots that we know about?
1004 - all the threads on the runnable queue
1005 - all the threads on the blocked queue
1006 - all the thread currently executing a _ccall_GC
1007 - all the "main threads"
1009 -------------------------------------------------------------------------- */
1011 /* This has to be protected either by the scheduler monitor, or by the
1012 garbage collection monitor (probably the latter).
/* Report all scheduler-known GC roots: both run-queue ends, both
 * blocked-queue ends, every main thread's TSO, and the suspended-ccall
 * list.  Each MarkRoot call may move the object, so the result is written
 * back.  Must be called under GC (or scheduler) protection -- see comment
 * above. */
1016 static void GetRoots(void)
1020 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
1021 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
1023 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
1024 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
1026 for (m = main_threads; m != NULL; m = m->link) {
1027 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
1029 suspended_ccalling_threads =
1030 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
1033 /* -----------------------------------------------------------------------------
1036 This is the interface to the garbage collector from Haskell land.
1037 We provide this so that external C code can allocate and garbage
1038 collect when called from Haskell via _ccall_GC.
1040 It might be useful to provide an interface whereby the programmer
1041 can specify more roots (ToDo).
1043 This needs to be protected by the GC condition variable above. KH.
1044 -------------------------------------------------------------------------- */
/* extra user-supplied root marker, consumed by AllRoots below */
1046 void (*extra_roots)(void);
/* NOTE(review): the enclosing function headers (presumably performGC and
 * an AllRoots helper) are elided in this listing; only their bodies are
 * visible. */
1051 GarbageCollect(GetRoots);
1057 GetRoots(); /* the scheduler's roots */
1058 extra_roots(); /* the user's roots */
/* GC with scheduler roots plus the caller's extra roots */
1062 performGCWithRoots(void (*get_roots)(void))
1064 extra_roots = get_roots;
1066 GarbageCollect(AllRoots);
1069 /* -----------------------------------------------------------------------------
1072 If the thread has reached its maximum stack size,
1073 then bomb out. Otherwise relocate the TSO into a larger chunk of
1074 memory and adjust its stack size appropriately.
1075 -------------------------------------------------------------------------- */
/* Grow a thread's stack after overflow.  If the thread is already at its
 * maximum stack size it is sent the StackOverflow exception instead;
 * otherwise the TSO is copied into a larger allocation, pointers are
 * relocated, and the old TSO is marked dead.  Returns the (possibly moved)
 * TSO.  NOTE(review): return type, several statements and the final return
 * are elided in this listing.
 */
1078 threadStackOverflow(StgTSO *tso)
1080 nat new_stack_size, new_tso_size, diff, stack_words;
1084 if (tso->stack_size >= tso->max_stack_size) {
1086 /* If we're debugging, just print out the top of the stack */
1087 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
1091 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
1094 /* Send this thread the StackOverflow exception */
1095 raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
1100 /* Try to double the current stack size. If that takes us over the
1101 * maximum stack size for this thread, then use the maximum instead.
1102 * Finally round up so the TSO ends up as a whole number of blocks.
1104 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
1105 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
1106 TSO_STRUCT_SIZE)/sizeof(W_);
1107 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
1108 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
1110 IF_DEBUG(scheduler, fprintf(stderr,"schedule: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
1112 dest = (StgTSO *)allocate(new_tso_size);
1113 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
1115 /* copy the TSO block and the old stack into the new area */
1116 memcpy(dest,tso,TSO_STRUCT_SIZE);
/* live stack is the region between sp and the top of the stack */
1117 stack_words = tso->stack + tso->stack_size - tso->sp;
/* the live stack is copied to the TOP of the new, larger stack area */
1118 new_sp = (P_)dest + new_tso_size - stack_words;
1119 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
1121 /* relocate the stack pointers... */
1122 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
1123 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
/* splim moves by the distance between the two TSO blocks */
1125 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
1126 dest->stack_size = new_stack_size;
1128 /* and relocate the update frame list */
1129 relocate_TSO(tso, dest);
1131 /* Mark the old one as dead so we don't try to scavenge it during
1132 * garbage collection (the TSO will likely be on a mutables list in
1133 * some generation, but it'll get collected soon enough). It's
1134 * important to set the sp and su values to just beyond the end of
1135 * the stack, so we don't attempt to scavenge any part of the dead
1138 tso->whatNext = ThreadKilled;
1139 tso->sp = (P_)&(tso->stack[tso->stack_size]);
1140 tso->su = (StgUpdateFrame *)tso->sp;
1141 tso->why_blocked = NotBlocked;
1142 dest->mut_link = NULL;
1144 IF_DEBUG(sanity,checkTSO(tso));
1146 IF_DEBUG(scheduler,printTSO(dest));
1150 /* This will no longer work: KH */
1151 if (tso == MainTSO) { /* hack */
1158 /* -----------------------------------------------------------------------------
1159 Wake up a queue that was blocked on some resource.
1160 -------------------------------------------------------------------------- */
/* Wake a single blocked TSO: clear its blockage and push it on the run
 * queue.  Caller must hold sched_mutex (hence "Locked").  NOTE(review):
 * return type (presumably the next TSO in the queue) and the final return
 * statement are elided in this listing.
 */
1163 unblockOneLocked(StgTSO *tso)
1167 ASSERT(get_itbl(tso)->type == TSO);
1168 ASSERT(tso->why_blocked != NotBlocked);
1169 tso->why_blocked = NotBlocked;
1171 PUSH_ON_RUN_QUEUE(tso);
1174 IF_DEBUG(scheduler,belch("schedule (task %ld): waking up thread %ld",
1175 pthread_self(), tso->id));
1177 IF_DEBUG(scheduler,belch("schedule: waking up thread %ld", tso->id));
/* Lock-taking wrapper around unblockOneLocked.  NOTE(review): the return
 * type and final return of the unblocked-thread's successor are elided in
 * this listing. */
1183 unblockOne(StgTSO *tso)
1185 ACQUIRE_LOCK(&sched_mutex);
1186 tso = unblockOneLocked(tso);
1187 RELEASE_LOCK(&sched_mutex);
/* Wake an entire queue of blocked threads under one lock acquisition,
 * repeatedly calling unblockOneLocked, which evidently yields the next
 * link in the queue.  NOTE(review): return type and braces are elided in
 * this listing. */
1192 awakenBlockedQueue(StgTSO *tso)
1194 ACQUIRE_LOCK(&sched_mutex);
1195 while (tso != END_TSO_QUEUE) {
1196 tso = unblockOneLocked(tso);
1198 RELEASE_LOCK(&sched_mutex);
1201 /* -----------------------------------------------------------------------------
1203 - usually called inside a signal handler so it mustn't do anything fancy.
1204 -------------------------------------------------------------------------- */
/* Interrupt the RTS; the body (presumably setting the interrupted and
 * context_switch flags) is elided in this listing. */
1207 interruptStgRts(void)
1213 /* -----------------------------------------------------------------------------
1216 This is for use when we raise an exception in another thread, which
1218 -------------------------------------------------------------------------- */
/* Forcibly remove a TSO from whatever queue it is blocked on (MVar,
 * black-hole blocking queue, another thread's blocked-exceptions list, or
 * the I/O blocked queue), then make it runnable.  Used when raising an
 * asynchronous exception in another thread.  NOTE(review): case labels,
 * the actual unlink statements inside the search loops, and several braces
 * are elided in this listing.
 */
1221 unblockThread(StgTSO *tso)
1225 ACQUIRE_LOCK(&sched_mutex);
1226 switch (tso->why_blocked) {
/* NOTE(review): early return without releasing sched_mutex is visible
 * here -- presumably an unlock precedes it in the elided text; verify. */
1229 return; /* not blocked */
1232 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
1234 StgTSO *last_tso = END_TSO_QUEUE;
1235 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
/* walk the MVar's waiters, remembering the predecessor for unlinking */
1238 for (t = mvar->head; t != END_TSO_QUEUE;
1239 last = &t->link, last_tso = t, t = t->link) {
/* if we removed the tail, the previous element becomes the new tail */
1242 if (mvar->tail == tso) {
1243 mvar->tail = last_tso;
1248 barf("unblockThread (MVAR): TSO not found");
1251 case BlockedOnBlackHole:
1252 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
1254 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
1256 last = &bq->blocking_queue;
1257 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
1258 last = &t->link, t = t->link) {
1264 barf("unblockThread (BLACKHOLE): TSO not found");
1267 case BlockedOnException:
1269 StgTSO *target = tso->block_info.tso;
1271 ASSERT(get_itbl(target)->type == TSO);
1272 ASSERT(target->blocked_exceptions != NULL);
1274 last = &target->blocked_exceptions;
1275 for (t = target->blocked_exceptions; t != END_TSO_QUEUE;
1276 last = &t->link, t = t->link) {
1277 ASSERT(get_itbl(t)->type == TSO);
1283 barf("unblockThread (Exception): TSO not found");
1286 case BlockedOnDelay:
1288 case BlockedOnWrite:
/* I/O and delay blockage share the global blocked queue; this search
 * must also patch up blocked_queue_tl when removing the tail */
1290 StgTSO *prev = NULL;
1291 for (t = blocked_queue_hd; t != END_TSO_QUEUE;
1292 prev = t, t = t->link) {
1295 blocked_queue_hd = t->link;
1296 if (blocked_queue_tl == t) {
1297 blocked_queue_tl = END_TSO_QUEUE;
1300 prev->link = t->link;
1301 if (blocked_queue_tl == t) {
1302 blocked_queue_tl = prev;
1308 barf("unblockThread (I/O): TSO not found");
1312 barf("unblockThread");
/* common exit: the thread is no longer blocked on anything; run it */
1316 tso->link = END_TSO_QUEUE;
1317 tso->why_blocked = NotBlocked;
1318 tso->block_info.closure = NULL;
1319 PUSH_ON_RUN_QUEUE(tso);
1320 RELEASE_LOCK(&sched_mutex);
1323 /* -----------------------------------------------------------------------------
1326 * The following function implements the magic for raising an
1327 * asynchronous exception in an existing thread.
1329 * We first remove the thread from any queue on which it might be
1330 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
1332 * We strip the stack down to the innermost CATCH_FRAME, building
1333 * thunks in the heap for all the active computations, so they can
1334 * be restarted if necessary. When we reach a CATCH_FRAME, we build
1335 * an application of the handler to the exception, and push it on
1336 * the top of the stack.
1338 * How exactly do we save all the active computations? We create an
1339 * AP_UPD for every UpdateFrame on the stack. Entering one of these
1340 * AP_UPDs pushes everything from the corresponding update frame
1341 * upwards onto the stack. (Actually, it pushes everything up to the
1342 * next update frame plus a pointer to the next AP_UPD object.
1343 * Entering the next AP_UPD object pushes more onto the stack until we
1344 * reach the last AP_UPD object - at which point the stack should look
1345 * exactly as it did when we killed the TSO and we can continue
1346 * execution by entering the closure on top of the stack.
1348 * We can also kill a thread entirely - this happens if either (a) the
1349 * exception passed to raiseAsync is NULL, or (b) there's no
1350 * CATCH_FRAME on the stack. In either case, we strip the entire
1351 * stack and replace the thread with a zombie.
1353 * -------------------------------------------------------------------------- */
/* deleteThread: kill a thread outright.
 *
 * Killing a thread is expressed as raising the special NULL
 * "exception": raiseAsync with a NULL exception strips the entire
 * stack (there is no CATCH_FRAME that can handle NULL) and leaves
 * the TSO as a zombie (ThreadKilled) — see the raiseAsync comment
 * above.
 *
 * Locks assumed held: whatever raiseAsync requires (it takes
 * sched_mutex internally for the unblock step).
 */
deleteThread(StgTSO *tso)
  raiseAsync(tso,NULL);
1362 raiseAsync(StgTSO *tso, StgClosure *exception)
1364 StgUpdateFrame* su = tso->su;
1365 StgPtr sp = tso->sp;
1367 /* Thread already dead? */
1368 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
1372 IF_DEBUG(scheduler, belch("schedule: Raising exception in thread %ld.", tso->id));
1374 /* Remove it from any blocking queues */
1377 /* The stack freezing code assumes there's a closure pointer on
1378 * the top of the stack. This isn't always the case with compiled
1379 * code, so we have to push a dummy closure on the top which just
1380 * returns to the next return address on the stack.
1382 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
1383 *(--sp) = (W_)&dummy_ret_closure;
1387 int words = ((P_)su - (P_)sp) - 1;
1391 /* If we find a CATCH_FRAME, and we've got an exception to raise,
1392 * then build PAP(handler,exception), and leave it on top of
1393 * the stack ready to enter.
1395 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
1396 StgCatchFrame *cf = (StgCatchFrame *)su;
1397 /* we've got an exception to raise, so let's pass it to the
1398 * handler in this frame.
1400 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
1401 TICK_ALLOC_UPD_PAP(2,0);
1402 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
1405 ap->fun = cf->handler;
1406 ap->payload[0] = (P_)exception;
1408 /* sp currently points to the word above the CATCH_FRAME on the stack.
1410 sp += sizeofW(StgCatchFrame);
1413 /* Restore the blocked/unblocked state for asynchronous exceptions
1414 * at the CATCH_FRAME.
1416 * If exceptions were unblocked at the catch, arrange that they
1417 * are unblocked again after executing the handler by pushing an
1418 * unblockAsyncExceptions_ret stack frame.
1420 if (!cf->exceptions_blocked) {
1421 *(sp--) = (W_)&unblockAsyncExceptionszh_ret_info;
1424 /* Ensure that async exceptions are blocked when running the handler.
1426 if (tso->blocked_exceptions == NULL) {
1427 tso->blocked_exceptions = END_TSO_QUEUE;
1430 /* Put the newly-built PAP on top of the stack, ready to execute
1431 * when the thread restarts.
1435 tso->whatNext = ThreadEnterGHC;
1439 /* First build an AP_UPD consisting of the stack chunk above the
1440 * current update frame, with the top word on the stack as the
1443 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
1448 ap->fun = (StgClosure *)sp[0];
1450 for(i=0; i < (nat)words; ++i) {
1451 ap->payload[i] = (P_)*sp++;
1454 switch (get_itbl(su)->type) {
1458 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
1459 TICK_ALLOC_UP_THK(words+1,0);
1462 fprintf(stderr, "schedule: Updating ");
1463 printPtr((P_)su->updatee);
1464 fprintf(stderr, " with ");
1465 printObj((StgClosure *)ap);
1468 /* Replace the updatee with an indirection - happily
1469 * this will also wake up any threads currently
1470 * waiting on the result.
1472 UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
1474 sp += sizeofW(StgUpdateFrame) -1;
1475 sp[0] = (W_)ap; /* push onto stack */
1481 StgCatchFrame *cf = (StgCatchFrame *)su;
1484 /* We want a PAP, not an AP_UPD. Fortunately, the
1485 * layout's the same.
1487 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1488 TICK_ALLOC_UPD_PAP(words+1,0);
1490 /* now build o = FUN(catch,ap,handler) */
1491 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
1492 TICK_ALLOC_FUN(2,0);
1493 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
1494 o->payload[0] = (StgClosure *)ap;
1495 o->payload[1] = cf->handler;
1498 fprintf(stderr, "schedule: Built ");
1499 printObj((StgClosure *)o);
1502 /* pop the old handler and put o on the stack */
1504 sp += sizeofW(StgCatchFrame) - 1;
1511 StgSeqFrame *sf = (StgSeqFrame *)su;
1514 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1515 TICK_ALLOC_UPD_PAP(words+1,0);
1517 /* now build o = FUN(seq,ap) */
1518 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
1519 TICK_ALLOC_SE_THK(1,0);
1520 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
1521 payloadCPtr(o,0) = (StgClosure *)ap;
1524 fprintf(stderr, "schedule: Built ");
1525 printObj((StgClosure *)o);
1528 /* pop the old handler and put o on the stack */
1530 sp += sizeofW(StgSeqFrame) - 1;
1536 /* We've stripped the entire stack, the thread is now dead. */
1537 sp += sizeofW(StgStopFrame) - 1;
1538 sp[0] = (W_)exception; /* save the exception */
1539 tso->whatNext = ThreadKilled;
1540 tso->su = (StgUpdateFrame *)(sp+1);