1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.32 1999/11/11 17:19:15 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
10 /* Version with scheduler monitor support for SMPs.
12 This design provides a high-level API to create and schedule threads etc.
13 as documented in the SMP design document.
15 It uses a monitor design controlled by a single mutex to exercise control
16 over accesses to shared data structures, and builds on the Posix threads
19 The majority of state is shared. In order to keep essential per-task state,
20 there is a Capability structure, which contains all the information
21 needed to run a thread: its STG registers, a pointer to its TSO, a
22 nursery etc. During STG execution, a pointer to the capability is
23 kept in a register (BaseReg).
25 In a non-SMP build, there is one global capability, namely MainRegTable.
36 #include "StgStartup.h"
40 #include "StgMiscClosures.h"
42 #include "Evaluator.h"
46 #include "Profiling.h"
52 * These are the threads which clients have requested that we run.
54 * In an SMP build, we might have several concurrent clients all
55 * waiting for results, and each one will wait on a condition variable
56 * until the result is available.
58 * In non-SMP, clients are strictly nested: the first client calls
59 * into the RTS, which might call out again to C with a _ccall_GC, and
60 * eventually re-enter the RTS.
62 * Main threads information is kept in a linked list:
/* A "main thread": a client that has asked the RTS to run a computation
 * and is waiting (on its condition variable) for the result.
 * NOTE(review): this listing is line-sampled; the TSO pointer and the
 * return-value slot of this struct are not visible here.
 */
64 typedef struct StgMainThread_ {
69 pthread_cond_t wakeup;
71 struct StgMainThread_ *link;
75 * Locks required: sched_mutex.
/* Head of the singly-linked list of main threads. */
77 static StgMainThread *main_threads;
80 * Locks required: sched_mutex.
/* Runnable threads, kept as a head/tail queue of TSOs. */
82 StgTSO *run_queue_hd, *run_queue_tl;
/* Threads blocked on I/O or delays. */
83 StgTSO *blocked_queue_hd, *blocked_queue_tl;
85 /* Threads suspended in _ccall_GC.
86 * Locks required: sched_mutex.
88 static StgTSO *suspended_ccalling_threads;
/* Forward declarations for the GC root marker and stack-growth helper. */
90 static void GetRoots(void);
91 static StgTSO *threadStackOverflow(StgTSO *tso);
93 /* KH: The following two flags are shared memory locations. There is no need
94 to lock them, since they are only unset at the end of a scheduler
98 /* flag set by signal handler to precipitate a context switch */
100 /* if this flag is set as well, give up execution */
101 static nat interrupted;
103 /* Next thread ID to allocate.
104 * Locks required: sched_mutex
106 StgThreadID next_thread_id = 1;
109 * Pointers to the state of the current thread.
110 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
111 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
114 /* The smallest stack size that makes any sense is:
115 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
116 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
117 * + 1 (the realworld token for an IO thread)
118 * + 1 (the closure to enter)
120 * A thread with this stack will bomb immediately with a stack
121 * overflow, which will increase its stack size.
/* Smallest usable stack: reserved words + stop frame + 2 (realworld
 * token and the closure to enter) -- see the comment above. */
124 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
126 /* Free capability list.
127 * Locks required: sched_mutex.
130 Capability *free_capabilities; /* Available capabilities for running threads */
131 nat n_free_capabilities; /* total number of available capabilities */
133 Capability MainRegTable; /* for non-SMP, we have one global capability */
138 /* All our current task ids, saved in case we need to kill them later.
/* Scheduler entry points / internal prototypes. */
144 void addToBlockedQueue ( StgTSO *tso );
146 static void schedule ( void );
147 static void initThread ( StgTSO *tso, nat stack_size );
148 void interruptStgRts ( void );
/* Monitor state: one mutex guarding all scheduler data, plus condition
 * variables for "work available" and "GC pending" (SMP build). */
151 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
152 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
153 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
154 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
159 /* -----------------------------------------------------------------------------
160 Main scheduling loop.
162 We use round-robin scheduling, each thread returning to the
163 scheduler loop when one of these conditions is detected:
166 * timer expires (thread yields)
171 Locking notes: we acquire the scheduler lock once at the beginning
172 of the scheduler loop, and release it when
174 * running a thread, or
175 * waiting for work, or
176 * waiting for a GC to complete.
178 -------------------------------------------------------------------------- */
/* Body fragment of schedule(), the main round-robin scheduler loop.
 * NOTE(review): this listing is line-sampled -- the function header,
 * many interior lines and closing braces are missing, so the code is
 * left byte-identical and only comments have been added.
 */
185 StgThreadReturnCode ret;
/* The monitor lock is held everywhere in the loop except while a
 * thread is actually running (released at line 351, re-acquired at 395). */
187 ACQUIRE_LOCK(&sched_mutex);
191 /* If we're interrupted (the user pressed ^C, or some other
192 * termination condition occurred), kill all the currently running
196 IF_DEBUG(scheduler,belch("schedule: interrupted"));
197 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
200 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
203 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
204 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
207 /* Go through the list of main threads and wake up any
208 * clients whose computations have finished. ToDo: this
209 * should be done more efficiently without a linear scan
210 * of the main threads list, somehow...
214 StgMainThread *m, **prev;
215 prev = &main_threads;
216 for (m = main_threads; m != NULL; m = m->link) {
217 if (m->tso->whatNext == ThreadComplete) {
/* Result convention: the finished thread's value is the word on
 * top of its stack. */
219 *(m->ret) = (StgClosure *)m->tso->sp[0];
223 pthread_cond_broadcast(&m->wakeup);
225 if (m->tso->whatNext == ThreadKilled) {
228 pthread_cond_broadcast(&m->wakeup);
233 /* If our main thread has finished or been killed, return.
236 StgMainThread *m = main_threads;
237 if (m->tso->whatNext == ThreadComplete
238 || m->tso->whatNext == ThreadKilled) {
239 main_threads = main_threads->link;
240 if (m->tso->whatNext == ThreadComplete) {
241 /* we finished successfully, fill in the return value */
242 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
253 /* Check whether any waiting threads need to be woken up. If the
254 * run queue is empty, and there are no other tasks running, we
255 * can wait indefinitely for something to happen.
256 * ToDo: what if another client comes along & requests another
259 if (blocked_queue_hd != END_TSO_QUEUE) {
261 (run_queue_hd == END_TSO_QUEUE)
263 && (n_free_capabilities == RtsFlags.ConcFlags.nNodes)
268 /* check for signals each time around the scheduler */
270 if (signals_pending()) {
271 start_signal_handlers();
275 /* Detect deadlock: when we have no threads to run, there are
276 * no threads waiting on I/O or sleeping, and all the other
277 * tasks are waiting for work, we must have a deadlock. Inform
278 * all the main threads.
281 if (blocked_queue_hd == END_TSO_QUEUE
282 && run_queue_hd == END_TSO_QUEUE
283 && (n_free_capabilities == RtsFlags.ConcFlags.nNodes)
286 for (m = main_threads; m != NULL; m = m->link) {
289 pthread_cond_broadcast(&m->wakeup);
294 if (blocked_queue_hd == END_TSO_QUEUE
295 && run_queue_hd == END_TSO_QUEUE) {
296 StgMainThread *m = main_threads;
299 main_threads = m->link;
305 /* If there's a GC pending, don't do anything until it has
309 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): waiting for GC\n",
311 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
314 /* block until we've got a thread on the run queue and a free
/* SMP: sleep on thread_ready_cond until both a runnable thread and
 * a free capability exist (standard condvar wait-in-a-loop). */
317 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
319 fprintf(stderr, "schedule (task %ld): waiting for work\n",
321 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
323 fprintf(stderr, "schedule (task %ld): work now available\n",
328 /* grab a thread from the run queue
/* Pop a capability off the free list and bind the chosen TSO to it. */
335 cap = free_capabilities;
336 free_capabilities = cap->link;
337 n_free_capabilities--;
342 cap->rCurrentTSO = t;
344 /* set the context_switch flag
346 if (run_queue_hd == END_TSO_QUEUE)
/* Drop the monitor lock while the thread runs. */
351 RELEASE_LOCK(&sched_mutex);
354 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): running thread %d\n", pthread_self(),t->id));
356 IF_DEBUG(scheduler,fprintf(stderr,"schedule: running thread %d\n",t->id));
359 /* Run the current thread
/* Dispatch on whatNext: enter compiled code via StgRun, or the Hugs
 * bytecode interpreter when built with one. */
361 switch (cap->rCurrentTSO->whatNext) {
364 /* Thread already finished, return to scheduler. */
365 ret = ThreadFinished;
368 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
371 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
373 case ThreadEnterHugs:
377 IF_DEBUG(scheduler,belch("schedule: entering Hugs"));
378 c = (StgClosure *)(cap->rCurrentTSO->sp[0]);
379 cap->rCurrentTSO->sp += 1;
384 barf("Panic: entered a BCO but no bytecode interpreter in this build");
387 barf("schedule: invalid whatNext field");
390 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* Thread has yielded/blocked/finished: retake the lock and act on
 * the returned StgThreadReturnCode. */
395 ACQUIRE_LOCK(&sched_mutex);
398 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): ", pthread_self()););
400 IF_DEBUG(scheduler,fprintf(stderr,"schedule: "););
402 t = cap->rCurrentTSO;
406 /* make all the running tasks block on a condition variable,
407 * maybe set context_switch and wait till they all pile in,
408 * then have them wait on a GC condition variable.
410 IF_DEBUG(scheduler,belch("thread %ld stopped: HeapOverflow", t->id));
413 ready_to_gc = rtsTrue;
414 context_switch = 1; /* stop other threads ASAP */
415 PUSH_ON_RUN_QUEUE(t);
419 /* just adjust the stack for this thread, then pop it back
422 IF_DEBUG(scheduler,belch("thread %ld stopped, StackOverflow", t->id));
426 /* enlarge the stack */
427 StgTSO *new_t = threadStackOverflow(t);
429 /* This TSO has moved, so update any pointers to it from the
430 * main thread stack. It better not be on any other queues...
433 for (m = main_threads; m != NULL; m = m->link) {
438 PUSH_ON_RUN_QUEUE(new_t);
443 /* put the thread back on the run queue. Then, if we're ready to
444 * GC, check whether this is the last task to stop. If so, wake
445 * up the GC thread. getThread will block during a GC until the
449 if (t->whatNext == ThreadEnterHugs) {
450 /* ToDo: or maybe a timer expired when we were in Hugs?
451 * or maybe someone hit ctrl-C
453 belch("thread %ld stopped to switch to Hugs", t->id);
455 belch("thread %ld stopped, yielding", t->id);
459 APPEND_TO_RUN_QUEUE(t);
463 /* don't need to do anything. Either the thread is blocked on
464 * I/O, in which case we'll have called addToBlockedQueue
465 * previously, or it's blocked on an MVar or Blackhole, in which
466 * case it'll be on the relevant queue already.
469 fprintf(stderr, "thread %d stopped, ", t->id);
470 printThreadBlockage(t);
471 fprintf(stderr, "\n"));
476 /* Need to check whether this was a main thread, and if so, signal
477 * the task that started it with the return value. If we have no
478 * more main threads, we probably need to stop all the tasks until
481 IF_DEBUG(scheduler,belch("thread %ld finished", t->id));
482 t->whatNext = ThreadComplete;
486 barf("doneThread: invalid thread return code");
/* Return the capability to the free list before looping. */
490 cap->link = free_capabilities;
491 free_capabilities = cap;
492 n_free_capabilities++;
/* Last task in: all capabilities are free again, so it is safe to GC. */
496 if (ready_to_gc && n_free_capabilities == RtsFlags.ConcFlags.nNodes) {
500 /* everybody back, start the GC.
501 * Could do it in this thread, or signal a condition var
502 * to do it in another thread. Either way, we need to
503 * broadcast on gc_pending_cond afterward.
506 IF_DEBUG(scheduler,belch("schedule (task %ld): doing GC", pthread_self()));
508 GarbageCollect(GetRoots);
509 ready_to_gc = rtsFalse;
511 pthread_cond_broadcast(&gc_pending_cond);
514 } /* end of while(1) */
517 /* -----------------------------------------------------------------------------
518 * Suspending & resuming Haskell threads.
520 * When making a "safe" call to C (aka _ccall_GC), the task gives back
521 * its capability before calling the C function. This allows another
522 * task to pick up the capability and carry on running Haskell
523 * threads. It also means that if the C call blocks, it won't lock
526 * The Haskell thread making the C call is put to sleep for the
527 * duration of the call, on the suspended_ccalling_threads queue. We
528 * give out a token to the task, which it can use to resume the thread
529 * on return from the C function.
530 * -------------------------------------------------------------------------- */
/* suspendThread: give up this task's capability before a "safe" C call
 * (_ccall_GC).  The current TSO is parked on suspended_ccalling_threads
 * and the thread's id is returned as a token for resumeThread().
 * NOTE(review): line-sampled fragment; return statement not visible here.
 */
533 suspendThread( Capability *cap )
537 ACQUIRE_LOCK(&sched_mutex);
541 fprintf(stderr, "schedule (task %ld): thread %d did a _ccall_gc\n",
542 pthread_self(), cap->rCurrentTSO->id));
545 fprintf(stderr, "schedule: thread %d did a _ccall_gc\n",
546 cap->rCurrentTSO->id));
/* Save the thread's state, then push it onto the suspended list. */
549 threadPaused(cap->rCurrentTSO);
550 cap->rCurrentTSO->link = suspended_ccalling_threads;
551 suspended_ccalling_threads = cap->rCurrentTSO;
553 /* Use the thread ID as the token; it should be unique */
554 tok = cap->rCurrentTSO->id;
/* Hand the capability back so another task can run Haskell threads. */
557 cap->link = free_capabilities;
558 free_capabilities = cap;
559 n_free_capabilities++;
562 RELEASE_LOCK(&sched_mutex);
/* resumeThread: on return from a safe C call, find the suspended TSO by
 * its token (thread id), wait for a free capability, and reattach the
 * thread to it.  Barfs if the token matches no suspended thread.
 * NOTE(review): line-sampled fragment; list-unlink and return lines missing.
 */
567 resumeThread( StgInt tok )
572 ACQUIRE_LOCK(&sched_mutex);
/* Linear scan of the suspended list, keeping a back-pointer for unlinking. */
574 prev = &suspended_ccalling_threads;
575 for (tso = suspended_ccalling_threads;
576 tso != END_TSO_QUEUE;
577 prev = &tso->link, tso = tso->link) {
578 if (tso->id == (StgThreadID)tok) {
583 if (tso == END_TSO_QUEUE) {
584 barf("resumeThread: thread not found");
/* Block until a capability is free (thread_ready_cond is broadcast
 * whenever one becomes available). */
588 while (free_capabilities == NULL) {
590 fprintf(stderr,"schedule (task %ld): waiting to resume\n",
592 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
593 IF_DEBUG(scheduler,fprintf(stderr,
594 "schedule (task %ld): resuming thread %d\n",
595 pthread_self(), tso->id));
597 cap = free_capabilities;
598 free_capabilities = cap->link;
599 n_free_capabilities--;
604 cap->rCurrentTSO = tso;
606 RELEASE_LOCK(&sched_mutex);
610 /* -----------------------------------------------------------------------------
612 * -------------------------------------------------------------------------- */
/* Forward declaration; defined below (removes a TSO from whatever queue
 * it is blocked on). */
613 static void unblockThread(StgTSO *tso);
615 /* -----------------------------------------------------------------------------
616 * Comparing Thread ids.
618 * This is used from STG land in the implementation of the
619 * instances of Eq/Ord for ThreadIds.
620 * -------------------------------------------------------------------------- */
622 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
624 StgThreadID id1 = tso1->id;
625 StgThreadID id2 = tso2->id;
627 if (id1 < id2) return (-1);
628 if (id1 > id2) return 1;
632 /* -----------------------------------------------------------------------------
635 The new thread starts with the given stack size. Before the
636 scheduler can run, however, this thread needs to have a closure
637 (and possibly some arguments) pushed on its stack. See
638 pushClosure() in Schedule.h.
640 createGenThread() and createIOThread() (in SchedAPI.h) are
641 convenient packaged versions of this function.
642 -------------------------------------------------------------------------- */
/* createThread: allocate and initialise a new TSO with (at least) the
 * requested stack size, in words.  The caller must push a closure on the
 * new thread's stack before scheduling it (see pushClosure in Schedule.h).
 * NOTE(review): line-sampled fragment; return of the TSO not visible here.
 */
645 createThread(nat stack_size)
649 /* catch ridiculously small stack sizes */
650 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
651 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
/* stack_size here includes the TSO header (TSO_STRUCT_SIZEW words). */
654 tso = (StgTSO *)allocate(stack_size);
655 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
657 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* initThread: fill in a freshly-allocated TSO: info table, unique id,
 * stack bounds, and a stop frame at the top of the stack.  stack_size
 * is in words and excludes the TSO header.
 */
662 initThread(StgTSO *tso, nat stack_size)
664 SET_INFO(tso,&TSO_info);
665 tso->whatNext = ThreadEnterGHC;
667 /* tso->id needs to be unique. For now we use a heavyweight mutex to
668 protect the increment operation on next_thread_id.
669 In future, we could use an atomic increment instead.
672 ACQUIRE_LOCK(&sched_mutex);
673 tso->id = next_thread_id++;
674 RELEASE_LOCK(&sched_mutex);
676 tso->why_blocked = NotBlocked;
/* splim: stack-overflow check limit; sp starts at the top (stack grows down). */
678 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
679 tso->stack_size = stack_size;
680 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
682 tso->sp = (P_)&(tso->stack) + stack_size;
685 tso->prof.CCCS = CCS_MAIN;
688 /* put a stop frame on the stack */
689 tso->sp -= sizeofW(StgStopFrame);
690 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
691 tso->su = (StgUpdateFrame*)tso->sp;
693 IF_DEBUG(scheduler,belch("schedule: Initialised thread %ld, stack size = %lx words",
694 tso->id, tso->stack_size));
699 /* -----------------------------------------------------------------------------
702 * scheduleThread puts a thread on the head of the runnable queue.
703 * This will usually be done immediately after a thread is created.
704 * The caller of scheduleThread must create the thread using e.g.
705 * createThread and push an appropriate closure
706 * on this thread's stack before the scheduler is invoked.
707 * -------------------------------------------------------------------------- */
/* scheduleThread: make a freshly-created TSO runnable by putting it on
 * the head of the run queue.  Caller must already have pushed a closure
 * on the thread's stack (see createThread).
 */
710 scheduleThread(StgTSO *tso)
712 ACQUIRE_LOCK(&sched_mutex);
714 /* Put the new thread on the head of the runnable queue. The caller
715 * better push an appropriate closure on this thread's stack
716 * beforehand. In the SMP case, the thread may start running as
717 * soon as we release the scheduler lock below.
719 PUSH_ON_RUN_QUEUE(tso);
722 IF_DEBUG(scheduler,printTSO(tso));
723 RELEASE_LOCK(&sched_mutex);
727 /* -----------------------------------------------------------------------------
730 * Start up Posix threads to run each of the scheduler tasks.
731 * I believe the task ids are not needed in the system as defined.
733 * -------------------------------------------------------------------------- */
/* taskStart: pthread entry point for each scheduler task (SMP build).
 * NOTE(review): line-sampled fragment; the body is not visible here. */
737 taskStart( void *arg STG_UNUSED )
744 /* -----------------------------------------------------------------------------
747 * Initialise the scheduler. This resets all the queues - if the
748 * queues contained any threads, they'll be garbage collected at the
751 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
752 * -------------------------------------------------------------------------- */
/* term_handler: SIGTERM handler for scheduler tasks; runs under
 * term_mutex.  NOTE(review): line-sampled fragment -- the work done
 * between lock and unlock (presumably decrementing await_death, see
 * exitScheduler) is not visible here. */
756 term_handler(int sig STG_UNUSED)
759 ACQUIRE_LOCK(&term_mutex);
761 RELEASE_LOCK(&term_mutex);
/* initScheduler: reset all scheduler queues, install the SIGTERM
 * handler, allocate the capabilities, and start one Posix thread per
 * capability.  Calls startTasks(), so must only be called once.
 */
766 void initScheduler(void)
768 run_queue_hd = END_TSO_QUEUE;
769 run_queue_tl = END_TSO_QUEUE;
770 blocked_queue_hd = END_TSO_QUEUE;
771 blocked_queue_tl = END_TSO_QUEUE;
773 suspended_ccalling_threads = END_TSO_QUEUE;
780 enteredCAFs = END_CAF_LIST;
782 /* Install the SIGTERM handler (NB: old comment said SIGHUP, but the
   sigaction call below registers SIGTERM) */
785 struct sigaction action,oact;
787 action.sa_handler = term_handler;
788 sigemptyset(&action.sa_mask);
790 if (sigaction(SIGTERM, &action, &oact) != 0) {
791 barf("can't install TERM handler");
797 /* Allocate N Capabilities */
800 Capability *cap, *prev;
803 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
804 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
808 free_capabilities = cap;
809 n_free_capabilities = RtsFlags.ConcFlags.nNodes;
811 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Allocated %d capabilities\n",
812 n_free_capabilities););
824 /* make some space for saving all the thread ids */
825 task_ids = stgMallocBytes(RtsFlags.ConcFlags.nNodes * sizeof(task_info),
826 "initScheduler:task_ids");
828 /* and create all the threads */
829 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
830 r = pthread_create(&tid,NULL,taskStart,NULL);
832 barf("startTasks: Can't create new Posix thread");
/* Record the task id and zero its profiling counters. */
834 task_ids[i].id = tid;
835 task_ids[i].mut_time = 0.0;
836 task_ids[i].mut_etime = 0.0;
837 task_ids[i].gc_time = 0.0;
838 task_ids[i].gc_etime = 0.0;
839 task_ids[i].elapsedtimestart = elapsedtime();
840 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Started task: %ld\n",tid););
/* exitScheduler: shut down all scheduler tasks.  Two mechanisms appear
 * in this (line-sampled) fragment: pthread_cancel + pthread_join, and a
 * SIGTERM broadcast with an await_death countdown (decremented by
 * term_handler); presumably one path is conditionally compiled.
 */
846 exitScheduler( void )
851 /* Don't want to use pthread_cancel, since we'd have to install
852 * these silly exception handlers (pthread_cleanup_{push,pop}) around
856 /* Cancel all our tasks */
857 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
858 pthread_cancel(task_ids[i].id);
861 /* Wait for all the tasks to terminate */
862 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
863 IF_DEBUG(scheduler,fprintf(stderr,"schedule: waiting for task %ld\n",
865 pthread_join(task_ids[i].id, NULL);
869 /* Send 'em all a SIGTERM. That should shut 'em up.
871 await_death = RtsFlags.ConcFlags.nNodes;
872 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
873 pthread_kill(task_ids[i].id,SIGTERM);
875 while (await_death > 0) {
881 /* -----------------------------------------------------------------------------
882 Managing the per-task allocation areas.
884 Each capability comes with an allocation area. These are
885 fixed-length block lists into which allocation can be done.
887 ToDo: no support for two-space collection at the moment???
888 -------------------------------------------------------------------------- */
890 /* -----------------------------------------------------------------------------
891 * waitThread is the external interface for running a new computation
892 * and waiting for the result.
894 * In the non-SMP case, we create a new main thread, push it on the
895 * main-thread stack, and invoke the scheduler to run it. The
896 * scheduler will return when the top main thread on the stack has
897 * completed or died, and fill in the necessary fields of the
898 * main_thread structure.
900 * In the SMP case, we create a main thread as before, but we then
901 * create a new condition variable and sleep on it. When our new
902 * main thread has completed, we'll be woken up and the status/result
903 * will be in the main_thread struct.
904 * -------------------------------------------------------------------------- */
/* waitThread: run a thread to completion on behalf of an external
 * client and deliver its result through *ret.  SMP path shown here:
 * register a StgMainThread record, then sleep on its condvar until the
 * scheduler fills in a status.  NOTE(review): line-sampled fragment;
 * queueing of m and the final return of stat are not visible.
 */
907 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
910 SchedulerStatus stat;
912 ACQUIRE_LOCK(&sched_mutex);
914 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
920 pthread_cond_init(&m->wakeup, NULL);
923 m->link = main_threads;
926 IF_DEBUG(scheduler, fprintf(stderr, "schedule: new main thread (%d)\n",
/* Standard condvar loop: wait until the scheduler sets m->stat. */
931 pthread_cond_wait(&m->wakeup, &sched_mutex);
932 } while (m->stat == NoStatus);
935 ASSERT(m->stat != NoStatus);
941 pthread_cond_destroy(&m->wakeup);
945 RELEASE_LOCK(&sched_mutex);
949 /* -----------------------------------------------------------------------------
950 Debugging: why is a thread blocked
951 -------------------------------------------------------------------------- */
/* Debugging aid: print (to stderr, no newline) why a thread is blocked,
 * based on its why_blocked field.  NOTE(review): line-sampled fragment;
 * case labels and breaks between the fprintf calls are not visible. */
954 void printThreadBlockage(StgTSO *tso)
956 switch (tso->why_blocked) {
958 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
961 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
964 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
967 fprintf(stderr,"blocked on an MVar");
969 case BlockedOnBlackHole:
970 fprintf(stderr,"blocked on a black hole");
973 fprintf(stderr,"not blocked");
979 /* -----------------------------------------------------------------------------
980 Where are the roots that we know about?
982 - all the threads on the runnable queue
983 - all the threads on the blocked queue
984 - all the thread currently executing a _ccall_GC
985 - all the "main threads"
987 -------------------------------------------------------------------------- */
989 /* This has to be protected either by the scheduler monitor, or by the
990 garbage collection monitor (probably the latter).
/* GetRoots: mark every scheduler-owned GC root -- the run queue, the
 * blocked queue, each main thread's TSO, and the suspended-ccall list.
 * MarkRoot may move the object, so each root is re-assigned from its
 * return value.
 */
994 static void GetRoots(void)
998 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
999 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
1001 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
1002 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
1004 for (m = main_threads; m != NULL; m = m->link) {
1005 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
1007 suspended_ccalling_threads =
1008 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
1011 /* -----------------------------------------------------------------------------
1014 This is the interface to the garbage collector from Haskell land.
1015 We provide this so that external C code can allocate and garbage
1016 collect when called from Haskell via _ccall_GC.
1018 It might be useful to provide an interface whereby the programmer
1019 can specify more roots (ToDo).
1021 This needs to be protected by the GC condition variable above. KH.
1022 -------------------------------------------------------------------------- */
/* External GC interface for C code called from Haskell via _ccall_GC.
 * NOTE(review): line-sampled fragments of performGC, AllRoots and
 * performGCWithRoots; function headers for the first two are missing. */
/* Stashes the user's extra root-marking callback for AllRoots to call. */
1024 void (*extra_roots)(void);
1029 GarbageCollect(GetRoots);
1035 GetRoots(); /* the scheduler's roots */
1036 extra_roots(); /* the user's roots */
/* performGCWithRoots: GC with both scheduler roots and caller-supplied roots. */
1040 performGCWithRoots(void (*get_roots)(void))
1042 extra_roots = get_roots;
1044 GarbageCollect(AllRoots);
1047 /* -----------------------------------------------------------------------------
1050 If the thread has reached its maximum stack size,
1051 then bomb out. Otherwise relocate the TSO into a larger chunk of
1052 memory and adjust its stack size appropriately.
1053 -------------------------------------------------------------------------- */
/* threadStackOverflow: grow a thread's stack.  If the thread is already
 * at its max stack size, raise the StackOverflow exception in it;
 * otherwise allocate a larger TSO, copy the old TSO + live stack across,
 * relocate the stack pointers, and kill the old TSO.  Returns the new
 * TSO (return statement not visible in this line-sampled fragment).
 */
1056 threadStackOverflow(StgTSO *tso)
1058 nat new_stack_size, new_tso_size, diff, stack_words;
1062 if (tso->stack_size >= tso->max_stack_size) {
1064 /* If we're debugging, just print out the top of the stack */
1065 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
1069 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
1072 /* Send this thread the StackOverflow exception */
1073 raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
1078 /* Try to double the current stack size. If that takes us over the
1079 * maximum stack size for this thread, then use the maximum instead.
1080 * Finally round up so the TSO ends up as a whole number of blocks.
1082 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
1083 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
1084 TSO_STRUCT_SIZE)/sizeof(W_);
1085 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
1086 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
1088 IF_DEBUG(scheduler, fprintf(stderr,"schedule: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
1090 dest = (StgTSO *)allocate(new_tso_size);
1091 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
1093 /* copy the TSO block and the old stack into the new area */
1094 memcpy(dest,tso,TSO_STRUCT_SIZE);
/* Only the live part of the stack (sp..top) is copied, aligned to the
 * top of the new, larger stack. */
1095 stack_words = tso->stack + tso->stack_size - tso->sp;
1096 new_sp = (P_)dest + new_tso_size - stack_words;
1097 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
1099 /* relocate the stack pointers... */
1100 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
1101 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1103 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
1104 dest->stack_size = new_stack_size;
1106 /* and relocate the update frame list */
1107 relocate_TSO(tso, dest);
1109 /* Mark the old one as dead so we don't try to scavenge it during
1110 * garbage collection (the TSO will likely be on a mutables list in
1111 * some generation, but it'll get collected soon enough). It's
1112 * important to set the sp and su values to just beyond the end of
1113 * the stack, so we don't attempt to scavenge any part of the dead
1116 tso->whatNext = ThreadKilled;
1117 tso->sp = (P_)&(tso->stack[tso->stack_size]);
1118 tso->su = (StgUpdateFrame *)tso->sp;
1119 tso->why_blocked = NotBlocked;
1120 dest->mut_link = NULL;
1122 IF_DEBUG(sanity,checkTSO(tso));
1124 IF_DEBUG(scheduler,printTSO(dest));
1128 /* This will no longer work: KH */
1129 if (tso == MainTSO) { /* hack */
1136 /* -----------------------------------------------------------------------------
1137 Wake up a queue that was blocked on some resource.
1138 -------------------------------------------------------------------------- */
/* unblockOneLocked: wake a single blocked TSO and put it on the run
 * queue.  Caller must hold sched_mutex.  NOTE(review): line-sampled
 * fragment; the return of the next TSO in the queue is not visible. */
1141 unblockOneLocked(StgTSO *tso)
1145 ASSERT(get_itbl(tso)->type == TSO);
1146 ASSERT(tso->why_blocked != NotBlocked);
1147 tso->why_blocked = NotBlocked;
1149 PUSH_ON_RUN_QUEUE(tso);
1152 IF_DEBUG(scheduler,belch("schedule (task %ld): waking up thread %ld",
1153 pthread_self(), tso->id));
1155 IF_DEBUG(scheduler,belch("schedule: waking up thread %ld", tso->id));
/* unblockOne: lock-taking wrapper around unblockOneLocked. */
1161 unblockOne(StgTSO *tso)
1163 ACQUIRE_LOCK(&sched_mutex);
1164 tso = unblockOneLocked(tso);
1165 RELEASE_LOCK(&sched_mutex);
/* awakenBlockedQueue: wake every TSO on a blocking queue, under the lock. */
1170 awakenBlockedQueue(StgTSO *tso)
1172 ACQUIRE_LOCK(&sched_mutex);
1173 while (tso != END_TSO_QUEUE) {
1174 tso = unblockOneLocked(tso);
1176 RELEASE_LOCK(&sched_mutex);
1179 /* -----------------------------------------------------------------------------
1181 - usually called inside a signal handler so it mustn't do anything fancy.
1182 -------------------------------------------------------------------------- */
/* interruptStgRts: called from a signal handler, so it must stay
 * simple (see the comment above).  NOTE(review): line-sampled fragment;
 * body (presumably setting the interrupted/context_switch flags) is
 * not visible here. */
1185 interruptStgRts(void)
1191 /* -----------------------------------------------------------------------------
1194 This is for use when we raise an exception in another thread, which
1196 -------------------------------------------------------------------------- */
/* unblockThread: forcibly remove a TSO from whatever queue it is
 * blocked on (an MVar, a black-hole blocking queue, or the I/O/delay
 * blocked queue) and put it on the run queue.  Used when raising an
 * asynchronous exception in another thread.
 */
1199 unblockThread(StgTSO *tso)
1203 ACQUIRE_LOCK(&sched_mutex);
1204 switch (tso->why_blocked) {
1207 return; /* not blocked */
/* Blocked on an MVar: unlink from mvar->head, fixing up the tail
 * pointer if we removed the last element. */
1210 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
1212 StgTSO *last_tso = END_TSO_QUEUE;
1213 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
1216 for (t = mvar->head; t != END_TSO_QUEUE;
1217 last = &t->link, last_tso = t, t = t->link) {
1220 if (mvar->tail == tso) {
1221 mvar->tail = last_tso;
1226 barf("unblockThread (MVAR): TSO not found");
1229 case BlockedOnBlackHole:
1230 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
1232 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
1234 last = &bq->blocking_queue;
1235 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
1236 last = &t->link, t = t->link) {
1242 barf("unblockThread (BLACKHOLE): TSO not found");
1245 case BlockedOnDelay:
1247 case BlockedOnWrite:
/* Blocked on I/O or a delay: unlink from the global blocked queue. */
1249 last = &blocked_queue_hd;
1250 for (t = blocked_queue_hd; t != END_TSO_QUEUE;
1251 last = &t->link, t = t->link) {
1254 if (blocked_queue_tl == t) {
1255 blocked_queue_tl = tso->link;
1260 barf("unblockThread (I/O): TSO not found");
1264 barf("unblockThread");
/* Common exit: clear the blocking state and make the thread runnable. */
1268 tso->link = END_TSO_QUEUE;
1269 tso->why_blocked = NotBlocked;
1270 tso->block_info.closure = NULL;
1271 PUSH_ON_RUN_QUEUE(tso);
1272 RELEASE_LOCK(&sched_mutex);
1275 /* -----------------------------------------------------------------------------
1278 * The following function implements the magic for raising an
1279 * asynchronous exception in an existing thread.
1281 * We first remove the thread from any queue on which it might be
1282 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
1284 * We strip the stack down to the innermost CATCH_FRAME, building
1285 * thunks in the heap for all the active computations, so they can
1286 * be restarted if necessary. When we reach a CATCH_FRAME, we build
1287 * an application of the handler to the exception, and push it on
1288 * the top of the stack.
1290 * How exactly do we save all the active computations? We create an
1291 * AP_UPD for every UpdateFrame on the stack. Entering one of these
1292 * AP_UPDs pushes everything from the corresponding update frame
1293 * upwards onto the stack. (Actually, it pushes everything up to the
1294 * next update frame plus a pointer to the next AP_UPD object.
1295 * Entering the next AP_UPD object pushes more onto the stack until we
1296 * reach the last AP_UPD object - at which point the stack should look
1297 * exactly as it did when we killed the TSO and we can continue
1298 * execution by entering the closure on top of the stack.
1300 * We can also kill a thread entirely - this happens if either (a) the
1301 * exception passed to raiseAsync is NULL, or (b) there's no
1302 * CATCH_FRAME on the stack. In either case, we strip the entire
1303 * stack and replace the thread with a zombie.
1305 * -------------------------------------------------------------------------- */
/* deleteThread: kill a thread outright.  Raising a NULL exception makes
 * raiseAsync strip the whole stack and leave the thread a zombie. */
1308 deleteThread(StgTSO *tso)
1310 raiseAsync(tso,NULL);
/* raiseAsync: raise an asynchronous exception in another thread (see
 * the block comment above).  Walks the target's stack from sp to each
 * enclosing frame, freezing each chunk into an AP_UPD/PAP closure so
 * the computation can be resumed, until a CATCH_FRAME is found (the
 * handler is applied to the exception) or the stack is exhausted (the
 * thread is killed).  NOTE(review): line-sampled fragment -- loop
 * headers, case labels and several braces are not visible.
 */
1314 raiseAsync(StgTSO *tso, StgClosure *exception)
1316 StgUpdateFrame* su = tso->su;
1317 StgPtr sp = tso->sp;
1319 /* Thread already dead? */
1320 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
1324 IF_DEBUG(scheduler, belch("schedule: Raising exception in thread %ld.", tso->id));
1326 /* Remove it from any blocking queues */
1329 /* The stack freezing code assumes there's a closure pointer on
1330 * the top of the stack. This isn't always the case with compiled
1331 * code, so we have to push a dummy closure on the top which just
1332 * returns to the next return address on the stack.
1334 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
1335 *(--sp) = (W_)&dummy_ret_closure;
/* Number of stack words between sp and the current frame, excluding
 * the word that becomes ap->fun. */
1339 int words = ((P_)su - (P_)sp) - 1;
1343 /* If we find a CATCH_FRAME, and we've got an exception to raise,
1344 * then build PAP(handler,exception), and leave it on top of
1345 * the stack ready to enter.
1347 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
1348 StgCatchFrame *cf = (StgCatchFrame *)su;
1349 /* we've got an exception to raise, so let's pass it to the
1350 * handler in this frame.
1352 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
1353 TICK_ALLOC_UPD_PAP(2,0);
1354 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
1357 ap->fun = cf->handler;
1358 ap->payload[0] = (P_)exception;
1360 /* sp currently points to the word above the CATCH_FRAME on the
1361 * stack. Replace the CATCH_FRAME with a pointer to the new handler
1364 sp += sizeofW(StgCatchFrame);
1368 tso->whatNext = ThreadEnterGHC;
1372 /* First build an AP_UPD consisting of the stack chunk above the
1373 * current update frame, with the top word on the stack as the
1376 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
1381 ap->fun = (StgClosure *)sp[0];
1383 for(i=0; i < (nat)words; ++i) {
1384 ap->payload[i] = (P_)*sp++;
/* Dispatch on the type of the frame we just reached. */
1387 switch (get_itbl(su)->type) {
1391 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
1392 TICK_ALLOC_UP_THK(words+1,0);
1395 fprintf(stderr, "schedule: Updating ");
1396 printPtr((P_)su->updatee);
1397 fprintf(stderr, " with ");
1398 printObj((StgClosure *)ap);
1401 /* Replace the updatee with an indirection - happily
1402 * this will also wake up any threads currently
1403 * waiting on the result.
1405 UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
1407 sp += sizeofW(StgUpdateFrame) -1;
1408 sp[0] = (W_)ap; /* push onto stack */
/* CATCH_FRAME (no exception to deliver): freeze as catch(ap,handler). */
1414 StgCatchFrame *cf = (StgCatchFrame *)su;
1417 /* We want a PAP, not an AP_UPD. Fortunately, the
1418 * layout's the same.
1420 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1421 TICK_ALLOC_UPD_PAP(words+1,0);
1423 /* now build o = FUN(catch,ap,handler) */
1424 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
1425 TICK_ALLOC_FUN(2,0);
1426 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
1427 o->payload[0] = (StgClosure *)ap;
1428 o->payload[1] = cf->handler;
1431 fprintf(stderr, "schedule: Built ");
1432 printObj((StgClosure *)o);
1435 /* pop the old handler and put o on the stack */
1437 sp += sizeofW(StgCatchFrame) - 1;
/* SEQ_FRAME: freeze as seq(ap). */
1444 StgSeqFrame *sf = (StgSeqFrame *)su;
1447 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1448 TICK_ALLOC_UPD_PAP(words+1,0);
1450 /* now build o = FUN(seq,ap) */
1451 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
1452 TICK_ALLOC_SE_THK(1,0);
1453 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
1454 payloadCPtr(o,0) = (StgClosure *)ap;
1457 fprintf(stderr, "schedule: Built ");
1458 printObj((StgClosure *)o);
1461 /* pop the old handler and put o on the stack */
1463 sp += sizeofW(StgSeqFrame) - 1;
1469 /* We've stripped the entire stack, the thread is now dead. */
1470 sp += sizeofW(StgStopFrame) - 1;
1471 sp[0] = (W_)exception; /* save the exception */
1472 tso->whatNext = ThreadKilled;
1473 tso->su = (StgUpdateFrame *)(sp+1);