1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.38 1999/12/01 16:13:25 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
10 /* Version with scheduler monitor support for SMPs.
12 This design provides a high-level API to create and schedule threads etc.
13 as documented in the SMP design document.
15 It uses a monitor design controlled by a single mutex to exercise control
16 over accesses to shared data structures, and builds on the Posix threads
19 The majority of state is shared. In order to keep essential per-task state,
20 there is a Capability structure, which contains all the information
21 needed to run a thread: its STG registers, a pointer to its TSO, a
22 nursery etc. During STG execution, a pointer to the capability is
23 kept in a register (BaseReg).
25 In a non-SMP build, there is one global capability, namely MainRegTable.
36 #include "StgStartup.h"
40 #include "StgMiscClosures.h"
42 #include "Evaluator.h"
43 #include "Exception.h"
47 #include "Profiling.h"
53 * These are the threads which clients have requested that we run.
55 * In an SMP build, we might have several concurrent clients all
56 * waiting for results, and each one will wait on a condition variable
57 * until the result is available.
59 * In non-SMP, clients are strictly nested: the first client calls
60 * into the RTS, which might call out again to C with a _ccall_GC, and
61 * eventually re-enter the RTS.
63 * Main threads information is kept in a linked list:
/* Per-client record for a "main thread": one entry per external caller
 * waiting for a Haskell computation to finish.  NOTE(review): this listing
 * has gaps -- fields referenced elsewhere in this file (m->tso, m->stat,
 * m->ret) belong to this struct but are not visible here.
 */
65 typedef struct StgMainThread_ {
70 pthread_cond_t wakeup;   /* broadcast on when this client's TSO completes/dies */
72 struct StgMainThread_ *link;   /* next entry in the main_threads list */
76 * Locks required: sched_mutex.
78 static StgMainThread *main_threads;
81 * Locks required: sched_mutex.
83 StgTSO *run_queue_hd, *run_queue_tl;
84 StgTSO *blocked_queue_hd, *blocked_queue_tl;
86 /* Threads suspended in _ccall_GC.
87 * Locks required: sched_mutex.
89 static StgTSO *suspended_ccalling_threads;
/* Forward declarations for helpers defined later in this file. */
91 static void GetRoots(void);
92 static StgTSO *threadStackOverflow(StgTSO *tso);
94 /* KH: The following two flags are shared memory locations. There is no need
95 to lock them, since they are only unset at the end of a scheduler
99 /* flag set by signal handler to precipitate a context switch */
101 /* if this flag is set as well, give up execution */
102 static nat interrupted;
104 /* Next thread ID to allocate.
105 * Locks required: sched_mutex
107 StgThreadID next_thread_id = 1;
110 * Pointers to the state of the current thread.
111 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
112 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
115 /* The smallest stack size that makes any sense is:
116 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
117 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
118 * + 1 (the realworld token for an IO thread)
119 * + 1 (the closure to enter)
121 * A thread with this stack will bomb immediately with a stack
122 * overflow, which will increase its stack size.
125 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
127 /* Free capability list.
128 * Locks required: sched_mutex.
131 Capability *free_capabilities; /* Available capabilities for running threads */
132 nat n_free_capabilities; /* total number of available capabilities */
134 Capability MainRegTable; /* for non-SMP, we have one global capability */
139 /* All our current task ids, saved in case we need to kill them later.
145 void addToBlockedQueue ( StgTSO *tso );
147 static void schedule ( void );
148 static void initThread ( StgTSO *tso, nat stack_size );
149 void interruptStgRts ( void );
/* The single "monitor" mutex described in the file header: sched_mutex
 * guards all of the shared scheduler state declared above. */
152 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
153 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
154 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
155 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
160 /* -----------------------------------------------------------------------------
161 Main scheduling loop.
163 We use round-robin scheduling, each thread returning to the
164 scheduler loop when one of these conditions is detected:
167 * timer expires (thread yields)
172 Locking notes: we acquire the scheduler lock once at the beginning
173 of the scheduler loop, and release it when
175 * running a thread, or
176 * waiting for work, or
177 * waiting for a GC to complete.
179 -------------------------------------------------------------------------- */
/* schedule() -- the main scheduler loop.  FRAGMENT: the function header,
 * the enclosing while(1), and many interior lines (including several #ifdef
 * SMP / DEBUG alternatives) are missing from this listing; comments below
 * describe only the lines that are visible.  Per the header comment above,
 * the loop holds sched_mutex except while running a thread, waiting for
 * work, or waiting for a GC.
 */
186 StgThreadReturnCode ret;
188 ACQUIRE_LOCK(&sched_mutex);
192 /* If we're interrupted (the user pressed ^C, or some other
193 * termination condition occurred), kill all the currently running
197 IF_DEBUG(scheduler,belch("schedule: interrupted"));
198 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
201 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
204 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
205 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
208 /* Go through the list of main threads and wake up any
209 * clients whose computations have finished. ToDo: this
210 * should be done more efficiently without a linear scan
211 * of the main threads list, somehow...
215 StgMainThread *m, **prev;
216 prev = &main_threads;
217 for (m = main_threads; m != NULL; m = m->link) {
218 if (m->tso->whatNext == ThreadComplete) {
/* Result convention: a completed TSO leaves its result closure on top of
 * its stack (sp[0]); copy it out through the client's ret pointer. */
220 *(m->ret) = (StgClosure *)m->tso->sp[0];
224 pthread_cond_broadcast(&m->wakeup);
226 if (m->tso->whatNext == ThreadKilled) {
229 pthread_cond_broadcast(&m->wakeup);
234 /* If our main thread has finished or been killed, return.
237 StgMainThread *m = main_threads;
238 if (m->tso->whatNext == ThreadComplete
239 || m->tso->whatNext == ThreadKilled) {
240 main_threads = main_threads->link;
241 if (m->tso->whatNext == ThreadComplete) {
242 /* we finished successfully, fill in the return value */
243 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
254 /* Check whether any waiting threads need to be woken up. If the
255 * run queue is empty, and there are no other tasks running, we
256 * can wait indefinitely for something to happen.
257 * ToDo: what if another client comes along & requests another
260 if (blocked_queue_hd != END_TSO_QUEUE) {
262 (run_queue_hd == END_TSO_QUEUE)
264 && (n_free_capabilities == RtsFlags.ConcFlags.nNodes)
269 /* check for signals each time around the scheduler */
271 if (signals_pending()) {
272 start_signal_handlers();
276 /* Detect deadlock: when we have no threads to run, there are
277 * no threads waiting on I/O or sleeping, and all the other
278 * tasks are waiting for work, we must have a deadlock. Inform
279 * all the main threads.
282 if (blocked_queue_hd == END_TSO_QUEUE
283 && run_queue_hd == END_TSO_QUEUE
284 && (n_free_capabilities == RtsFlags.ConcFlags.nNodes)
287 for (m = main_threads; m != NULL; m = m->link) {
290 pthread_cond_broadcast(&m->wakeup);
295 if (blocked_queue_hd == END_TSO_QUEUE
296 && run_queue_hd == END_TSO_QUEUE) {
297 StgMainThread *m = main_threads;
300 main_threads = m->link;
306 /* If there's a GC pending, don't do anything until it has
310 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): waiting for GC\n",
312 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
315 /* block until we've got a thread on the run queue and a free
318 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
320 fprintf(stderr, "schedule (task %ld): waiting for work\n",
322 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
324 fprintf(stderr, "schedule (task %ld): work now available\n",
329 /* grab a thread from the run queue
336 cap = free_capabilities;
337 free_capabilities = cap->link;
338 n_free_capabilities--;
/* Pair the dequeued TSO with the capability we just took. */
343 cap->rCurrentTSO = t;
345 /* set the context_switch flag
347 if (run_queue_hd == END_TSO_QUEUE)
/* Drop the monitor lock while the Haskell thread actually runs. */
352 RELEASE_LOCK(&sched_mutex);
355 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): running thread %d\n", pthread_self(),t->id));
357 IF_DEBUG(scheduler,fprintf(stderr,"schedule: running thread %d\n",t->id));
360 /* Run the current thread
362 switch (cap->rCurrentTSO->whatNext) {
365 /* Thread already finished, return to scheduler. */
366 ret = ThreadFinished;
369 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
372 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
374 case ThreadEnterHugs:
378 IF_DEBUG(scheduler,belch("schedule: entering Hugs"));
379 c = (StgClosure *)(cap->rCurrentTSO->sp[0]);
380 cap->rCurrentTSO->sp += 1;
385 barf("Panic: entered a BCO but no bytecode interpreter in this build");
388 barf("schedule: invalid whatNext field");
391 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* Thread has stopped; re-acquire the monitor before touching shared state. */
396 ACQUIRE_LOCK(&sched_mutex);
399 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): ", pthread_self()););
401 IF_DEBUG(scheduler,fprintf(stderr,"schedule: "););
403 t = cap->rCurrentTSO;
407 /* make all the running tasks block on a condition variable,
408 * maybe set context_switch and wait till they all pile in,
409 * then have them wait on a GC condition variable.
411 IF_DEBUG(scheduler,belch("thread %ld stopped: HeapOverflow", t->id));
414 ready_to_gc = rtsTrue;
415 context_switch = 1; /* stop other threads ASAP */
416 PUSH_ON_RUN_QUEUE(t);
420 /* just adjust the stack for this thread, then pop it back
423 IF_DEBUG(scheduler,belch("thread %ld stopped, StackOverflow", t->id));
427 /* enlarge the stack */
428 StgTSO *new_t = threadStackOverflow(t);
430 /* This TSO has moved, so update any pointers to it from the
431 * main thread stack. It better not be on any other queues...
434 for (m = main_threads; m != NULL; m = m->link) {
439 PUSH_ON_RUN_QUEUE(new_t);
444 /* put the thread back on the run queue. Then, if we're ready to
445 * GC, check whether this is the last task to stop. If so, wake
446 * up the GC thread. getThread will block during a GC until the
450 if (t->whatNext == ThreadEnterHugs) {
451 /* ToDo: or maybe a timer expired when we were in Hugs?
452 * or maybe someone hit ctrl-C
454 belch("thread %ld stopped to switch to Hugs", t->id);
456 belch("thread %ld stopped, yielding", t->id);
/* Yielding threads go to the TAIL (round-robin); contrast with
 * PUSH_ON_RUN_QUEUE above for heap/stack-overflow cases. */
460 APPEND_TO_RUN_QUEUE(t);
464 /* don't need to do anything. Either the thread is blocked on
465 * I/O, in which case we'll have called addToBlockedQueue
466 * previously, or it's blocked on an MVar or Blackhole, in which
467 * case it'll be on the relevant queue already.
470 fprintf(stderr, "thread %d stopped, ", t->id);
471 printThreadBlockage(t);
472 fprintf(stderr, "\n"));
477 /* Need to check whether this was a main thread, and if so, signal
478 * the task that started it with the return value. If we have no
479 * more main threads, we probably need to stop all the tasks until
482 IF_DEBUG(scheduler,belch("thread %ld finished", t->id));
483 t->whatNext = ThreadComplete;
487 barf("doneThread: invalid thread return code");
/* Return the capability to the free list now that the thread stopped. */
491 cap->link = free_capabilities;
492 free_capabilities = cap;
493 n_free_capabilities++;
/* GC rendezvous: only start the GC once every capability is free,
 * i.e. every task has stopped running Haskell code. */
497 if (ready_to_gc && n_free_capabilities == RtsFlags.ConcFlags.nNodes) {
501 /* everybody back, start the GC.
502 * Could do it in this thread, or signal a condition var
503 * to do it in another thread. Either way, we need to
504 * broadcast on gc_pending_cond afterward.
507 IF_DEBUG(scheduler,belch("schedule (task %ld): doing GC", pthread_self()));
509 GarbageCollect(GetRoots);
510 ready_to_gc = rtsFalse;
512 pthread_cond_broadcast(&gc_pending_cond);
515 } /* end of while(1) */
519 /* A hack for Hugs concurrency support. Needs sanitisation (?) */
/* Kill every thread on the run and blocked queues, then empty both queues.
 * FRAGMENT: the loop bodies (presumably deleteThread(t) calls -- TODO
 * confirm against the full source) are missing from this listing.
 * Locks: NOTE(review) -- no sched_mutex acquisition is visible here. */
520 void deleteAllThreads ( void )
523 IF_DEBUG(scheduler,belch("deleteAllThreads()"));
524 for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
527 for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
530 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
531 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
535 /* -----------------------------------------------------------------------------
536 * Suspending & resuming Haskell threads.
538 * When making a "safe" call to C (aka _ccall_GC), the task gives back
539 * its capability before calling the C function. This allows another
540 * task to pick up the capability and carry on running Haskell
541 * threads. It also means that if the C call blocks, it won't lock
544 * The Haskell thread making the C call is put to sleep for the
545 * duration of the call, on the suspended_ccalling_threads queue. We
546 * give out a token to the task, which it can use to resume the thread
547 * on return from the C function.
548 * -------------------------------------------------------------------------- */
/* Suspend the Haskell thread in `cap` for the duration of a "safe" C call
 * (_ccall_GC): park the TSO on suspended_ccalling_threads, release the
 * capability back to the free list, and hand the caller a token (the
 * thread id) with which to resumeThread() later.  FRAGMENT: the return
 * type/return statement and some #ifdef SMP/DEBUG lines are missing. */
551 suspendThread( Capability *cap )
555 ACQUIRE_LOCK(&sched_mutex);
559 fprintf(stderr, "schedule (task %ld): thread %d did a _ccall_gc\n",
560 pthread_self(), cap->rCurrentTSO->id));
563 fprintf(stderr, "schedule: thread %d did a _ccall_gc\n",
564 cap->rCurrentTSO->id));
567 threadPaused(cap->rCurrentTSO);
/* Push the TSO onto the suspended-ccall list (LIFO). */
568 cap->rCurrentTSO->link = suspended_ccalling_threads;
569 suspended_ccalling_threads = cap->rCurrentTSO;
571 /* Use the thread ID as the token; it should be unique */
572 tok = cap->rCurrentTSO->id;
/* Give the capability back so another task can run Haskell threads
 * while the C call is in progress. */
575 cap->link = free_capabilities;
576 free_capabilities = cap;
577 n_free_capabilities++;
580 RELEASE_LOCK(&sched_mutex);
/* Resume a thread previously suspended with suspendThread(): look it up by
 * token (thread id) on suspended_ccalling_threads, wait for a free
 * capability, and attach the TSO to it.  FRAGMENT: the return type, the
 * list-unlink via *prev, and the closing return are missing from view. */
585 resumeThread( StgInt tok )
590 ACQUIRE_LOCK(&sched_mutex);
592 prev = &suspended_ccalling_threads;
593 for (tso = suspended_ccalling_threads;
594 tso != END_TSO_QUEUE;
595 prev = &tso->link, tso = tso->link) {
596 if (tso->id == (StgThreadID)tok) {
601 if (tso == END_TSO_QUEUE) {
602 barf("resumeThread: thread not found");
/* Block on thread_ready_cond until some capability is returned. */
606 while (free_capabilities == NULL) {
608 fprintf(stderr,"schedule (task %ld): waiting to resume\n",
610 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
611 IF_DEBUG(scheduler,fprintf(stderr,
612 "schedule (task %ld): resuming thread %d\n",
613 pthread_self(), tso->id));
615 cap = free_capabilities;
616 free_capabilities = cap->link;
617 n_free_capabilities--;
622 cap->rCurrentTSO = tso;
624 RELEASE_LOCK(&sched_mutex);
628 /* -----------------------------------------------------------------------------
630 * -------------------------------------------------------------------------- */
631 static void unblockThread(StgTSO *tso);
633 /* -----------------------------------------------------------------------------
634 * Comparing Thread ids.
636 * This is used from STG land in the implementation of the
637 * instances of Eq/Ord for ThreadIds.
638 * -------------------------------------------------------------------------- */
/* Three-way comparison of two thread ids, for the Eq/Ord ThreadId
 * instances: returns -1, 0 (on the missing fall-through line), or 1.
 * FRAGMENT: braces and the final `return 0;` are missing from this listing. */
640 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
642 StgThreadID id1 = tso1->id;
643 StgThreadID id2 = tso2->id;
645 if (id1 < id2) return (-1);
646 if (id1 > id2) return 1;
650 /* -----------------------------------------------------------------------------
653 The new thread starts with the given stack size. Before the
654 scheduler can run, however, this thread needs to have a closure
655 (and possibly some arguments) pushed on its stack. See
656 pushClosure() in Schedule.h.
658 createGenThread() and createIOThread() (in SchedAPI.h) are
659 convenient packaged versions of this function.
660 -------------------------------------------------------------------------- */
/* Allocate and initialise a new TSO with (at least) the requested stack
 * size, in words, including the TSO header (TSO_STRUCT_SIZEW).  The caller
 * must still push a closure on the stack before scheduling it (see
 * pushClosure in Schedule.h).  FRAGMENT: return type/statement missing. */
663 createThread(nat stack_size)
667 /* catch ridiculously small stack sizes */
668 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
669 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
/* One heap allocation covers the TSO header plus its stack. */
672 tso = (StgTSO *)allocate(stack_size);
673 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
/* initThread gets the stack size EXCLUDING the TSO header. */
675 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* Initialise a freshly-allocated TSO: header, unique id, blocking state,
 * stack pointers (stack grows downward: sp starts at the top, splim just
 * above the reserved words), and an initial stop frame.
 * stack_size is in words and excludes the TSO header. */
680 initThread(StgTSO *tso, nat stack_size)
682 SET_HDR(tso, &TSO_info, CCS_MAIN);
683 tso->whatNext = ThreadEnterGHC;
685 /* tso->id needs to be unique. For now we use a heavyweight mutex to
686 protect the increment operation on next_thread_id.
687 In future, we could use an atomic increment instead.
690 ACQUIRE_LOCK(&sched_mutex);
691 tso->id = next_thread_id++;
692 RELEASE_LOCK(&sched_mutex);
694 tso->why_blocked = NotBlocked;
695 tso->blocked_exceptions = NULL;
697 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
698 tso->stack_size = stack_size;
/* NOTE(review): the expression continues on a line missing from this
 * listing; max_stack_size is derived from RtsFlags.GcFlags.maxStkSize. */
699 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
701 tso->sp = (P_)&(tso->stack) + stack_size;
704 tso->prof.CCCS = CCS_MAIN;
707 /* put a stop frame on the stack */
708 tso->sp -= sizeofW(StgStopFrame);
709 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
710 tso->su = (StgUpdateFrame*)tso->sp;
712 IF_DEBUG(scheduler,belch("schedule: Initialised thread %ld, stack size = %lx words",
713 tso->id, tso->stack_size));
718 /* -----------------------------------------------------------------------------
721 * scheduleThread puts a thread on the head of the runnable queue.
722 * This will usually be done immediately after a thread is created.
723 * The caller of scheduleThread must create the thread using e.g.
724 * createThread and push an appropriate closure
725 * on this thread's stack before the scheduler is invoked.
726 * -------------------------------------------------------------------------- */
/* Make a created thread runnable by pushing it on the head of the run
 * queue, under sched_mutex.  The caller must already have pushed an
 * appropriate closure onto the thread's stack (see createThread). */
729 scheduleThread(StgTSO *tso)
731 ACQUIRE_LOCK(&sched_mutex);
733 /* Put the new thread on the head of the runnable queue. The caller
734 * better push an appropriate closure on this thread's stack
735 * beforehand. In the SMP case, the thread may start running as
736 * soon as we release the scheduler lock below.
738 PUSH_ON_RUN_QUEUE(tso);
741 IF_DEBUG(scheduler,printTSO(tso));
742 RELEASE_LOCK(&sched_mutex);
746 /* -----------------------------------------------------------------------------
749 * Start up Posix threads to run each of the scheduler tasks.
750 * I believe the task ids are not needed in the system as defined.
752 * -------------------------------------------------------------------------- */
756 taskStart( void *arg STG_UNUSED )
763 /* -----------------------------------------------------------------------------
766 * Initialise the scheduler. This resets all the queues - if the
767 * queues contained any threads, they'll be garbage collected at the
770 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
771 * -------------------------------------------------------------------------- */
/* Signal handler for SIGTERM (installed by initScheduler below).
 * FRAGMENT: the body between lock/unlock is missing -- presumably it
 * decrements await_death (used by exitScheduler) under term_mutex;
 * TODO confirm against the full source. */
775 term_handler(int sig STG_UNUSED)
778 ACQUIRE_LOCK(&term_mutex);
780 RELEASE_LOCK(&term_mutex);
/* One-time scheduler initialisation: reset all queues, install the SIGTERM
 * handler, allocate the per-task Capabilities, and spawn the worker Posix
 * threads.  Per the comment above, this starts tasks, so it must only be
 * called once.  FRAGMENT: several lines (capability list linking, error
 * check on pthread_create's return, loop/brace closings) are missing. */
785 void initScheduler(void)
787 run_queue_hd = END_TSO_QUEUE;
788 run_queue_tl = END_TSO_QUEUE;
789 blocked_queue_hd = END_TSO_QUEUE;
790 blocked_queue_tl = END_TSO_QUEUE;
792 suspended_ccalling_threads = END_TSO_QUEUE;
799 enteredCAFs = END_CAF_LIST;
801 /* Install the SIGTERM handler (the sigaction call below registers SIGTERM) */
804 struct sigaction action,oact;
806 action.sa_handler = term_handler;
807 sigemptyset(&action.sa_mask);
809 if (sigaction(SIGTERM, &action, &oact) != 0) {
810 barf("can't install TERM handler");
816 /* Allocate N Capabilities */
819 Capability *cap, *prev;
822 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
823 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
827 free_capabilities = cap;
828 n_free_capabilities = RtsFlags.ConcFlags.nNodes;
830 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Allocated %d capabilities\n",
831 n_free_capabilities););
843 /* make some space for saving all the thread ids */
844 task_ids = stgMallocBytes(RtsFlags.ConcFlags.nNodes * sizeof(task_info),
845 "initScheduler:task_ids");
847 /* and create all the threads */
848 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
849 r = pthread_create(&tid,NULL,taskStart,NULL);
851 barf("startTasks: Can't create new Posix thread");
/* Record each task's id and zeroed timing stats for later accounting. */
853 task_ids[i].id = tid;
854 task_ids[i].mut_time = 0.0;
855 task_ids[i].mut_etime = 0.0;
856 task_ids[i].gc_time = 0.0;
857 task_ids[i].gc_etime = 0.0;
858 task_ids[i].elapsedtimestart = elapsedtime();
859 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Started task: %ld\n",tid););
/* Shut the scheduler down by stopping every worker task.  FRAGMENT: this
 * listing shows what appear to be two alternative shutdown strategies
 * (pthread_cancel+join, and pthread_kill(SIGTERM)+await_death spin) whose
 * surrounding #if/#else lines are missing -- the comment at line 870
 * arguing against pthread_cancel suggests only one branch is compiled. */
865 exitScheduler( void )
870 /* Don't want to use pthread_cancel, since we'd have to install
871 * these silly exception handlers (pthread_cleanup_{push,pop}) around
875 /* Cancel all our tasks */
876 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
877 pthread_cancel(task_ids[i].id);
880 /* Wait for all the tasks to terminate */
881 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
882 IF_DEBUG(scheduler,fprintf(stderr,"schedule: waiting for task %ld\n",
884 pthread_join(task_ids[i].id, NULL);
888 /* Send 'em all a SIGTERM. That should shut 'em up.
890 await_death = RtsFlags.ConcFlags.nNodes;
891 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
892 pthread_kill(task_ids[i].id,SIGTERM);
894 while (await_death > 0) {
900 /* -----------------------------------------------------------------------------
901 Managing the per-task allocation areas.
903 Each capability comes with an allocation area. These are
904 fixed-length block lists into which allocation can be done.
906 ToDo: no support for two-space collection at the moment???
907 -------------------------------------------------------------------------- */
909 /* -----------------------------------------------------------------------------
910 * waitThread is the external interface for running a new computation
911 * and waiting for the result.
913 * In the non-SMP case, we create a new main thread, push it on the
914 * main-thread stack, and invoke the scheduler to run it. The
915 * scheduler will return when the top main thread on the stack has
916 * completed or died, and fill in the necessary fields of the
917 * main_thread structure.
919 * In the SMP case, we create a main thread as before, but we then
920 * create a new condition variable and sleep on it. When our new
921 * main thread has completed, we'll be woken up and the status/result
922 * will be in the main_thread struct.
923 * -------------------------------------------------------------------------- */
/* External interface: run `tso` to completion and deliver its result via
 * *ret.  Registers a StgMainThread record, then (SMP path shown here)
 * sleeps on its condition variable until the scheduler fills in m->stat.
 * FRAGMENT: the initialisation of m->tso/m->ret/m->stat, the non-SMP
 * path, and the translation of m->stat to the returned SchedulerStatus
 * are on lines missing from this listing. */
926 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
929 SchedulerStatus stat;
931 ACQUIRE_LOCK(&sched_mutex);
933 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
939 pthread_cond_init(&m->wakeup, NULL);
942 m->link = main_threads;
945 IF_DEBUG(scheduler, fprintf(stderr, "schedule: new main thread (%d)\n",
/* pthread_cond_wait releases sched_mutex while asleep and re-acquires it
 * before returning; the loop guards against spurious wakeups. */
950 pthread_cond_wait(&m->wakeup, &sched_mutex);
951 } while (m->stat == NoStatus);
954 ASSERT(m->stat != NoStatus);
960 pthread_cond_destroy(&m->wakeup);
964 RELEASE_LOCK(&sched_mutex);
968 /* -----------------------------------------------------------------------------
969 Debugging: why is a thread blocked
970 -------------------------------------------------------------------------- */
/* Debugging aid: print (to stderr, no trailing newline) a human-readable
 * description of why `tso` is blocked.  FRAGMENT: the case labels for the
 * first few branches and the break statements are on missing lines. */
973 void printThreadBlockage(StgTSO *tso)
975 switch (tso->why_blocked) {
977 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
980 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
983 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
986 fprintf(stderr,"blocked on an MVar");
988 case BlockedOnException:
989 fprintf(stderr,"blocked on delivering an exception to thread %d",
990 tso->block_info.tso->id);
992 case BlockedOnBlackHole:
993 fprintf(stderr,"blocked on a black hole");
996 fprintf(stderr,"not blocked");
1002 /* -----------------------------------------------------------------------------
1003 Where are the roots that we know about?
1005 - all the threads on the runnable queue
1006 - all the threads on the blocked queue
1007 - all the thread currently executing a _ccall_GC
1008 - all the "main threads"
1010 -------------------------------------------------------------------------- */
1012 /* This has to be protected either by the scheduler monitor, or by the
1013 garbage collection monitor (probably the latter).
/* GC root callback passed to GarbageCollect(): marks every queue head/tail,
 * every main thread's TSO, and the suspended-ccall list, updating each
 * pointer with the value MarkRoot returns (objects may move). */
1017 static void GetRoots(void)
1021 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
1022 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
1024 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
1025 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
1027 for (m = main_threads; m != NULL; m = m->link) {
1028 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
1030 suspended_ccalling_threads =
1031 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
1034 /* -----------------------------------------------------------------------------
1037 This is the interface to the garbage collector from Haskell land.
1038 We provide this so that external C code can allocate and garbage
1039 collect when called from Haskell via _ccall_GC.
1041 It might be useful to provide an interface whereby the programmer
1042 can specify more roots (ToDo).
1044 This needs to be protected by the GC condition variable above. KH.
1045 -------------------------------------------------------------------------- */
/* _ccall_GC interface: lets external C code trigger a collection.
 * FRAGMENT: three definitions are interleaved here with their headers and
 * braces missing -- performGC (calls GarbageCollect with the scheduler's
 * roots), a static AllRoots helper (scheduler roots + user-supplied
 * extra_roots), and performGCWithRoots.  extra_roots holds the user's
 * callback between performGCWithRoots and the AllRoots invocation. */
1047 void (*extra_roots)(void);
1052 GarbageCollect(GetRoots);
1058 GetRoots(); /* the scheduler's roots */
1059 extra_roots(); /* the user's roots */
1063 performGCWithRoots(void (*get_roots)(void))
1065 extra_roots = get_roots;
1067 GarbageCollect(AllRoots);
1070 /* -----------------------------------------------------------------------------
1073 If the thread has reached its maximum stack size,
1074 then bomb out. Otherwise relocate the TSO into a larger chunk of
1075 memory and adjust its stack size appropriately.
1076 -------------------------------------------------------------------------- */
/* Grow a thread's stack after a stack-overflow return: if the thread is
 * already at max_stack_size, raise the StackOverflow exception in it;
 * otherwise allocate a larger TSO, copy header + live stack across,
 * relocate the stack/update-frame pointers, and kill the old TSO.
 * Returns the (possibly relocated) TSO -- callers must switch to it.
 * FRAGMENT: return statements and some braces are on missing lines. */
1079 threadStackOverflow(StgTSO *tso)
1081 nat new_stack_size, new_tso_size, diff, stack_words;
1085 if (tso->stack_size >= tso->max_stack_size) {
1087 /* If we're debugging, just print out the top of the stack */
1088 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
1092 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
1095 /* Send this thread the StackOverflow exception */
1096 raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
1101 /* Try to double the current stack size. If that takes us over the
1102 * maximum stack size for this thread, then use the maximum instead.
1103 * Finally round up so the TSO ends up as a whole number of blocks.
1105 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
1106 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
1107 TSO_STRUCT_SIZE)/sizeof(W_);
1108 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
1109 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
1111 IF_DEBUG(scheduler, fprintf(stderr,"schedule: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
1113 dest = (StgTSO *)allocate(new_tso_size);
1114 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
1116 /* copy the TSO block and the old stack into the new area */
1117 memcpy(dest,tso,TSO_STRUCT_SIZE);
/* Live stack = words between sp and the top of the old stack; copy it to
 * the TOP of the new, larger area so sp/su offsets shift by `diff`. */
1118 stack_words = tso->stack + tso->stack_size - tso->sp;
1119 new_sp = (P_)dest + new_tso_size - stack_words;
1120 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
1122 /* relocate the stack pointers... */
1123 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
1124 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1126 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
1127 dest->stack_size = new_stack_size;
1129 /* and relocate the update frame list */
1130 relocate_TSO(tso, dest);
1132 /* Mark the old one as dead so we don't try to scavenge it during
1133 * garbage collection (the TSO will likely be on a mutables list in
1134 * some generation, but it'll get collected soon enough). It's
1135 * important to set the sp and su values to just beyond the end of
1136 * the stack, so we don't attempt to scavenge any part of the dead
1139 tso->whatNext = ThreadKilled;
1140 tso->sp = (P_)&(tso->stack[tso->stack_size]);
1141 tso->su = (StgUpdateFrame *)tso->sp;
1142 tso->why_blocked = NotBlocked;
1143 dest->mut_link = NULL;
1145 IF_DEBUG(sanity,checkTSO(tso));
1147 IF_DEBUG(scheduler,printTSO(dest));
1151 /* This will no longer work: KH */
1152 if (tso == MainTSO) { /* hack */
1159 /* -----------------------------------------------------------------------------
1160 Wake up a queue that was blocked on some resource.
1161 -------------------------------------------------------------------------- */
/* Wake a single blocked TSO: mark it NotBlocked and push it on the run
 * queue.  Caller must hold sched_mutex (hence "Locked").  FRAGMENT: the
 * return type/value (the next TSO in the chain, judging by the callers in
 * unblockOne/awakenBlockedQueue -- TODO confirm) is on missing lines. */
1164 unblockOneLocked(StgTSO *tso)
1168 ASSERT(get_itbl(tso)->type == TSO);
1169 ASSERT(tso->why_blocked != NotBlocked);
1170 tso->why_blocked = NotBlocked;
1172 PUSH_ON_RUN_QUEUE(tso);
1175 IF_DEBUG(scheduler,belch("schedule (task %ld): waking up thread %ld",
1176 pthread_self(), tso->id));
1178 IF_DEBUG(scheduler,belch("schedule: waking up thread %ld", tso->id));
/* Locking wrapper around unblockOneLocked: wakes one blocked TSO under
 * sched_mutex.  FRAGMENT: the return of the updated `tso` is not visible. */
1184 unblockOne(StgTSO *tso)
1186 ACQUIRE_LOCK(&sched_mutex);
1187 tso = unblockOneLocked(tso);
1188 RELEASE_LOCK(&sched_mutex);
/* Wake an entire queue of blocked TSOs (linked via ->link), e.g. a black
 * hole's or MVar's blocking queue, under sched_mutex.  Relies on
 * unblockOneLocked returning the next TSO in the chain. */
1193 awakenBlockedQueue(StgTSO *tso)
1195 ACQUIRE_LOCK(&sched_mutex);
1196 while (tso != END_TSO_QUEUE) {
1197 tso = unblockOneLocked(tso);
1199 RELEASE_LOCK(&sched_mutex);
1202 /* -----------------------------------------------------------------------------
1204 - usually called inside a signal handler so it mustn't do anything fancy.
1205 -------------------------------------------------------------------------- */
1208 interruptStgRts(void)
1214 /* -----------------------------------------------------------------------------
1217 This is for use when we raise an exception in another thread, which
1219 -------------------------------------------------------------------------- */
/* Forcibly remove `tso` from whatever queue it is blocked on (MVar,
 * black-hole blocking queue, another thread's blocked_exceptions list, or
 * the global I/O/delay blocked queue), then mark it runnable.  Used when
 * raising an asynchronous exception in a blocked thread (see raiseAsync).
 * FRAGMENT: the match-and-unlink steps inside each search loop ("if (t ==
 * tso) { *last = tso->link; ... goto done; }" style -- TODO confirm) and
 * several braces/`done:` label lines are missing from this listing. */
1222 unblockThread(StgTSO *tso)
1226 ACQUIRE_LOCK(&sched_mutex);
1227 switch (tso->why_blocked) {
1230 return; /* not blocked */
/* Blocked on an MVar: unlink from mvar->head..tail, fixing tail. */
1233 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
1235 StgTSO *last_tso = END_TSO_QUEUE;
1236 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
1239 for (t = mvar->head; t != END_TSO_QUEUE;
1240 last = &t->link, last_tso = t, t = t->link) {
1243 if (mvar->tail == tso) {
1244 mvar->tail = last_tso;
1249 barf("unblockThread (MVAR): TSO not found");
1252 case BlockedOnBlackHole:
1253 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
1255 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
1257 last = &bq->blocking_queue;
1258 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
1259 last = &t->link, t = t->link) {
1265 barf("unblockThread (BLACKHOLE): TSO not found");
1268 case BlockedOnException:
1270 StgTSO *target = tso->block_info.tso;
1272 ASSERT(get_itbl(target)->type == TSO);
1273 ASSERT(target->blocked_exceptions != NULL);
1275 last = &target->blocked_exceptions;
1276 for (t = target->blocked_exceptions; t != END_TSO_QUEUE;
1277 last = &t->link, t = t->link) {
1278 ASSERT(get_itbl(t)->type == TSO);
1284 barf("unblockThread (Exception): TSO not found");
1287 case BlockedOnDelay:
1289 case BlockedOnWrite:
/* I/O and delay blocking share the global blocked queue; unlink and
 * patch blocked_queue_tl whether tso is at the head or mid-list. */
1291 StgTSO *prev = NULL;
1292 for (t = blocked_queue_hd; t != END_TSO_QUEUE;
1293 prev = t, t = t->link) {
1296 blocked_queue_hd = t->link;
1297 if (blocked_queue_tl == t) {
1298 blocked_queue_tl = END_TSO_QUEUE;
1301 prev->link = t->link;
1302 if (blocked_queue_tl == t) {
1303 blocked_queue_tl = prev;
1309 barf("unblockThread (I/O): TSO not found");
1313 barf("unblockThread");
/* Common exit: clear blocking state and make the thread runnable. */
1317 tso->link = END_TSO_QUEUE;
1318 tso->why_blocked = NotBlocked;
1319 tso->block_info.closure = NULL;
1320 PUSH_ON_RUN_QUEUE(tso);
1321 RELEASE_LOCK(&sched_mutex);
1324 /* -----------------------------------------------------------------------------
1327 * The following function implements the magic for raising an
1328 * asynchronous exception in an existing thread.
1330 * We first remove the thread from any queue on which it might be
1331 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
1333 * We strip the stack down to the innermost CATCH_FRAME, building
1334 * thunks in the heap for all the active computations, so they can
1335 * be restarted if necessary. When we reach a CATCH_FRAME, we build
1336 * an application of the handler to the exception, and push it on
1337 * the top of the stack.
1339 * How exactly do we save all the active computations? We create an
1340 * AP_UPD for every UpdateFrame on the stack. Entering one of these
1341 * AP_UPDs pushes everything from the corresponding update frame
1342 * upwards onto the stack. (Actually, it pushes everything up to the
1343 * next update frame plus a pointer to the next AP_UPD object.
1344 * Entering the next AP_UPD object pushes more onto the stack until we
1345 * reach the last AP_UPD object - at which point the stack should look
1346 * exactly as it did when we killed the TSO and we can continue
1347 * execution by entering the closure on top of the stack.
1349 * We can also kill a thread entirely - this happens if either (a) the
1350 * exception passed to raiseAsync is NULL, or (b) there's no
1351 * CATCH_FRAME on the stack. In either case, we strip the entire
1352 * stack and replace the thread with a zombie.
1354 * -------------------------------------------------------------------------- */
/* Kill a thread outright: raiseAsync with a NULL exception strips the
 * entire stack and turns the thread into a zombie (see the comment on
 * raiseAsync).  NOTE(review): return type and braces are elided from
 * this view of the file. */
1357 deleteThread(StgTSO *tso)
1359 raiseAsync(tso,NULL);
/* raiseAsync: raise `exception` asynchronously in `tso`, or kill the
 * thread outright when exception == NULL.  Freezes the stack into
 * AP_UPD/PAP closures frame by frame until a CATCH_FRAME (or the
 * STOP_FRAME) is reached -- see the large comment above.
 * NOTE(review): this view of the function is elided (case labels,
 * braces and some statements are missing); comments below are keyed to
 * the visible lines only. */
1363 raiseAsync(StgTSO *tso, StgClosure *exception)
1365 StgUpdateFrame* su = tso->su;
1366 StgPtr sp = tso->sp;
1368 /* Thread already dead? */
1369 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
1373 IF_DEBUG(scheduler, belch("schedule: Raising exception in thread %ld.", tso->id));
1375 /* Remove it from any blocking queues */
1378 /* The stack freezing code assumes there's a closure pointer on
1379 * the top of the stack. This isn't always the case with compiled
1380 * code, so we have to push a dummy closure on the top which just
1381 * returns to the next return address on the stack.
1383 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
1384 *(--sp) = (W_)&dummy_ret_closure;
/* Number of stack words between sp and the innermost frame `su`,
 * excluding the word sp points at (that word becomes ap->fun below). */
1388 int words = ((P_)su - (P_)sp) - 1;
1392 /* If we find a CATCH_FRAME, and we've got an exception to raise,
1393 * then build PAP(handler,exception), and leave it on top of
1394 * the stack ready to enter.
1396 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
1397 StgCatchFrame *cf = (StgCatchFrame *)su;
1398 /* we've got an exception to raise, so let's pass it to the
1399 * handler in this frame.
/* PAP with one payload word: fun = handler, payload[0] = exception. */
1401 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
1402 TICK_ALLOC_UPD_PAP(2,0);
1403 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
1406 ap->fun = cf->handler;
1407 ap->payload[0] = (P_)exception;
1409 /* sp currently points to the word above the CATCH_FRAME on the stack.
/* Pop the CATCH_FRAME itself. */
1411 sp += sizeofW(StgCatchFrame);
1414 /* Restore the blocked/unblocked state for asynchronous exceptions
1415 * at the CATCH_FRAME.
1417 * If exceptions were unblocked at the catch, arrange that they
1418 * are unblocked again after executing the handler by pushing an
1419 * unblockAsyncExceptions_ret stack frame.
1421 if (!cf->exceptions_blocked) {
1422 *(sp--) = (W_)&unblockAsyncExceptionszh_ret_info;
1425 /* Ensure that async exceptions are blocked when running the handler.
/* blocked_exceptions == NULL means "not blocked"; a non-NULL queue
 * (here the empty END_TSO_QUEUE) means "blocked". */
1427 if (tso->blocked_exceptions == NULL) {
1428 tso->blocked_exceptions = END_TSO_QUEUE;
1431 /* Put the newly-built PAP on top of the stack, ready to execute
1432 * when the thread restarts.
1436 tso->whatNext = ThreadEnterGHC;
1440 /* First build an AP_UPD consisting of the stack chunk above the
1441 * current update frame, with the top word on the stack as the
/* ... as the function (continuation of the comment is elided). */
1444 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
1449 ap->fun = (StgClosure *)sp[0];
/* Copy the `words` stack words below the fun word into the payload. */
1451 for(i=0; i < (nat)words; ++i) {
1452 ap->payload[i] = (P_)*sp++;
/* Dispatch on the kind of the innermost frame.  NOTE(review): the
 * case labels themselves are elided from this view; from the bodies
 * they appear to be UPDATE_FRAME, CATCH_FRAME, SEQ_FRAME and
 * STOP_FRAME respectively -- confirm against the full source. */
1455 switch (get_itbl(su)->type) {
/* (UPDATE_FRAME, presumably:) the frozen chunk is an updatable thunk. */
1459 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
1460 TICK_ALLOC_UP_THK(words+1,0);
1463 fprintf(stderr, "schedule: Updating ");
1464 printPtr((P_)su->updatee);
1465 fprintf(stderr, " with ");
1466 printObj((StgClosure *)ap);
1469 /* Replace the updatee with an indirection - happily
1470 * this will also wake up any threads currently
1471 * waiting on the result.
1473 UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
/* Pop the update frame, leaving the AP_UPD on top of the stack. */
1475 sp += sizeofW(StgUpdateFrame) -1;
1476 sp[0] = (W_)ap; /* push onto stack */
/* (CATCH_FRAME with no exception to raise, presumably:) */
1482 StgCatchFrame *cf = (StgCatchFrame *)su;
1485 /* We want a PAP, not an AP_UPD. Fortunately, the
1486 * layout's the same.
1488 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1489 TICK_ALLOC_UPD_PAP(words+1,0);
1491 /* now build o = FUN(catch,ap,handler) */
1492 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
1493 TICK_ALLOC_FUN(2,0);
1494 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
1495 o->payload[0] = (StgClosure *)ap;
1496 o->payload[1] = cf->handler;
1499 fprintf(stderr, "schedule: Built ");
1500 printObj((StgClosure *)o);
1503 /* pop the old handler and put o on the stack */
1505 sp += sizeofW(StgCatchFrame) - 1;
/* (SEQ_FRAME, presumably:) */
1512 StgSeqFrame *sf = (StgSeqFrame *)su;
1515 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1516 TICK_ALLOC_UPD_PAP(words+1,0);
1518 /* now build o = FUN(seq,ap) */
1519 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
1520 TICK_ALLOC_SE_THK(1,0);
1521 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
1522 payloadCPtr(o,0) = (StgClosure *)ap;
1525 fprintf(stderr, "schedule: Built ");
1526 printObj((StgClosure *)o);
1529 /* pop the old handler and put o on the stack */
1531 sp += sizeofW(StgSeqFrame) - 1;
/* (STOP_FRAME, presumably:) no CATCH_FRAME anywhere on the stack. */
1537 /* We've stripped the entire stack, the thread is now dead. */
1538 sp += sizeofW(StgStopFrame) - 1;
1539 sp[0] = (W_)exception; /* save the exception */
1540 tso->whatNext = ThreadKilled;
1541 tso->su = (StgUpdateFrame *)(sp+1);