1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.31 1999/11/09 15:46:54 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
10 /* Version with scheduler monitor support for SMPs.
12 This design provides a high-level API to create and schedule threads etc.
13 as documented in the SMP design document.
15 It uses a monitor design controlled by a single mutex to exercise control
16 over accesses to shared data structures, and builds on the Posix threads
19 The majority of state is shared. In order to keep essential per-task state,
20 there is a Capability structure, which contains all the information
21 needed to run a thread: its STG registers, a pointer to its TSO, a
22 nursery etc. During STG execution, a pointer to the capability is
23 kept in a register (BaseReg).
25 In a non-SMP build, there is one global capability, namely MainRegTable.
36 #include "StgStartup.h"
40 #include "StgMiscClosures.h"
42 #include "Evaluator.h"
46 #include "Profiling.h"
52 * These are the threads which clients have requested that we run.
54 * In an SMP build, we might have several concurrent clients all
55 * waiting for results, and each one will wait on a condition variable
56 * until the result is available.
58 * In non-SMP, clients are strictly nested: the first client calls
59 * into the RTS, which might call out again to C with a _ccall_GC, and
60 * eventually re-enter the RTS.
62 * Main threads information is kept in a linked list:
64 typedef struct StgMainThread_ {
69 pthread_cond_t wakeup;
71 struct StgMainThread_ *link;
75 * Locks required: sched_mutex.
77 static StgMainThread *main_threads;
80 * Locks required: sched_mutex.
82 StgTSO *run_queue_hd, *run_queue_tl;
83 StgTSO *blocked_queue_hd, *blocked_queue_tl;
85 /* Threads suspended in _ccall_GC.
86 * Locks required: sched_mutex.
88 static StgTSO *suspended_ccalling_threads;
90 static void GetRoots(void);
91 static StgTSO *threadStackOverflow(StgTSO *tso);
93 /* KH: The following two flags are shared memory locations. There is no need
94 to lock them, since they are only unset at the end of a scheduler
98 /* flag set by signal handler to precipitate a context switch */
100 /* if this flag is set as well, give up execution */
101 static nat interrupted;
103 /* Next thread ID to allocate.
104 * Locks required: sched_mutex
106 StgThreadID next_thread_id = 1;
109 * Pointers to the state of the current thread.
110 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
111 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
114 /* The smallest stack size that makes any sense is:
115 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
116 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
117 * + 1 (the realworld token for an IO thread)
118 * + 1 (the closure to enter)
120 * A thread with this stack will bomb immediately with a stack
121 * overflow, which will increase its stack size.
124 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
126 /* Free capability list.
127 * Locks required: sched_mutex.
130 Capability *free_capabilities; /* Available capabilities for running threads */
131 nat n_free_capabilities; /* total number of available capabilities */
133 Capability MainRegTable; /* for non-SMP, we have one global capability */
138 /* All our current task ids, saved in case we need to kill them later.
144 void addToBlockedQueue ( StgTSO *tso );
146 static void schedule ( void );
147 static void initThread ( StgTSO *tso, nat stack_size );
148 void interruptStgRts ( void );
151 pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
152 pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER;
153 pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER;
154 pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER;
159 /* -----------------------------------------------------------------------------
160 Main scheduling loop.
162 We use round-robin scheduling, each thread returning to the
163 scheduler loop when one of these conditions is detected:
166 * timer expires (thread yields)
171 Locking notes: we acquire the scheduler lock once at the beginning
172 of the scheduler loop, and release it when
174 * running a thread, or
175 * waiting for work, or
176 * waiting for a GC to complete.
178 -------------------------------------------------------------------------- */
185 StgThreadReturnCode ret;
187 ACQUIRE_LOCK(&sched_mutex);
191 /* Check whether any waiting threads need to be woken up. If the
192 * run queue is empty, and there are no other tasks running, we
193 * can wait indefinitely for something to happen.
194 * ToDo: what if another client comes along & requests another
197 if (blocked_queue_hd != END_TSO_QUEUE) {
199 (run_queue_hd == END_TSO_QUEUE)
201 && (n_free_capabilities == RtsFlags.ConcFlags.nNodes)
206 /* check for signals each time around the scheduler */
208 if (signals_pending()) {
209 start_signal_handlers();
213 /* Detect deadlock: when we have no threads to run, there are
214 * no threads waiting on I/O or sleeping, and all the other
215 * tasks are waiting for work, we must have a deadlock. Inform
216 * all the main threads.
219 if (blocked_queue_hd == END_TSO_QUEUE
220 && run_queue_hd == END_TSO_QUEUE
221 && (n_free_capabilities == RtsFlags.ConcFlags.nNodes)
224 for (m = main_threads; m != NULL; m = m->link) {
227 pthread_cond_broadcast(&m->wakeup);
232 if (blocked_queue_hd == END_TSO_QUEUE
233 && run_queue_hd == END_TSO_QUEUE) {
234 StgMainThread *m = main_threads;
237 main_threads = m->link;
243 /* If there's a GC pending, don't do anything until it has
247 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): waiting for GC\n",
249 pthread_cond_wait(&gc_pending_cond, &sched_mutex);
252 /* block until we've got a thread on the run queue and a free
255 while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) {
257 fprintf(stderr, "schedule (task %ld): waiting for work\n",
259 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
261 fprintf(stderr, "schedule (task %ld): work now available\n",
266 /* grab a thread from the run queue
273 cap = free_capabilities;
274 free_capabilities = cap->link;
275 n_free_capabilities--;
280 cap->rCurrentTSO = t;
282 /* set the context_switch flag
284 if (run_queue_hd == END_TSO_QUEUE)
289 RELEASE_LOCK(&sched_mutex);
292 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): running thread %d\n", pthread_self(),t->id));
294 IF_DEBUG(scheduler,fprintf(stderr,"schedule: running thread %d\n",t->id));
297 /* Run the current thread
299 switch (cap->rCurrentTSO->whatNext) {
302 /* Thread already finished, return to scheduler. */
303 ret = ThreadFinished;
306 ret = StgRun((StgFunPtr) stg_enterStackTop, cap);
309 ret = StgRun((StgFunPtr) stg_returnToStackTop, cap);
311 case ThreadEnterHugs:
315 IF_DEBUG(scheduler,belch("schedule: entering Hugs"));
316 c = (StgClosure *)(cap->rCurrentTSO->sp[0]);
317 cap->rCurrentTSO->sp += 1;
322 barf("Panic: entered a BCO but no bytecode interpreter in this build");
325 barf("schedule: invalid whatNext field");
328 /* Costs for the scheduler are assigned to CCS_SYSTEM */
333 ACQUIRE_LOCK(&sched_mutex);
336 IF_DEBUG(scheduler,fprintf(stderr,"schedule (task %ld): ", pthread_self()););
338 IF_DEBUG(scheduler,fprintf(stderr,"schedule: "););
340 t = cap->rCurrentTSO;
344 /* make all the running tasks block on a condition variable,
345 * maybe set context_switch and wait till they all pile in,
346 * then have them wait on a GC condition variable.
348 IF_DEBUG(scheduler,belch("thread %ld stopped: HeapOverflow", t->id));
351 ready_to_gc = rtsTrue;
352 context_switch = 1; /* stop other threads ASAP */
353 PUSH_ON_RUN_QUEUE(t);
357 /* just adjust the stack for this thread, then pop it back
360 IF_DEBUG(scheduler,belch("thread %ld stopped, StackOverflow", t->id));
364 /* enlarge the stack */
365 StgTSO *new_t = threadStackOverflow(t);
367 /* This TSO has moved, so update any pointers to it from the
368 * main thread stack. It better not be on any other queues...
371 for (m = main_threads; m != NULL; m = m->link) {
376 PUSH_ON_RUN_QUEUE(new_t);
381 /* put the thread back on the run queue. Then, if we're ready to
382 * GC, check whether this is the last task to stop. If so, wake
383 * up the GC thread. getThread will block during a GC until the
387 if (t->whatNext == ThreadEnterHugs) {
388 /* ToDo: or maybe a timer expired when we were in Hugs?
389 * or maybe someone hit ctrl-C
391 belch("thread %ld stopped to switch to Hugs", t->id);
393 belch("thread %ld stopped, yielding", t->id);
397 APPEND_TO_RUN_QUEUE(t);
401 /* don't need to do anything. Either the thread is blocked on
402 * I/O, in which case we'll have called addToBlockedQueue
403 * previously, or it's blocked on an MVar or Blackhole, in which
404 * case it'll be on the relevant queue already.
407 fprintf(stderr, "thread %d stopped, ", t->id);
408 printThreadBlockage(t);
409 fprintf(stderr, "\n"));
414 /* Need to check whether this was a main thread, and if so, signal
415 * the task that started it with the return value. If we have no
416 * more main threads, we probably need to stop all the tasks until
419 IF_DEBUG(scheduler,belch("thread %ld finished", t->id));
420 t->whatNext = ThreadComplete;
424 barf("doneThread: invalid thread return code");
428 cap->link = free_capabilities;
429 free_capabilities = cap;
430 n_free_capabilities++;
434 if (ready_to_gc && n_free_capabilities == RtsFlags.ConcFlags.nNodes) {
438 /* everybody back, start the GC.
439 * Could do it in this thread, or signal a condition var
440 * to do it in another thread. Either way, we need to
441 * broadcast on gc_pending_cond afterward.
444 IF_DEBUG(scheduler,belch("schedule (task %ld): doing GC", pthread_self()));
446 GarbageCollect(GetRoots);
447 ready_to_gc = rtsFalse;
449 pthread_cond_broadcast(&gc_pending_cond);
453 /* Go through the list of main threads and wake up any
454 * clients whose computations have finished. ToDo: this
455 * should be done more efficiently without a linear scan
456 * of the main threads list, somehow...
460 StgMainThread *m, **prev;
461 prev = &main_threads;
462 for (m = main_threads; m != NULL; m = m->link) {
463 if (m->tso->whatNext == ThreadComplete) {
465 *(m->ret) = (StgClosure *)m->tso->sp[0];
469 pthread_cond_broadcast(&m->wakeup);
471 if (m->tso->whatNext == ThreadKilled) {
474 pthread_cond_broadcast(&m->wakeup);
479 /* If our main thread has finished or been killed, return.
480 * If we were re-entered as a result of a _ccall_gc, then
481 * pop the blocked thread off the ccalling_threads stack back
485 StgMainThread *m = main_threads;
486 if (m->tso->whatNext == ThreadComplete
487 || m->tso->whatNext == ThreadKilled) {
488 main_threads = main_threads->link;
489 if (m->tso->whatNext == ThreadComplete) {
490 /* we finished successfully, fill in the return value */
491 if (m->ret) { *(m->ret) = (StgClosure *)m->tso->sp[0]; };
502 } /* end of while(1) */
505 /* -----------------------------------------------------------------------------
506 * Suspending & resuming Haskell threads.
508 * When making a "safe" call to C (aka _ccall_GC), the task gives back
509 * its capability before calling the C function. This allows another
510 * task to pick up the capability and carry on running Haskell
511 * threads. It also means that if the C call blocks, it won't lock
514 * The Haskell thread making the C call is put to sleep for the
515 * duration of the call, on the suspended_ccalling_threads queue. We
516 * give out a token to the task, which it can use to resume the thread
517 * on return from the C function.
518 * -------------------------------------------------------------------------- */
521 suspendThread( Capability *cap )
525 ACQUIRE_LOCK(&sched_mutex);
529 fprintf(stderr, "schedule (task %ld): thread %d did a _ccall_gc\n",
530 pthread_self(), cap->rCurrentTSO->id));
533 fprintf(stderr, "schedule: thread %d did a _ccall_gc\n",
534 cap->rCurrentTSO->id));
537 threadPaused(cap->rCurrentTSO);
538 cap->rCurrentTSO->link = suspended_ccalling_threads;
539 suspended_ccalling_threads = cap->rCurrentTSO;
541 /* Use the thread ID as the token; it should be unique */
542 tok = cap->rCurrentTSO->id;
545 cap->link = free_capabilities;
546 free_capabilities = cap;
547 n_free_capabilities++;
550 RELEASE_LOCK(&sched_mutex);
555 resumeThread( StgInt tok )
560 ACQUIRE_LOCK(&sched_mutex);
562 prev = &suspended_ccalling_threads;
563 for (tso = suspended_ccalling_threads;
564 tso != END_TSO_QUEUE;
565 prev = &tso->link, tso = tso->link) {
566 if (tso->id == (StgThreadID)tok) {
571 if (tso == END_TSO_QUEUE) {
572 barf("resumeThread: thread not found");
576 while (free_capabilities == NULL) {
578 fprintf(stderr,"schedule (task %ld): waiting to resume\n",
580 pthread_cond_wait(&thread_ready_cond, &sched_mutex);
581 IF_DEBUG(scheduler,fprintf(stderr,
582 "schedule (task %ld): resuming thread %d\n",
583 pthread_self(), tso->id));
585 cap = free_capabilities;
586 free_capabilities = cap->link;
587 n_free_capabilities--;
592 cap->rCurrentTSO = tso;
594 RELEASE_LOCK(&sched_mutex);
598 /* -----------------------------------------------------------------------------
600 * -------------------------------------------------------------------------- */
601 static void unblockThread(StgTSO *tso);
603 /* -----------------------------------------------------------------------------
604 * Comparing Thread ids.
606 * This is used from STG land in the implementation of the
607 * instances of Eq/Ord for ThreadIds.
608 * -------------------------------------------------------------------------- */
610 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
612 StgThreadID id1 = tso1->id;
613 StgThreadID id2 = tso2->id;
615 if (id1 < id2) return (-1);
616 if (id1 > id2) return 1;
620 /* -----------------------------------------------------------------------------
623 The new thread starts with the given stack size. Before the
624 scheduler can run, however, this thread needs to have a closure
625 (and possibly some arguments) pushed on its stack. See
626 pushClosure() in Schedule.h.
628 createGenThread() and createIOThread() (in SchedAPI.h) are
629 convenient packaged versions of this function.
630 -------------------------------------------------------------------------- */
633 createThread(nat stack_size)
637 /* catch ridiculously small stack sizes */
638 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
639 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
642 tso = (StgTSO *)allocate(stack_size);
643 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
645 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
650 initThread(StgTSO *tso, nat stack_size)
652 SET_INFO(tso,&TSO_info);
653 tso->whatNext = ThreadEnterGHC;
655 /* tso->id needs to be unique. For now we use a heavyweight mutex to
656 protect the increment operation on next_thread_id.
657 In future, we could use an atomic increment instead.
660 ACQUIRE_LOCK(&sched_mutex);
661 tso->id = next_thread_id++;
662 RELEASE_LOCK(&sched_mutex);
664 tso->why_blocked = NotBlocked;
666 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
667 tso->stack_size = stack_size;
668 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
670 tso->sp = (P_)&(tso->stack) + stack_size;
673 tso->prof.CCCS = CCS_MAIN;
676 /* put a stop frame on the stack */
677 tso->sp -= sizeofW(StgStopFrame);
678 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
679 tso->su = (StgUpdateFrame*)tso->sp;
681 IF_DEBUG(scheduler,belch("schedule: Initialised thread %ld, stack size = %lx words",
682 tso->id, tso->stack_size));
687 /* -----------------------------------------------------------------------------
690 * scheduleThread puts a thread on the head of the runnable queue.
691 * This will usually be done immediately after a thread is created.
692 * The caller of scheduleThread must create the thread using e.g.
693 * createThread and push an appropriate closure
694 * on this thread's stack before the scheduler is invoked.
695 * -------------------------------------------------------------------------- */
698 scheduleThread(StgTSO *tso)
700 ACQUIRE_LOCK(&sched_mutex);
702 /* Put the new thread on the head of the runnable queue. The caller
703 * better push an appropriate closure on this thread's stack
704 * beforehand. In the SMP case, the thread may start running as
705 * soon as we release the scheduler lock below.
707 PUSH_ON_RUN_QUEUE(tso);
710 IF_DEBUG(scheduler,printTSO(tso));
711 RELEASE_LOCK(&sched_mutex);
715 /* -----------------------------------------------------------------------------
718 * Start up Posix threads to run each of the scheduler tasks.
719 * I believe the task ids are not needed in the system as defined.
721 * -------------------------------------------------------------------------- */
725 taskStart( void *arg STG_UNUSED )
732 /* -----------------------------------------------------------------------------
735 * Initialise the scheduler. This resets all the queues - if the
736 * queues contained any threads, they'll be garbage collected at the
739 * This now calls startTasks(), so should only be called once! KH @ 25/10/99
740 * -------------------------------------------------------------------------- */
744 term_handler(int sig STG_UNUSED)
747 ACQUIRE_LOCK(&term_mutex);
749 RELEASE_LOCK(&term_mutex);
754 void initScheduler(void)
756 run_queue_hd = END_TSO_QUEUE;
757 run_queue_tl = END_TSO_QUEUE;
758 blocked_queue_hd = END_TSO_QUEUE;
759 blocked_queue_tl = END_TSO_QUEUE;
761 suspended_ccalling_threads = END_TSO_QUEUE;
768 enteredCAFs = END_CAF_LIST;
770 /* Install the SIGTERM handler */
773 struct sigaction action,oact;
775 action.sa_handler = term_handler;
776 sigemptyset(&action.sa_mask);
778 if (sigaction(SIGTERM, &action, &oact) != 0) {
779 barf("can't install TERM handler");
785 /* Allocate N Capabilities */
788 Capability *cap, *prev;
791 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
792 cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
796 free_capabilities = cap;
797 n_free_capabilities = RtsFlags.ConcFlags.nNodes;
799 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Allocated %d capabilities\n",
800 n_free_capabilities););
812 /* make some space for saving all the thread ids */
813 task_ids = stgMallocBytes(RtsFlags.ConcFlags.nNodes * sizeof(task_info),
814 "initScheduler:task_ids");
816 /* and create all the threads */
817 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
818 r = pthread_create(&tid,NULL,taskStart,NULL);
820 barf("startTasks: Can't create new Posix thread");
822 task_ids[i].id = tid;
823 task_ids[i].mut_time = 0.0;
824 task_ids[i].mut_etime = 0.0;
825 task_ids[i].gc_time = 0.0;
826 task_ids[i].gc_etime = 0.0;
827 task_ids[i].elapsedtimestart = elapsedtime();
828 IF_DEBUG(scheduler,fprintf(stderr,"schedule: Started task: %ld\n",tid););
834 exitScheduler( void )
839 /* Don't want to use pthread_cancel, since we'd have to install
840 * these silly exception handlers (pthread_cleanup_{push,pop}) around
844 /* Cancel all our tasks */
845 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
846 pthread_cancel(task_ids[i].id);
849 /* Wait for all the tasks to terminate */
850 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
851 IF_DEBUG(scheduler,fprintf(stderr,"schedule: waiting for task %ld\n",
853 pthread_join(task_ids[i].id, NULL);
857 /* Send 'em all a SIGTERM. That should shut 'em up.
859 await_death = RtsFlags.ConcFlags.nNodes;
860 for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
861 pthread_kill(task_ids[i].id,SIGTERM);
863 while (await_death > 0) {
869 /* -----------------------------------------------------------------------------
870 Managing the per-task allocation areas.
872 Each capability comes with an allocation area. These are
873 fixed-length block lists into which allocation can be done.
875 ToDo: no support for two-space collection at the moment???
876 -------------------------------------------------------------------------- */
878 /* -----------------------------------------------------------------------------
879 * waitThread is the external interface for running a new computation
880 * and waiting for the result.
882 * In the non-SMP case, we create a new main thread, push it on the
883 * main-thread stack, and invoke the scheduler to run it. The
884 * scheduler will return when the top main thread on the stack has
885 * completed or died, and fill in the necessary fields of the
886 * main_thread structure.
888 * In the SMP case, we create a main thread as before, but we then
889 * create a new condition variable and sleep on it. When our new
890 * main thread has completed, we'll be woken up and the status/result
891 * will be in the main_thread struct.
892 * -------------------------------------------------------------------------- */
895 waitThread(StgTSO *tso, /*out*/StgClosure **ret)
898 SchedulerStatus stat;
900 ACQUIRE_LOCK(&sched_mutex);
902 m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
908 pthread_cond_init(&m->wakeup, NULL);
911 m->link = main_threads;
914 IF_DEBUG(scheduler, fprintf(stderr, "schedule: new main thread (%d)\n",
919 pthread_cond_wait(&m->wakeup, &sched_mutex);
920 } while (m->stat == NoStatus);
923 ASSERT(m->stat != NoStatus);
929 pthread_cond_destroy(&m->wakeup);
933 RELEASE_LOCK(&sched_mutex);
937 /* -----------------------------------------------------------------------------
938 Debugging: why is a thread blocked
939 -------------------------------------------------------------------------- */
942 void printThreadBlockage(StgTSO *tso)
944 switch (tso->why_blocked) {
946 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
949 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
952 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
955 fprintf(stderr,"blocked on an MVar");
957 case BlockedOnBlackHole:
958 fprintf(stderr,"blocked on a black hole");
961 fprintf(stderr,"not blocked");
967 /* -----------------------------------------------------------------------------
968 Where are the roots that we know about?
970 - all the threads on the runnable queue
971 - all the threads on the blocked queue
972 - all the thread currently executing a _ccall_GC
973 - all the "main threads"
975 -------------------------------------------------------------------------- */
977 /* This has to be protected either by the scheduler monitor, or by the
978 garbage collection monitor (probably the latter).
982 static void GetRoots(void)
986 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
987 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
989 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
990 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
992 for (m = main_threads; m != NULL; m = m->link) {
993 m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
995 suspended_ccalling_threads =
996 (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
999 /* -----------------------------------------------------------------------------
1002 This is the interface to the garbage collector from Haskell land.
1003 We provide this so that external C code can allocate and garbage
1004 collect when called from Haskell via _ccall_GC.
1006 It might be useful to provide an interface whereby the programmer
1007 can specify more roots (ToDo).
1009 This needs to be protected by the GC condition variable above. KH.
1010 -------------------------------------------------------------------------- */
1012 void (*extra_roots)(void);
1017 GarbageCollect(GetRoots);
1023 GetRoots(); /* the scheduler's roots */
1024 extra_roots(); /* the user's roots */
1028 performGCWithRoots(void (*get_roots)(void))
1030 extra_roots = get_roots;
1032 GarbageCollect(AllRoots);
1035 /* -----------------------------------------------------------------------------
1038 If the thread has reached its maximum stack size,
1039 then bomb out. Otherwise relocate the TSO into a larger chunk of
1040 memory and adjust its stack size appropriately.
1041 -------------------------------------------------------------------------- */
1044 threadStackOverflow(StgTSO *tso)
1046 nat new_stack_size, new_tso_size, diff, stack_words;
1050 if (tso->stack_size >= tso->max_stack_size) {
1052 /* If we're debugging, just print out the top of the stack */
1053 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
1057 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
1060 /* Send this thread the StackOverflow exception */
1061 raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
1066 /* Try to double the current stack size. If that takes us over the
1067 * maximum stack size for this thread, then use the maximum instead.
1068 * Finally round up so the TSO ends up as a whole number of blocks.
1070 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
1071 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
1072 TSO_STRUCT_SIZE)/sizeof(W_);
1073 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
1074 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
1076 IF_DEBUG(scheduler, fprintf(stderr,"schedule: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
1078 dest = (StgTSO *)allocate(new_tso_size);
1079 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
1081 /* copy the TSO block and the old stack into the new area */
1082 memcpy(dest,tso,TSO_STRUCT_SIZE);
1083 stack_words = tso->stack + tso->stack_size - tso->sp;
1084 new_sp = (P_)dest + new_tso_size - stack_words;
1085 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
1087 /* relocate the stack pointers... */
1088 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
1089 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1091 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
1092 dest->stack_size = new_stack_size;
1094 /* and relocate the update frame list */
1095 relocate_TSO(tso, dest);
1097 /* Mark the old one as dead so we don't try to scavenge it during
1098 * garbage collection (the TSO will likely be on a mutables list in
1099 * some generation, but it'll get collected soon enough). It's
1100 * important to set the sp and su values to just beyond the end of
1101 * the stack, so we don't attempt to scavenge any part of the dead
1104 tso->whatNext = ThreadKilled;
1105 tso->sp = (P_)&(tso->stack[tso->stack_size]);
1106 tso->su = (StgUpdateFrame *)tso->sp;
1107 tso->why_blocked = NotBlocked;
1108 dest->mut_link = NULL;
1110 IF_DEBUG(sanity,checkTSO(tso));
1112 IF_DEBUG(scheduler,printTSO(dest));
1116 /* This will no longer work: KH */
1117 if (tso == MainTSO) { /* hack */
1124 /* -----------------------------------------------------------------------------
1125 Wake up a queue that was blocked on some resource.
1126 -------------------------------------------------------------------------- */
1129 unblockOneLocked(StgTSO *tso)
1133 ASSERT(get_itbl(tso)->type == TSO);
1134 ASSERT(tso->why_blocked != NotBlocked);
1135 tso->why_blocked = NotBlocked;
1137 PUSH_ON_RUN_QUEUE(tso);
1140 IF_DEBUG(scheduler,belch("schedule (task %ld): waking up thread %ld",
1141 pthread_self(), tso->id));
1143 IF_DEBUG(scheduler,belch("schedule: waking up thread %ld", tso->id));
1149 unblockOne(StgTSO *tso)
1151 ACQUIRE_LOCK(&sched_mutex);
1152 tso = unblockOneLocked(tso);
1153 RELEASE_LOCK(&sched_mutex);
1158 awakenBlockedQueue(StgTSO *tso)
1160 ACQUIRE_LOCK(&sched_mutex);
1161 while (tso != END_TSO_QUEUE) {
1162 tso = unblockOneLocked(tso);
1164 RELEASE_LOCK(&sched_mutex);
1167 /* -----------------------------------------------------------------------------
1169 - usually called inside a signal handler so it mustn't do anything fancy.
1170 -------------------------------------------------------------------------- */
1173 interruptStgRts(void)
1179 /* -----------------------------------------------------------------------------
1182 This is for use when we raise an exception in another thread, which
1184 -------------------------------------------------------------------------- */
1187 unblockThread(StgTSO *tso)
1191 ACQUIRE_LOCK(&sched_mutex);
1192 switch (tso->why_blocked) {
1195 return; /* not blocked */
1198 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
1200 StgTSO *last_tso = END_TSO_QUEUE;
1201 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
1204 for (t = mvar->head; t != END_TSO_QUEUE;
1205 last = &t->link, last_tso = t, t = t->link) {
1208 if (mvar->tail == tso) {
1209 mvar->tail = last_tso;
1214 barf("unblockThread (MVAR): TSO not found");
1217 case BlockedOnBlackHole:
1218 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
1220 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
1222 last = &bq->blocking_queue;
1223 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
1224 last = &t->link, t = t->link) {
1230 barf("unblockThread (BLACKHOLE): TSO not found");
1233 case BlockedOnDelay:
1235 case BlockedOnWrite:
1237 last = &blocked_queue_hd;
1238 for (t = blocked_queue_hd; t != END_TSO_QUEUE;
1239 last = &t->link, t = t->link) {
1242 if (blocked_queue_tl == t) {
1243 blocked_queue_tl = tso->link;
1248 barf("unblockThread (I/O): TSO not found");
1252 barf("unblockThread");
1256 tso->link = END_TSO_QUEUE;
1257 tso->why_blocked = NotBlocked;
1258 tso->block_info.closure = NULL;
1259 PUSH_ON_RUN_QUEUE(tso);
1260 RELEASE_LOCK(&sched_mutex);
1263 /* -----------------------------------------------------------------------------
1266 * The following function implements the magic for raising an
1267 * asynchronous exception in an existing thread.
1269 * We first remove the thread from any queue on which it might be
1270 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
1272 * We strip the stack down to the innermost CATCH_FRAME, building
1273 * thunks in the heap for all the active computations, so they can
1274 * be restarted if necessary. When we reach a CATCH_FRAME, we build
1275 * an application of the handler to the exception, and push it on
1276 * the top of the stack.
1278 * How exactly do we save all the active computations? We create an
1279 * AP_UPD for every UpdateFrame on the stack. Entering one of these
1280 * AP_UPDs pushes everything from the corresponding update frame
1281 * upwards onto the stack. (Actually, it pushes everything up to the
1282 * next update frame plus a pointer to the next AP_UPD object.
1283 * Entering the next AP_UPD object pushes more onto the stack until we
1284 * reach the last AP_UPD object - at which point the stack should look
1285 * exactly as it did when we killed the TSO and we can continue
1286 * execution by entering the closure on top of the stack.
1288 * We can also kill a thread entirely - this happens if either (a) the
1289 * exception passed to raiseAsync is NULL, or (b) there's no
1290 * CATCH_FRAME on the stack. In either case, we strip the entire
1291 * stack and replace the thread with a zombie.
1293 * -------------------------------------------------------------------------- */
1296 deleteThread(StgTSO *tso)
1298 raiseAsync(tso,NULL);
1302 raiseAsync(StgTSO *tso, StgClosure *exception)
1304 StgUpdateFrame* su = tso->su;
1305 StgPtr sp = tso->sp;
1307 /* Thread already dead? */
1308 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
1312 IF_DEBUG(scheduler, belch("schedule: Raising exception in thread %ld.", tso->id));
1314 /* Remove it from any blocking queues */
1317 /* The stack freezing code assumes there's a closure pointer on
1318 * the top of the stack. This isn't always the case with compiled
1319 * code, so we have to push a dummy closure on the top which just
1320 * returns to the next return address on the stack.
1322 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
1323 *(--sp) = (W_)&dummy_ret_closure;
1327 int words = ((P_)su - (P_)sp) - 1;
1331 /* If we find a CATCH_FRAME, and we've got an exception to raise,
1332 * then build PAP(handler,exception), and leave it on top of
1333 * the stack ready to enter.
1335 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
1336 StgCatchFrame *cf = (StgCatchFrame *)su;
1337 /* we've got an exception to raise, so let's pass it to the
1338 * handler in this frame.
1340 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
1341 TICK_ALLOC_UPD_PAP(2,0);
1342 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
1345 ap->fun = cf->handler;
1346 ap->payload[0] = (P_)exception;
1348 /* sp currently points to the word above the CATCH_FRAME on the
1349 * stack. Replace the CATCH_FRAME with a pointer to the new handler
1352 sp += sizeofW(StgCatchFrame);
1356 tso->whatNext = ThreadEnterGHC;
1360 /* First build an AP_UPD consisting of the stack chunk above the
1361 * current update frame, with the top word on the stack as the
1364 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
1369 ap->fun = (StgClosure *)sp[0];
1371 for(i=0; i < (nat)words; ++i) {
1372 ap->payload[i] = (P_)*sp++;
1375 switch (get_itbl(su)->type) {
1379 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
1380 TICK_ALLOC_UP_THK(words+1,0);
1383 fprintf(stderr, "schedule: Updating ");
1384 printPtr((P_)su->updatee);
1385 fprintf(stderr, " with ");
1386 printObj((StgClosure *)ap);
1389 /* Replace the updatee with an indirection - happily
1390 * this will also wake up any threads currently
1391 * waiting on the result.
1393 UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
1395 sp += sizeofW(StgUpdateFrame) -1;
1396 sp[0] = (W_)ap; /* push onto stack */
1402 StgCatchFrame *cf = (StgCatchFrame *)su;
1405 /* We want a PAP, not an AP_UPD. Fortunately, the
1406 * layout's the same.
1408 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1409 TICK_ALLOC_UPD_PAP(words+1,0);
1411 /* now build o = FUN(catch,ap,handler) */
1412 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
1413 TICK_ALLOC_FUN(2,0);
1414 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
1415 o->payload[0] = (StgClosure *)ap;
1416 o->payload[1] = cf->handler;
1419 fprintf(stderr, "schedule: Built ");
1420 printObj((StgClosure *)o);
1423 /* pop the old handler and put o on the stack */
1425 sp += sizeofW(StgCatchFrame) - 1;
1432 StgSeqFrame *sf = (StgSeqFrame *)su;
1435 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
1436 TICK_ALLOC_UPD_PAP(words+1,0);
1438 /* now build o = FUN(seq,ap) */
1439 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
1440 TICK_ALLOC_SE_THK(1,0);
1441 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
1442 payloadCPtr(o,0) = (StgClosure *)ap;
1445 fprintf(stderr, "schedule: Built ");
1446 printObj((StgClosure *)o);
1449 /* pop the old handler and put o on the stack */
1451 sp += sizeofW(StgSeqFrame) - 1;
1457 /* We've stripped the entire stack, the thread is now dead. */
1458 sp += sizeofW(StgStopFrame) - 1;
1459 sp[0] = (W_)exception; /* save the exception */
1460 tso->whatNext = ThreadKilled;
1461 tso->su = (StgUpdateFrame *)(sp+1);