1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.23 1999/08/25 10:23:53 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
16 #include "StgStartup.h"
20 #include "StgMiscClosures.h"
22 #include "Evaluator.h"
26 #include "Profiling.h"
/* Scheduler global state.
 * NOTE(review): this excerpt is a sparse, line-numbered listing; original
 * source lines are missing between the numbered lines throughout.
 */
/* Head and tail of the queue of runnable threads. */
29 StgTSO *run_queue_hd, *run_queue_tl;
/* Head and tail of the queue of blocked threads. */
30 StgTSO *blocked_queue_hd, *blocked_queue_tl;
/* Threads currently blocked inside a _ccall_gc (pushed in schedule()). */
31 StgTSO *ccalling_threads;
/* Maximum depth of nested scheduler entries (re-entry via _ccall_gc). */
33 #define MAX_SCHEDULE_NESTING 256
/* Stack of main-thread pointers, one slot per nested schedule() call;
 * indexed by next_main_thread (declared elsewhere in the full source).
 */
35 StgTSO *main_threads[MAX_SCHEDULE_NESTING];
/* Forward declarations for file-local helpers defined below. */
37 static void GetRoots(void);
38 static StgTSO *threadStackOverflow(StgTSO *tso);
40 /* flag set by signal handler to precipitate a context switch */
42 /* if this flag is set as well, give up execution */
43 static nat interrupted;
45 /* Next thread ID to allocate */
46 StgThreadID next_thread_id = 1;
49 * Pointers to the state of the current thread.
50 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
51 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
/* Register table used while Haskell code is running. */
54 StgRegTable MainRegTable;
57 * The thread state for the main thread.
61 /* The smallest stack size that makes any sense is:
62 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
63 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
64 * + 1 (the realworld token for an IO thread)
65 * + 1 (the closure to enter)
67 * A thread with this stack will bomb immediately with a stack
68 * overflow, which will increase its stack size.
/* The "+ 2" covers the realworld token and the closure to enter (above). */
71 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
73 /* -----------------------------------------------------------------------------
75 * -------------------------------------------------------------------------- */
76 static void unblockThread(StgTSO *tso);
78 /* -----------------------------------------------------------------------------
79 * Comparing Thread ids.
81 * This is used from STG land in the implementation of the
82 * instances of Eq/Ord for ThreadIds.
83 * -------------------------------------------------------------------------- */
85 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
87 StgThreadID id1 = tso1->id;
88 StgThreadID id2 = tso2->id;
90 if (id1 < id2) return (-1);
91 if (id1 > id2) return 1;
95 /* -----------------------------------------------------------------------------
98 The new thread starts with the given stack size. Before the
99 scheduler can run, however, this thread needs to have a closure
100 (and possibly some arguments) pushed on its stack. See
101 pushClosure() in Schedule.h.
103 createGenThread() and createIOThread() (in SchedAPI.h) are
104 convenient packaged versions of this function.
105 -------------------------------------------------------------------------- */
/* Allocate a new TSO of stack_size total words and initialise it.
 * NOTE(review): the return-type line and enclosing braces are missing from
 * this excerpt; in the full source this returns the new StgTSO*.
 */
108 createThread(nat stack_size)
112 /* catch ridiculously small stack sizes */
113 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
/* clamp: must fit the stop frame plus the closure/token words */
114 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
117 tso = (StgTSO *)allocate(stack_size);
/* profiling tick: count only the stack words, not the TSO header */
118 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
/* initThread takes the usable stack size, i.e. minus the TSO struct */
120 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* Initialise a freshly-allocated TSO: set its info table, id, stack
 * pointers, stop frame, and link it onto the head of the run queue.
 * NOTE(review): several lines (braces, the else-branch of the queue
 * linkage at line 156, etc.) are missing from this excerpt.
 */
125 initThread(StgTSO *tso, nat stack_size)
127 SET_INFO(tso,&TSO_info);
/* new threads start life as GHC (compiled-code) threads */
128 tso->whatNext = ThreadEnterGHC;
129 tso->id = next_thread_id++;
130 tso->blocked_on = NULL;
/* stack limit sits RESERVED_STACK_WORDS above the stack base, so there is
 * always room to recover from a stack-overflow check */
132 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
133 tso->stack_size = stack_size;
134 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
/* stack grows downwards: sp starts just past the top of the stack area */
136 tso->sp = (P_)&(tso->stack) + stack_size;
139 tso->prof.CCCS = CCS_MAIN;
142 /* put a stop frame on the stack */
143 tso->sp -= sizeofW(StgStopFrame);
144 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
/* the stop frame is the initial (outermost) update-frame-chain entry */
145 tso->su = (StgUpdateFrame*)tso->sp;
147 IF_DEBUG(scheduler,belch("Initialised thread %ld, stack size = %lx words\n",
148 tso->id, tso->stack_size));
150 /* Put the new thread on the head of the runnable queue.
151 * The caller of createThread better push an appropriate closure
152 * on this thread's stack before the scheduler is invoked.
154 tso->link = run_queue_hd;
/* if the queue was empty, this thread also becomes the tail */
156 if (run_queue_tl == END_TSO_QUEUE) {
160 IF_DEBUG(scheduler,printTSO(tso));
163 /* -----------------------------------------------------------------------------
166 * Initialise the scheduler. This resets all the queues - if the
167 * queues contained any threads, they'll be garbage collected at the
169 * -------------------------------------------------------------------------- */
171 void initScheduler(void)
/* all thread queues start empty */
173 run_queue_hd = END_TSO_QUEUE;
174 run_queue_tl = END_TSO_QUEUE;
175 blocked_queue_hd = END_TSO_QUEUE;
176 blocked_queue_tl = END_TSO_QUEUE;
177 ccalling_threads = END_TSO_QUEUE;
/* no nested scheduler entries yet: main_threads[] stack is empty */
178 next_main_thread = 0;
183 enteredCAFs = END_CAF_LIST;
186 /* -----------------------------------------------------------------------------
187 Main scheduling loop.
189 We use round-robin scheduling, each thread returning to the
190 scheduler loop when one of these conditions is detected:
194 * timer expires (thread yields)
197 -------------------------------------------------------------------------- */
/* Run threads round-robin until the given main thread completes or is
 * killed; on success *ret_val receives the value left on its stack.
 * NOTE(review): this excerpt omits many lines of the loop (closing braces,
 * some case labels, the Hugs interpreter path, etc.).
 */
199 SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)
202 StgThreadReturnCode ret;
206 /* Return value is NULL by default, it is only filled in if the
207 * main thread completes successfully.
209 if (ret_val) { *ret_val = NULL; }
211 /* Save away a pointer to the main thread so that we can keep track
212 * of it should a garbage collection happen. We keep a stack of
213 * main threads in order to support scheduler re-entry. We can't
214 * use the normal TSO linkage for this stack, because the main TSO
215 * may need to be linked onto other queues.
217 main_threads[next_main_thread] = main;
/* MainTSO points INTO main_threads[] so GC relocation is seen here too */
218 MainTSO = &main_threads[next_main_thread];
221 fprintf(stderr, "Scheduler entered: nesting = %d\n",
224 /* Are we being re-entered?
226 if (CurrentTSO != NULL) {
227 /* This happens when a _ccall_gc from Haskell ends up re-entering
230 * Block the current thread (put it on the ccalling_queue) and
231 * continue executing. The calling thread better have stashed
232 * away its state properly and left its stack with a proper stack
235 threadPaused(CurrentTSO);
/* push the caller onto the ccalling_threads stack */
236 CurrentTSO->link = ccalling_threads;
237 ccalling_threads = CurrentTSO;
238 in_ccall_gc = rtsTrue;
240 fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",
243 in_ccall_gc = rtsFalse;
246 /* Take a thread from the run queue.
249 if (t != END_TSO_QUEUE) {
/* pop the head; keep the tail consistent if the queue drains */
250 run_queue_hd = t->link;
251 t->link = END_TSO_QUEUE;
252 if (run_queue_hd == END_TSO_QUEUE) {
253 run_queue_tl = END_TSO_QUEUE;
/* main loop: run threads until the run queue is exhausted or we return */
257 while (t != END_TSO_QUEUE) {
260 /* If we have more threads on the run queue, set up a context
261 * switch at some point in the future.
263 if (run_queue_hd != END_TSO_QUEUE) {
268 IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));
270 /* Be friendly to the storage manager: we're about to *run* this
271 * thread, so we better make sure the TSO is mutable.
273 if (t->mut_link == NULL) {
274 recordMutable((StgMutClosure *)t);
277 /* Run the current thread */
/* dispatch on how the thread should be resumed */
278 switch (t->whatNext) {
281 /* thread already killed. Drop it and carry on. */
284 ret = StgRun((StgFunPtr) stg_enterStackTop);
287 ret = StgRun((StgFunPtr) stg_returnToStackTop);
289 case ThreadEnterHugs:
292 IF_DEBUG(scheduler,belch("entering Hugs"));
294 /* CHECK_SENSIBLE_REGS(); */
296 StgClosure* c = (StgClosure *)Sp[0];
304 barf("Panic: entered a BCO but no bytecode interpreter in this build");
307 barf("schedule: invalid whatNext field");
310 /* We may have garbage collected while running the thread
311 * (eg. something nefarious like _ccall_GC_ performGC), and hence
312 * CurrentTSO may have moved. Update t to reflect this.
317 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* handle the reason the thread returned to the scheduler */
325 IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
/* keep t alive across GC by putting it back on the run queue first */
327 PUSH_ON_RUN_QUEUE(t);
328 GarbageCollect(GetRoots);
332 IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
335 /* enlarge the stack */
336 StgTSO *new_t = threadStackOverflow(t);
338 /* This TSO has moved, so update any pointers to it from the
339 * main thread stack. It better not be on any other queues...
342 for (i = 0; i < next_main_thread; i++) {
343 if (main_threads[i] == t) {
344 main_threads[i] = new_t;
349 PUSH_ON_RUN_QUEUE(t);
354 if (t->whatNext == ThreadEnterHugs) {
355 /* ToDo: or maybe a timer expired when we were in Hugs?
356 * or maybe someone hit ctrl-C
358 belch("Thread %ld stopped to switch to Hugs\n", t->id);
360 belch("Thread %ld stopped, timer expired\n", t->id);
365 IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
/* interrupted: unlink every remaining runnable thread */
367 while (run_queue_hd != END_TSO_QUEUE) {
368 run_queue_hd = t->link;
371 run_queue_tl = END_TSO_QUEUE;
372 /* ToDo: should I do the same with blocked queues? */
376 /* Put the thread back on the run queue, at the end.
377 * t->link is already set to END_TSO_QUEUE.
379 ASSERT(t->link == END_TSO_QUEUE);
380 if (run_queue_tl == END_TSO_QUEUE) {
381 run_queue_hd = run_queue_tl = t;
383 ASSERT(get_itbl(run_queue_tl)->type == TSO);
384 if (run_queue_hd == run_queue_tl) {
385 run_queue_hd->link = t;
388 run_queue_tl->link = t;
395 IF_DEBUG(scheduler,belch("Thread %ld stopped, blocking\n", t->id));
397 /* assume the thread has put itself on some blocked queue
403 IF_DEBUG(scheduler,belch("Thread %ld finished\n", t->id));
404 t->whatNext = ThreadComplete;
408 barf("schedule: invalid thread return code");
411 /* check for signals each time around the scheduler */
413 if (signals_pending()) {
414 start_signal_handlers();
417 /* If our main thread has finished or been killed, return.
418 * If we were re-entered as a result of a _ccall_gc, then
419 * pop the blocked thread off the ccalling_threads stack back
422 if ((*MainTSO)->whatNext == ThreadComplete
423 || (*MainTSO)->whatNext == ThreadKilled) {
/* resume the thread that did the _ccall_gc (if any) */
426 CurrentTSO = ccalling_threads;
427 ccalling_threads = ccalling_threads->link;
428 /* remember to stub the link field of CurrentTSO */
429 CurrentTSO->link = END_TSO_QUEUE;
431 if ((*MainTSO)->whatNext == ThreadComplete) {
432 /* we finished successfully, fill in the return value */
/* result convention: the value sits on top of the finished thread's stack */
433 if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };
/* otherwise, pop the next runnable thread and go round again */
442 if (t != END_TSO_QUEUE) {
443 run_queue_hd = t->link;
444 t->link = END_TSO_QUEUE;
445 if (run_queue_hd == END_TSO_QUEUE) {
446 run_queue_tl = END_TSO_QUEUE;
451 if (blocked_queue_hd != END_TSO_QUEUE) {
458 /* -----------------------------------------------------------------------------
459 Where are the roots that we know about?
461 - all the threads on the runnable queue
462 - all the threads on the blocked queue
463 - all the thread currently executing a _ccall_GC
464 - all the "main threads"
466 -------------------------------------------------------------------------- */
/* GC root-marking callback: MarkRoot may relocate each TSO, so every
 * global pointer is reassigned from its return value.
 */
468 static void GetRoots(void)
472 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
473 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
475 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
476 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
478 ccalling_threads = (StgTSO *)MarkRoot((StgClosure *)ccalling_threads);
/* the main-thread stack holds live TSOs too */
480 for (i = 0; i < next_main_thread; i++) {
481 main_threads[i] = (StgTSO *)MarkRoot((StgClosure *)main_threads[i]);
485 /* -----------------------------------------------------------------------------
488 This is the interface to the garbage collector from Haskell land.
489 We provide this so that external C code can allocate and garbage
490 collect when called from Haskell via _ccall_GC.
492 It might be useful to provide an interface whereby the programmer
493 can specify more roots (ToDo).
494 -------------------------------------------------------------------------- */
/* Extra user-supplied root-marking callback, stashed here so the
 * parameterless AllRoots function can reach it. */
496 void (*extra_roots)(void);
/* performGC: collect using only the scheduler's roots */
501 GarbageCollect(GetRoots);
/* AllRoots: combined root set — scheduler roots plus the user's */
507 GetRoots(); /* the scheduler's roots */
508 extra_roots(); /* the user's roots */
/* NOTE(review): return-type lines and braces are missing from this excerpt */
512 performGCWithRoots(void (*get_roots)(void))
514 extra_roots = get_roots;
516 GarbageCollect(AllRoots);
519 /* -----------------------------------------------------------------------------
522 If the thread has reached its maximum stack size,
523 then bomb out. Otherwise relocate the TSO into a larger chunk of
524 memory and adjust its stack size appropriately.
525 -------------------------------------------------------------------------- */
/* Grow tso's stack by copying the whole TSO into a larger allocation.
 * Returns the relocated TSO (dest); the old one is marked dead.
 * NOTE(review): braces, locals (dest, new_sp) and several lines are
 * missing from this excerpt.
 */
528 threadStackOverflow(StgTSO *tso)
530 nat new_stack_size, new_tso_size, diff, stack_words;
/* already at the limit: raise StackOverflow in the thread instead */
534 if (tso->stack_size >= tso->max_stack_size) {
536 /* If we're debugging, just print out the top of the stack */
537 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
541 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
544 /* Send this thread the StackOverflow exception */
545 raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
550 /* Try to double the current stack size. If that takes us over the
551 * maximum stack size for this thread, then use the maximum instead.
552 * Finally round up so the TSO ends up as a whole number of blocks.
554 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
555 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
556 TSO_STRUCT_SIZE)/sizeof(W_);
557 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
558 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
560 IF_DEBUG(scheduler, fprintf(stderr,"increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
562 dest = (StgTSO *)allocate(new_tso_size);
563 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
565 /* copy the TSO block and the old stack into the new area */
566 memcpy(dest,tso,TSO_STRUCT_SIZE);
/* only the live portion of the stack (sp..top) is copied */
567 stack_words = tso->stack + tso->stack_size - tso->sp;
/* the live stack is placed at the TOP of the new, larger area */
568 new_sp = (P_)dest + new_tso_size - stack_words;
569 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
571 /* relocate the stack pointers... */
572 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
573 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
/* splim is relative to the TSO base, so shift by the base displacement */
575 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
576 dest->stack_size = new_stack_size;
578 /* and relocate the update frame list */
579 relocate_TSO(tso, dest);
581 /* Mark the old one as dead so we don't try to scavenge it during
582 * garbage collection (the TSO will likely be on a mutables list in
583 * some generation, but it'll get collected soon enough). It's
584 * important to set the sp and su values to just beyond the end of
585 * the stack, so we don't attempt to scavenge any part of the dead
588 tso->whatNext = ThreadKilled;
589 tso->sp = (P_)&(tso->stack[tso->stack_size]);
590 tso->su = (StgUpdateFrame *)tso->sp;
591 tso->blocked_on = NULL;
592 dest->mut_link = NULL;
594 IF_DEBUG(sanity,checkTSO(tso));
596 IF_DEBUG(scheduler,printTSO(dest));
/* NOTE(review): comparison of a TSO* against MainTSO (a StgTSO**) looks
 * suspicious, but the surrounding lines are missing — confirm in full source */
598 if (tso == MainTSO) { /* hack */
604 /* -----------------------------------------------------------------------------
605 Wake up a queue that was blocked on some resource (usually a
606 computation in progress).
607 -------------------------------------------------------------------------- */
/* Walk the blocked queue q, moving each TSO onto the run queue and
 * clearing its blocked_on field.
 * NOTE(review): the loop-advance and the assignment of tso from q are
 * missing from this excerpt.
 */
609 void awaken_blocked_queue(StgTSO *q)
613 while (q != END_TSO_QUEUE) {
/* sanity: everything on a blocked queue must be a TSO */
614 ASSERT(get_itbl(q)->type == TSO);
617 PUSH_ON_RUN_QUEUE(tso);
618 tso->blocked_on = NULL;
619 IF_DEBUG(scheduler,belch("Waking up thread %ld", tso->id));
623 /* -----------------------------------------------------------------------------
625 - usually called inside a signal handler so it mustn't do anything fancy.
626 -------------------------------------------------------------------------- */
/* NOTE(review): the body is missing from this excerpt; presumably it sets
 * the interrupted / context-switch flags declared above — confirm in full
 * source. */
629 interruptStgRts(void)
635 /* -----------------------------------------------------------------------------
638 This is for use when we raise an exception in another thread, which
640 -------------------------------------------------------------------------- */
/* Remove tso from whatever blocking queue it is on (an MVar's queue or a
 * black hole's blocking queue), then make it runnable again.
 * NOTE(review): braces, locals (t, last) and the queue-unlink statements
 * inside each loop are missing from this excerpt.
 */
643 unblockThread(StgTSO *tso)
647 if (tso->blocked_on == NULL) {
648 return; /* not blocked */
/* dispatch on the closure type the thread is blocked on */
651 switch (get_itbl(tso->blocked_on)->type) {
655 StgTSO *last_tso = END_TSO_QUEUE;
656 StgMVar *mvar = (StgMVar *)(tso->blocked_on);
/* scan the MVar's queue, tracking the previous link so we can unsplice */
659 for (t = mvar->head; t != END_TSO_QUEUE;
660 last = &t->link, last_tso = t, t = t->link) {
/* if tso was the tail, the tail moves back to its predecessor */
663 if (mvar->tail == tso) {
664 mvar->tail = last_tso;
669 barf("unblockThread (MVAR): TSO not found");
674 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->blocked_on);
676 last = &bq->blocking_queue;
677 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
678 last = &t->link, t = t->link) {
684 barf("unblockThread (BLACKHOLE): TSO not found");
688 barf("unblockThread");
/* found and unlinked: clear the blockage and make it runnable */
692 tso->link = END_TSO_QUEUE;
693 tso->blocked_on = NULL;
694 PUSH_ON_RUN_QUEUE(tso);
697 /* -----------------------------------------------------------------------------
700 * The following function implements the magic for raising an
701 * asynchronous exception in an existing thread.
703 * We first remove the thread from any queue on which it might be
704 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
706 * We strip the stack down to the innermost CATCH_FRAME, building
707 * thunks in the heap for all the active computations, so they can
708 * be restarted if necessary. When we reach a CATCH_FRAME, we build
709 * an application of the handler to the exception, and push it on
710 * the top of the stack.
712 * How exactly do we save all the active computations? We create an
713 * AP_UPD for every UpdateFrame on the stack. Entering one of these
714 * AP_UPDs pushes everything from the corresponding update frame
715 * upwards onto the stack. (Actually, it pushes everything up to the
716 * next update frame plus a pointer to the next AP_UPD object.
717 * Entering the next AP_UPD object pushes more onto the stack until we
718 * reach the last AP_UPD object - at which point the stack should look
719 * exactly as it did when we killed the TSO and we can continue
720 * execution by entering the closure on top of the stack.
722 * We can also kill a thread entirely - this happens if either (a) the
723 * exception passed to raiseAsync is NULL, or (b) there's no
724 * CATCH_FRAME on the stack. In either case, we strip the entire
725 * stack and replace the thread with a zombie.
727 * -------------------------------------------------------------------------- */
/* Kill a thread outright: raising a NULL exception strips the whole stack
 * and zombifies the thread (see (a) in the comment above). */
730 deleteThread(StgTSO *tso)
732 raiseAsync(tso,NULL);
/* Raise an asynchronous exception in tso (see the large comment above).
 * exception == NULL means kill the thread. Walks the update-frame chain,
 * freezing each stack chunk into an AP_UPD/PAP thunk, until a CATCH_FRAME
 * handles the exception or the stack is exhausted.
 * NOTE(review): braces, locals (sp, ap, o, i), the frame-chain advance
 * (su = su->link or similar) and several case labels are missing from
 * this excerpt.
 */
736 raiseAsync(StgTSO *tso, StgClosure *exception)
738 StgUpdateFrame* su = tso->su;
741 /* Thread already dead? */
742 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
746 IF_DEBUG(scheduler, belch("Raising exception in thread %ld.", tso->id));
748 /* Remove it from any blocking queues */
751 /* The stack freezing code assumes there's a closure pointer on
752 * the top of the stack. This isn't always the case with compiled
753 * code, so we have to push a dummy closure on the top which just
754 * returns to the next return address on the stack.
756 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
757 *(--sp) = (W_)&dummy_ret_closure;
/* number of stack words between sp and the next frame (excl. the frame) */
761 int words = ((P_)su - (P_)sp) - 1;
765 /* If we find a CATCH_FRAME, and we've got an exception to raise,
766 * then build PAP(handler,exception), and leave it on top of
767 * the stack ready to enter.
769 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
770 StgCatchFrame *cf = (StgCatchFrame *)su;
771 /* we've got an exception to raise, so let's pass it to the
772 * handler in this frame.
774 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
775 TICK_ALLOC_UPD_PAP(2,0);
776 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
779 ap->fun = cf->handler;
780 ap->payload[0] = (P_)exception;
782 /* sp currently points to the word above the CATCH_FRAME on the
783 * stack. Replace the CATCH_FRAME with a pointer to the new handler
786 sp += sizeofW(StgCatchFrame);
/* the handler application will be entered as compiled code */
790 tso->whatNext = ThreadEnterGHC;
794 /* First build an AP_UPD consisting of the stack chunk above the
795 * current update frame, with the top word on the stack as the
798 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
/* fun is the closure on top of the stack; the rest becomes the payload */
803 ap->fun = (StgClosure *)sp[0];
805 for(i=0; i < (nat)words; ++i) {
806 ap->payload[i] = (P_)*sp++;
/* what kind of frame did we just freeze up to? */
809 switch (get_itbl(su)->type) {
813 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
814 TICK_ALLOC_UP_THK(words+1,0);
817 fprintf(stderr, "Updating ");
818 printPtr((P_)su->updatee);
819 fprintf(stderr, " with ");
820 printObj((StgClosure *)ap);
823 /* Replace the updatee with an indirection - happily
824 * this will also wake up any threads currently
825 * waiting on the result.
827 UPD_IND(su->updatee,ap); /* revert the black hole */
/* pop the update frame, leaving the AP_UPD on top of the stack */
829 sp += sizeofW(StgUpdateFrame) -1;
830 sp[0] = (W_)ap; /* push onto stack */
836 StgCatchFrame *cf = (StgCatchFrame *)su;
839 /* We want a PAP, not an AP_UPD. Fortunately, the
842 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
843 TICK_ALLOC_UPD_PAP(words+1,0);
845 /* now build o = FUN(catch,ap,handler) */
846 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
848 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
849 o->payload[0] = (StgClosure *)ap;
850 o->payload[1] = cf->handler;
853 fprintf(stderr, "Built ");
854 printObj((StgClosure *)o);
857 /* pop the old handler and put o on the stack */
859 sp += sizeofW(StgCatchFrame) - 1;
866 StgSeqFrame *sf = (StgSeqFrame *)su;
869 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
870 TICK_ALLOC_UPD_PAP(words+1,0);
872 /* now build o = FUN(seq,ap) */
873 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
874 TICK_ALLOC_SE_THK(1,0);
875 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
876 payloadCPtr(o,0) = (StgClosure *)ap;
879 fprintf(stderr, "Built ");
880 printObj((StgClosure *)o);
883 /* pop the old handler and put o on the stack */
885 sp += sizeofW(StgSeqFrame) - 1;
891 /* We've stripped the entire stack, the thread is now dead. */
892 sp += sizeofW(StgStopFrame) - 1;
893 sp[0] = (W_)exception; /* save the exception */
894 tso->whatNext = ThreadKilled;
/* su points just past the dead stack so nothing gets scavenged */
895 tso->su = (StgUpdateFrame *)(sp+1);