/* -----------------------------------------------------------------------------
 * $Id: Schedule.c,v 1.22 1999/06/25 09:17:58 simonmar Exp $
 * (c) The GHC Team, 1998-1999
 * ---------------------------------------------------------------------------*/

#include "StgStartup.h"
#include "StgMiscClosures.h"
#include "Evaluator.h"
#include "Profiling.h"

StgTSO *run_queue_hd, *run_queue_tl;
StgTSO *blocked_queue_hd, *blocked_queue_tl;
StgTSO *ccalling_threads;

#define MAX_SCHEDULE_NESTING 256

StgTSO *main_threads[MAX_SCHEDULE_NESTING];

static void GetRoots(void);
static StgTSO *threadStackOverflow(StgTSO *tso);

/* flag set by signal handler to precipitate a context switch */

/* if this flag is set as well, give up execution */
static nat interrupted;

/* Next thread ID to allocate */
StgThreadID next_thread_id = 1;
 * Pointers to the state of the current thread.
 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
 * thread. If CurrentTSO == NULL, then we're at the scheduler level.

StgRegTable MainRegTable;

 * The thread state for the main thread.
/* The smallest stack size that makes any sense is:
 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
 * + 1 (the realworld token for an IO thread)
 * + 1 (the closure to enter)
 *
 * A thread with this stack will bomb immediately with a stack
 * overflow, which will increase its stack size.
 */

#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
/* -----------------------------------------------------------------------------
 * -------------------------------------------------------------------------- */

static void unblockThread(StgTSO *tso);

/* -----------------------------------------------------------------------------
   The new thread starts with the given stack size. Before the
   scheduler can run, however, this thread needs to have a closure
   (and possibly some arguments) pushed on its stack. See
   pushClosure() in Schedule.h.

   createGenThread() and createIOThread() (in SchedAPI.h) are
   convenient packaged versions of this function.
   -------------------------------------------------------------------------- */
createThread(nat stack_size)

  /* catch ridiculously small stack sizes */
  if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
    stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;

  tso = (StgTSO *)allocate(stack_size);
  TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);

  initThread(tso, stack_size - TSO_STRUCT_SIZEW);

initThread(StgTSO *tso, nat stack_size)

  SET_INFO(tso,&TSO_info);
  tso->whatNext = ThreadEnterGHC;
  tso->id = next_thread_id++;
  tso->blocked_on = NULL;

  tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
  tso->stack_size = stack_size;
  tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)

  tso->sp = (P_)&(tso->stack) + stack_size;

  tso->prof.CCCS = CCS_MAIN;

  /* put a stop frame on the stack */
  tso->sp -= sizeofW(StgStopFrame);
  SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
  tso->su = (StgUpdateFrame*)tso->sp;

  IF_DEBUG(scheduler,belch("Initialised thread %ld, stack size = %lx words\n",
                           tso->id, tso->stack_size));
  /* Put the new thread on the head of the runnable queue.
   * The caller of createThread better push an appropriate closure
   * on this thread's stack before the scheduler is invoked.
   */
  tso->link = run_queue_hd;
  if (run_queue_tl == END_TSO_QUEUE) {

  IF_DEBUG(scheduler,printTSO(tso));
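
/* Illustrative only: a hedged sketch of how createThread() might be packaged
 * in the style of createGenThread()/createIOThread() from SchedAPI.h.  It
 * assumes only createThread() above and pushClosure() from Schedule.h; the
 * name exampleCreateThread, the StgTSO* return type of createThread and the
 * exact pushClosure() signature are assumptions made for this sketch.
 */
static StgTSO *
exampleCreateThread(nat stack_size, StgClosure *closure)
{
  StgTSO *t;

  t = createThread(stack_size);   /* allocate and initialise the TSO */
  pushClosure(t, closure);        /* the closure the thread will enter */
  /* an IO thread would additionally push the realworld token here
   * (cf. the MIN_STACK_WORDS comment above) */
  return t;
}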
/* -----------------------------------------------------------------------------
 * Initialise the scheduler. This resets all the queues - if the
 * queues contained any threads, they'll be garbage collected at the
 * -------------------------------------------------------------------------- */

void initScheduler(void)

  run_queue_hd = END_TSO_QUEUE;
  run_queue_tl = END_TSO_QUEUE;
  blocked_queue_hd = END_TSO_QUEUE;
  blocked_queue_tl = END_TSO_QUEUE;
  ccalling_threads = END_TSO_QUEUE;
  next_main_thread = 0;

  enteredCAFs = END_CAF_LIST;
/* -----------------------------------------------------------------------------
   Main scheduling loop.

   We use round-robin scheduling, each thread returning to the
   scheduler loop when one of these conditions is detected:

      * timer expires (thread yields)

   -------------------------------------------------------------------------- */
SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)

  StgThreadReturnCode ret;
  /* Return value is NULL by default, it is only filled in if the
   * main thread completes successfully.
   */
  if (ret_val) { *ret_val = NULL; }
  /* Save away a pointer to the main thread so that we can keep track
   * of it should a garbage collection happen. We keep a stack of
   * main threads in order to support scheduler re-entry. We can't
   * use the normal TSO linkage for this stack, because the main TSO
   * may need to be linked onto other queues.
   */
  main_threads[next_main_thread] = main;
  MainTSO = &main_threads[next_main_thread];
    fprintf(stderr, "Scheduler entered: nesting = %d\n",

  /* Are we being re-entered?
   */
  if (CurrentTSO != NULL) {
    /* This happens when a _ccall_gc from Haskell ends up re-entering
     * Block the current thread (put it on the ccalling_queue) and
     * continue executing. The calling thread better have stashed
     * away its state properly and left its stack with a proper stack
    threadPaused(CurrentTSO);
    CurrentTSO->link = ccalling_threads;
    ccalling_threads = CurrentTSO;
    in_ccall_gc = rtsTrue;

      fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",

    in_ccall_gc = rtsFalse;
  /* Take a thread from the run queue.
   */
  if (t != END_TSO_QUEUE) {
    run_queue_hd = t->link;
    t->link = END_TSO_QUEUE;
    if (run_queue_hd == END_TSO_QUEUE) {
      run_queue_tl = END_TSO_QUEUE;

  while (t != END_TSO_QUEUE) {
    /* If we have more threads on the run queue, set up a context
     * switch at some point in the future.
     */
    if (run_queue_hd != END_TSO_QUEUE) {

    IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));

    /* Be friendly to the storage manager: we're about to *run* this
     * thread, so we better make sure the TSO is mutable.
     */
    if (t->mut_link == NULL) {
      recordMutable((StgMutClosure *)t);
    /* Run the current thread */
    switch (t->whatNext) {

      /* thread already killed. Drop it and carry on. */

      ret = StgRun((StgFunPtr) stg_enterStackTop);

      ret = StgRun((StgFunPtr) stg_returnToStackTop);

    case ThreadEnterHugs:

      IF_DEBUG(scheduler,belch("entering Hugs"));

      /* CHECK_SENSIBLE_REGS(); */

      StgClosure* c = (StgClosure *)Sp[0];

      barf("Panic: entered a BCO but no bytecode interpreter in this build");

      barf("schedule: invalid whatNext field");
    /* We may have garbage collected while running the thread
     * (eg. something nefarious like _ccall_GC_ performGC), and hence
     * CurrentTSO may have moved. Update t to reflect this.
     */

    /* Costs for the scheduler are assigned to CCS_SYSTEM */
      IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));

      PUSH_ON_RUN_QUEUE(t);
      GarbageCollect(GetRoots);

      IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));

      /* enlarge the stack */
      StgTSO *new_t = threadStackOverflow(t);

      /* This TSO has moved, so update any pointers to it from the
       * main thread stack. It better not be on any other queues...
      for (i = 0; i < next_main_thread; i++) {
        if (main_threads[i] == t) {
          main_threads[i] = new_t;

      PUSH_ON_RUN_QUEUE(t);
      if (t->whatNext == ThreadEnterHugs) {
        /* ToDo: or maybe a timer expired when we were in Hugs?
         * or maybe someone hit ctrl-C
        belch("Thread %ld stopped to switch to Hugs\n", t->id);

        belch("Thread %ld stopped, timer expired\n", t->id);

        IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));

        while (run_queue_hd != END_TSO_QUEUE) {
          run_queue_hd = t->link;

        run_queue_tl = END_TSO_QUEUE;
        /* ToDo: should I do the same with blocked queues? */
      /* Put the thread back on the run queue, at the end.
       * t->link is already set to END_TSO_QUEUE.
       */
      ASSERT(t->link == END_TSO_QUEUE);
      if (run_queue_tl == END_TSO_QUEUE) {
        run_queue_hd = run_queue_tl = t;

        ASSERT(get_itbl(run_queue_tl)->type == TSO);
        if (run_queue_hd == run_queue_tl) {
          run_queue_hd->link = t;

          run_queue_tl->link = t;
      IF_DEBUG(scheduler,belch("Thread %ld stopped, blocking\n", t->id));

      /* assume the thread has put itself on some blocked queue

      IF_DEBUG(scheduler,belch("Thread %ld finished\n", t->id));
      t->whatNext = ThreadComplete;

      barf("schedule: invalid thread return code");

    /* check for signals each time around the scheduler */
    if (signals_pending()) {
      start_signal_handlers();

    /* If our main thread has finished or been killed, return.
     * If we were re-entered as a result of a _ccall_gc, then
     * pop the blocked thread off the ccalling_threads stack back
    if ((*MainTSO)->whatNext == ThreadComplete
        || (*MainTSO)->whatNext == ThreadKilled) {

        CurrentTSO = ccalling_threads;
        ccalling_threads = ccalling_threads->link;
        /* remember to stub the link field of CurrentTSO */
        CurrentTSO->link = END_TSO_QUEUE;

      if ((*MainTSO)->whatNext == ThreadComplete) {
        /* we finished successfully, fill in the return value */
        if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };

    if (t != END_TSO_QUEUE) {
      run_queue_hd = t->link;
      t->link = END_TSO_QUEUE;
      if (run_queue_hd == END_TSO_QUEUE) {
        run_queue_tl = END_TSO_QUEUE;

    if (blocked_queue_hd != END_TSO_QUEUE) {
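
/* Illustrative only: a hedged sketch of how a caller might drive schedule()
 * for a single main thread.  exampleRunMain() is hypothetical, and the use of
 * RtsFlags.GcFlags.initialStkSize as the stack size and of pushClosure() from
 * Schedule.h are assumptions; the real entry points into the scheduler live
 * in the RTS API, not here.
 */
static SchedulerStatus
exampleRunMain(StgClosure *main_closure, StgClosure **result)
{
  StgTSO *main_tso;

  main_tso = createThread(RtsFlags.GcFlags.initialStkSize);
  pushClosure(main_tso, main_closure);   /* see pushClosure() in Schedule.h */

  /* run the round-robin loop until the main thread completes or is killed */
  return schedule(main_tso, result);
}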
/* -----------------------------------------------------------------------------
   Where are the roots that we know about?

        - all the threads on the runnable queue
        - all the threads on the blocked queue
        - all the threads currently executing a _ccall_GC
        - all the "main threads"

   -------------------------------------------------------------------------- */
static void GetRoots(void)

  run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
  run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);

  blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
  blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);

  ccalling_threads = (StgTSO *)MarkRoot((StgClosure *)ccalling_threads);

  for (i = 0; i < next_main_thread; i++) {
    main_threads[i] = (StgTSO *)MarkRoot((StgClosure *)main_threads[i]);

/* -----------------------------------------------------------------------------
   This is the interface to the garbage collector from Haskell land.
   We provide this so that external C code can allocate and garbage
   collect when called from Haskell via _ccall_GC.

   It might be useful to provide an interface whereby the programmer
   can specify more roots (ToDo).
   -------------------------------------------------------------------------- */

void (*extra_roots)(void);

  GarbageCollect(GetRoots);

  GetRoots();      /* the scheduler's roots */
  extra_roots();   /* the user's roots */

performGCWithRoots(void (*get_roots)(void))

  extra_roots = get_roots;

  GarbageCollect(AllRoots);
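
/* Illustrative only: a hedged sketch of how foreign C code (called via
 * _ccall_GC) might keep its own closures alive across a collection using
 * performGCWithRoots().  The array my_c_roots[] and exampleMarkMyRoots()
 * are hypothetical, and MarkRoot() is assumed to return the (possibly
 * relocated) closure, as suggested by its use in GetRoots() above.
 */
#define N_MY_ROOTS 4                        /* hypothetical */
static StgClosure *my_c_roots[N_MY_ROOTS];  /* closures held from C land */

static void
exampleMarkMyRoots(void)
{
  nat i;
  for (i = 0; i < N_MY_ROOTS; i++) {
    if (my_c_roots[i] != NULL) {
      /* MarkRoot may move the closure, so store the new address back */
      my_c_roots[i] = MarkRoot(my_c_roots[i]);
    }
  }
}

/* at the call site (sketch):  performGCWithRoots(exampleMarkMyRoots);  */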
/* -----------------------------------------------------------------------------
   If the thread has reached its maximum stack size,
   then bomb out.  Otherwise relocate the TSO into a larger chunk of
   memory and adjust its stack size appropriately.
   -------------------------------------------------------------------------- */

threadStackOverflow(StgTSO *tso)

  nat new_stack_size, new_tso_size, diff, stack_words;

  if (tso->stack_size >= tso->max_stack_size) {

    /* If we're debugging, just print out the top of the stack */
    printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,

    fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );

    /* Send this thread the StackOverflow exception */
    raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
  /* Try to double the current stack size. If that takes us over the
   * maximum stack size for this thread, then use the maximum instead.
   * Finally round up so the TSO ends up as a whole number of blocks.
   */
  new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
  new_tso_size   = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
                                       TSO_STRUCT_SIZE)/sizeof(W_);
  new_tso_size = round_to_mblocks(new_tso_size);  /* Be MBLOCK-friendly */
  new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
  IF_DEBUG(scheduler, fprintf(stderr,"increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));

  dest = (StgTSO *)allocate(new_tso_size);
  TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);

  /* copy the TSO block and the old stack into the new area */
  memcpy(dest,tso,TSO_STRUCT_SIZE);
  stack_words = tso->stack + tso->stack_size - tso->sp;
  new_sp = (P_)dest + new_tso_size - stack_words;
  memcpy(new_sp, tso->sp, stack_words * sizeof(W_));

  /* relocate the stack pointers... */
  diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
  dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

  dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
  dest->stack_size = new_stack_size;

  /* and relocate the update frame list */
  relocate_TSO(tso, dest);

  /* Mark the old one as dead so we don't try to scavenge it during
   * garbage collection (the TSO will likely be on a mutables list in
   * some generation, but it'll get collected soon enough). It's
   * important to set the sp and su values to just beyond the end of
   * the stack, so we don't attempt to scavenge any part of the dead
  tso->whatNext = ThreadKilled;
  tso->sp = (P_)&(tso->stack[tso->stack_size]);
  tso->su = (StgUpdateFrame *)tso->sp;
  tso->blocked_on = NULL;
  dest->mut_link = NULL;

  IF_DEBUG(sanity,checkTSO(tso));

  IF_DEBUG(scheduler,printTSO(dest));

  if (tso == MainTSO) { /* hack */

/* -----------------------------------------------------------------------------
   Wake up a queue that was blocked on some resource (usually a
   computation in progress).
   -------------------------------------------------------------------------- */

void awaken_blocked_queue(StgTSO *q)

  while (q != END_TSO_QUEUE) {
    ASSERT(get_itbl(q)->type == TSO);

    PUSH_ON_RUN_QUEUE(tso);
    tso->blocked_on = NULL;
    IF_DEBUG(scheduler,belch("Waking up thread %ld", tso->id));

/* -----------------------------------------------------------------------------
   - usually called inside a signal handler so it mustn't do anything fancy.
   -------------------------------------------------------------------------- */

interruptStgRts(void)
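
/* Illustrative only: interruptStgRts() is intended to be callable from a
 * signal handler, e.g. on Ctrl-C.  The handler below is a hypothetical
 * sketch (it would be installed with signal()/sigaction() from <signal.h>
 * elsewhere); it is not the RTS's own signal-handling code.
 */
static void
exampleCtrlCHandler(int sig)
{
  (void)sig;             /* unused */
  interruptStgRts();     /* sets interrupted (and presumably context_switch),
                          * which the scheduler checks on its next iteration */
}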
/* -----------------------------------------------------------------------------
   This is for use when we raise an exception in another thread, which
   -------------------------------------------------------------------------- */

unblockThread(StgTSO *tso)

  if (tso->blocked_on == NULL) {
    return;  /* not blocked */

  switch (get_itbl(tso->blocked_on)->type) {

      StgTSO *last_tso = END_TSO_QUEUE;
      StgMVar *mvar = (StgMVar *)(tso->blocked_on);

      for (t = mvar->head; t != END_TSO_QUEUE;
           last = &t->link, last_tso = t, t = t->link) {

          if (mvar->tail == tso) {
            mvar->tail = last_tso;

      barf("unblockThread (MVAR): TSO not found");

      StgBlockingQueue *bq = (StgBlockingQueue *)(tso->blocked_on);

      last = &bq->blocking_queue;
      for (t = bq->blocking_queue; t != END_TSO_QUEUE;
           last = &t->link, t = t->link) {

      barf("unblockThread (BLACKHOLE): TSO not found");

    barf("unblockThread");

  tso->link = END_TSO_QUEUE;
  tso->blocked_on = NULL;
  PUSH_ON_RUN_QUEUE(tso);
/* -----------------------------------------------------------------------------
 * The following function implements the magic for raising an
 * asynchronous exception in an existing thread.
 *
 * We first remove the thread from any queue on which it might be
 * blocked.  The possible blockages are MVARs and BLACKHOLE_BQs.
 *
 * We strip the stack down to the innermost CATCH_FRAME, building
 * thunks in the heap for all the active computations, so they can
 * be restarted if necessary.  When we reach a CATCH_FRAME, we build
 * an application of the handler to the exception, and push it on
 * the top of the stack.
 *
 * How exactly do we save all the active computations?  We create an
 * AP_UPD for every UpdateFrame on the stack.  Entering one of these
 * AP_UPDs pushes everything from the corresponding update frame
 * upwards onto the stack.  (Actually, it pushes everything up to the
 * next update frame plus a pointer to the next AP_UPD object.
 * Entering the next AP_UPD object pushes more onto the stack until we
 * reach the last AP_UPD object - at which point the stack should look
 * exactly as it did when we killed the TSO and we can continue
 * execution by entering the closure on top of the stack.)
 *
 * We can also kill a thread entirely - this happens if either (a) the
 * exception passed to raiseAsync is NULL, or (b) there's no
 * CATCH_FRAME on the stack.  In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
 * -------------------------------------------------------------------------- */
deleteThread(StgTSO *tso)

  raiseAsync(tso,NULL);

raiseAsync(StgTSO *tso, StgClosure *exception)

  StgUpdateFrame* su = tso->su;

  /* Thread already dead? */
  if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {

  IF_DEBUG(scheduler, belch("Raising exception in thread %ld.", tso->id));

  /* Remove it from any blocking queues */
  /* The stack freezing code assumes there's a closure pointer on
   * the top of the stack. This isn't always the case with compiled
   * code, so we have to push a dummy closure on the top which just
   * returns to the next return address on the stack.
   */
  if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
    *(--sp) = (W_)&dummy_ret_closure;
    int words = ((P_)su - (P_)sp) - 1;

    /* If we find a CATCH_FRAME, and we've got an exception to raise,
     * then build PAP(handler,exception), and leave it on top of
     * the stack ready to enter.
     */
    if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
      StgCatchFrame *cf = (StgCatchFrame *)su;
      /* we've got an exception to raise, so let's pass it to the
       * handler in this frame.
       */
      ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
      TICK_ALLOC_UPD_PAP(2,0);
      SET_HDR(ap,&PAP_info,cf->header.prof.ccs);

      ap->fun = cf->handler;
      ap->payload[0] = (P_)exception;

      /* sp currently points to the word above the CATCH_FRAME on the
       * stack. Replace the CATCH_FRAME with a pointer to the new handler
      sp += sizeofW(StgCatchFrame);

      tso->whatNext = ThreadEnterGHC;
    /* First build an AP_UPD consisting of the stack chunk above the
     * current update frame, with the top word on the stack as the
    ap = (StgAP_UPD *)allocate(AP_sizeW(words));

    ap->fun = (StgClosure *)sp[0];

    for(i=0; i < (nat)words; ++i) {
      ap->payload[i] = (P_)*sp++;

    switch (get_itbl(su)->type) {

      SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
      TICK_ALLOC_UP_THK(words+1,0);

        fprintf(stderr, "Updating ");
        printPtr((P_)su->updatee);
        fprintf(stderr, " with ");
        printObj((StgClosure *)ap);
      /* Replace the updatee with an indirection - happily
       * this will also wake up any threads currently
       * waiting on the result.
       */
      UPD_IND(su->updatee,ap);  /* revert the black hole */

      sp += sizeofW(StgUpdateFrame) -1;
      sp[0] = (W_)ap; /* push onto stack */
        StgCatchFrame *cf = (StgCatchFrame *)su;

        /* We want a PAP, not an AP_UPD.  Fortunately, the
        SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
        TICK_ALLOC_UPD_PAP(words+1,0);

        /* now build o = FUN(catch,ap,handler) */
        o = (StgClosure *)allocate(sizeofW(StgClosure)+2);

        SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
        o->payload[0] = (StgClosure *)ap;
        o->payload[1] = cf->handler;

          fprintf(stderr, "Built ");
          printObj((StgClosure *)o);

        /* pop the old handler and put o on the stack */

        sp += sizeofW(StgCatchFrame) - 1;

        StgSeqFrame *sf = (StgSeqFrame *)su;

        SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
        TICK_ALLOC_UPD_PAP(words+1,0);

        /* now build o = FUN(seq,ap) */
        o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
        TICK_ALLOC_SE_THK(1,0);
        SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
        payloadCPtr(o,0) = (StgClosure *)ap;

          fprintf(stderr, "Built ");
          printObj((StgClosure *)o);
        /* pop the seq frame and put o on the stack */
        sp += sizeofW(StgSeqFrame) - 1;

      /* We've stripped the entire stack, the thread is now dead. */
      sp += sizeofW(StgStopFrame) - 1;
      sp[0] = (W_)exception;  /* save the exception */
      tso->whatNext = ThreadKilled;
      tso->su = (StgUpdateFrame *)(sp+1);
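
/* Illustrative only: a hedged sketch of how raiseAsync()/deleteThread()
 * above might be used to implement a "kill thread with an exception"
 * operation.  exampleKillThread() is hypothetical; the real primop glue
 * lives elsewhere in the RTS.
 */
static void
exampleKillThread(StgTSO *target, StgClosure *exception)
{
  if (exception == NULL) {
    /* no exception: strip the whole stack and leave a zombie */
    deleteThread(target);
  } else {
    /* unblock the target and unwind its stack to the innermost
     * CATCH_FRAME, as described in the comment above raiseAsync() */
    raiseAsync(target, exception);
  }
}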