1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.7 1999/02/02 14:21:31 simonm Exp $
6 * ---------------------------------------------------------------------------*/
14 #include "StgStartup.h"
18 #include "StgMiscClosures.h"
20 #include "Evaluator.h"
24 #include "Profiling.h"
/* Scheduler-global state.
 *
 * NOTE(review): this extract is missing interior lines (the embedded
 * original line numbers are non-contiguous), so some declarations and
 * comment closers are not visible here.
 */

/* Runnable threads, kept as a singly-linked queue with head and tail
 * pointers (threads are linked through tso->link). */
27 StgTSO *run_queue_hd, *run_queue_tl;
/* Threads blocked on some resource; same head/tail queue representation. */
28 StgTSO *blocked_queue_hd, *blocked_queue_tl;
/* Stack (LIFO list) of threads that re-entered the scheduler via _ccall_gc;
 * see the re-entry handling in schedule(). */
29 StgTSO *ccalling_threads;
/* Maximum depth of scheduler re-entry; bounds main_threads[] below. */
31 #define MAX_SCHEDULE_NESTING 256
/* Stack of "main" threads, one per nested schedule() invocation, so the GC
 * can find (and update) them even when they are not on the normal queues. */
33 StgTSO *main_threads[MAX_SCHEDULE_NESTING];
35 static void GetRoots(void);
36 static StgTSO *threadStackOverflow(StgTSO *tso);
38 /* flag set by signal handler to precipitate a context switch */
40 /* if this flag is set as well, give up execution */
41 static nat interrupted;
43 /* Next thread ID to allocate */
44 StgThreadID next_thread_id = 1;
47 * Pointers to the state of the current thread.
48 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
49 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
/* Register table used while executing Haskell code on behalf of the
 * scheduler (the STG machine's virtual registers). */
52 StgRegTable MainRegTable;
55 * The thread state for the main thread.
59 /* The smallest stack size that makes any sense is:
60 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
61 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
62 * + 1 (the realworld token for an IO thread)
63 * + 1 (the closure to enter)
65 * A thread with this stack will bomb immediately with a stack
66 * overflow, which will increase its stack size.
/* "+ 2" = the realworld token plus the closure to enter (see above). */
69 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
71 /* -----------------------------------------------------------------------------
74 The new thread starts with the given stack size. Before the
75 scheduler can run, however, this thread needs to have a closure
76 (and possibly some arguments) pushed on its stack. See
77 pushClosure() in Schedule.h.
79 createGenThread() and createIOThread() (in Schedule.h) are
80 convenient packaged versions of this function.
81 -------------------------------------------------------------------------- */
/* createThread: allocate and initialise a new TSO.
 *
 * stack_size is the *total* size of the allocation in words, including the
 * TSO header (TSO_STRUCT_SIZEW is subtracted before calling initThread, and
 * the minimum is clamped to MIN_STACK_WORDS + TSO_STRUCT_SIZEW).
 *
 * NOTE(review): the return type, local declarations, and return statement
 * are on lines missing from this extract; presumably it returns the new
 * StgTSO* — confirm against the full source.
 */
84 createThread(nat stack_size)
88 /* catch ridiculously small stack sizes */
89 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
90 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
/* Allocate the TSO and its stack as one object in the heap. */
93 tso = (StgTSO *)allocate(stack_size);
94 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
/* Hand initThread the usable stack size (total minus the TSO header). */
96 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* initThread: fill in a freshly-allocated TSO.
 *
 * stack_size is in words and excludes the TSO header. Sets up the stack
 * limit (splim), stack pointer (sp, initially at the very top), pushes a
 * stop frame, and links the thread onto the head of the run queue.
 *
 * NOTE(review): several interior lines (e.g. the run-queue tail update in
 * the END_TSO_QUEUE branch) are missing from this extract.
 */
101 initThread(StgTSO *tso, nat stack_size)
103 SET_INFO(tso,&TSO_info);
104 tso->whatNext = ThreadEnterGHC;
105 tso->state = tso_state_runnable;
/* Thread IDs are allocated sequentially; not recycled here. */
106 tso->id = next_thread_id++;
/* splim leaves RESERVED_STACK_WORDS of slop so overflow recovery can run. */
108 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
109 tso->stack_size = stack_size;
/* Max stack is a per-thread copy of the RTS flag, minus the TSO header. */
110 tso->max_stack_size = RtsFlags.GcFlags.maxStkSize - TSO_STRUCT_SIZEW;
/* Stack grows downwards: sp starts one past the top word. */
111 tso->sp = (P_)&(tso->stack) + stack_size;
114 tso->prof.CCCS = CCS_MAIN;
117 /* put a stop frame on the stack */
118 tso->sp -= sizeofW(StgStopFrame);
119 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
/* su (update-frame chain) initially points at the stop frame. */
120 tso->su = (StgUpdateFrame*)tso->sp;
122 IF_DEBUG(scheduler,belch("Initialised thread %ld, stack size = %lx words\n",
123 tso->id, tso->stack_size));
125 /* Put the new thread on the head of the runnable queue.
126 * The caller of createThread better push an appropriate closure
127 * on this thread's stack before the scheduler is invoked.
129 tso->link = run_queue_hd;
/* If the queue was empty, the tail must be fixed up too (fix-up line is
 * missing from this extract). */
131 if (run_queue_tl == END_TSO_QUEUE) {
135 IF_DEBUG(scheduler,printTSO(tso));
138 /* -----------------------------------------------------------------------------
139 Delete a thread - reverting all blackholes to (something
140 equivalent to) their former state.
142 We create an AP_UPD for every UpdateFrame on the stack.
143 Entering one of these AP_UPDs pushes everything from the corresponding
144 update frame upwards onto the stack. (Actually, it pushes everything
145 up to the next update frame plus a pointer to the next AP_UPD
146 object. Entering the next AP_UPD object pushes more onto the
147 stack until we reach the last AP_UPD object - at which point
148 the stack should look exactly as it did when we killed the TSO
149 and we can continue execution by entering the closure on top of
151 -------------------------------------------------------------------------- */
/* deleteThread: kill a thread, reverting its blackholes.
 *
 * Walks the update-frame chain (tso->su), and for each frame builds a
 * heap object capturing the stack chunk above it:
 *   - UPDATE_FRAME: an AP_UPD, written over the updatee with UPD_IND
 *     (this also wakes any threads blocked on the blackhole);
 *   - CATCH_FRAME:  a PAP wrapped in FUN(catch, ap, handler);
 *   - SEQ_FRAME:    a PAP wrapped in FUN(seq, ap).
 *
 * NOTE(review): this extract is missing interior lines — early-return in
 * the already-dead branch, the loop header that advances su, the case
 * labels of the switch, and local declarations (sp, i, o) — so the exact
 * control flow cannot be fully confirmed from here.
 */
153 void deleteThread(StgTSO *tso)
155 StgUpdateFrame* su = tso->su;
158 /* Thread already dead? */
159 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
163 IF_DEBUG(scheduler, belch("Killing thread %ld.", tso->id));
165 tso->whatNext = ThreadKilled; /* changed to ThreadComplete in schedule() */
166 tso->link = END_TSO_QUEUE; /* Just to be on the safe side... */
168 /* Threads that finish normally leave Su pointing to the word
169 * beyond the top of the stack, and Sp pointing to the last word
170 * on the stack, which is the return value of the thread.
172 if ((P_)tso->su >= tso->stack + tso->stack_size
173 || get_itbl(tso->su)->type == STOP_FRAME) {
178 fprintf(stderr, "Freezing TSO stack\n");
182 /* The stack freezing code assumes there's a closure pointer on
183 * the top of the stack. This isn't always the case with compiled
184 * code, so we have to push a dummy closure on the top which just
185 * returns to the next return address on the stack.
/* LOOKS_LIKE_GHC_INFO(*sp) => top of stack is a return address, not a
 * closure pointer, so push the dummy. */
187 if (LOOKS_LIKE_GHC_INFO(*sp)) {
188 *(--sp) = (W_)&dummy_ret_closure;
/* Number of payload words between sp and the current update frame,
 * excluding the function word itself. */
192 int words = (stgCast(StgPtr,su) - stgCast(StgPtr,sp)) - 1;
194 StgAP_UPD* ap = stgCast(StgAP_UPD*,allocate(AP_sizeW(words)));
195 TICK_ALLOC_THK(words+1,0);
197 /* First build an AP_UPD consisting of the stack chunk above the
198 * current update frame, with the top word on the stack as the
203 /* if (words == 0) { -- optimisation
204 ap = stgCast(StgAP_UPD*,*stgCast(StgPtr*,sp)++);
/* Pop the function (top-of-stack closure) and then the argument words
 * into the AP's payload. */
207 ap->fun = stgCast(StgClosure*,*stgCast(StgPtr*,sp)++);
208 for(i=0; i < (nat)words; ++i) {
209 payloadWord(ap,i) = *sp++;
/* Dispatch on the kind of frame we just captured up to. */
213 switch (get_itbl(su)->type) {
217 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
220 fprintf(stderr, "Updating ");
221 printPtr(stgCast(StgPtr,su->updatee));
222 fprintf(stderr, " with ");
223 printObj(stgCast(StgClosure*,ap));
226 /* Replace the updatee with an indirection - happily
227 * this will also wake up any threads currently
228 * waiting on the result.
230 UPD_IND(su->updatee,ap); /* revert the black hole */
/* Pop the update frame, leaving one slot for the AP. */
232 sp += sizeofW(StgUpdateFrame) -1;
233 sp[0] = stgCast(StgWord,ap); /* push onto stack */
239 StgCatchFrame *cf = (StgCatchFrame *)su;
242 /* We want a PAP, not an AP_UPD. Fortunately, the
/* Reuse the object built above, retagged as a PAP. */
245 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
247 /* now build o = FUN(catch,ap,handler) */
248 o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+2));
250 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
251 payloadCPtr(o,0) = stgCast(StgClosure*,ap);
252 payloadCPtr(o,1) = cf->handler;
255 fprintf(stderr, "Built ");
256 printObj(stgCast(StgClosure*,o));
259 /* pop the old handler and put o on the stack */
261 sp += sizeofW(StgCatchFrame) - 1;
268 StgSeqFrame *sf = (StgSeqFrame *)su;
271 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
273 /* now build o = FUN(seq,ap) */
274 o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+1));
276 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
277 payloadCPtr(o,0) = stgCast(StgClosure*,ap);
280 fprintf(stderr, "Built ");
281 printObj(stgCast(StgClosure*,o));
284 /* pop the old handler and put o on the stack */
286 sp += sizeofW(StgSeqFrame) - 1;
/* initScheduler: reset all scheduler-global state.
 * Empties the run, blocked, and ccalling queues, resets the main-thread
 * nesting depth, and clears the entered-CAF list. Call once at RTS startup
 * before any threads are created.
 */
300 void initScheduler(void)
302 run_queue_hd = END_TSO_QUEUE;
303 run_queue_tl = END_TSO_QUEUE;
304 blocked_queue_hd = END_TSO_QUEUE;
305 blocked_queue_tl = END_TSO_QUEUE;
306 ccalling_threads = END_TSO_QUEUE;
307 next_main_thread = 0;
312 enteredCAFs = END_CAF_LIST;
315 /* -----------------------------------------------------------------------------
316 Main scheduling loop.
318 We use round-robin scheduling, each thread returning to the
319 scheduler loop when one of these conditions is detected:
323 * timer expires (thread yields)
326 -------------------------------------------------------------------------- */
/* schedule: the main round-robin scheduling loop.
 *
 * Runs threads from the run queue until `main` completes or is killed.
 * Supports re-entry (a Haskell thread doing a _ccall_gc may call back into
 * the scheduler); re-entered calls push the calling thread onto
 * ccalling_threads and pop it again on exit. On successful completion of
 * the main thread, *ret_val is set to the word on top of its stack.
 *
 * NOTE(review): many interior lines (case labels, closing braces, some
 * statements) are missing from this extract; comments below describe only
 * what the visible lines establish.
 */
328 SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)
331 StgThreadReturnCode ret;
335 /* Return value is NULL by default, it is only filled in if the
336 * main thread completes successfully.
338 if (ret_val) { *ret_val = NULL; }
340 /* Save away a pointer to the main thread so that we can keep track
341 * of it should a garbage collection happen. We keep a stack of
342 * main threads in order to support scheduler re-entry. We can't
343 * use the normal TSO linkage for this stack, because the main TSO
344 * may need to be linked onto other queues.
346 main_threads[next_main_thread] = main;
/* MainTSO is a pointer INTO main_threads[], so GC updates to that slot
 * are seen through it. */
347 MainTSO = &main_threads[next_main_thread];
350 fprintf(stderr, "Scheduler entered: nesting = %d\n",
353 /* Are we being re-entered?
355 if (CurrentTSO != NULL) {
356 /* This happens when a _ccall_gc from Haskell ends up re-entering
359 * Block the current thread (put it on the ccalling_queue) and
360 * continue executing. The calling thread better have stashed
361 * away its state properly and left its stack with a proper stack
364 threadPaused(CurrentTSO);
/* Push the caller onto the ccalling stack. */
365 CurrentTSO->link = ccalling_threads;
366 ccalling_threads = CurrentTSO;
367 in_ccall_gc = rtsTrue;
369 fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",
372 in_ccall_gc = rtsFalse;
375 /* Take a thread from the run queue.
/* Dequeue from the head; keep the tail consistent when the queue empties. */
378 if (t != END_TSO_QUEUE) {
379 run_queue_hd = t->link;
380 t->link = END_TSO_QUEUE;
381 if (run_queue_hd == END_TSO_QUEUE) {
382 run_queue_tl = END_TSO_QUEUE;
/* Main loop: one iteration per thread time-slice. */
386 while (t != END_TSO_QUEUE) {
389 /* If we have more threads on the run queue, set up a context
390 * switch at some point in the future.
392 if (run_queue_hd != END_TSO_QUEUE) {
397 IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));
399 /* Be friendly to the storage manager: we're about to *run* this
400 * thread, so we better make sure the TSO is mutable.
402 if (t->mut_link == NULL) {
403 recordMutable((StgMutClosure *)t);
406 /* Run the current thread */
/* Dispatch on how the thread should be resumed. */
407 switch (t->whatNext) {
410 /* thread already killed. Drop it and carry on. */
/* ThreadEnterGHC: enter the closure on top of the stack. */
413 ret = StgRun((StgFunPtr) stg_enterStackTop);
/* ThreadRunGHC: return to the top-of-stack continuation. */
416 ret = StgRun((StgFunPtr) stg_returnToStackTop);
418 case ThreadEnterHugs:
421 IF_DEBUG(scheduler,belch("entering Hugs"));
423 /* CHECK_SENSIBLE_REGS(); */
425 StgClosure* c = stgCast(StgClosure*,*Sp);
/* Built without the bytecode interpreter: entering a BCO is fatal. */
433 barf("Panic: entered a BCO but no bytecode interpreter in this build");
436 barf("schedule: invalid whatNext field");
439 /* We may have garbage collected while running the thread
440 * (eg. something nefarious like _ccall_GC_ performGC), and hence
441 * CurrentTSO may have moved. Update t to reflect this.
446 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* --- handle the thread's return code --- */
454 IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
/* HeapOverflow: requeue the thread and garbage collect. */
456 PUSH_ON_RUN_QUEUE(t);
457 GarbageCollect(GetRoots);
461 IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
464 /* enlarge the stack */
465 StgTSO *new_t = threadStackOverflow(t);
467 /* This TSO has moved, so update any pointers to it from the
468 * main thread stack. It better not be on any other queues...
471 for (i = 0; i < next_main_thread; i++) {
472 if (main_threads[i] == t) {
473 main_threads[i] = new_t;
/* NOTE(review): visible code pushes `t` here, but the thread was just
 * relocated to new_t; presumably a missing line rebinds t = new_t before
 * this point — confirm against the full source. */
478 PUSH_ON_RUN_QUEUE(t);
483 if (t->whatNext == ThreadEnterHugs) {
484 /* ToDo: or maybe a timer expired when we were in Hugs?
485 * or maybe someone hit ctrl-C
487 belch("Thread %ld stopped to switch to Hugs\n", t->id);
489 belch("Thread %ld stopped, timer expired\n", t->id);
/* Interrupted: drain the run queue and return. */
494 IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
496 while (run_queue_hd != END_TSO_QUEUE) {
/* NOTE(review): this advances via t->link, not run_queue_hd->link;
 * a missing line presumably sets t = run_queue_hd each iteration
 * (likely followed by deleteThread(t)) — confirm against full source. */
497 run_queue_hd = t->link;
500 run_queue_tl = END_TSO_QUEUE;
501 /* ToDo: should I do the same with blocked queues? */
505 /* Put the thread back on the run queue, at the end.
506 * t->link is already set to END_TSO_QUEUE.
508 ASSERT(t->link == END_TSO_QUEUE);
509 if (run_queue_tl != END_TSO_QUEUE) {
510 ASSERT(get_itbl(run_queue_tl)->type == TSO);
/* Single-element queue: head and tail are the same TSO. */
511 if (run_queue_hd == run_queue_tl) {
512 run_queue_hd->link = t;
515 run_queue_tl->link = t;
/* Empty queue: t becomes both head and tail. */
518 run_queue_hd = run_queue_tl = t;
523 IF_DEBUG(scheduler,belch("Thread %ld stopped, blocking\n", t->id));
525 /* assume the thread has put itself on some blocked queue
531 IF_DEBUG(scheduler,belch("Thread %ld finished\n", t->id));
533 t->whatNext = ThreadComplete;
537 barf("schedule: invalid thread return code");
540 /* check for signals each time around the scheduler */
541 if (signals_pending()) {
542 start_signal_handlers();
545 /* If our main thread has finished or been killed, return.
546 * If we were re-entered as a result of a _ccall_gc, then
547 * pop the blocked thread off the ccalling_threads stack back
550 if ((*MainTSO)->whatNext == ThreadComplete
551 || (*MainTSO)->whatNext == ThreadKilled) {
/* Pop the _ccall_gc caller back off the ccalling stack. */
554 CurrentTSO = ccalling_threads;
555 ccalling_threads = ccalling_threads->link;
556 /* remember to stub the link field of CurrentTSO */
557 CurrentTSO->link = END_TSO_QUEUE;
559 if ((*MainTSO)->whatNext == ThreadComplete) {
560 /* we finished successfully, fill in the return value */
561 if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };
/* Fetch the next thread from the run queue for the following iteration. */
570 if (t != END_TSO_QUEUE) {
571 run_queue_hd = t->link;
572 t->link = END_TSO_QUEUE;
573 if (run_queue_hd == END_TSO_QUEUE) {
574 run_queue_tl = END_TSO_QUEUE;
/* Run queue empty but threads are blocked — handling is on missing lines. */
579 if (blocked_queue_hd != END_TSO_QUEUE) {
586 /* -----------------------------------------------------------------------------
587 Where are the roots that we know about?
589 - all the threads on the runnable queue
590 - all the threads on the blocked queue
591 - all the thread currently executing a _ccall_GC
592 - all the "main threads"
594 -------------------------------------------------------------------------- */
/* GetRoots: tell the garbage collector about every TSO the scheduler
 * holds a pointer to — run queue, blocked queue, ccalling stack, and the
 * main-thread stack. MarkRoot may move objects, so each root pointer is
 * reassigned from its return value.
 */
596 static void GetRoots(void)
600 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
601 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
603 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
604 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
606 ccalling_threads = (StgTSO *)MarkRoot((StgClosure *)ccalling_threads);
/* One slot per nested schedule() invocation. */
608 for (i = 0; i < next_main_thread; i++) {
609 main_threads[i] = (StgTSO *)MarkRoot((StgClosure *)main_threads[i]);
613 /* -----------------------------------------------------------------------------
616 This is the interface to the garbage collector from Haskell land.
617 We provide this so that external C code can allocate and garbage
618 collect when called from Haskell via _ccall_GC.
620 It might be useful to provide an interface whereby the programmer
621 can specify more roots (ToDo).
622 -------------------------------------------------------------------------- */
/* GC entry points for external (C-land) callers.
 *
 * NOTE(review): the function headers for performGC and AllRoots are on
 * lines missing from this extract; the association of statements to
 * functions below is inferred and should be confirmed against the full
 * source.
 */

/* Extra user-supplied root-marking callback, used by AllRoots. */
624 void (*extra_roots)(void);
/* performGC (presumably): collect with only the scheduler's roots. */
629 GarbageCollect(GetRoots);
/* AllRoots (presumably): scheduler roots plus the user's. */
635 GetRoots(); /* the scheduler's roots */
636 extra_roots(); /* the user's roots */
/* Collect, additionally marking roots supplied by get_roots. */
640 performGCWithRoots(void (*get_roots)(void))
642 extra_roots = get_roots;
644 GarbageCollect(AllRoots);
647 /* -----------------------------------------------------------------------------
650 If the thread has reached its maximum stack size,
651 then bomb out. Otherwise relocate the TSO into a larger chunk of
652 memory and adjust its stack size appropriately.
653 -------------------------------------------------------------------------- */
/* threadStackOverflow: grow a thread's stack.
 *
 * If the thread is already at max_stack_size, reports a stack overflow
 * (via stackOverflow). Otherwise allocates a new TSO roughly twice the
 * size (capped at the max, rounded up to whole blocks), copies the TSO
 * header and live stack across, relocates sp/su/splim and the update-frame
 * chain, and marks the old TSO dead. Returns the relocated TSO (the return
 * statement is on a line missing from this extract).
 */
656 threadStackOverflow(StgTSO *tso)
658 nat new_stack_size, new_tso_size, diff, stack_words;
662 if (tso->stack_size >= tso->max_stack_size) {
663 /* ToDo: just kill this thread? */
665 /* If we're debugging, just print out the top of the stack */
666 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
669 stackOverflow(tso->max_stack_size);
672 /* Try to double the current stack size. If that takes us over the
673 * maximum stack size for this thread, then use the maximum instead.
674 * Finally round up so the TSO ends up as a whole number of blocks.
676 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
677 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
678 TSO_STRUCT_SIZE)/sizeof(W_);
/* Recompute the usable stack size after block rounding. */
679 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
681 IF_DEBUG(scheduler, fprintf(stderr,"increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
683 dest = (StgTSO *)allocate(new_tso_size);
684 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
686 /* copy the TSO block and the old stack into the new area */
687 memcpy(dest,tso,TSO_STRUCT_SIZE);
/* Only the live portion of the stack (sp..top) is copied, placed at the
 * TOP of the new, larger stack area. */
688 stack_words = tso->stack + tso->stack_size - tso->sp;
689 new_sp = (P_)dest + new_tso_size - stack_words;
690 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
692 /* relocate the stack pointers... */
693 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
694 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
/* splim is relative to the TSO itself, so shift by the TSO displacement. */
696 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
697 dest->stack_size = new_stack_size;
699 /* and relocate the update frame list */
700 relocate_TSO(tso, dest);
702 /* Mark the old one as dead so we don't try to scavenge it during
703 * garbage collection (the TSO will likely be on a mutables list in
704 * some generation, but it'll get collected soon enough).
706 tso->whatNext = ThreadKilled;
707 dest->mut_link = NULL;
709 IF_DEBUG(sanity,checkTSO(tso));
711 IF_DEBUG(scheduler,printTSO(dest));
/* If this was the main thread, fix up the MainTSO slot too. */
713 if (tso == MainTSO) { /* hack */
719 /* -----------------------------------------------------------------------------
720 Wake up a queue that was blocked on some resource (usually a
721 computation in progress).
722 -------------------------------------------------------------------------- */
/* awaken_blocked_queue: move every thread on the queue q back onto the
 * run queue. Threads are linked through tso->link; the loop's advance
 * step and the tso binding are on lines missing from this extract.
 */
724 void awaken_blocked_queue(StgTSO *q)
728 while (q != END_TSO_QUEUE) {
729 ASSERT(get_itbl(q)->type == TSO);
732 PUSH_ON_RUN_QUEUE(tso);
733 IF_DEBUG(scheduler,belch("Waking up thread %ld", tso->id));
737 /* -----------------------------------------------------------------------------
739 - usually called inside a signal handler so it mustn't do anything fancy.
740 -------------------------------------------------------------------------- */
742 void interruptStgRts(void)