1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.14 1999/03/03 19:05:55 sof Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
16 #include "StgStartup.h"
20 #include "StgMiscClosures.h"
22 #include "Evaluator.h"
26 #include "Profiling.h"
/* Run queue: runnable threads, linked through tso->link (head/tail). */
29 StgTSO *run_queue_hd, *run_queue_tl;
/* Blocked queue: threads waiting on some resource (head/tail). */
30 StgTSO *blocked_queue_hd, *blocked_queue_tl;
/* Stack of threads currently suspended inside a _ccall_gc;
 * pushed/popped by the re-entry code in schedule(). */
31 StgTSO *ccalling_threads;
/* Bound on scheduler re-entry depth; sizes the main_threads[] stack. */
33 #define MAX_SCHEDULE_NESTING 256
/* Stack of "main" TSOs, one per nested schedule() invocation.
 * These are treated as GC roots in GetRoots(). */
35 StgTSO *main_threads[MAX_SCHEDULE_NESTING];
/* Static helpers defined later in this file. */
37 static void GetRoots(void);
38 static StgTSO *threadStackOverflow(StgTSO *tso);
40 /* flag set by signal handler to precipitate a context switch */
42 /* if this flag is set as well, give up execution */
43 static nat interrupted;
45 /* Next thread ID to allocate */
46 StgThreadID next_thread_id = 1;
49 * Pointers to the state of the current thread.
50 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
51 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
/* Register table for the currently executing thread.
 * NOTE(review): presumably saved/restored around StgRun calls in
 * schedule() -- confirm against the un-elided file. */
54 StgRegTable MainRegTable;
57 * The thread state for the main thread.
61 /* The smallest stack size that makes any sense is:
62 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
63 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
64 * + 1 (the realworld token for an IO thread)
65 * + 1 (the closure to enter)
67 * A thread with this stack will bomb immediately with a stack
68 * overflow, which will increase its stack size.
/* The "+ 2" below covers the realworld token and the closure to enter,
 * as itemised in the comment above. */
71 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
73 /* -----------------------------------------------------------------------------
76 The new thread starts with the given stack size. Before the
77 scheduler can run, however, this thread needs to have a closure
78 (and possibly some arguments) pushed on its stack. See
79 pushClosure() in Schedule.h.
81 createGenThread() and createIOThread() (in Schedule.h) are
82 convenient packaged versions of this function.
83 -------------------------------------------------------------------------- */
/* createThread: allocate a new TSO whose total size (header + stack) is
 * at least stack_size words, then hand it to initThread() to be set up
 * and placed on the run queue.  The caller must still push a closure on
 * the new thread's stack before the scheduler runs it. */
86 createThread(nat stack_size)
90 /* catch ridiculously small stack sizes */
91 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
92 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
/* TSO header and its stack are allocated as a single heap object. */
95 tso = (StgTSO *)allocate(stack_size);
96 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
/* The usable stack is what remains after the TSO struct itself. */
98 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* initThread: initialise a freshly-allocated TSO: info table, state,
 * thread id, stack limits and pointers, an initial stop frame at the
 * top of the stack, and link it onto the head of the run queue. */
103 initThread(StgTSO *tso, nat stack_size)
105 SET_INFO(tso,&TSO_info);
106 tso->whatNext = ThreadEnterGHC;
107 tso->state = tso_state_runnable;
108 tso->id = next_thread_id++;
/* splim leaves RESERVED_STACK_WORDS of headroom below the stack base so
 * the overflow handler has room to run. */
110 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
111 tso->stack_size = stack_size;
112 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
/* sp starts one word past the top; the stack grows downwards. */
114 tso->sp = (P_)&(tso->stack) + stack_size;
/* Profiling: new threads start charged to the "MAIN" cost centre. */
117 tso->prof.CCCS = CCS_MAIN;
120 /* put a stop frame on the stack */
121 tso->sp -= sizeofW(StgStopFrame);
122 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
/* su points at the topmost (only) frame: the stop frame just pushed. */
123 tso->su = (StgUpdateFrame*)tso->sp;
125 IF_DEBUG(scheduler,belch("Initialised thread %ld, stack size = %lx words\n",
126 tso->id, tso->stack_size));
128 /* Put the new thread on the head of the runnable queue.
129 * The caller of createThread better push an appropriate closure
130 * on this thread's stack before the scheduler is invoked.
132 tso->link = run_queue_hd;
/* If the queue was previously empty, this thread is also the tail. */
134 if (run_queue_tl == END_TSO_QUEUE) {
138 IF_DEBUG(scheduler,printTSO(tso));
141 /* -----------------------------------------------------------------------------
142 Delete a thread - reverting all blackholes to (something
143 equivalent to) their former state.
145 We create an AP_UPD for every UpdateFrame on the stack.
146 Entering one of these AP_UPDs pushes everything from the corresponding
147 update frame upwards onto the stack. (Actually, it pushes everything
148 up to the next update frame plus a pointer to the next AP_UPD
149 object. Entering the next AP_UPD object pushes more onto the
150 stack until we reach the last AP_UPD object - at which point
151 the stack should look exactly as it did when we killed the TSO
152 and we can continue execution by entering the closure on top of
154 -------------------------------------------------------------------------- */
/* deleteThread: kill a TSO, freezing its stack into a chain of AP_UPD /
 * PAP closures so that any blackholed thunks it was computing are
 * reverted (and their waiters woken). */
156 void deleteThread(StgTSO *tso)
158 StgUpdateFrame* su = tso->su;
161 /* Thread already dead? */
162 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
166 IF_DEBUG(scheduler, belch("Killing thread %ld.", tso->id));
168 tso->whatNext = ThreadKilled; /* changed to ThreadComplete in schedule() */
169 tso->link = END_TSO_QUEUE; /* Just to be on the safe side... */
171 /* Threads that finish normally leave Su pointing to the word
172 * beyond the top of the stack, and Sp pointing to the last word
173 * on the stack, which is the return value of the thread.
/* Nothing to freeze if su is off the top of the stack or the only frame
 * left is the stop frame. */
175 if ((P_)tso->su >= tso->stack + tso->stack_size
176 || get_itbl(tso->su)->type == STOP_FRAME) {
181 fprintf(stderr, "Freezing TSO stack\n");
185 /* The stack freezing code assumes there's a closure pointer on
186 * the top of the stack. This isn't always the case with compiled
187 * code, so we have to push a dummy closure on the top which just
188 * returns to the next return address on the stack.
190 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
191 *(--sp) = (W_)&dummy_ret_closure;
/* Words of stack between sp and the next update frame, excluding the
 * function closure word itself. */
195 int words = (stgCast(StgPtr,su) - stgCast(StgPtr,sp)) - 1;
197 StgAP_UPD* ap = stgCast(StgAP_UPD*,allocate(AP_sizeW(words)));
198 TICK_ALLOC_THK(words+1,0);
200 /* First build an AP_UPD consisting of the stack chunk above the
201 * current update frame, with the top word on the stack as the
206 /* if (words == 0) { -- optimisation
207 ap = stgCast(StgAP_UPD*,*stgCast(StgPtr*,sp)++);
/* Copy the stack chunk into the AP_UPD's payload. */
210 ap->fun = stgCast(StgClosure*,*stgCast(StgPtr*,sp)++);
211 for(i=0; i < (nat)words; ++i) {
212 payloadWord(ap,i) = *sp++;
/* Dispatch on the type of the frame we just reached. */
216 switch (get_itbl(su)->type) {
/* Update frame: finish the AP_UPD and revert the blackholed updatee. */
220 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
223 fprintf(stderr, "Updating ");
224 printPtr(stgCast(StgPtr,su->updatee));
225 fprintf(stderr, " with ");
226 printObj(stgCast(StgClosure*,ap));
229 /* Replace the updatee with an indirection - happily
230 * this will also wake up any threads currently
231 * waiting on the result.
233 UPD_IND(su->updatee,ap); /* revert the black hole */
235 sp += sizeofW(StgUpdateFrame) -1;
236 sp[0] = stgCast(StgWord,ap); /* push onto stack */
/* Catch frame: build FUN(catch, ap, handler) so the handler survives. */
242 StgCatchFrame *cf = (StgCatchFrame *)su;
245 /* We want a PAP, not an AP_UPD. Fortunately, the
248 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
250 /* now build o = FUN(catch,ap,handler) */
251 o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+2));
253 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
254 payloadCPtr(o,0) = stgCast(StgClosure*,ap);
255 payloadCPtr(o,1) = cf->handler;
258 fprintf(stderr, "Built ");
259 printObj(stgCast(StgClosure*,o));
262 /* pop the old handler and put o on the stack */
264 sp += sizeofW(StgCatchFrame) - 1;
/* Seq frame: build FUN(seq, ap) analogously. */
271 StgSeqFrame *sf = (StgSeqFrame *)su;
274 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
276 /* now build o = FUN(seq,ap) */
277 o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+1));
279 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
280 payloadCPtr(o,0) = stgCast(StgClosure*,ap);
283 fprintf(stderr, "Built ");
284 printObj(stgCast(StgClosure*,o));
287 /* pop the old handler and put o on the stack */
289 sp += sizeofW(StgSeqFrame) - 1;
/* initScheduler: reset all scheduler state to empty -- called once at
 * RTS startup before any threads exist. */
303 void initScheduler(void)
305 run_queue_hd = END_TSO_QUEUE;
306 run_queue_tl = END_TSO_QUEUE;
307 blocked_queue_hd = END_TSO_QUEUE;
308 blocked_queue_tl = END_TSO_QUEUE;
309 ccalling_threads = END_TSO_QUEUE;
/* Empty the main-thread stack used for nested schedule() calls. */
310 next_main_thread = 0;
/* No CAFs have been entered yet. */
315 enteredCAFs = END_CAF_LIST;
318 /* -----------------------------------------------------------------------------
319 Main scheduling loop.
321 We use round-robin scheduling, each thread returning to the
322 scheduler loop when one of these conditions is detected:
326 * timer expires (thread yields)
329 -------------------------------------------------------------------------- */
/* schedule: run threads round-robin until `main` completes or is killed.
 * On successful completion *ret_val receives the value left on top of
 * the main thread's stack.  Re-entrant: a _ccall_gc from Haskell may
 * call back into schedule(), in which case the caller's TSO is parked
 * on ccalling_threads and restored before returning. */
331 SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)
334 StgThreadReturnCode ret;
338 /* Return value is NULL by default, it is only filled in if the
339 * main thread completes successfully.
341 if (ret_val) { *ret_val = NULL; }
343 /* Save away a pointer to the main thread so that we can keep track
344 * of it should a garbage collection happen. We keep a stack of
345 * main threads in order to support scheduler re-entry. We can't
346 * use the normal TSO linkage for this stack, because the main TSO
347 * may need to be linked onto other queues.
349 main_threads[next_main_thread] = main;
/* MainTSO points INTO main_threads[] so GC relocation of the TSO (which
 * rewrites the array slot in GetRoots) is seen through it. */
350 MainTSO = &main_threads[next_main_thread];
353 fprintf(stderr, "Scheduler entered: nesting = %d\n",
356 /* Are we being re-entered?
358 if (CurrentTSO != NULL) {
359 /* This happens when a _ccall_gc from Haskell ends up re-entering
362 * Block the current thread (put it on the ccalling_queue) and
363 * continue executing. The calling thread better have stashed
364 * away its state properly and left its stack with a proper stack
367 threadPaused(CurrentTSO);
368 CurrentTSO->link = ccalling_threads;
369 ccalling_threads = CurrentTSO;
370 in_ccall_gc = rtsTrue;
372 fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",
375 in_ccall_gc = rtsFalse;
378 /* Take a thread from the run queue.
/* Dequeue from the head; keep the tail consistent when emptied. */
381 if (t != END_TSO_QUEUE) {
382 run_queue_hd = t->link;
383 t->link = END_TSO_QUEUE;
384 if (run_queue_hd == END_TSO_QUEUE) {
385 run_queue_tl = END_TSO_QUEUE;
/* Main loop: run one thread at a time until the main thread is done. */
389 while (t != END_TSO_QUEUE) {
392 /* If we have more threads on the run queue, set up a context
393 * switch at some point in the future.
395 if (run_queue_hd != END_TSO_QUEUE) {
400 IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));
402 /* Be friendly to the storage manager: we're about to *run* this
403 * thread, so we better make sure the TSO is mutable.
405 if (t->mut_link == NULL) {
406 recordMutable((StgMutClosure *)t);
409 /* Run the current thread */
/* Dispatch on how the thread wants to be resumed. */
410 switch (t->whatNext) {
413 /* thread already killed. Drop it and carry on. */
/* Resume by entering the closure on top of the stack (GHC code). */
416 ret = StgRun((StgFunPtr) stg_enterStackTop);
/* Resume by returning to the top stack frame (GHC code). */
419 ret = StgRun((StgFunPtr) stg_returnToStackTop);
421 case ThreadEnterHugs:
424 IF_DEBUG(scheduler,belch("entering Hugs"));
426 /* CHECK_SENSIBLE_REGS(); */
428 StgClosure* c = stgCast(StgClosure*,*Sp);
/* No bytecode interpreter compiled in: entering a BCO is fatal. */
436 barf("Panic: entered a BCO but no bytecode interpreter in this build");
439 barf("schedule: invalid whatNext field");
442 /* We may have garbage collected while running the thread
443 * (eg. something nefarious like _ccall_GC_ performGC), and hence
444 * CurrentTSO may have moved. Update t to reflect this.
449 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* --- Handle the thread's return code --- */
/* HeapOverflow: put the thread back and garbage collect. */
457 IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
459 PUSH_ON_RUN_QUEUE(t);
460 GarbageCollect(GetRoots);
/* StackOverflow: relocate the TSO into a bigger chunk. */
464 IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
467 /* enlarge the stack */
468 StgTSO *new_t = threadStackOverflow(t);
470 /* This TSO has moved, so update any pointers to it from the
471 * main thread stack. It better not be on any other queues...
474 for (i = 0; i < next_main_thread; i++) {
475 if (main_threads[i] == t) {
476 main_threads[i] = new_t;
/* NOTE(review): this pushes the old `t`, not `new_t`, after the TSO was
 * relocated -- looks suspicious, but elided lines may reassign t.
 * Confirm against the full file. */
481 PUSH_ON_RUN_QUEUE(t);
/* ThreadYielding: context-switch (timer, or a switch into Hugs). */
486 if (t->whatNext == ThreadEnterHugs) {
487 /* ToDo: or maybe a timer expired when we were in Hugs?
488 * or maybe someone hit ctrl-C
490 belch("Thread %ld stopped to switch to Hugs\n", t->id);
492 belch("Thread %ld stopped, timer expired\n", t->id);
/* If interrupted (e.g. by a signal), drain the run queue and return. */
497 IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
499 while (run_queue_hd != END_TSO_QUEUE) {
/* NOTE(review): `t->link` here presumably follows a `t = run_queue_hd;`
 * on an elided line -- confirm against the full file. */
500 run_queue_hd = t->link;
503 run_queue_tl = END_TSO_QUEUE;
504 /* ToDo: should I do the same with blocked queues? */
508 /* Put the thread back on the run queue, at the end.
509 * t->link is already set to END_TSO_QUEUE.
511 ASSERT(t->link == END_TSO_QUEUE);
512 if (run_queue_tl == END_TSO_QUEUE) {
513 run_queue_hd = run_queue_tl = t;
515 ASSERT(get_itbl(run_queue_tl)->type == TSO);
516 if (run_queue_hd == run_queue_tl) {
517 run_queue_hd->link = t;
520 run_queue_tl->link = t;
/* ThreadBlocked: the thread has parked itself elsewhere; drop it here. */
527 IF_DEBUG(scheduler,belch("Thread %ld stopped, blocking\n", t->id));
529 /* assume the thread has put itself on some blocked queue
/* ThreadFinished: mark complete; completion of `main` is checked below. */
535 IF_DEBUG(scheduler,belch("Thread %ld finished\n", t->id));
537 t->whatNext = ThreadComplete;
541 barf("schedule: invalid thread return code");
544 /* check for signals each time around the scheduler */
546 if (signals_pending()) {
547 start_signal_handlers();
550 /* If our main thread has finished or been killed, return.
551 * If we were re-entered as a result of a _ccall_gc, then
552 * pop the blocked thread off the ccalling_threads stack back
555 if ((*MainTSO)->whatNext == ThreadComplete
556 || (*MainTSO)->whatNext == ThreadKilled) {
/* Restore the thread that was suspended for the _ccall_gc. */
559 CurrentTSO = ccalling_threads;
560 ccalling_threads = ccalling_threads->link;
561 /* remember to stub the link field of CurrentTSO */
562 CurrentTSO->link = END_TSO_QUEUE;
564 if ((*MainTSO)->whatNext == ThreadComplete) {
565 /* we finished successfully, fill in the return value */
/* The thread's result is the word on top of its stack. */
566 if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };
/* Otherwise pick the next thread off the run queue and loop. */
575 if (t != END_TSO_QUEUE) {
576 run_queue_hd = t->link;
577 t->link = END_TSO_QUEUE;
578 if (run_queue_hd == END_TSO_QUEUE) {
579 run_queue_tl = END_TSO_QUEUE;
584 if (blocked_queue_hd != END_TSO_QUEUE) {
591 /* -----------------------------------------------------------------------------
592 Where are the roots that we know about?
594 - all the threads on the runnable queue
595 - all the threads on the blocked queue
596 - all the thread currently executing a _ccall_GC
597 - all the "main threads"
599 -------------------------------------------------------------------------- */
/* GetRoots: report every scheduler-owned TSO pointer to the garbage
 * collector.  MarkRoot returns the (possibly relocated) object, so each
 * pointer is written back in place. */
601 static void GetRoots(void)
605 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
606 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
608 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
609 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
611 ccalling_threads = (StgTSO *)MarkRoot((StgClosure *)ccalling_threads);
/* Main-thread stack entries are roots too; updating the array slots is
 * what keeps MainTSO (a pointer into this array) valid across GC. */
613 for (i = 0; i < next_main_thread; i++) {
614 main_threads[i] = (StgTSO *)MarkRoot((StgClosure *)main_threads[i]);
618 /* -----------------------------------------------------------------------------
621 This is the interface to the garbage collector from Haskell land.
622 We provide this so that external C code can allocate and garbage
623 collect when called from Haskell via _ccall_GC.
625 It might be useful to provide an interface whereby the programmer
626 can specify more roots (ToDo).
627 -------------------------------------------------------------------------- */
/* Hook holding the user-supplied root-marking callback; set by
 * performGCWithRoots() and invoked from AllRoots(). */
629 void (*extra_roots)(void);
/* performGC body (signature elided in this view): collect using only
 * the scheduler's own roots. */
634 GarbageCollect(GetRoots);
/* AllRoots body (signature elided in this view): scheduler roots plus
 * whatever the user registered via extra_roots. */
640 GetRoots(); /* the scheduler's roots */
641 extra_roots(); /* the user's roots */
/* performGCWithRoots: collect, letting the caller contribute additional
 * roots through get_roots. */
645 performGCWithRoots(void (*get_roots)(void))
647 extra_roots = get_roots;
649 GarbageCollect(AllRoots);
652 /* -----------------------------------------------------------------------------
655 If the thread has reached its maximum stack size,
656 then bomb out. Otherwise relocate the TSO into a larger chunk of
657 memory and adjust its stack size appropriately.
658 -------------------------------------------------------------------------- */
/* threadStackOverflow: grow tso's stack by relocating the whole TSO into
 * a larger allocation and returning the new TSO.  The old TSO is marked
 * ThreadKilled so GC drops it.  Aborts via stackOverflow() if the thread
 * is already at its maximum stack size. */
661 threadStackOverflow(StgTSO *tso)
663 nat new_stack_size, new_tso_size, diff, stack_words;
/* Hard limit reached: report and bail out. */
667 if (tso->stack_size >= tso->max_stack_size) {
668 /* ToDo: just kill this thread? */
670 /* If we're debugging, just print out the top of the stack */
671 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
674 stackOverflow(tso->max_stack_size);
677 /* Try to double the current stack size. If that takes us over the
678 * maximum stack size for this thread, then use the maximum instead.
679 * Finally round up so the TSO ends up as a whole number of blocks.
681 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
682 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
683 TSO_STRUCT_SIZE)/sizeof(W_);
684 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
685 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
687 IF_DEBUG(scheduler, fprintf(stderr,"increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
689 dest = (StgTSO *)allocate(new_tso_size);
690 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
692 /* copy the TSO block and the old stack into the new area */
693 memcpy(dest,tso,TSO_STRUCT_SIZE);
/* The live stack is copied to the TOP of the new area (stacks grow
 * down), so all the extra space appears below it. */
694 stack_words = tso->stack + tso->stack_size - tso->sp;
695 new_sp = (P_)dest + new_tso_size - stack_words;
696 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
698 /* relocate the stack pointers... */
699 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
700 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
/* splim moves by the TSO's own displacement (it is relative to the
 * stack base, not to sp). */
702 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
703 dest->stack_size = new_stack_size;
705 /* and relocate the update frame list */
706 relocate_TSO(tso, dest);
708 /* Mark the old one as dead so we don't try to scavenge it during
709 * garbage collection (the TSO will likely be on a mutables list in
710 * some generation, but it'll get collected soon enough).
712 tso->whatNext = ThreadKilled;
/* New TSO is not on any mutable list yet. */
713 dest->mut_link = NULL;
715 IF_DEBUG(sanity,checkTSO(tso));
717 IF_DEBUG(scheduler,printTSO(dest));
/* Keep the scheduler's MainTSO tracking consistent after the move. */
719 if (tso == MainTSO) { /* hack */
725 /* -----------------------------------------------------------------------------
726 Wake up a queue that was blocked on some resource (usually a
727 computation in progress).
728 -------------------------------------------------------------------------- */
/* awaken_blocked_queue: walk a TSO chain (linked via tso->link) and move
 * every thread on it onto the run queue. */
730 void awaken_blocked_queue(StgTSO *q)
734 while (q != END_TSO_QUEUE) {
/* Sanity: everything on a blocked queue must be a TSO. */
735 ASSERT(get_itbl(q)->type == TSO);
738 PUSH_ON_RUN_QUEUE(tso);
739 IF_DEBUG(scheduler,belch("Waking up thread %ld", tso->id));
743 /* -----------------------------------------------------------------------------
745 - usually called inside a signal handler so it mustn't do anything fancy.
746 -------------------------------------------------------------------------- */
748 void interruptStgRts(void)