1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.4 1999/01/13 17:25:44 simonm Exp $
6 * ---------------------------------------------------------------------------*/
14 #include "StgStartup.h"
18 #include "StgMiscClosures.h"
20 #include "Evaluator.h"
24 #include "StablePtr.h"
25 #include "Profiling.h"
/* ------------------------------------------------------------------------
 * Global scheduler state.
 * NOTE(review): this file is an excerpt; some declarations referenced
 * below (e.g. context_switch, MainTSO, next_main_thread) sit on elided
 * lines, so comments describe only what is visible here.
 * ------------------------------------------------------------------------ */
/* The run queue: threads ready to execute, linked through tso->link. */
28 StgTSO *run_queue_hd, *run_queue_tl;
/* Threads blocked on some resource (I/O, black holes, ...). */
29 StgTSO *blocked_queue_hd, *blocked_queue_tl;
/* Threads suspended mid-execution because they re-entered the scheduler
 * via a _ccall_gc; treated as a stack (see schedule()). */
30 StgTSO *ccalling_threads;
/* Maximum depth of scheduler re-entry (nested schedule() calls). */
32 #define MAX_SCHEDULE_NESTING 256
/* Stack of pointers to the "main" thread of each nested scheduler. */
34 StgTSO *main_threads[MAX_SCHEDULE_NESTING];
36 static void GetRoots(void);
37 static StgTSO *threadStackOverflow(StgTSO *tso);
39 /* flag set by signal handler to precipitate a context switch */
41 /* if this flag is set as well, give up execution */
42 static nat interrupted;
44 /* Next thread ID to allocate */
45 StgThreadID next_thread_id = 1;
48 * Pointers to the state of the current thread.
49 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
50 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
/* Register table used while executing Haskell code for the main capability. */
53 StgRegTable MainRegTable;
56 * The thread state for the main thread.
60 /* The smallest stack size that makes any sense is:
61 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
62 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
63 * + 1 (the realworld token for an IO thread)
64 * + 1 (the closure to enter)
66 * A thread with this stack will bomb immediately with a stack
67 * overflow, which will increase its stack size.
/* The "+ 2" is the realworld token plus the closure to enter (see above). */
70 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
72 /* -----------------------------------------------------------------------------
75 The new thread starts with the given stack size. Before the
76 scheduler can run, however, this thread needs to have a closure
77 (and possibly some arguments) pushed on its stack. See
78 pushClosure() in Schedule.h.
80 createGenThread() and createIOThread() (in Schedule.h) are
81 convenient packaged versions of this function.
82 -------------------------------------------------------------------------- */
/* createThread: allocate and initialise a fresh TSO.
 *
 * stack_size is the TOTAL object size in words, including the TSO
 * header (TSO_STRUCT_SIZEW); the usable stack is what remains after
 * the header is subtracted (see the initThread call below).
 *
 * NOTE(review): the return-type line, local declarations, braces and
 * return statement are on lines elided from this excerpt -- presumably
 * it returns the new StgTSO*; verify against the full file.
 */
85 createThread(nat stack_size)
89 /* catch ridiculously small stack sizes */
90 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
91 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
/* allocate() hands back stack_size words from the storage manager;
 * the TSO header and its stack live in one contiguous object. */
94 tso = (StgTSO *)allocate(stack_size);
96 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* initThread: fill in a freshly-allocated TSO.
 *
 * stack_size here is the usable stack in words (TSO header already
 * excluded by the caller).  Sets up the thread id, stack limits, cost
 * centre, pushes the initial stop frame, and links the thread onto the
 * head of the run queue.
 * NOTE(review): the return-type line and some braces are elided.
 */
101 initThread(StgTSO *tso, nat stack_size)
103 SET_INFO(tso,&TSO_info);
104 tso->whatNext = ThreadEnterGHC;
105 tso->state = tso_state_runnable;
106 tso->id = next_thread_id++;
/* splim marks the low-water mark: RESERVED_STACK_WORDS are kept free so
 * the overflow path has room to run (see MIN_STACK_WORDS above). */
108 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
109 tso->stack_size = stack_size;
110 tso->max_stack_size = RtsFlags.GcFlags.maxStkSize - TSO_STRUCT_SIZEW;
/* stack grows downwards: sp starts one past the top of the stack area */
111 tso->sp = (P_)&(tso->stack) + stack_size;
/* new threads are charged to CCS_MAIN (profiling builds only, presumably) */
114 tso->prof.CCCS = CCS_MAIN;
117 /* put a stop frame on the stack */
118 tso->sp -= sizeofW(StgStopFrame);
119 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
/* the stop frame doubles as the initial update-frame pointer */
120 tso->su = (StgUpdateFrame*)tso->sp;
122 IF_DEBUG(scheduler,belch("Initialised thread %ld, stack size = %lx words\n",
123 tso->id, tso->stack_size));
125 /* Put the new thread on the head of the runnable queue.
126 * The caller of createThread better push an appropriate closure
127 * on this thread's stack before the scheduler is invoked.
129 tso->link = run_queue_hd;
/* if the queue was empty, this thread is also the tail (body elided) */
131 if (run_queue_tl == END_TSO_QUEUE) {
135 IF_DEBUG(scheduler,printTSO(tso));
138 /* -----------------------------------------------------------------------------
139 Delete a thread - reverting all blackholes to (something
140 equivalent to) their former state.
142 We create an AP_UPD for every UpdateFrame on the stack.
143 Entering one of these AP_UPDs pushes everything from the corresponding
144 update frame upwards onto the stack. (Actually, it pushes everything
145 up to the next update frame plus a pointer to the next AP_UPD
146 object. Entering the next AP_UPD object pushes more onto the
147 stack until we reach the last AP_UPD object - at which point
148 the stack should look exactly as it did when we killed the TSO
149 and we can continue execution by entering the closure on top of
151 -------------------------------------------------------------------------- */
/* deleteThread: kill a thread, reverting its blackholes.
 *
 * Walks the chain of update/catch/seq frames on the TSO's stack.  For
 * each update frame an AP_UPD is built from the stack chunk above it
 * and the blackholed updatee is overwritten with an indirection to it,
 * so other threads blocked on the result are woken with something
 * equivalent to the thread's former state (see the comment block that
 * precedes this function in the file).
 *
 * NOTE(review): many lines (case labels, braces, loop frames) are
 * elided from this excerpt; comments below cover only what is visible.
 */
153 void deleteThread(StgTSO *tso)
155 StgUpdateFrame* su = tso->su;
158 /* Thread already dead? */
159 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
163 IF_DEBUG(scheduler, belch("Killing thread %ld.", tso->id));
165 tso->whatNext = ThreadKilled; /* changed to ThreadComplete in schedule() */
166 tso->link = END_TSO_QUEUE; /* Just to be on the safe side... */
168 /* Threads that finish normally leave Su pointing to the word
169 * beyond the top of the stack, and Sp pointing to the last word
170 * on the stack, which is the return value of the thread.
/* i.e. if su is off the top of the stack, or the top frame is the stop
 * frame, the thread has nothing left to freeze (body elided). */
172 if ((P_)tso->su >= tso->stack + tso->stack_size
173 || get_itbl(tso->su)->type == STOP_FRAME) {
178 fprintf(stderr, "Freezing TSO stack\n");
182 /* The stack freezing code assumes there's a closure pointer on
183 * the top of the stack. This isn't always the case with compiled
184 * code, so we have to push a dummy closure on the top which just
185 * returns to the next return address on the stack.
187 if (LOOKS_LIKE_GHC_INFO(*sp)) {
188 *(--sp) = (W_)&dummy_ret_closure;
/* words = size of the stack chunk between sp and the enclosing frame,
 * minus one for the function slot that becomes ap->fun below */
192 int words = (stgCast(StgPtr,su) - stgCast(StgPtr,sp)) - 1;
194 StgAP_UPD* ap = stgCast(StgAP_UPD*,allocate(AP_sizeW(words)));
196 /* First build an AP_UPD consisting of the stack chunk above the
197 * current update frame, with the top word on the stack as the
202 /* if (words == 0) { -- optimisation
203 ap = stgCast(StgAP_UPD*,*stgCast(StgPtr*,sp)++);
/* copy the chunk: fun is the closure on top, then `words` payload words */
206 ap->fun = stgCast(StgClosure*,*stgCast(StgPtr*,sp)++);
207 for(i=0; i < (nat)words; ++i) {
208 payloadWord(ap,i) = *sp++;
/* dispatch on the kind of frame we just reached */
212 switch (get_itbl(su)->type) {
216 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
219 fprintf(stderr, "Updating ");
220 printPtr(stgCast(StgPtr,su->updatee));
221 fprintf(stderr, " with ");
222 printObj(stgCast(StgClosure*,ap));
225 /* Replace the updatee with an indirection - happily
226 * this will also wake up any threads currently
227 * waiting on the result.
229 UPD_IND(su->updatee,ap); /* revert the black hole */
/* pop the update frame, leaving one slot for the ap pointer */
231 sp += sizeofW(StgUpdateFrame) -1;
232 sp[0] = stgCast(StgWord,ap); /* push onto stack */
/* --- CATCH_FRAME case (label elided): wrap the chunk in a PAP and
 * rebuild `catch ap handler` so the handler is re-installed --- */
238 StgCatchFrame *cf = (StgCatchFrame *)su;
241 /* We want a PAP, not an AP_UPD. Fortunately, the
244 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
246 /* now build o = FUN(catch,ap,handler) */
247 o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+2));
248 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
249 payloadCPtr(o,0) = stgCast(StgClosure*,ap);
250 payloadCPtr(o,1) = cf->handler;
253 fprintf(stderr, "Built ");
254 printObj(stgCast(StgClosure*,o));
257 /* pop the old handler and put o on the stack */
259 sp += sizeofW(StgCatchFrame) - 1;
/* --- SEQ_FRAME case (label elided): as above but `seq ap` --- */
266 StgSeqFrame *sf = (StgSeqFrame *)su;
269 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
271 /* now build o = FUN(seq,ap) */
272 o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+1));
273 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
274 payloadCPtr(o,0) = stgCast(StgClosure*,ap);
277 fprintf(stderr, "Built ");
278 printObj(stgCast(StgClosure*,o));
281 /* pop the old handler and put o on the stack */
283 sp += sizeofW(StgSeqFrame) - 1;
/* initScheduler: reset all scheduler-global state to "empty".
 * Called once at RTS startup, before any threads are created.
 */
297 void initScheduler(void)
299 run_queue_hd = END_TSO_QUEUE;
300 run_queue_tl = END_TSO_QUEUE;
301 blocked_queue_hd = END_TSO_QUEUE;
302 blocked_queue_tl = END_TSO_QUEUE;
303 ccalling_threads = END_TSO_QUEUE;
304 next_main_thread = 0;
/* no CAFs have been entered yet (list consumed by the GC) */
309 enteredCAFs = END_CAF_LIST;
312 /* -----------------------------------------------------------------------------
313 Main scheduling loop.
315 We use round-robin scheduling, each thread returning to the
316 scheduler loop when one of these conditions is detected:
320 * timer expires (thread yields)
323 -------------------------------------------------------------------------- */
/* schedule: the main scheduling loop (round-robin).
 *
 * Runs threads from the run queue until `main` completes or is killed,
 * then (if successful) stores main's result through ret_val.  Supports
 * re-entry: if called while a thread is current (a _ccall_gc), that
 * thread is parked on ccalling_threads and restored before returning.
 *
 * NOTE(review): a large number of lines (case labels, braces, returns)
 * are elided from this excerpt; comments below only describe visible
 * code, with elisions flagged where they matter.
 */
325 SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)
328 StgThreadReturnCode ret;
332 /* Return value is NULL by default, it is only filled in if the
333 * main thread completes successfully.
335 if (ret_val) { *ret_val = NULL; }
337 /* Save away a pointer to the main thread so that we can keep track
338 * of it should a garbage collection happen. We keep a stack of
339 * main threads in order to support scheduler re-entry. We can't
340 * use the normal TSO linkage for this stack, because the main TSO
341 * may need to be linked onto other queues.
343 main_threads[next_main_thread] = main;
/* MainTSO is a pointer INTO main_threads[], so it survives GC moving
 * the TSO (GetRoots updates main_threads[] in place). */
344 MainTSO = &main_threads[next_main_thread];
347 fprintf(stderr, "Scheduler entered: nesting = %d\n",
350 /* Are we being re-entered?
352 if (CurrentTSO != NULL) {
353 /* This happens when a _ccall_gc from Haskell ends up re-entering
356 * Block the current thread (put it on the ccalling_queue) and
357 * continue executing. The calling thread better have stashed
358 * away its state properly and left its stack with a proper stack
361 threadPaused(CurrentTSO);
362 CurrentTSO->link = ccalling_threads;
363 ccalling_threads = CurrentTSO;
364 in_ccall_gc = rtsTrue;
366 fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",
369 in_ccall_gc = rtsFalse;
372 /* Take a thread from the run queue.
/* pop the head; fix up the tail if the queue becomes empty */
375 if (t != END_TSO_QUEUE) {
376 run_queue_hd = t->link;
377 t->link = END_TSO_QUEUE;
378 if (run_queue_hd == END_TSO_QUEUE) {
379 run_queue_tl = END_TSO_QUEUE;
/* main loop: keep running threads until the queue drains or we return */
383 while (t != END_TSO_QUEUE) {
386 /* If we have more threads on the run queue, set up a context
387 * switch at some point in the future.
389 if (run_queue_hd != END_TSO_QUEUE) {
394 IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));
396 /* Be friendly to the storage manager: we're about to *run* this
397 * thread, so we better make sure the TSO is mutable.
399 recordMutable((StgMutClosure *)t);
401 /* Run the current thread */
402 switch (t->whatNext) {
405 /* thread already killed. Drop it and carry on. */
/* ThreadEnterGHC: the closure to enter is on top of the stack */
408 ret = StgRun((StgFunPtr) stg_enterStackTop);
/* ThreadRunGHC: resume at the return address on top of the stack */
411 ret = StgRun((StgFunPtr) stg_returnToStackTop);
413 case ThreadEnterHugs:
416 IF_DEBUG(scheduler,belch("entering Hugs"));
418 /* CHECK_SENSIBLE_REGS(); */
420 StgClosure* c = stgCast(StgClosure*,*Sp);
/* non-interpreter build: entering a BCO is fatal */
428 barf("Panic: entered a BCO but no bytecode interpreter in this build");
431 barf("schedule: invalid whatNext field");
434 /* We may have garbage collected while running the thread
435 * (eg. something nefarious like _ccall_GC_ performGC), and hence
436 * CurrentTSO may have moved. Update t to reflect this.
441 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* --- HeapOverflow: re-queue the thread and garbage collect --- */
449 IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
451 PUSH_ON_RUN_QUEUE(t);
452 GarbageCollect(GetRoots);
/* --- StackOverflow: grow the stack (TSO is relocated) --- */
456 IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
459 /* enlarge the stack */
460 StgTSO *new_t = threadStackOverflow(t);
462 /* This TSO has moved, so update any pointers to it from the
463 * main thread stack. It better not be on any other queues...
466 for (i = 0; i < next_main_thread; i++) {
467 if (main_threads[i] == t) {
468 main_threads[i] = new_t;
/* NOTE(review): this pushes `t`, but the relocated TSO is `new_t` and
 * threadStackOverflow marks the old one ThreadKilled.  An elided line
 * presumably does `t = new_t;` before this point -- verify against the
 * full file, otherwise the dead TSO is re-queued. */
473 PUSH_ON_RUN_QUEUE(t);
/* --- ThreadYielding (label elided): timer expiry / Hugs switch --- */
478 if (t->whatNext == ThreadEnterHugs) {
479 /* ToDo: or maybe a timer expired when we were in Hugs?
480 * or maybe someone hit ctrl-C
482 belch("Thread %ld stopped to switch to Hugs\n", t->id);
484 belch("Thread %ld stopped, timer expired\n", t->id);
/* if `interrupted` was set, drain the run queue and bail out */
489 IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
/* NOTE(review): loop reads t->link without a visible `t = run_queue_hd`
 * -- elided lines presumably advance t and delete each thread; verify. */
491 while (run_queue_hd != END_TSO_QUEUE) {
492 run_queue_hd = t->link;
495 run_queue_tl = END_TSO_QUEUE;
496 /* ToDo: should I do the same with blocked queues? */
500 /* Put the thread back on the run queue, at the end.
501 * t->link is already set to END_TSO_QUEUE.
503 ASSERT(t->link == END_TSO_QUEUE);
504 if (run_queue_tl != END_TSO_QUEUE) {
505 ASSERT(get_itbl(run_queue_tl)->type == TSO);
506 if (run_queue_hd == run_queue_tl) {
507 run_queue_hd->link = t;
510 run_queue_tl->link = t;
/* queue was empty: t becomes both head and tail */
513 run_queue_hd = run_queue_tl = t;
/* --- ThreadBlocked (label elided) --- */
518 IF_DEBUG(scheduler,belch("Thread %ld stopped, blocking\n", t->id));
520 /* assume the thread has put itself on some blocked queue
/* --- ThreadFinished (label elided) --- */
526 IF_DEBUG(scheduler,belch("Thread %ld finished\n", t->id));
528 t->whatNext = ThreadComplete;
532 barf("schedule: invalid thread return code");
535 /* check for signals each time around the scheduler */
536 if (signals_pending()) {
537 start_signal_handlers();
540 /* If our main thread has finished or been killed, return.
541 * If we were re-entered as a result of a _ccall_gc, then
542 * pop the blocked thread off the ccalling_threads stack back
545 if ((*MainTSO)->whatNext == ThreadComplete
546 || (*MainTSO)->whatNext == ThreadKilled) {
/* restore the thread that was parked by the _ccall_gc re-entry */
549 CurrentTSO = ccalling_threads;
550 ccalling_threads = ccalling_threads->link;
551 /* remember to stub the link field of CurrentTSO */
552 CurrentTSO->link = END_TSO_QUEUE;
554 if ((*MainTSO)->whatNext == ThreadComplete) {
555 /* we finished successfully, fill in the return value */
556 if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };
/* take the next thread off the run queue and loop round */
565 if (t != END_TSO_QUEUE) {
566 run_queue_hd = t->link;
567 t->link = END_TSO_QUEUE;
568 if (run_queue_hd == END_TSO_QUEUE) {
569 run_queue_tl = END_TSO_QUEUE;
/* run queue is empty -- blocked threads may still exist (body elided) */
574 if (blocked_queue_hd != END_TSO_QUEUE) {
581 /* -----------------------------------------------------------------------------
582 Where are the roots that we know about?
584 - all the threads on the runnable queue
585 - all the threads on the blocked queue
586 - all the thread currently executing a _ccall_GC
587 - all the "main threads"
589 -------------------------------------------------------------------------- */
/* GetRoots: mark every scheduler-known root for the garbage collector.
 * MarkRoot may move each object, so every root is written back with the
 * value it returns -- that is why main_threads[] holds plain pointers
 * and MainTSO points into this array rather than at a TSO directly.
 */
591 static void GetRoots(void)
595 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
596 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
598 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
599 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
601 ccalling_threads = (StgTSO *)MarkRoot((StgClosure *)ccalling_threads);
603 for (i = 0; i < next_main_thread; i++) {
604 main_threads[i] = (StgTSO *)MarkRoot((StgClosure *)main_threads[i]);
/* stable pointers are roots too */
607 markStablePtrTable();
610 /* -----------------------------------------------------------------------------
613 This is the interface to the garbage collector from Haskell land.
614 We provide this so that external C code can allocate and garbage
615 collect when called from Haskell via _ccall_GC.
617 It might be useful to provide an interface whereby the programmer
618 can specify more roots (ToDo).
619 -------------------------------------------------------------------------- */
/* External GC interface (for C code called from Haskell via _ccall_GC).
 * NOTE(review): the surrounding function headers/braces for performGC
 * and AllRoots are elided from this excerpt; the visible lines are
 * fragments of those bodies.
 */
/* stashes the user-supplied extra root marker for AllRoots() below */
621 void (*extra_roots)(void);
/* performGC body: collect with just the scheduler's roots */
626 GarbageCollect(GetRoots);
/* AllRoots body: scheduler roots plus the caller's extra roots */
632 GetRoots(); /* the scheduler's roots */
633 extra_roots(); /* the user's roots */
637 performGCWithRoots(void (*get_roots)(void))
639 extra_roots = get_roots;
641 GarbageCollect(AllRoots);
644 /* -----------------------------------------------------------------------------
647 If the thread has reached its maximum stack size,
648 then bomb out. Otherwise relocate the TSO into a larger chunk of
649 memory and adjust its stack size appropriately.
650 -------------------------------------------------------------------------- */
/* threadStackOverflow: grow a thread's stack.
 *
 * If the thread is already at max_stack_size the RTS bombs out via
 * stackOverflow().  Otherwise the stack size is doubled (capped at the
 * maximum, rounded up to whole blocks), a new TSO is allocated, the
 * header and live stack are copied across, internal pointers (sp/su/
 * splim and the update-frame chain) are relocated, and the OLD TSO is
 * marked ThreadKilled so the GC discards it.  Returns the new TSO
 * (return statement elided from this excerpt).
 */
653 threadStackOverflow(StgTSO *tso)
655 nat new_stack_size, new_tso_size, diff, stack_words;
659 if (tso->stack_size >= tso->max_stack_size) {
660 /* ToDo: just kill this thread? */
662 /* If we're debugging, just print out the top of the stack */
663 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
666 stackOverflow(tso->max_stack_size);
669 /* Try to double the current stack size. If that takes us over the
670 * maximum stack size for this thread, then use the maximum instead.
671 * Finally round up so the TSO ends up as a whole number of blocks.
673 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
674 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
675 TSO_STRUCT_SIZE)/sizeof(W_);
/* recompute the usable stack after block rounding */
676 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
678 IF_DEBUG(scheduler, fprintf(stderr,"increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
680 dest = (StgTSO *)allocate(new_tso_size);
682 /* copy the TSO block and the old stack into the new area */
683 memcpy(dest,tso,TSO_STRUCT_SIZE);
/* only the LIVE portion of the stack (sp .. top) is copied, placed at
 * the top of the new, larger stack area */
684 stack_words = tso->stack + tso->stack_size - tso->sp;
685 new_sp = (P_)dest + new_tso_size - stack_words;
686 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
688 /* relocate the stack pointers... */
689 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
690 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
/* splim keeps the same offset from the TSO base, so shift it by the
 * word distance between the two TSOs */
692 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
693 dest->stack_size = new_stack_size;
695 /* and relocate the update frame list */
696 relocate_TSO(tso, dest);
698 /* Mark the old one as dead so we don't try to scavenge it during
699 * garbage collection (the TSO will likely be on a mutables list in
700 * some generation, but it'll get collected soon enough).
702 tso->whatNext = ThreadKilled;
703 dest->mut_link = NULL;
705 IF_DEBUG(sanity,checkTSO(tso));
707 IF_DEBUG(scheduler,printTSO(dest));
/* if we just moved the main TSO, patch up MainTSO too (body elided) */
709 if (tso == MainTSO) { /* hack */
715 /* -----------------------------------------------------------------------------
716 Wake up a queue that was blocked on some resource (usually a
717 computation in progress).
718 -------------------------------------------------------------------------- */
/* awaken_blocked_queue: move every thread on the queue `q` onto the
 * run queue.  Walks the tso->link chain until END_TSO_QUEUE.
 * NOTE(review): the lines that bind `tso` and advance `q` are elided
 * from this excerpt.
 */
720 void awaken_blocked_queue(StgTSO *q)
724 while (q != END_TSO_QUEUE) {
725 ASSERT(get_itbl(q)->type == TSO);
728 PUSH_ON_RUN_QUEUE(tso);
729 IF_DEBUG(scheduler,belch("Waking up thread %ld", tso->id));
733 /* -----------------------------------------------------------------------------
735 - usually called inside a signal handler so it mustn't do anything fancy.
736 -------------------------------------------------------------------------- */
738 void interruptStgRts(void)