1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.13 1999/03/02 20:04:03 sof Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
16 #include "StgStartup.h"
20 #include "StgMiscClosures.h"
22 #include "Evaluator.h"
26 #include "Profiling.h"
33 #define IS_CHARLIKE_CLOSURE(p) ( stgCast(StgPtr,p) >= stgCast(StgPtr,CHARLIKE_closure) && stgCast(char*,p) <= (stgCast(char*,CHARLIKE_closure) + 255 * sizeof(StgIntCharlikeClosure)))
34 #define IS_INTLIKE_CLOSURE(p) ( stgCast(StgPtr,p) >= stgCast(StgPtr,INTLIKE_closure) && stgCast(char*,p) <= (stgCast(char*,INTLIKE_closure) + 32 * sizeof(StgIntCharlikeClosure)))
/* The runnable-thread queue, linked through tso->link. */
StgTSO *run_queue_hd, *run_queue_tl;
/* Threads blocked on some resource; woken via awaken_blocked_queue(). */
StgTSO *blocked_queue_hd, *blocked_queue_tl;
/* Stack of threads suspended while doing a _ccall_gc (see schedule()). */
StgTSO *ccalling_threads;

/* Maximum number of nested scheduler entries (re-entry via _ccall_gc). */
#define MAX_SCHEDULE_NESTING 256
/* One saved main-thread pointer per nested scheduler invocation; kept
 * out-of-band because a main TSO may also be linked onto other queues. */
StgTSO *main_threads[MAX_SCHEDULE_NESTING];

static void GetRoots(void);
static StgTSO *threadStackOverflow(StgTSO *tso);

/* flag set by signal handler to precipitate a context switch */
/* if this flag is set as well, give up execution.
 * NOTE(review): the declaration the first comment refers to (the
 * context-switch flag) is not visible in this chunk. */
static nat interrupted;

/* Next thread ID to allocate */
StgThreadID next_thread_id = 1;

/* Pointers to the state of the current thread.
 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
 */
StgRegTable MainRegTable;

/* The thread state for the main thread. */

/* The smallest stack size that makes any sense is:
 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
 * + 1 (the realworld token for an IO thread)
 * + 1 (the closure to enter)
 *
 * A thread with this stack will bomb immediately with a stack
 * overflow, which will increase its stack size.
 */
#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
80 /* -----------------------------------------------------------------------------
83 The new thread starts with the given stack size. Before the
84 scheduler can run, however, this thread needs to have a closure
85 (and possibly some arguments) pushed on its stack. See
86 pushClosure() in Schedule.h.
88 createGenThread() and createIOThread() (in Schedule.h) are
89 convenient packaged versions of this function.
90 -------------------------------------------------------------------------- */
/* Allocate a new thread.  stack_size is the total size in words,
 * including the TSO header.  The caller must push a suitable closure
 * onto the new thread's stack (see pushClosure() in Schedule.h)
 * before the scheduler runs.
 * NOTE(review): the return type, local declarations and closing
 * braces of this function are not visible in this chunk; it
 * presumably returns the freshly allocated StgTSO* -- confirm
 * against the full source.
 */
createThread(nat stack_size)

  /* catch ridiculously small stack sizes */
  if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
    stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;

  /* one allocation covers both the TSO header and its stack */
  tso = (StgTSO *)allocate(stack_size);
  TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);

  /* the usable stack excludes the TSO header words */
  initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* Set up a freshly allocated TSO: info table, scheduling state,
 * thread id, stack limits, and an initial stop frame.  stack_size is
 * the usable stack size in words (TSO header already excluded).
 * NOTE(review): some lines (return type, braces, the continuation of
 * the max_stack_size expression) are not visible in this chunk.
 */
initThread(StgTSO *tso, nat stack_size)

  SET_INFO(tso,&TSO_info);
  tso->whatNext = ThreadEnterGHC;
  tso->state = tso_state_runnable;
  tso->id = next_thread_id++;

  /* splim leaves RESERVED_STACK_WORDS of headroom so the overflow
   * handler has stack to run on */
  tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
  tso->stack_size = stack_size;
  tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
  /* the stack grows downwards from the end of the allocated area */
  tso->sp = (P_)&(tso->stack) + stack_size;

  tso->prof.CCCS = CCS_MAIN;

  /* put a stop frame on the stack */
  tso->sp -= sizeofW(StgStopFrame);
  SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
  tso->su = (StgUpdateFrame*)tso->sp;

  IF_DEBUG(scheduler,belch("Initialised thread %ld, stack size = %lx words\n",
                           tso->id, tso->stack_size));

  /* Put the new thread on the head of the runnable queue.
   * The caller of createThread better push an appropriate closure
   * on this thread's stack before the scheduler is invoked.
   */
  tso->link = run_queue_hd;
  if (run_queue_tl == END_TSO_QUEUE) {

  IF_DEBUG(scheduler,printTSO(tso));
148 /* -----------------------------------------------------------------------------
149 Delete a thread - reverting all blackholes to (something
150 equivalent to) their former state.
152 We create an AP_UPD for every UpdateFrame on the stack.
153 Entering one of these AP_UPDs pushes everything from the corresponding
154 update frame upwards onto the stack. (Actually, it pushes everything
155 up to the next update frame plus a pointer to the next AP_UPD
156 object. Entering the next AP_UPD object pushes more onto the
157 stack until we reach the last AP_UPD object - at which point
158 the stack should look exactly as it did when we killed the TSO
159 and we can continue execution by entering the closure on top of
161 -------------------------------------------------------------------------- */
/* Delete a thread, reverting all blackholes it was computing to
 * (something equivalent to) their former state: for every update
 * frame on the dead thread's stack an AP_UPD closure is built that
 * captures the stack chunk above that frame, and is used to update
 * the frame's updatee.  Catch frames become an application of catch#,
 * seq frames an application of seq#.
 * NOTE(review): several lines (local declarations, case labels, loop
 * structure, closing braces) are not visible in this chunk; the
 * comments below describe only what is shown.
 */
void deleteThread(StgTSO *tso)

  StgUpdateFrame* su = tso->su;

  /* Thread already dead? */
  if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {

  IF_DEBUG(scheduler, belch("Killing thread %ld.", tso->id));

  tso->whatNext = ThreadKilled; /* changed to ThreadComplete in schedule() */
  tso->link = END_TSO_QUEUE; /* Just to be on the safe side... */

  /* Threads that finish normally leave Su pointing to the word
   * beyond the top of the stack, and Sp pointing to the last word
   * on the stack, which is the return value of the thread.
   */
  if ((P_)tso->su >= tso->stack + tso->stack_size
      || get_itbl(tso->su)->type == STOP_FRAME) {

    fprintf(stderr, "Freezing TSO stack\n");

  /* The stack freezing code assumes there's a closure pointer on
   * the top of the stack. This isn't always the case with compiled
   * code, so we have to push a dummy closure on the top which just
   * returns to the next return address on the stack.
   */
  if ( LOOKS_LIKE_GHC_INFO(*sp) ) {
    *(--sp) = (W_)&dummy_ret_closure;

    /* payload words between sp and the update frame, excluding the
     * function word itself */
    int words = (stgCast(StgPtr,su) - stgCast(StgPtr,sp)) - 1;

    StgAP_UPD* ap = stgCast(StgAP_UPD*,allocate(AP_sizeW(words)));
    TICK_ALLOC_THK(words+1,0);

    /* First build an AP_UPD consisting of the stack chunk above the
     * current update frame, with the top word on the stack as the
     * fun field.
     */
    /* if (words == 0) { -- optimisation
       ap = stgCast(StgAP_UPD*,*stgCast(StgPtr*,sp)++);
       } else */
    ap->fun = stgCast(StgClosure*,*stgCast(StgPtr*,sp)++);
    for(i=0; i < (nat)words; ++i) {
      payloadWord(ap,i) = *sp++;

    /* dispatch on the kind of frame we froze up to */
    switch (get_itbl(su)->type) {

      SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);

      fprintf(stderr, "Updating ");
      printPtr(stgCast(StgPtr,su->updatee));
      fprintf(stderr, " with ");
      printObj(stgCast(StgClosure*,ap));

      /* Replace the updatee with an indirection - happily
       * this will also wake up any threads currently
       * waiting on the result.
       */
      UPD_IND(su->updatee,ap); /* revert the black hole */
      sp += sizeofW(StgUpdateFrame) -1;
      sp[0] = stgCast(StgWord,ap); /* push onto stack */

      /* catch frame: rebuild as catch# applied to the frozen
       * continuation and the saved handler */
      StgCatchFrame *cf = (StgCatchFrame *)su;

      /* We want a PAP, not an AP_UPD. Fortunately, the
       * -- rest of original comment not visible in this chunk -- */
      SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);

      /* now build o = FUN(catch,ap,handler) */
      o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+2));
      SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
      payloadCPtr(o,0) = stgCast(StgClosure*,ap);
      payloadCPtr(o,1) = cf->handler;

      fprintf(stderr, "Built ");
      printObj(stgCast(StgClosure*,o));

      /* pop the old handler and put o on the stack */
      sp += sizeofW(StgCatchFrame) - 1;

      /* seq frame: rebuild as seq# applied to the frozen continuation */
      StgSeqFrame *sf = (StgSeqFrame *)su;

      SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);

      /* now build o = FUN(seq,ap) */
      o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+1));
      SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
      payloadCPtr(o,0) = stgCast(StgClosure*,ap);

      fprintf(stderr, "Built ");
      printObj(stgCast(StgClosure*,o));

      /* pop the old handler and put o on the stack */
      sp += sizeofW(StgSeqFrame) - 1;
/* Initialise the scheduler's global state: empty every thread queue,
 * reset the main-thread stack index, and reset the entered-CAF list.
 * Must run before any thread is created or scheduled.
 */
void initScheduler(void)

  run_queue_hd = END_TSO_QUEUE;
  run_queue_tl = END_TSO_QUEUE;
  blocked_queue_hd = END_TSO_QUEUE;
  blocked_queue_tl = END_TSO_QUEUE;
  ccalling_threads = END_TSO_QUEUE;
  next_main_thread = 0;

  enteredCAFs = END_CAF_LIST;
325 /* -----------------------------------------------------------------------------
326 Main scheduling loop.
328 We use round-robin scheduling, each thread returning to the
329 scheduler loop when one of these conditions is detected:
333 * timer expires (thread yields)
336 -------------------------------------------------------------------------- */
/* Main scheduling loop (round-robin).  Runs threads from the run
 * queue until the given main thread completes or is killed, returning
 * its result (if any) through *ret_val.  Supports re-entry from a
 * _ccall_gc: the interrupted thread is parked on ccalling_threads and
 * restored on the way out.
 * NOTE(review): many lines (local declarations, case labels, braces)
 * are not visible in this chunk; the comments below describe only
 * what is shown.
 */
SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)

  StgThreadReturnCode ret;

  /* Return value is NULL by default, it is only filled in if the
   * main thread completes successfully.
   */
  if (ret_val) { *ret_val = NULL; }

  /* Save away a pointer to the main thread so that we can keep track
   * of it should a garbage collection happen.  We keep a stack of
   * main threads in order to support scheduler re-entry.  We can't
   * use the normal TSO linkage for this stack, because the main TSO
   * may need to be linked onto other queues.
   */
  main_threads[next_main_thread] = main;
  MainTSO = &main_threads[next_main_thread];

    fprintf(stderr, "Scheduler entered: nesting = %d\n",

  /* Are we being re-entered? */
  if (CurrentTSO != NULL) {
    /* This happens when a _ccall_gc from Haskell ends up re-entering
     * the scheduler.
     *
     * Block the current thread (put it on the ccalling_queue) and
     * continue executing.  The calling thread better have stashed
     * away its state properly and left its stack with a proper stack
     * frame on top.
     */
    threadPaused(CurrentTSO);
    CurrentTSO->link = ccalling_threads;
    ccalling_threads = CurrentTSO;
    in_ccall_gc = rtsTrue;
      fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",
    in_ccall_gc = rtsFalse;

  /* Take a thread from the run queue. */
  if (t != END_TSO_QUEUE) {
    run_queue_hd = t->link;
    t->link = END_TSO_QUEUE;
    if (run_queue_hd == END_TSO_QUEUE) {
      run_queue_tl = END_TSO_QUEUE;

  /* main loop: run t, then dispatch on why it stopped */
  while (t != END_TSO_QUEUE) {

    /* If we have more threads on the run queue, set up a context
     * switch at some point in the future.
     */
    if (run_queue_hd != END_TSO_QUEUE) {

    IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));

    /* Be friendly to the storage manager: we're about to *run* this
     * thread, so we better make sure the TSO is mutable.
     */
    if (t->mut_link == NULL) {
      recordMutable((StgMutClosure *)t);

    /* Run the current thread */
    switch (t->whatNext) {
      /* thread already killed. Drop it and carry on. */
      ret = StgRun((StgFunPtr) stg_enterStackTop);
      ret = StgRun((StgFunPtr) stg_returnToStackTop);
    case ThreadEnterHugs:
      IF_DEBUG(scheduler,belch("entering Hugs"));
      /* CHECK_SENSIBLE_REGS(); */
      StgClosure* c = stgCast(StgClosure*,*Sp);
      barf("Panic: entered a BCO but no bytecode interpreter in this build");
      barf("schedule: invalid whatNext field");

    /* We may have garbage collected while running the thread
     * (eg. something nefarious like _ccall_GC_ performGC), and hence
     * CurrentTSO may have moved.  Update t to reflect this.
     */

    /* Costs for the scheduler are assigned to CCS_SYSTEM */

      IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
      /* keep the thread alive across (and reschedule it after) GC */
      PUSH_ON_RUN_QUEUE(t);
      GarbageCollect(GetRoots);

      IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
      /* enlarge the stack */
      StgTSO *new_t = threadStackOverflow(t);
      /* This TSO has moved, so update any pointers to it from the
       * main thread stack.  It better not be on any other queues...
       */
      for (i = 0; i < next_main_thread; i++) {
        if (main_threads[i] == t) {
          main_threads[i] = new_t;
      /* NOTE(review): this pushes t, but the relocated TSO is new_t;
       * presumably t is rebound to new_t on a line not visible in
       * this chunk -- confirm against the full source. */
      PUSH_ON_RUN_QUEUE(t);

      if (t->whatNext == ThreadEnterHugs) {
        /* ToDo: or maybe a timer expired when we were in Hugs?
         * or maybe someone hit ctrl-C
         */
        belch("Thread %ld stopped to switch to Hugs\n", t->id);
        belch("Thread %ld stopped, timer expired\n", t->id);

        IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
        /* interrupted: drain the run queue */
        while (run_queue_hd != END_TSO_QUEUE) {
          /* NOTE(review): presumably t is popped from the queue on a
           * line not visible here before its link is followed */
          run_queue_hd = t->link;
        run_queue_tl = END_TSO_QUEUE;
        /* ToDo: should I do the same with blocked queues? */

        /* Put the thread back on the run queue, at the end.
         * t->link is already set to END_TSO_QUEUE.
         */
        ASSERT(t->link == END_TSO_QUEUE);
        if (run_queue_tl == END_TSO_QUEUE) {
          run_queue_hd = run_queue_tl = t;
          ASSERT(get_itbl(run_queue_tl)->type == TSO);
          if (run_queue_hd == run_queue_tl) {
            run_queue_hd->link = t;
            run_queue_tl->link = t;

      IF_DEBUG(scheduler,belch("Thread %ld stopped, blocking\n", t->id));
      /* assume the thread has put itself on some blocked queue
       */

      IF_DEBUG(scheduler,belch("Thread %ld finished\n", t->id));
      t->whatNext = ThreadComplete;

      barf("schedule: invalid thread return code");

    /* check for signals each time around the scheduler */
    if (signals_pending()) {
      start_signal_handlers();

    /* If our main thread has finished or been killed, return.
     * If we were re-entered as a result of a _ccall_gc, then
     * pop the blocked thread off the ccalling_threads stack back
     * into CurrentTSO.
     */
    if ((*MainTSO)->whatNext == ThreadComplete
        || (*MainTSO)->whatNext == ThreadKilled) {
        CurrentTSO = ccalling_threads;
        ccalling_threads = ccalling_threads->link;
        /* remember to stub the link field of CurrentTSO */
        CurrentTSO->link = END_TSO_QUEUE;
      if ((*MainTSO)->whatNext == ThreadComplete) {
        /* we finished successfully, fill in the return value */
        if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };

    /* take the next thread off the head of the run queue */
    if (t != END_TSO_QUEUE) {
      run_queue_hd = t->link;
      t->link = END_TSO_QUEUE;
      if (run_queue_hd == END_TSO_QUEUE) {
        run_queue_tl = END_TSO_QUEUE;

    /* no runnable threads: check the blocked queue */
    if (blocked_queue_hd != END_TSO_QUEUE) {
598 /* -----------------------------------------------------------------------------
599 Where are the roots that we know about?
601 - all the threads on the runnable queue
602 - all the threads on the blocked queue
   - all the threads currently executing a _ccall_GC
604 - all the "main threads"
606 -------------------------------------------------------------------------- */
/* Mark all the roots the scheduler knows about: both ends of the run
 * and blocked queues, the _ccall_GC'ing thread stack, and every saved
 * main thread.  MarkRoot may move the objects, so each root variable
 * is updated with the value it returns.
 * NOTE(review): the declaration of the loop index i and the closing
 * braces are not visible in this chunk.
 */
static void GetRoots(void)

  run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
  run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);

  blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
  blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);

  ccalling_threads = (StgTSO *)MarkRoot((StgClosure *)ccalling_threads);

  for (i = 0; i < next_main_thread; i++) {
    main_threads[i] = (StgTSO *)MarkRoot((StgClosure *)main_threads[i]);
625 /* -----------------------------------------------------------------------------
628 This is the interface to the garbage collector from Haskell land.
629 We provide this so that external C code can allocate and garbage
630 collect when called from Haskell via _ccall_GC.
632 It might be useful to provide an interface whereby the programmer
633 can specify more roots (ToDo).
634 -------------------------------------------------------------------------- */
/* User-supplied extra roots for performGCWithRoots(); called in
 * addition to the scheduler's own roots during a user-requested GC. */
void (*extra_roots)(void);

/* NOTE(review): the function headers/braces for performGC() and
 * AllRoots() are not visible in this chunk; the next three statements
 * are (fragments of) their bodies. */
  GarbageCollect(GetRoots);

  GetRoots();    /* the scheduler's roots */
  extra_roots(); /* the user's roots */

/* Perform a GC with additional user-supplied roots: stash the root
 * function where AllRoots can find it, then collect. */
performGCWithRoots(void (*get_roots)(void))

  extra_roots = get_roots;

  GarbageCollect(AllRoots);
659 /* -----------------------------------------------------------------------------
662 If the thread has reached its maximum stack size,
663 then bomb out. Otherwise relocate the TSO into a larger chunk of
664 memory and adjust its stack size appropriately.
665 -------------------------------------------------------------------------- */
/* Handle a stack overflow for tso.  If the thread has already reached
 * its maximum stack size, bomb out via stackOverflow(); otherwise
 * relocate the TSO into a larger chunk of memory, copy the live
 * stack, relocate the stack pointers, and return the new TSO.  The
 * old TSO is marked ThreadKilled so the GC drops it.
 * NOTE(review): some declarations (dest, new_sp) and closing braces
 * are not visible in this chunk.
 */
threadStackOverflow(StgTSO *tso)

  nat new_stack_size, new_tso_size, diff, stack_words;

  if (tso->stack_size >= tso->max_stack_size) {
    /* ToDo: just kill this thread? */
    /* If we're debugging, just print out the top of the stack */
    printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,

    stackOverflow(tso->max_stack_size);

  /* Try to double the current stack size.  If that takes us over the
   * maximum stack size for this thread, then use the maximum instead.
   * Finally round up so the TSO ends up as a whole number of blocks.
   */
  new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
  new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
                                     TSO_STRUCT_SIZE)/sizeof(W_);
  new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
  new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;

  IF_DEBUG(scheduler, fprintf(stderr,"increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));

  dest = (StgTSO *)allocate(new_tso_size);
  TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);

  /* copy the TSO block and the old stack into the new area */
  memcpy(dest,tso,TSO_STRUCT_SIZE);
  stack_words = tso->stack + tso->stack_size - tso->sp;
  /* the live stack goes at the high end of the new region */
  new_sp = (P_)dest + new_tso_size - stack_words;
  memcpy(new_sp, tso->sp, stack_words * sizeof(W_));

  /* relocate the stack pointers... */
  diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
  dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
  /* splim sits near the stack base (see initThread), so it moves by
   * the TSO base offset rather than by diff */
  dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
  dest->stack_size = new_stack_size;

  /* and relocate the update frame list */
  relocate_TSO(tso, dest);

  /* Mark the old one as dead so we don't try to scavenge it during
   * garbage collection (the TSO will likely be on a mutables list in
   * some generation, but it'll get collected soon enough).
   */
  tso->whatNext = ThreadKilled;
  dest->mut_link = NULL;

  IF_DEBUG(sanity,checkTSO(tso));
  IF_DEBUG(scheduler,printTSO(dest));
  /* NOTE(review): MainTSO is set elsewhere in this file to a pointer
   * INTO main_threads[] (StgTSO **); comparing it to a StgTSO* here
   * looks type-suspect -- confirm against the full source. */
  if (tso == MainTSO) { /* hack */
732 /* -----------------------------------------------------------------------------
733 Wake up a queue that was blocked on some resource (usually a
734 computation in progress).
735 -------------------------------------------------------------------------- */
/* Walk the blocked queue q, moving each TSO onto the run queue.
 * NOTE(review): the lines that dequeue `tso` from q and advance q are
 * not visible in this chunk; `tso` is presumably the element just
 * removed from the front of q -- confirm against the full source.
 */
void awaken_blocked_queue(StgTSO *q)

  while (q != END_TSO_QUEUE) {
    ASSERT(get_itbl(q)->type == TSO);
    PUSH_ON_RUN_QUEUE(tso);
    IF_DEBUG(scheduler,belch("Waking up thread %ld", tso->id));
750 /* -----------------------------------------------------------------------------
752 - usually called inside a signal handler so it mustn't do anything fancy.
753 -------------------------------------------------------------------------- */
755 void interruptStgRts(void)