1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.8 1999/02/03 16:32:47 simonm Exp $
6 * ---------------------------------------------------------------------------*/
14 #include "StgStartup.h"
18 #include "StgMiscClosures.h"
20 #include "Evaluator.h"
24 #include "Profiling.h"
/* Scheduler global state.  (This listing is elided; the embedded numbers
 * at the start of each line are the original file's line numbers.)
 */
/* The runnable-thread queue: a singly-linked list threaded through
 * tso->link, with explicit head and tail pointers. */
27 StgTSO *run_queue_hd, *run_queue_tl;
/* Threads blocked waiting on some external event, head and tail. */
28 StgTSO *blocked_queue_hd, *blocked_queue_tl;
/* LIFO stack (linked through tso->link) of threads currently suspended
 * inside a _ccall_gc; see the re-entry handling in schedule(). */
29 StgTSO *ccalling_threads;
/* Maximum depth of scheduler re-entry (nested schedule() calls). */
31 #define MAX_SCHEDULE_NESTING 256
/* One "main thread" slot per nested schedule() invocation; kept separate
 * from the tso->link chain because a main TSO may also sit on other
 * queues (see the comment at the top of schedule()). */
33 StgTSO *main_threads[MAX_SCHEDULE_NESTING];
35 static void GetRoots(void);
36 static StgTSO *threadStackOverflow(StgTSO *tso);
38 /* flag set by signal handler to precipitate a context switch */
40 /* if this flag is set as well, give up execution */
41 static nat interrupted;
43 /* Next thread ID to allocate */
44 StgThreadID next_thread_id = 1;
47 * Pointers to the state of the current thread.
48 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
49 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
/* Register table for the currently executing Haskell thread. */
52 StgRegTable MainRegTable;
55 * The thread state for the main thread.
59 /* The smallest stack size that makes any sense is:
60 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
61 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
62 * + 1 (the realworld token for an IO thread)
63 * + 1 (the closure to enter)
65 * A thread with this stack will bomb immediately with a stack
66 * overflow, which will increase its stack size.
/* The "+ 2" accounts for the realworld token and the closure to enter,
 * per the itemisation above. */
69 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
71 /* -----------------------------------------------------------------------------
74 The new thread starts with the given stack size. Before the
75 scheduler can run, however, this thread needs to have a closure
76 (and possibly some arguments) pushed on its stack. See
77 pushClosure() in Schedule.h.
79 createGenThread() and createIOThread() (in Schedule.h) are
80 convenient packaged versions of this function.
81 -------------------------------------------------------------------------- */
/* Allocate a new thread of the given total size (in words, including the
 * TSO struct itself) and hand it to initThread for initialisation.
 * NOTE(review): the return type, local declarations, and closing braces
 * fall on lines elided from this listing.
 */
84 createThread(nat stack_size)
88 /* catch ridiculously small stack sizes */
89 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
90 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
/* The TSO header and its stack share one contiguous heap allocation. */
93 tso = (StgTSO *)allocate(stack_size);
94 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
/* initThread receives the usable stack size, i.e. total minus the
 * TSO struct overhead. */
96 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* Initialise a freshly-allocated TSO: info header, scheduling state,
 * thread id, stack pointers, and a stop frame; then link it onto the
 * head of the run queue.  stack_size is in words, excluding the TSO
 * struct.  (Some lines are elided from this listing.)
 */
101 initThread(StgTSO *tso, nat stack_size)
103 SET_INFO(tso,&TSO_info);
104 tso->whatNext = ThreadEnterGHC;
105 tso->state = tso_state_runnable;
106 tso->id = next_thread_id++;
/* splim is the stack-overflow check limit; RESERVED_STACK_WORDS of
 * headroom are kept so the overflow handler itself has room to run. */
108 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
109 tso->stack_size = stack_size;
110 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
/* The stack grows downwards: sp starts just past its high end. */
112 tso->sp = (P_)&(tso->stack) + stack_size;
115 tso->prof.CCCS = CCS_MAIN;
118 /* put a stop frame on the stack */
119 tso->sp -= sizeofW(StgStopFrame);
120 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
/* su (the update-frame chain) initially points at the stop frame. */
121 tso->su = (StgUpdateFrame*)tso->sp;
/* NOTE(review): %ld/%lx assume long-sized id/stack_size -- confirm
 * against the definitions of StgThreadID and nat on all platforms. */
123 IF_DEBUG(scheduler,belch("Initialised thread %ld, stack size = %lx words\n",
124 tso->id, tso->stack_size));
126 /* Put the new thread on the head of the runnable queue.
127 * The caller of createThread better push an appropriate closure
128 * on this thread's stack before the scheduler is invoked.
130 tso->link = run_queue_hd;
/* If the queue was empty the new thread is also the tail (the
 * assignment itself falls on an elided line). */
132 if (run_queue_tl == END_TSO_QUEUE) {
136 IF_DEBUG(scheduler,printTSO(tso));
139 /* -----------------------------------------------------------------------------
140 Delete a thread - reverting all blackholes to (something
141 equivalent to) their former state.
143 We create an AP_UPD for every UpdateFrame on the stack.
144 Entering one of these AP_UPDs pushes everything from the corresponding
145 update frame upwards onto the stack. (Actually, it pushes everything
146 up to the next update frame plus a pointer to the next AP_UPD
147 object. Entering the next AP_UPD object pushes more onto the
148 stack until we reach the last AP_UPD object - at which point
149 the stack should look exactly as it did when we killed the TSO
150 and we can continue execution by entering the closure on top of
152 -------------------------------------------------------------------------- */
/* Kill a thread, reverting its blackholes by freezing the stack into a
 * chain of AP_UPD closures (see the banner comment above).  Walks the
 * update-frame chain from tso->su downward, packaging each stack chunk
 * into a thunk and updating/wrapping according to the frame type.
 * (This listing elides many lines, including loop structure, case
 * labels, and closing braces.)
 */
154 void deleteThread(StgTSO *tso)
156 StgUpdateFrame* su = tso->su;
159 /* Thread already dead? */
160 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
164 IF_DEBUG(scheduler, belch("Killing thread %ld.", tso->id));
166 tso->whatNext = ThreadKilled; /* changed to ThreadComplete in schedule() */
167 tso->link = END_TSO_QUEUE; /* Just to be on the safe side... */
169 /* Threads that finish normally leave Su pointing to the word
170 * beyond the top of the stack, and Sp pointing to the last word
171 * on the stack, which is the return value of the thread.
/* Nothing to freeze if the topmost frame is already the stop frame or
 * su has run off the end of the stack. */
173 if ((P_)tso->su >= tso->stack + tso->stack_size
174 || get_itbl(tso->su)->type == STOP_FRAME) {
179 fprintf(stderr, "Freezing TSO stack\n");
183 /* The stack freezing code assumes there's a closure pointer on
184 * the top of the stack. This isn't always the case with compiled
185 * code, so we have to push a dummy closure on the top which just
186 * returns to the next return address on the stack.
188 if (LOOKS_LIKE_GHC_INFO(*sp)) {
189 *(--sp) = (W_)&dummy_ret_closure;
/* Size of the stack chunk between sp and the current update frame,
 * excluding the function closure word itself. */
193 int words = (stgCast(StgPtr,su) - stgCast(StgPtr,sp)) - 1;
195 StgAP_UPD* ap = stgCast(StgAP_UPD*,allocate(AP_sizeW(words)));
196 TICK_ALLOC_THK(words+1,0);
198 /* First build an AP_UPD consisting of the stack chunk above the
199 * current update frame, with the top word on the stack as the
204 /* if (words == 0) { -- optimisation
205 ap = stgCast(StgAP_UPD*,*stgCast(StgPtr*,sp)++);
/* Fill in the AP: function closure first, then the argument words
 * copied up from the stack. */
208 ap->fun = stgCast(StgClosure*,*stgCast(StgPtr*,sp)++);
209 for(i=0; i < (nat)words; ++i) {
210 payloadWord(ap,i) = *sp++;
/* Dispatch on the kind of frame at su: update, catch, or seq frames
 * each need different packaging. */
214 switch (get_itbl(su)->type) {
/* UPDATE_FRAME case: make the chunk an updatable thunk and use it
 * to update (revert) the blackholed updatee. */
218 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
221 fprintf(stderr, "Updating ");
222 printPtr(stgCast(StgPtr,su->updatee));
223 fprintf(stderr, " with ");
224 printObj(stgCast(StgClosure*,ap));
227 /* Replace the updatee with an indirection - happily
228 * this will also wake up any threads currently
229 * waiting on the result.
231 UPD_IND(su->updatee,ap); /* revert the black hole */
/* Pop the update frame, leaving the AP on top of the stack so the
 * next iteration's chunk starts from it. */
233 sp += sizeofW(StgUpdateFrame) -1;
234 sp[0] = stgCast(StgWord,ap); /* push onto stack */
/* CATCH_FRAME case: wrap the frozen chunk in catch# with the saved
 * handler, so the exception semantics survive the freeze. */
240 StgCatchFrame *cf = (StgCatchFrame *)su;
243 /* We want a PAP, not an AP_UPD. Fortunately, the
246 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
248 /* now build o = FUN(catch,ap,handler) */
249 o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+2));
251 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
252 payloadCPtr(o,0) = stgCast(StgClosure*,ap);
253 payloadCPtr(o,1) = cf->handler;
256 fprintf(stderr, "Built ");
257 printObj(stgCast(StgClosure*,o));
260 /* pop the old handler and put o on the stack */
262 sp += sizeofW(StgCatchFrame) - 1;
/* SEQ_FRAME case: analogous to catch, but wrap in seq# instead. */
269 StgSeqFrame *sf = (StgSeqFrame *)su;
272 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
274 /* now build o = FUN(seq,ap) */
275 o = stgCast(StgClosure*, allocate(sizeofW(StgClosure)+1));
277 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
278 payloadCPtr(o,0) = stgCast(StgClosure*,ap);
281 fprintf(stderr, "Built ");
282 printObj(stgCast(StgClosure*,o));
285 /* pop the old handler and put o on the stack */
287 sp += sizeofW(StgSeqFrame) - 1;
/* Reset all scheduler queues and counters to their empty state.
 * Called once during RTS start-up, before any threads exist.
 * (Some lines are elided from this listing.)
 */
301 void initScheduler(void)
303 run_queue_hd = END_TSO_QUEUE;
304 run_queue_tl = END_TSO_QUEUE;
305 blocked_queue_hd = END_TSO_QUEUE;
306 blocked_queue_tl = END_TSO_QUEUE;
307 ccalling_threads = END_TSO_QUEUE;
308 next_main_thread = 0;
/* Start with an empty list of entered CAFs (used by the GC / reverting
 * machinery elsewhere in the RTS). */
313 enteredCAFs = END_CAF_LIST;
316 /* -----------------------------------------------------------------------------
317 Main scheduling loop.
319 We use round-robin scheduling, each thread returning to the
320 scheduler loop when one of these conditions is detected:
324 * timer expires (thread yields)
327 -------------------------------------------------------------------------- */
/* Main round-robin scheduling loop: run the given main thread (and any
 * other runnable threads) until the main thread completes or is killed.
 * On successful completion, *ret_val receives the value left on top of
 * the main thread's stack.  Supports re-entry via _ccall_gc by stacking
 * main threads in main_threads[].
 * (This listing elides many lines -- declarations, case labels, loop
 * closings -- so the control structure is only partially visible.)
 */
329 SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)
332 StgThreadReturnCode ret;
336 /* Return value is NULL by default, it is only filled in if the
337 * main thread completes successfully.
339 if (ret_val) { *ret_val = NULL; }
341 /* Save away a pointer to the main thread so that we can keep track
342 * of it should a garbage collection happen. We keep a stack of
343 * main threads in order to support scheduler re-entry. We can't
344 * use the normal TSO linkage for this stack, because the main TSO
345 * may need to be linked onto other queues.
347 main_threads[next_main_thread] = main;
/* MainTSO is a pointer into the slot so it tracks GC relocation. */
348 MainTSO = &main_threads[next_main_thread];
351 fprintf(stderr, "Scheduler entered: nesting = %d\n",
354 /* Are we being re-entered?
356 if (CurrentTSO != NULL) {
357 /* This happens when a _ccall_gc from Haskell ends up re-entering
360 * Block the current thread (put it on the ccalling_queue) and
361 * continue executing. The calling thread better have stashed
362 * away its state properly and left its stack with a proper stack
/* Park the interrupted thread on the ccalling_threads stack; it is
 * resumed when this nested schedule() returns. */
365 threadPaused(CurrentTSO);
366 CurrentTSO->link = ccalling_threads;
367 ccalling_threads = CurrentTSO;
368 in_ccall_gc = rtsTrue;
370 fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",
373 in_ccall_gc = rtsFalse;
376 /* Take a thread from the run queue.
/* Pop the head of the run queue; keep the tail pointer consistent
 * when the queue becomes empty. */
379 if (t != END_TSO_QUEUE) {
380 run_queue_hd = t->link;
381 t->link = END_TSO_QUEUE;
382 if (run_queue_hd == END_TSO_QUEUE) {
383 run_queue_tl = END_TSO_QUEUE;
/* Main loop: run t, dispatch on why it stopped, pick the next thread. */
387 while (t != END_TSO_QUEUE) {
390 /* If we have more threads on the run queue, set up a context
391 * switch at some point in the future.
393 if (run_queue_hd != END_TSO_QUEUE) {
398 IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));
400 /* Be friendly to the storage manager: we're about to *run* this
401 * thread, so we better make sure the TSO is mutable.
403 if (t->mut_link == NULL) {
404 recordMutable((StgMutClosure *)t);
407 /* Run the current thread */
/* Dispatch on how the thread should (re)start execution; StgRun
 * transfers control into compiled Haskell code. */
408 switch (t->whatNext) {
411 /* thread already killed. Drop it and carry on. */
414 ret = StgRun((StgFunPtr) stg_enterStackTop);
417 ret = StgRun((StgFunPtr) stg_returnToStackTop);
419 case ThreadEnterHugs:
422 IF_DEBUG(scheduler,belch("entering Hugs"));
424 /* CHECK_SENSIBLE_REGS(); */
426 StgClosure* c = stgCast(StgClosure*,*Sp);
434 barf("Panic: entered a BCO but no bytecode interpreter in this build");
437 barf("schedule: invalid whatNext field");
440 /* We may have garbage collected while running the thread
441 * (eg. something nefarious like _ccall_GC_ performGC), and hence
442 * CurrentTSO may have moved. Update t to reflect this.
447 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* HeapOverflow: requeue the thread and garbage collect. */
455 IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
457 PUSH_ON_RUN_QUEUE(t);
458 GarbageCollect(GetRoots);
/* StackOverflow: relocate the TSO into a bigger allocation. */
462 IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
465 /* enlarge the stack */
466 StgTSO *new_t = threadStackOverflow(t);
468 /* This TSO has moved, so update any pointers to it from the
469 * main thread stack. It better not be on any other queues...
472 for (i = 0; i < next_main_thread; i++) {
473 if (main_threads[i] == t) {
474 main_threads[i] = new_t;
/* NOTE(review): pushing t (not new_t) here looks wrong unless an
 * elided line reassigns t = new_t first -- confirm against the
 * full source. */
479 PUSH_ON_RUN_QUEUE(t);
/* ThreadYielding: distinguish a switch into Hugs from a timer tick. */
484 if (t->whatNext == ThreadEnterHugs) {
485 /* ToDo: or maybe a timer expired when we were in Hugs?
486 * or maybe someone hit ctrl-C
488 belch("Thread %ld stopped to switch to Hugs\n", t->id);
490 belch("Thread %ld stopped, timer expired\n", t->id);
/* If we've been interrupted (e.g. by a signal), drain and delete the
 * run queue and bail out of the scheduler. */
495 IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
/* NOTE(review): this drain loop reads t->link without a visible
 * t = run_queue_hd -- presumably that assignment (and the
 * deleteThread call) are on elided lines; verify. */
497 while (run_queue_hd != END_TSO_QUEUE) {
498 run_queue_hd = t->link;
501 run_queue_tl = END_TSO_QUEUE;
502 /* ToDo: should I do the same with blocked queues? */
506 /* Put the thread back on the run queue, at the end.
507 * t->link is already set to END_TSO_QUEUE.
509 ASSERT(t->link == END_TSO_QUEUE);
510 if (run_queue_tl != END_TSO_QUEUE) {
511 ASSERT(get_itbl(run_queue_tl)->type == TSO);
512 if (run_queue_hd == run_queue_tl) {
513 run_queue_hd->link = t;
516 run_queue_tl->link = t;
/* Empty queue: t becomes both head and tail. */
519 run_queue_hd = run_queue_tl = t;
/* ThreadBlocked: the thread is assumed to have enqueued itself. */
524 IF_DEBUG(scheduler,belch("Thread %ld stopped, blocking\n", t->id));
526 /* assume the thread has put itself on some blocked queue
/* ThreadFinished: mark complete; the result stays on its stack. */
532 IF_DEBUG(scheduler,belch("Thread %ld finished\n", t->id));
534 t->whatNext = ThreadComplete;
538 barf("schedule: invalid thread return code");
541 /* check for signals each time around the scheduler */
542 if (signals_pending()) {
543 start_signal_handlers();
546 /* If our main thread has finished or been killed, return.
547 * If we were re-entered as a result of a _ccall_gc, then
548 * pop the blocked thread off the ccalling_threads stack back
551 if ((*MainTSO)->whatNext == ThreadComplete
552 || (*MainTSO)->whatNext == ThreadKilled) {
/* Restore the thread that was suspended by the _ccall_gc re-entry. */
555 CurrentTSO = ccalling_threads;
556 ccalling_threads = ccalling_threads->link;
557 /* remember to stub the link field of CurrentTSO */
558 CurrentTSO->link = END_TSO_QUEUE;
560 if ((*MainTSO)->whatNext == ThreadComplete) {
561 /* we finished successfully, fill in the return value */
562 if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };
/* Otherwise pick the next runnable thread (same pop-head pattern as
 * at scheduler entry). */
571 if (t != END_TSO_QUEUE) {
572 run_queue_hd = t->link;
573 t->link = END_TSO_QUEUE;
574 if (run_queue_hd == END_TSO_QUEUE) {
575 run_queue_tl = END_TSO_QUEUE;
/* With nothing runnable, check whether blocked threads can proceed. */
580 if (blocked_queue_hd != END_TSO_QUEUE) {
587 /* -----------------------------------------------------------------------------
588 Where are the roots that we know about?
590 - all the threads on the runnable queue
591 - all the threads on the blocked queue
592 - all the threads currently executing a _ccall_GC
593 - all the "main threads"
595 -------------------------------------------------------------------------- */
/* Mark every TSO the scheduler knows about as a GC root: both run-queue
 * ends, both blocked-queue ends, the _ccall_gc'ing threads, and each
 * stacked main thread.  MarkRoot may relocate the object, so each
 * pointer is reassigned from its return value.
 * (Some lines are elided from this listing.)
 */
597 static void GetRoots(void)
601 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
602 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
604 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
605 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
607 ccalling_threads = (StgTSO *)MarkRoot((StgClosure *)ccalling_threads);
609 for (i = 0; i < next_main_thread; i++) {
610 main_threads[i] = (StgTSO *)MarkRoot((StgClosure *)main_threads[i]);
614 /* -----------------------------------------------------------------------------
617 This is the interface to the garbage collector from Haskell land.
618 We provide this so that external C code can allocate and garbage
619 collect when called from Haskell via _ccall_GC.
621 It might be useful to provide an interface whereby the programmer
622 can specify more roots (ToDo).
623 -------------------------------------------------------------------------- */
/* External GC interface (see banner above).  NOTE(review): the function
 * headers for performGC and AllRoots fall on elided lines; the fragments
 * below are their bodies.
 */
/* User-supplied callback marking extra roots; set by performGCWithRoots
 * and consumed by AllRoots. */
625 void (*extra_roots)(void);
/* performGC: collect using only the scheduler's own roots. */
630 GarbageCollect(GetRoots);
/* AllRoots: mark the scheduler's roots plus the user's extra roots. */
636 GetRoots(); /* the scheduler's roots */
637 extra_roots(); /* the user's roots */
641 performGCWithRoots(void (*get_roots)(void))
/* Stash the callback where AllRoots can reach it during the collection. */
643 extra_roots = get_roots;
645 GarbageCollect(AllRoots);
648 /* -----------------------------------------------------------------------------
651 If the thread has reached its maximum stack size,
652 then bomb out. Otherwise relocate the TSO into a larger chunk of
653 memory and adjust its stack size appropriately.
654 -------------------------------------------------------------------------- */
/* Grow a thread's stack: allocate a larger TSO, copy the old TSO header
 * and live stack into it, relocate the internal pointers, and return the
 * new TSO.  Bombs out via stackOverflow() if the thread is already at
 * its maximum stack size.  The old TSO is marked ThreadKilled so the GC
 * discards it.  (Some lines are elided from this listing.)
 */
657 threadStackOverflow(StgTSO *tso)
659 nat new_stack_size, new_tso_size, diff, stack_words;
663 if (tso->stack_size >= tso->max_stack_size) {
664 /* ToDo: just kill this thread? */
666 /* If we're debugging, just print out the top of the stack */
667 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
/* Report the overflow and (per its contract elsewhere) do not return. */
670 stackOverflow(tso->max_stack_size);
673 /* Try to double the current stack size. If that takes us over the
674 * maximum stack size for this thread, then use the maximum instead.
675 * Finally round up so the TSO ends up as a whole number of blocks.
677 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
678 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
679 TSO_STRUCT_SIZE)/sizeof(W_);
680 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
681 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
683 IF_DEBUG(scheduler, fprintf(stderr,"increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
685 dest = (StgTSO *)allocate(new_tso_size);
686 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
688 /* copy the TSO block and the old stack into the new area */
/* Only the live portion of the stack (from sp to the top) is copied,
 * placed flush against the high end of the new allocation. */
689 memcpy(dest,tso,TSO_STRUCT_SIZE);
690 stack_words = tso->stack + tso->stack_size - tso->sp;
691 new_sp = (P_)dest + new_tso_size - stack_words;
692 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
694 /* relocate the stack pointers... */
695 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
/* su moved by the stack displacement; splim moves with the TSO base. */
696 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
698 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
699 dest->stack_size = new_stack_size;
701 /* and relocate the update frame list */
702 relocate_TSO(tso, dest);
704 /* Mark the old one as dead so we don't try to scavenge it during
705 * garbage collection (the TSO will likely be on a mutables list in
706 * some generation, but it'll get collected soon enough).
708 tso->whatNext = ThreadKilled;
709 dest->mut_link = NULL;
711 IF_DEBUG(sanity,checkTSO(tso));
713 IF_DEBUG(scheduler,printTSO(dest));
/* NOTE(review): MainTSO is declared as a pointer into main_threads[],
 * so comparing it against a TSO pointer here looks type-suspect (hence
 * the "hack" remark) -- confirm against the elided declarations. */
715 if (tso == MainTSO) { /* hack */
721 /* -----------------------------------------------------------------------------
722 Wake up a queue that was blocked on some resource (usually a
723 computation in progress).
724 -------------------------------------------------------------------------- */
/* Move every thread on the blocked queue q back onto the run queue
 * (used when the resource they were blocked on becomes available).
 * (Some lines -- including the advance to the next queue element --
 * are elided from this listing.)
 */
726 void awaken_blocked_queue(StgTSO *q)
730 while (q != END_TSO_QUEUE) {
731 ASSERT(get_itbl(q)->type == TSO);
734 PUSH_ON_RUN_QUEUE(tso);
735 IF_DEBUG(scheduler,belch("Waking up thread %ld", tso->id));
739 /* -----------------------------------------------------------------------------
741 - usually called inside a signal handler so it mustn't do anything fancy.
742 -------------------------------------------------------------------------- */
744 void interruptStgRts(void)