1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.c,v 1.25 1999/09/10 11:11:51 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
8 * ---------------------------------------------------------------------------*/
16 #include "StgStartup.h"
20 #include "StgMiscClosures.h"
22 #include "Evaluator.h"
26 #include "Profiling.h"
/* Scheduler global state.
 * NOTE(review): this file is a sampled listing -- each line still carries
 * its original line number, and some lines are missing between the
 * visible ones.  The declarations below are kept verbatim.
 */
/* The runnable-thread queue (head/tail) and the blocked-thread queue. */
29 StgTSO *run_queue_hd, *run_queue_tl;
30 StgTSO *blocked_queue_hd, *blocked_queue_tl;
/* Threads currently suspended in a _ccall_GC (stack of TSOs via link). */
31 StgTSO *ccalling_threads;
/* Maximum depth of scheduler re-entry (nested schedule() calls). */
33 #define MAX_SCHEDULE_NESTING 256
35 StgTSO *main_threads[MAX_SCHEDULE_NESTING];
37 static void GetRoots(void);
38 static StgTSO *threadStackOverflow(StgTSO *tso);
40 /* flag set by signal handler to precipitate a context switch */
42 /* if this flag is set as well, give up execution */
43 static nat interrupted;
45 /* Next thread ID to allocate */
46 StgThreadID next_thread_id = 1;
49 * Pointers to the state of the current thread.
50 * Rule of thumb: if CurrentTSO != NULL, then we're running a Haskell
51 * thread. If CurrentTSO == NULL, then we're at the scheduler level.
54 StgRegTable MainRegTable;
57 * The thread state for the main thread.
61 /* The smallest stack size that makes any sense is:
62 * RESERVED_STACK_WORDS (so we can get back from the stack overflow)
63 * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame)
64 * + 1 (the realworld token for an IO thread)
65 * + 1 (the closure to enter)
67 * A thread with this stack will bomb immediately with a stack
68 * overflow, which will increase its stack size.
71 #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2)
73 /* -----------------------------------------------------------------------------
75 * -------------------------------------------------------------------------- */
76 static void unblockThread(StgTSO *tso);
78 /* -----------------------------------------------------------------------------
79 * Comparing Thread ids.
81 * This is used from STG land in the implementation of the
82 * instances of Eq/Ord for ThreadIds.
83 * -------------------------------------------------------------------------- */
85 int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
87 StgThreadID id1 = tso1->id;
88 StgThreadID id2 = tso2->id;
90 if (id1 < id2) return (-1);
91 if (id1 > id2) return 1;
95 /* -----------------------------------------------------------------------------
98 The new thread starts with the given stack size. Before the
99 scheduler can run, however, this thread needs to have a closure
100 (and possibly some arguments) pushed on its stack. See
101 pushClosure() in Schedule.h.
103 createGenThread() and createIOThread() (in SchedAPI.h) are
104 convenient packaged versions of this function.
105 -------------------------------------------------------------------------- */
108 createThread(nat stack_size)
112 /* catch ridiculously small stack sizes */
113 if (stack_size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
114 stack_size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
117 tso = (StgTSO *)allocate(stack_size);
118 TICK_ALLOC_TSO(stack_size-sizeofW(StgTSO),0);
120 initThread(tso, stack_size - TSO_STRUCT_SIZEW);
/* initThread: fill in a freshly allocated TSO -- info table, thread id,
 * stack pointers, a stop frame at the top of the stack -- and link it
 * onto the head of the run queue.
 * NOTE(review): several original lines are missing from this listing
 * (e.g. line 135, the continuation of the max_stack_size expression,
 * and part of the queue-linking code); the visible lines are kept
 * verbatim rather than reconstructed.
 */
125 initThread(StgTSO *tso, nat stack_size)
127 SET_INFO(tso,&TSO_info);
128 tso->whatNext = ThreadEnterGHC;
129 tso->id = next_thread_id++;
130 tso->why_blocked = NotBlocked;
/* splim: leave RESERVED_STACK_WORDS headroom so the overflow check
 * can still push a frame */
132 tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
133 tso->stack_size = stack_size;
/* statement continues on a missing line (original 135) -- incomplete here */
134 tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
136 tso->sp = (P_)&(tso->stack) + stack_size;
139 tso->prof.CCCS = CCS_MAIN;
142 /* put a stop frame on the stack */
143 tso->sp -= sizeofW(StgStopFrame);
144 SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_MAIN);
145 tso->su = (StgUpdateFrame*)tso->sp;
147 IF_DEBUG(scheduler,belch("Initialised thread %ld, stack size = %lx words\n",
148 tso->id, tso->stack_size));
150 /* Put the new thread on the head of the runnable queue.
151 * The caller of createThread better push an appropriate closure
152 * on this thread's stack before the scheduler is invoked.
154 tso->link = run_queue_hd;
/* empty queue: presumably the missing lines also set run_queue_tl -- confirm */
156 if (run_queue_tl == END_TSO_QUEUE) {
160 IF_DEBUG(scheduler,printTSO(tso));
163 /* -----------------------------------------------------------------------------
166 * Initialise the scheduler. This resets all the queues - if the
167 * queues contained any threads, they'll be garbage collected at the
169 * -------------------------------------------------------------------------- */
171 void initScheduler(void)
173 run_queue_hd = END_TSO_QUEUE;
174 run_queue_tl = END_TSO_QUEUE;
175 blocked_queue_hd = END_TSO_QUEUE;
176 blocked_queue_tl = END_TSO_QUEUE;
177 ccalling_threads = END_TSO_QUEUE;
178 next_main_thread = 0;
183 enteredCAFs = END_CAF_LIST;
186 /* -----------------------------------------------------------------------------
187 Main scheduling loop.
189 We use round-robin scheduling, each thread returning to the
190 scheduler loop when one of these conditions is detected:
194 * timer expires (thread yields)
197 -------------------------------------------------------------------------- */
/* schedule: the main scheduling loop.  Round-robin: take a thread from
 * the run queue, run it via StgRun (or the Hugs interpreter), then act
 * on its return code (HeapOverflow, StackOverflow, ThreadYielding,
 * ThreadBlocked, ThreadFinished) until the given main thread completes
 * or deadlocks.
 * NOTE(review): many original lines are missing from this sampled
 * listing (case labels, closing braces, whole branches); the visible
 * lines are kept verbatim, with review comments only.
 */
199 SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)
202 StgThreadReturnCode ret;
206 /* Return value is NULL by default, it is only filled in if the
207 * main thread completes successfully.
209 if (ret_val) { *ret_val = NULL; }
211 /* Save away a pointer to the main thread so that we can keep track
212 * of it should a garbage collection happen. We keep a stack of
213 * main threads in order to support scheduler re-entry. We can't
214 * use the normal TSO linkage for this stack, because the main TSO
215 * may need to be linked onto other queues.
217 main_threads[next_main_thread] = main;
218 MainTSO = &main_threads[next_main_thread];
221 fprintf(stderr, "Scheduler entered: nesting = %d\n",
224 /* Are we being re-entered?
226 if (CurrentTSO != NULL) {
227 /* This happens when a _ccall_gc from Haskell ends up re-entering
230 * Block the current thread (put it on the ccalling_queue) and
231 * continue executing. The calling thread better have stashed
232 * away its state properly and left its stack with a proper stack
235 threadPaused(CurrentTSO);
236 CurrentTSO->link = ccalling_threads;
237 ccalling_threads = CurrentTSO;
238 in_ccall_gc = rtsTrue;
240 fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",
243 in_ccall_gc = rtsFalse;
246 /* Take a thread from the run queue.
249 if (t != END_TSO_QUEUE) {
250 run_queue_hd = t->link;
251 t->link = END_TSO_QUEUE;
/* took the last thread: clear the tail pointer too */
252 if (run_queue_hd == END_TSO_QUEUE) {
253 run_queue_tl = END_TSO_QUEUE;
/* main loop: keep running threads until the queue is exhausted */
257 while (t != END_TSO_QUEUE) {
260 /* If we have more threads on the run queue, set up a context
261 * switch at some point in the future.
263 if (run_queue_hd != END_TSO_QUEUE || blocked_queue_hd != END_TSO_QUEUE) {
268 IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));
270 /* Be friendly to the storage manager: we're about to *run* this
271 * thread, so we better make sure the TSO is mutable.
273 if (t->mut_link == NULL) {
274 recordMutable((StgMutClosure *)t);
277 /* Run the current thread */
278 switch (t->whatNext) {
281 /* thread already killed. Drop it and carry on. */
284 ret = StgRun((StgFunPtr) stg_enterStackTop);
287 ret = StgRun((StgFunPtr) stg_returnToStackTop);
289 case ThreadEnterHugs:
292 IF_DEBUG(scheduler,belch("entering Hugs"));
294 /* CHECK_SENSIBLE_REGS(); */
296 StgClosure* c = (StgClosure *)Sp[0];
/* non-interpreter build: entering a BCO is fatal */
304 barf("Panic: entered a BCO but no bytecode interpreter in this build");
307 barf("schedule: invalid whatNext field");
310 /* We may have garbage collected while running the thread
311 * (eg. something nefarious like _ccall_GC_ performGC), and hence
312 * CurrentTSO may have moved. Update t to reflect this.
317 /* Costs for the scheduler are assigned to CCS_SYSTEM */
/* --- act on the thread's return code --- */
325 IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
327 PUSH_ON_RUN_QUEUE(t);
328 GarbageCollect(GetRoots);
332 IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
335 /* enlarge the stack */
336 StgTSO *new_t = threadStackOverflow(t);
338 /* This TSO has moved, so update any pointers to it from the
339 * main thread stack. It better not be on any other queues...
342 for (i = 0; i < next_main_thread; i++) {
343 if (main_threads[i] == t) {
344 main_threads[i] = new_t;
349 PUSH_ON_RUN_QUEUE(t);
354 if (t->whatNext == ThreadEnterHugs) {
355 /* ToDo: or maybe a timer expired when we were in Hugs?
356 * or maybe someone hit ctrl-C
358 belch("Thread %ld stopped to switch to Hugs\n", t->id);
360 belch("Thread %ld stopped, timer expired\n", t->id);
/* interrupted: drain the run queue and return to the caller */
365 IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
367 while (run_queue_hd != END_TSO_QUEUE) {
368 run_queue_hd = t->link;
371 run_queue_tl = END_TSO_QUEUE;
372 /* ToDo: should I do the same with blocked queues? */
376 /* Put the thread back on the run queue, at the end.
377 * t->link is already set to END_TSO_QUEUE.
379 ASSERT(t->link == END_TSO_QUEUE);
380 if (run_queue_tl == END_TSO_QUEUE) {
381 run_queue_hd = run_queue_tl = t;
383 ASSERT(get_itbl(run_queue_tl)->type == TSO);
384 if (run_queue_hd == run_queue_tl) {
385 run_queue_hd->link = t;
388 run_queue_tl->link = t;
396 fprintf(stderr, "Thread %d stopped, ", t->id);
397 printThreadBlockage(t);
398 fprintf(stderr, "\n"));
400 /* assume the thread has put itself on some blocked queue
406 IF_DEBUG(scheduler,belch("Thread %ld finished\n", t->id));
407 t->whatNext = ThreadComplete;
411 barf("schedule: invalid thread return code");
414 /* check for signals each time around the scheduler */
416 if (signals_pending()) {
417 start_signal_handlers();
420 /* If our main thread has finished or been killed, return.
421 * If we were re-entered as a result of a _ccall_gc, then
422 * pop the blocked thread off the ccalling_threads stack back
425 if ((*MainTSO)->whatNext == ThreadComplete
426 || (*MainTSO)->whatNext == ThreadKilled) {
/* restore the thread that was suspended by the _ccall_gc re-entry */
429 CurrentTSO = ccalling_threads;
430 ccalling_threads = ccalling_threads->link;
431 /* remember to stub the link field of CurrentTSO */
432 CurrentTSO->link = END_TSO_QUEUE;
434 if ((*MainTSO)->whatNext == ThreadComplete) {
435 /* we finished successfully, fill in the return value */
436 if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };
444 /* Checked whether any waiting threads need to be woken up.
445 * If the run queue is empty, we can wait indefinitely for
446 * something to happen.
448 if (blocked_queue_hd != END_TSO_QUEUE) {
449 awaitEvent(run_queue_hd == END_TSO_QUEUE);
/* try the run queue again after awaitEvent may have woken threads */
453 if (t != END_TSO_QUEUE) {
454 run_queue_hd = t->link;
455 t->link = END_TSO_QUEUE;
456 if (run_queue_hd == END_TSO_QUEUE) {
457 run_queue_tl = END_TSO_QUEUE;
462 /* If we got to here, then we ran out of threads to run, but the
463 * main thread hasn't finished yet. It must be blocked on an MVar
464 * or a black hole somewhere, so we return deadlock.
470 Debugging: why is a thread blocked
471 -------------------------------------------------------------------------- */
474 void printThreadBlockage(StgTSO *tso)
476 switch (tso->why_blocked) {
478 fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd);
481 fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd);
484 fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay);
487 fprintf(stderr,"blocked on an MVar");
489 case BlockedOnBlackHole:
490 fprintf(stderr,"blocked on a black hole");
493 fprintf(stderr,"not blocked");
499 /* -----------------------------------------------------------------------------
500 Where are the roots that we know about?
502 - all the threads on the runnable queue
503 - all the threads on the blocked queue
504 - all the thread currently executing a _ccall_GC
505 - all the "main threads"
507 -------------------------------------------------------------------------- */
509 static void GetRoots(void)
513 run_queue_hd = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
514 run_queue_tl = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
516 blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
517 blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
519 ccalling_threads = (StgTSO *)MarkRoot((StgClosure *)ccalling_threads);
521 for (i = 0; i < next_main_thread; i++) {
522 main_threads[i] = (StgTSO *)MarkRoot((StgClosure *)main_threads[i]);
526 /* -----------------------------------------------------------------------------
529 This is the interface to the garbage collector from Haskell land.
530 We provide this so that external C code can allocate and garbage
531 collect when called from Haskell via _ccall_GC.
533 It might be useful to provide an interface whereby the programmer
534 can specify more roots (ToDo).
535 -------------------------------------------------------------------------- */
/* GC interface for Haskell land (_ccall_GC): performGC collects with
 * only the scheduler's roots; performGCWithRoots lets the caller add
 * extra roots via a callback, threaded through the `extra_roots`
 * global and the AllRoots wrapper.
 * NOTE(review): the function headers for performGC and AllRoots were
 * dropped by the sampled listing; only their bodies are visible below.
 */
537 void (*extra_roots)(void);
/* body of performGC (header missing): */
542 GarbageCollect(GetRoots);
/* body of AllRoots (header missing): scheduler roots plus user roots */
548 GetRoots();			/* the scheduler's roots */
549 extra_roots();		/* the user's roots */
553 performGCWithRoots(void (*get_roots)(void))
555 extra_roots = get_roots;
557 GarbageCollect(AllRoots);
560 /* -----------------------------------------------------------------------------
563 If the thread has reached its maximum stack size,
564 then bomb out. Otherwise relocate the TSO into a larger chunk of
565 memory and adjust its stack size appropriately.
566 -------------------------------------------------------------------------- */
/* threadStackOverflow: grow a thread's stack.  If the thread is already
 * at its maximum stack size, raise the StackOverflow exception in it
 * instead; otherwise allocate a larger TSO, copy the old TSO header and
 * stack across, relocate the stack/update-frame pointers, and mark the
 * old TSO as dead.
 * NOTE(review): several lines are missing from this sampled listing
 * (local declarations for `dest`/`new_sp`, returns, closing braces);
 * the visible lines are kept verbatim.
 */
569 threadStackOverflow(StgTSO *tso)
571 nat new_stack_size, new_tso_size, diff, stack_words;
/* --- at the hard limit: don't grow, deliver StackOverflow --- */
575 if (tso->stack_size >= tso->max_stack_size) {
577 /* If we're debugging, just print out the top of the stack */
578 printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
582 fprintf(stderr, "fatal: stack overflow in Hugs; aborting\n" );
585 /* Send this thread the StackOverflow exception */
586 raiseAsync(tso, (StgClosure *)&stackOverflow_closure);
591 /* Try to double the current stack size. If that takes us over the
592 * maximum stack size for this thread, then use the maximum instead.
593 * Finally round up so the TSO ends up as a whole number of blocks.
595 new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
596 new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
597 TSO_STRUCT_SIZE)/sizeof(W_);
598 new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
599 new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
601 IF_DEBUG(scheduler, fprintf(stderr,"increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
603 dest = (StgTSO *)allocate(new_tso_size);
604 TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
606 /* copy the TSO block and the old stack into the new area */
607 memcpy(dest,tso,TSO_STRUCT_SIZE);
608 stack_words = tso->stack + tso->stack_size - tso->sp;
/* the live stack is copied to the *top* of the new, larger area */
609 new_sp = (P_)dest + new_tso_size - stack_words;
610 memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
612 /* relocate the stack pointers... */
613 diff = (P_)new_sp - (P_)tso->sp; /* In *words* */
614 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
616 dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso);
617 dest->stack_size = new_stack_size;
619 /* and relocate the update frame list */
620 relocate_TSO(tso, dest);
622 /* Mark the old one as dead so we don't try to scavenge it during
623 * garbage collection (the TSO will likely be on a mutables list in
624 * some generation, but it'll get collected soon enough). It's
625 * important to set the sp and su values to just beyond the end of
626 * the stack, so we don't attempt to scavenge any part of the dead
629 tso->whatNext = ThreadKilled;
630 tso->sp = (P_)&(tso->stack[tso->stack_size]);
631 tso->su = (StgUpdateFrame *)tso->sp;
632 tso->why_blocked = NotBlocked;
/* new TSO not yet on a mutables list */
633 dest->mut_link = NULL;
635 IF_DEBUG(sanity,checkTSO(tso));
637 IF_DEBUG(scheduler,printTSO(dest));
639 if (tso == MainTSO) { /* hack */
645 /* -----------------------------------------------------------------------------
646 Wake up a queue that was blocked on some resource.
647 -------------------------------------------------------------------------- */
649 StgTSO *unblockOne(StgTSO *tso)
653 ASSERT(get_itbl(tso)->type == TSO);
654 ASSERT(tso->why_blocked != NotBlocked);
655 tso->why_blocked = NotBlocked;
657 tso->link = END_TSO_QUEUE;
658 PUSH_ON_RUN_QUEUE(tso);
659 IF_DEBUG(scheduler,belch("Waking up thread %ld", tso->id));
663 void awakenBlockedQueue(StgTSO *tso)
665 while (tso != END_TSO_QUEUE) {
666 tso = unblockOne(tso);
670 /* -----------------------------------------------------------------------------
672 - usually called inside a signal handler so it mustn't do anything fancy.
673 -------------------------------------------------------------------------- */
676 interruptStgRts(void)
682 /* -----------------------------------------------------------------------------
685 This is for use when we raise an exception in another thread, which
687 -------------------------------------------------------------------------- */
/* unblockThread: remove a thread from whatever blocking queue it is on
 * (an MVar's waiter list or a black hole's blocking queue) and put it
 * on the run queue.  Used by raiseAsync before delivering an exception.
 * NOTE(review): loop bodies, `goto done`-style exits and the `done:`
 * label are missing from this sampled listing; visible lines kept
 * verbatim.
 */
690 unblockThread(StgTSO *tso)
694 switch (tso->why_blocked) {
697 return; /* not blocked */
/* --- blocked on an MVar: splice tso out of mvar->head..tail --- */
700 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
702 StgTSO *last_tso = END_TSO_QUEUE;
703 StgMVar *mvar = (StgMVar *)(tso->block_info.closure);
706 for (t = mvar->head; t != END_TSO_QUEUE;
707 last = &t->link, last_tso = t, t = t->link) {
/* if tso was the tail, the tail becomes its predecessor */
710 if (mvar->tail == tso) {
711 mvar->tail = last_tso;
716 barf("unblockThread (MVAR): TSO not found");
719 case BlockedOnBlackHole:
/* --- blocked on a black hole: splice tso out of bq->blocking_queue --- */
720 ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
722 StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
724 last = &bq->blocking_queue;
725 for (t = bq->blocking_queue; t != END_TSO_QUEUE;
726 last = &t->link, t = t->link) {
732 barf("unblockThread (BLACKHOLE): TSO not found");
/* I/O- and delay-blocked threads cannot be unblocked this way */
739 barf("unblockThread {read,write,delay}");
742 barf("unblockThread");
/* common exit: stub the thread's blocking state and make it runnable */
746 tso->link = END_TSO_QUEUE;
747 tso->why_blocked = NotBlocked;
748 tso->block_info.closure = NULL;
749 PUSH_ON_RUN_QUEUE(tso);
752 /* -----------------------------------------------------------------------------
755 * The following function implements the magic for raising an
756 * asynchronous exception in an existing thread.
758 * We first remove the thread from any queue on which it might be
759 * blocked. The possible blockages are MVARs and BLACKHOLE_BQs.
761 * We strip the stack down to the innermost CATCH_FRAME, building
762 * thunks in the heap for all the active computations, so they can
763 * be restarted if necessary. When we reach a CATCH_FRAME, we build
764 * an application of the handler to the exception, and push it on
765 * the top of the stack.
767 * How exactly do we save all the active computations? We create an
768 * AP_UPD for every UpdateFrame on the stack. Entering one of these
769 * AP_UPDs pushes everything from the corresponding update frame
770 * upwards onto the stack. (Actually, it pushes everything up to the
771 * next update frame plus a pointer to the next AP_UPD object.
772 * Entering the next AP_UPD object pushes more onto the stack until we
773 * reach the last AP_UPD object - at which point the stack should look
774 * exactly as it did when we killed the TSO and we can continue
775 * execution by entering the closure on top of the stack.
777 * We can also kill a thread entirely - this happens if either (a) the
778 * exception passed to raiseAsync is NULL, or (b) there's no
779 * CATCH_FRAME on the stack. In either case, we strip the entire
780 * stack and replace the thread with a zombie.
782 * -------------------------------------------------------------------------- */
785 deleteThread(StgTSO *tso)
787 raiseAsync(tso,NULL);
791 raiseAsync(StgTSO *tso, StgClosure *exception)
793 StgUpdateFrame* su = tso->su;
796 /* Thread already dead? */
797 if (tso->whatNext == ThreadComplete || tso->whatNext == ThreadKilled) {
801 IF_DEBUG(scheduler, belch("Raising exception in thread %ld.", tso->id));
803 /* Remove it from any blocking queues */
806 /* The stack freezing code assumes there's a closure pointer on
807 * the top of the stack. This isn't always the case with compiled
808 * code, so we have to push a dummy closure on the top which just
809 * returns to the next return address on the stack.
811 if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) {
812 *(--sp) = (W_)&dummy_ret_closure;
816 int words = ((P_)su - (P_)sp) - 1;
820 /* If we find a CATCH_FRAME, and we've got an exception to raise,
821 * then build PAP(handler,exception), and leave it on top of
822 * the stack ready to enter.
824 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
825 StgCatchFrame *cf = (StgCatchFrame *)su;
826 /* we've got an exception to raise, so let's pass it to the
827 * handler in this frame.
829 ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 1);
830 TICK_ALLOC_UPD_PAP(2,0);
831 SET_HDR(ap,&PAP_info,cf->header.prof.ccs);
834 ap->fun = cf->handler;
835 ap->payload[0] = (P_)exception;
837 /* sp currently points to the word above the CATCH_FRAME on the
838 * stack. Replace the CATCH_FRAME with a pointer to the new handler
841 sp += sizeofW(StgCatchFrame);
845 tso->whatNext = ThreadEnterGHC;
849 /* First build an AP_UPD consisting of the stack chunk above the
850 * current update frame, with the top word on the stack as the
853 ap = (StgAP_UPD *)allocate(AP_sizeW(words));
858 ap->fun = (StgClosure *)sp[0];
860 for(i=0; i < (nat)words; ++i) {
861 ap->payload[i] = (P_)*sp++;
864 switch (get_itbl(su)->type) {
868 SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */);
869 TICK_ALLOC_UP_THK(words+1,0);
872 fprintf(stderr, "Updating ");
873 printPtr((P_)su->updatee);
874 fprintf(stderr, " with ");
875 printObj((StgClosure *)ap);
878 /* Replace the updatee with an indirection - happily
879 * this will also wake up any threads currently
880 * waiting on the result.
882 UPD_IND(su->updatee,ap); /* revert the black hole */
884 sp += sizeofW(StgUpdateFrame) -1;
885 sp[0] = (W_)ap; /* push onto stack */
891 StgCatchFrame *cf = (StgCatchFrame *)su;
894 /* We want a PAP, not an AP_UPD. Fortunately, the
897 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
898 TICK_ALLOC_UPD_PAP(words+1,0);
900 /* now build o = FUN(catch,ap,handler) */
901 o = (StgClosure *)allocate(sizeofW(StgClosure)+2);
903 SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */);
904 o->payload[0] = (StgClosure *)ap;
905 o->payload[1] = cf->handler;
908 fprintf(stderr, "Built ");
909 printObj((StgClosure *)o);
912 /* pop the old handler and put o on the stack */
914 sp += sizeofW(StgCatchFrame) - 1;
921 StgSeqFrame *sf = (StgSeqFrame *)su;
924 SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */);
925 TICK_ALLOC_UPD_PAP(words+1,0);
927 /* now build o = FUN(seq,ap) */
928 o = (StgClosure *)allocate(sizeofW(StgClosure)+1);
929 TICK_ALLOC_SE_THK(1,0);
930 SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */);
931 payloadCPtr(o,0) = (StgClosure *)ap;
934 fprintf(stderr, "Built ");
935 printObj((StgClosure *)o);
938 /* pop the old handler and put o on the stack */
940 sp += sizeofW(StgSeqFrame) - 1;
946 /* We've stripped the entire stack, the thread is now dead. */
947 sp += sizeofW(StgStopFrame) - 1;
948 sp[0] = (W_)exception; /* save the exception */
949 tso->whatNext = ThreadKilled;
950 tso->su = (StgUpdateFrame *)(sp+1);