X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FSchedule.c;h=63abd17c2e43242fff9a5817791c675df8547432;hb=43b212f520c00ee42d2d711f26183cdb14096158;hp=50009f272120814e9783ff2bbcc34d9f54c6a1c3;hpb=d50874325473c23699a7d77222b1902f28c942af;p=ghc-hetmet.git diff --git a/ghc/rts/Schedule.c b/ghc/rts/Schedule.c index 50009f2..63abd17 100644 --- a/ghc/rts/Schedule.c +++ b/ghc/rts/Schedule.c @@ -1,5 +1,5 @@ /* --------------------------------------------------------------------------- - * $Id: Schedule.c,v 1.66 2000/04/11 16:36:53 sewardj Exp $ + * $Id: Schedule.c,v 1.87 2001/01/24 15:46:19 simonmar Exp $ * * (c) The GHC Team, 1998-2000 * @@ -62,7 +62,7 @@ #include "Schedule.h" #include "StgMiscClosures.h" #include "Storage.h" -#include "Evaluator.h" +#include "Interpreter.h" #include "Exception.h" #include "Printer.h" #include "Main.h" @@ -144,6 +144,7 @@ StgTSO *ccalling_threadss[MAX_PROC]; StgTSO *run_queue_hd, *run_queue_tl; StgTSO *blocked_queue_hd, *blocked_queue_tl; +StgTSO *sleeping_queue; /* perhaps replace with a hash table? */ #endif @@ -213,6 +214,12 @@ Capability MainRegTable; /* for non-SMP, we have one global capability */ StgTSO *CurrentTSO; #endif +/* This is used in `TSO.h' and gcc 2.96 insists that this variable actually + * exists - earlier gccs apparently didn't. + * -= chak + */ +StgTSO dummy_tso; + rtsBool ready_to_gc; /* All our current task ids, saved in case we need to kill them later. @@ -232,6 +239,8 @@ static StgTSO * createThread_ ( nat size, rtsBool have_lock, StgInt pri ); static StgTSO * createThread_ ( nat size, rtsBool have_lock ); #endif +static void detectBlackHoles ( void ); + #ifdef DEBUG static void sched_belch(char *s, ...); #endif @@ -258,7 +267,7 @@ rtsTime TimeOfLastYield; char *whatNext_strs[] = { "ThreadEnterGHC", "ThreadRunGHC", - "ThreadEnterHugs", + "ThreadEnterInterp", "ThreadKilled", "ThreadComplete" }; @@ -377,14 +386,7 @@ schedule( void ) */ if (interrupted) { IF_DEBUG(scheduler, sched_belch("interrupted")); - for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) { - deleteThread(t); - } - for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) { - deleteThread(t); - } - run_queue_hd = run_queue_tl = END_TSO_QUEUE; - blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE; + deleteAllThreads(); interrupted = rtsFalse; was_interrupted = rtsTrue; } @@ -504,7 +506,7 @@ schedule( void ) * ToDo: what if another client comes along & requests another * main thread? */ - if (blocked_queue_hd != END_TSO_QUEUE) { + if (blocked_queue_hd != END_TSO_QUEUE || sleeping_queue != END_TSO_QUEUE) { awaitEvent( (run_queue_hd == END_TSO_QUEUE) #ifdef SMP @@ -512,40 +514,59 @@ schedule( void ) #endif ); } - + /* we can be interrupted while waiting for I/O... */ + if (interrupted) continue; + /* check for signals each time around the scheduler */ -#ifndef __MINGW32__ +#ifndef mingw32_TARGET_OS if (signals_pending()) { start_signal_handlers(); } #endif - /* Detect deadlock: when we have no threads to run, there are - * no threads waiting on I/O or sleeping, and all the other - * tasks are waiting for work, we must have a deadlock. Inform - * all the main threads. + /* + * Detect deadlock: when we have no threads to run, there are no + * threads waiting on I/O or sleeping, and all the other tasks are + * waiting for work, we must have a deadlock of some description. + * + * We first try to find threads blocked on themselves (ie. black + * holes), and generate NonTermination exceptions where necessary. 
+ * + * If no threads are black holed, we have a deadlock situation, so + * inform all the main threads. */ #ifdef SMP if (blocked_queue_hd == END_TSO_QUEUE && run_queue_hd == END_TSO_QUEUE - && (n_free_capabilities == RtsFlags.ParFlags.nNodes) - ) { - StgMainThread *m; - for (m = main_threads; m != NULL; m = m->link) { - m->ret = NULL; - m->stat = Deadlock; - pthread_cond_broadcast(&m->wakeup); - } - main_threads = NULL; + && sleeping_queue == END_TSO_QUEUE + && (n_free_capabilities == RtsFlags.ParFlags.nNodes)) + { + IF_DEBUG(scheduler, sched_belch("deadlocked, checking for black holes...")); + detectBlackHoles(); + if (run_queue_hd == END_TSO_QUEUE) { + StgMainThread *m; + for (m = main_threads; m != NULL; m = m->link) { + m->ret = NULL; + m->stat = Deadlock; + pthread_cond_broadcast(&m->wakeup); + } + main_threads = NULL; + } } #else /* ! SMP */ if (blocked_queue_hd == END_TSO_QUEUE - && run_queue_hd == END_TSO_QUEUE) { - StgMainThread *m = main_threads; - m->ret = NULL; - m->stat = Deadlock; - main_threads = m->link; - return; + && run_queue_hd == END_TSO_QUEUE + && sleeping_queue == END_TSO_QUEUE) + { + IF_DEBUG(scheduler, sched_belch("deadlocked, checking for black holes...")); + detectBlackHoles(); + if (run_queue_hd == END_TSO_QUEUE) { + StgMainThread *m = main_threads; + m->ret = NULL; + m->stat = Deadlock; + main_threads = m->link; + return; + } } #endif @@ -833,12 +854,17 @@ schedule( void ) cap->rCurrentTSO = t; - /* set the context_switch flag + /* context switches are now initiated by the timer signal, unless + * the user specified "context switch as often as possible", with + * +RTS -C0 */ - if (run_queue_hd == END_TSO_QUEUE) - context_switch = 0; + if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0 + && (run_queue_hd != END_TSO_QUEUE + || blocked_queue_hd != END_TSO_QUEUE + || sleeping_queue != END_TSO_QUEUE)) + context_switch = 1; else - context_switch = 1; + context_switch = 0; RELEASE_LOCK(&sched_mutex); @@ -860,14 +886,11 @@ schedule( void ) case ThreadRunGHC: ret = StgRun((StgFunPtr) stg_returnToStackTop, cap); break; - case ThreadEnterHugs: -#ifdef INTERPRETER + case ThreadEnterInterp: +#ifdef GHCI { - StgClosure* c; - IF_DEBUG(scheduler,sched_belch("entering Hugs")); - c = (StgClosure *)(cap->rCurrentTSO->sp[0]); - cap->rCurrentTSO->sp += 1; - ret = enter(cap,c); + IF_DEBUG(scheduler,sched_belch("entering interpreter")); + ret = interpretBCO(cap); break; } #else @@ -960,7 +983,7 @@ schedule( void ) * GC is finished. */ IF_DEBUG(scheduler, - if (t->what_next == ThreadEnterHugs) { + if (t->what_next == ThreadEnterInterp) { /* ToDo: or maybe a timer expired when we were in Hugs? * or maybe someone hit ctrl-C */ @@ -971,7 +994,9 @@ schedule( void ) t->id, t, whatNext_strs[t->what_next]); } ); + threadPaused(t); + IF_DEBUG(sanity, //belch("&& Doing sanity check on yielding TSO %ld.", t->id); checkTSO(t)); @@ -1054,8 +1079,10 @@ schedule( void ) * more main threads, we probably need to stop all the tasks until * we get a new one. */ + /* We also end up here if the thread kills itself with an + * uncaught exception, see Exception.hc. 
+ */ IF_DEBUG(scheduler,belch("--++ thread %d (%p) finished", t->id, t)); - t->what_next = ThreadComplete; #if defined(GRAN) endThread(t, CurrentProc); // clean-up the thread #elif defined(PAR) @@ -1066,7 +1093,7 @@ schedule( void ) break; default: - barf("doneThread: invalid thread return code"); + barf("schedule: invalid thread return code %d", (int)ret); } #ifdef SMP @@ -1125,19 +1152,29 @@ schedule( void ) } /* end of while(1) */ } -/* A hack for Hugs concurrency support. Needs sanitisation (?) */ +/* --------------------------------------------------------------------------- + * deleteAllThreads(): kill all the live threads. + * + * This is used when we catch a user interrupt (^C), before performing + * any necessary cleanups and running finalizers. + * ------------------------------------------------------------------------- */ + void deleteAllThreads ( void ) { StgTSO* t; - IF_DEBUG(scheduler,sched_belch("deleteAllThreads()")); + IF_DEBUG(scheduler,sched_belch("deleting all threads")); for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) { - deleteThread(t); + deleteThread(t); } for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) { - deleteThread(t); + deleteThread(t); + } + for (t = sleeping_queue; t != END_TSO_QUEUE; t = t->link) { + deleteThread(t); } run_queue_hd = run_queue_tl = END_TSO_QUEUE; blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE; + sleeping_queue = END_TSO_QUEUE; } /* startThread and insertThread are now in GranSim.c -- HWL */ @@ -1168,7 +1205,7 @@ suspendThread( Capability *cap ) ACQUIRE_LOCK(&sched_mutex); IF_DEBUG(scheduler, - sched_belch("thread %d did a _ccall_gc\n", cap->rCurrentTSO->id)); + sched_belch("thread %d did a _ccall_gc", cap->rCurrentTSO->id)); threadPaused(cap->rCurrentTSO); cap->rCurrentTSO->link = suspended_ccalling_threads; @@ -1207,6 +1244,7 @@ resumeThread( StgInt tok ) if (tso == END_TSO_QUEUE) { barf("resumeThread: thread not found"); } + tso->link = END_TSO_QUEUE; #ifdef SMP while (free_capabilities == NULL) { @@ -1318,7 +1356,7 @@ createThread_(nat size, rtsBool have_lock) tso = (StgTSO *)allocate(size); TICK_ALLOC_TSO(size-TSO_STRUCT_SIZEW, 0); - SET_HDR(tso, &TSO_info, CCS_SYSTEM); + SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM); #if defined(GRAN) SET_GRAN_HDR(tso, ThisPE); #endif @@ -1335,7 +1373,6 @@ createThread_(nat size, rtsBool have_lock) tso->why_blocked = NotBlocked; tso->blocked_exceptions = NULL; - tso->splim = (P_)&(tso->stack) + RESERVED_STACK_WORDS; tso->stack_size = stack_size; tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize) - TSO_STRUCT_SIZEW; @@ -1556,12 +1593,14 @@ initScheduler(void) blocked_queue_hds[i] = END_TSO_QUEUE; blocked_queue_tls[i] = END_TSO_QUEUE; ccalling_threadss[i] = END_TSO_QUEUE; + sleeping_queue = END_TSO_QUEUE; } #else run_queue_hd = END_TSO_QUEUE; run_queue_tl = END_TSO_QUEUE; blocked_queue_hd = END_TSO_QUEUE; blocked_queue_tl = END_TSO_QUEUE; + sleeping_queue = END_TSO_QUEUE; #endif suspended_ccalling_threads = END_TSO_QUEUE; @@ -1572,7 +1611,13 @@ initScheduler(void) context_switch = 0; interrupted = 0; - enteredCAFs = END_CAF_LIST; + RtsFlags.ConcFlags.ctxtSwitchTicks = + RtsFlags.ConcFlags.ctxtSwitchTime / TICK_MILLISECS; + +#ifdef INTERPRETER + ecafList = END_ECAF_LIST; + clearECafTable(); +#endif /* Install the SIGHUP handler */ #ifdef SMP @@ -1702,6 +1747,39 @@ exitScheduler( void ) * will be in the main_thread struct. 
* -------------------------------------------------------------------------- */ +int +howManyThreadsAvail ( void ) +{ + int i = 0; + StgTSO* q; + for (q = run_queue_hd; q != END_TSO_QUEUE; q = q->link) + i++; + for (q = blocked_queue_hd; q != END_TSO_QUEUE; q = q->link) + i++; + for (q = sleeping_queue; q != END_TSO_QUEUE; q = q->link) + i++; + return i; +} + +void +finishAllThreads ( void ) +{ + do { + while (run_queue_hd != END_TSO_QUEUE) { + waitThread ( run_queue_hd, NULL ); + } + while (blocked_queue_hd != END_TSO_QUEUE) { + waitThread ( blocked_queue_hd, NULL ); + } + while (sleeping_queue != END_TSO_QUEUE) { + waitThread ( blocked_queue_hd, NULL ); + } + } while + (blocked_queue_hd != END_TSO_QUEUE || + run_queue_hd != END_TSO_QUEUE || + sleeping_queue != END_TSO_QUEUE); +} + SchedulerStatus waitThread(StgTSO *tso, /*out*/StgClosure **ret) { @@ -1865,6 +1943,7 @@ take_off_run_queue(StgTSO *tso) { - all the threads on the runnable queue - all the threads on the blocked queue + - all the threads on the sleeping queue - all the thread currently executing a _ccall_GC - all the "main threads" @@ -1911,6 +1990,10 @@ static void GetRoots(void) blocked_queue_hd = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd); blocked_queue_tl = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl); } + + if (sleeping_queue != END_TSO_QUEUE) { + sleeping_queue = (StgTSO *)MarkRoot((StgClosure *)sleeping_queue); + } #endif for (m = main_threads; m != NULL; m = m->link) { @@ -1923,6 +2006,10 @@ static void GetRoots(void) #if defined(SMP) || defined(PAR) || defined(GRAN) markSparkQueue(); #endif + +#if defined(GHCI) + markCafs(); +#endif } /* ----------------------------------------------------------------------------- @@ -2028,7 +2115,6 @@ threadStackOverflow(StgTSO *tso) diff = (P_)new_sp - (P_)tso->sp; /* In *words* */ dest->su = (StgUpdateFrame *) ((P_)dest->su + diff); dest->sp = new_sp; - dest->splim = (P_)dest->splim + (nat)((P_)dest - (P_)tso); dest->stack_size = new_stack_size; /* and relocate the update frame list */ @@ -2070,8 +2156,6 @@ threadStackOverflow(StgTSO *tso) Wake up a queue that was blocked on some resource. ------------------------------------------------------------------------ */ -/* ToDo: check push_on_run_queue vs. 
PUSH_ON_RUN_QUEUE */ - #if defined(GRAN) static inline void unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node ) @@ -2175,9 +2259,9 @@ unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node) see comments on RBHSave closures above */ case CONSTR: /* check that the closure is an RBHSave closure */ - ASSERT(get_itbl((StgClosure *)bqe) == &RBH_Save_0_info || - get_itbl((StgClosure *)bqe) == &RBH_Save_1_info || - get_itbl((StgClosure *)bqe) == &RBH_Save_2_info); + ASSERT(get_itbl((StgClosure *)bqe) == &stg_RBH_Save_0_info || + get_itbl((StgClosure *)bqe) == &stg_RBH_Save_1_info || + get_itbl((StgClosure *)bqe) == &stg_RBH_Save_2_info); break; default: @@ -2442,7 +2526,6 @@ unblockThread(StgTSO *tso) barf("unblockThread (Exception): TSO not found"); } - case BlockedOnDelay: case BlockedOnRead: case BlockedOnWrite: { @@ -2467,6 +2550,23 @@ unblockThread(StgTSO *tso) barf("unblockThread (I/O): TSO not found"); } + case BlockedOnDelay: + { + StgBlockingQueueElement *prev = NULL; + for (t = (StgBlockingQueueElement *)sleeping_queue; t != END_BQ_QUEUE; + prev = t, t = t->link) { + if (t == (StgBlockingQueueElement *)tso) { + if (prev == NULL) { + sleeping_queue = (StgTSO *)t->link; + } else { + prev->link = t->link; + } + goto done; + } + } + barf("unblockThread (I/O): TSO not found"); + } + default: barf("unblockThread"); } @@ -2545,7 +2645,6 @@ unblockThread(StgTSO *tso) barf("unblockThread (Exception): TSO not found"); } - case BlockedOnDelay: case BlockedOnRead: case BlockedOnWrite: { @@ -2570,6 +2669,23 @@ unblockThread(StgTSO *tso) barf("unblockThread (I/O): TSO not found"); } + case BlockedOnDelay: + { + StgTSO *prev = NULL; + for (t = sleeping_queue; t != END_TSO_QUEUE; + prev = t, t = t->link) { + if (t == tso) { + if (prev == NULL) { + sleeping_queue = t->link; + } else { + prev->link = t->link; + } + goto done; + } + } + barf("unblockThread (I/O): TSO not found"); + } + default: barf("unblockThread"); } @@ -2643,7 +2759,7 @@ raiseAsync(StgTSO *tso, StgClosure *exception) * returns to the next return address on the stack. */ if ( LOOKS_LIKE_GHC_INFO((void*)*sp) ) { - *(--sp) = (W_)&dummy_ret_closure; + *(--sp) = (W_)&stg_dummy_ret_closure; } while (1) { @@ -2662,11 +2778,11 @@ raiseAsync(StgTSO *tso, StgClosure *exception) */ ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 2); TICK_ALLOC_UPD_PAP(3,0); - SET_HDR(ap,&PAP_info,cf->header.prof.ccs); + SET_HDR(ap,&stg_PAP_info,cf->header.prof.ccs); ap->n_args = 2; ap->fun = cf->handler; /* :: Exception -> IO a */ - ap->payload[0] = (P_)exception; + ap->payload[0] = exception; ap->payload[1] = ARG_TAG(0); /* realworld token */ /* throw away the stack from Sp up to and including the @@ -2683,7 +2799,7 @@ raiseAsync(StgTSO *tso, StgClosure *exception) * unblockAsyncExceptions_ret stack frame. */ if (!cf->exceptions_blocked) { - *(sp--) = (W_)&unblockAsyncExceptionszh_ret_info; + *(sp--) = (W_)&stg_unblockAsyncExceptionszh_ret_info; } /* Ensure that async exceptions are blocked when running the handler. 
@@ -2698,6 +2814,7 @@ raiseAsync(StgTSO *tso, StgClosure *exception) sp[0] = (W_)ap; tso->sp = sp; tso->what_next = ThreadEnterGHC; + IF_DEBUG(sanity, checkTSO(tso)); return; } @@ -2713,14 +2830,14 @@ raiseAsync(StgTSO *tso, StgClosure *exception) ap->fun = (StgClosure *)sp[0]; sp++; for(i=0; i < (nat)words; ++i) { - ap->payload[i] = (P_)*sp++; + ap->payload[i] = (StgClosure *)*sp++; } switch (get_itbl(su)->type) { case UPDATE_FRAME: { - SET_HDR(ap,&AP_UPD_info,su->header.prof.ccs /* ToDo */); + SET_HDR(ap,&stg_AP_UPD_info,su->header.prof.ccs /* ToDo */); TICK_ALLOC_UP_THK(words+1,0); IF_DEBUG(scheduler, @@ -2749,13 +2866,13 @@ raiseAsync(StgTSO *tso, StgClosure *exception) /* We want a PAP, not an AP_UPD. Fortunately, the * layout's the same. */ - SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */); + SET_HDR(ap,&stg_PAP_info,su->header.prof.ccs /* ToDo */); TICK_ALLOC_UPD_PAP(words+1,0); /* now build o = FUN(catch,ap,handler) */ o = (StgClosure *)allocate(sizeofW(StgClosure)+2); TICK_ALLOC_FUN(2,0); - SET_HDR(o,&catch_info,su->header.prof.ccs /* ToDo */); + SET_HDR(o,&stg_catch_info,su->header.prof.ccs /* ToDo */); o->payload[0] = (StgClosure *)ap; o->payload[1] = cf->handler; @@ -2776,13 +2893,13 @@ raiseAsync(StgTSO *tso, StgClosure *exception) StgSeqFrame *sf = (StgSeqFrame *)su; StgClosure* o; - SET_HDR(ap,&PAP_info,su->header.prof.ccs /* ToDo */); + SET_HDR(ap,&stg_PAP_info,su->header.prof.ccs /* ToDo */); TICK_ALLOC_UPD_PAP(words+1,0); /* now build o = FUN(seq,ap) */ o = (StgClosure *)allocate(sizeofW(StgClosure)+1); TICK_ALLOC_SE_THK(1,0); - SET_HDR(o,&seq_info,su->header.prof.ccs /* ToDo */); + SET_HDR(o,&stg_seq_info,su->header.prof.ccs /* ToDo */); o->payload[0] = (StgClosure *)ap; IF_DEBUG(scheduler, @@ -2805,7 +2922,7 @@ raiseAsync(StgTSO *tso, StgClosure *exception) tso->su = (StgUpdateFrame *)(sp+1); tso->sp = sp; return; - + default: barf("raiseAsync"); } @@ -2852,6 +2969,61 @@ resurrectThreads( StgTSO *threads ) } } +/* ----------------------------------------------------------------------------- + * Blackhole detection: if we reach a deadlock, test whether any + * threads are blocked on themselves. Any threads which are found to + * be self-blocked get sent a NonTermination exception. + * + * This is only done in a deadlock situation in order to avoid + * performance overhead in the normal case. + * -------------------------------------------------------------------------- */ + +static void +detectBlackHoles( void ) +{ + StgTSO *t = all_threads; + StgUpdateFrame *frame; + StgClosure *blocked_on; + + for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) { + + if (t->why_blocked != BlockedOnBlackHole) { + continue; + } + + blocked_on = t->block_info.closure; + + for (frame = t->su; ; frame = frame->link) { + switch (get_itbl(frame)->type) { + + case UPDATE_FRAME: + if (frame->updatee == blocked_on) { + /* We are blocking on one of our own computations, so + * send this thread the NonTermination exception. 
+ */ + IF_DEBUG(scheduler, + sched_belch("thread %d is blocked on itself", t->id)); + raiseAsync(t, (StgClosure *)NonTermination_closure); + goto done; + } + else { + continue; + } + + case CATCH_FRAME: + case SEQ_FRAME: + continue; + + case STOP_FRAME: + break; + } + break; + } + + done: ; + } +} + //@node Debugging Routines, Index, Exception Handling Routines, Main scheduling code //@subsection Debugging Routines @@ -2866,39 +3038,34 @@ printThreadBlockage(StgTSO *tso) { switch (tso->why_blocked) { case BlockedOnRead: - fprintf(stderr,"blocked on read from fd %d", tso->block_info.fd); + fprintf(stderr,"is blocked on read from fd %d", tso->block_info.fd); break; case BlockedOnWrite: - fprintf(stderr,"blocked on write to fd %d", tso->block_info.fd); + fprintf(stderr,"is blocked on write to fd %d", tso->block_info.fd); break; case BlockedOnDelay: -#if defined(HAVE_SETITIMER) || defined(mingw32_TARGET_OS) - fprintf(stderr,"blocked on delay of %d ms", tso->block_info.delay); -#else - fprintf(stderr,"blocked on delay of %d ms", - tso->block_info.target - getourtimeofday()); -#endif + fprintf(stderr,"is blocked until %d", tso->block_info.target); break; case BlockedOnMVar: - fprintf(stderr,"blocked on an MVar"); + fprintf(stderr,"is blocked on an MVar"); break; case BlockedOnException: - fprintf(stderr,"blocked on delivering an exception to thread %d", + fprintf(stderr,"is blocked on delivering an exception to thread %d", tso->block_info.tso->id); break; case BlockedOnBlackHole: - fprintf(stderr,"blocked on a black hole"); + fprintf(stderr,"is blocked on a black hole"); break; case NotBlocked: - fprintf(stderr,"not blocked"); + fprintf(stderr,"is not blocked"); break; #if defined(PAR) case BlockedOnGA: - fprintf(stderr,"blocked on global address; local FM_BQ is %p (%s)", + fprintf(stderr,"is blocked on global address; local FM_BQ is %p (%s)", tso->block_info.closure, info_type(tso->block_info.closure)); break; case BlockedOnGA_NoSend: - fprintf(stderr,"blocked on global address (no send); local FM_BQ is %p (%s)", + fprintf(stderr,"is blocked on global address (no send); local FM_BQ is %p (%s)", tso->block_info.closure, info_type(tso->block_info.closure)); break; #endif @@ -2930,7 +3097,7 @@ printAllThreads(void) sched_belch("all threads:"); for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) { - fprintf(stderr, "\tthread %d is ", t->id); + fprintf(stderr, "\tthread %d ", t->id); printThreadStatus(t); fprintf(stderr,"\n"); } @@ -2986,9 +3153,9 @@ print_bq (StgClosure *node) break; case CONSTR: fprintf(stderr," %s (IP %p),", - (get_itbl(bqe) == &RBH_Save_0_info ? "RBH_Save_0" : - get_itbl(bqe) == &RBH_Save_1_info ? "RBH_Save_1" : - get_itbl(bqe) == &RBH_Save_2_info ? "RBH_Save_2" : + (get_itbl(bqe) == &stg_RBH_Save_0_info ? "RBH_Save_0" : + get_itbl(bqe) == &stg_RBH_Save_1_info ? "RBH_Save_1" : + get_itbl(bqe) == &stg_RBH_Save_2_info ? "RBH_Save_2" : "RBH_Save_?"), get_itbl(bqe)); break; default: @@ -3040,9 +3207,9 @@ print_bq (StgClosure *node) break; case CONSTR: fprintf(stderr," %s (IP %p),", - (get_itbl(bqe) == &RBH_Save_0_info ? "RBH_Save_0" : - get_itbl(bqe) == &RBH_Save_1_info ? "RBH_Save_1" : - get_itbl(bqe) == &RBH_Save_2_info ? "RBH_Save_2" : + (get_itbl(bqe) == &stg_RBH_Save_0_info ? "RBH_Save_0" : + get_itbl(bqe) == &stg_RBH_Save_1_info ? "RBH_Save_1" : + get_itbl(bqe) == &stg_RBH_Save_2_info ? "RBH_Save_2" : "RBH_Save_?"), get_itbl(bqe)); break; default: