X-Git-Url: http://git.megacz.com/?p=ghc-hetmet.git;a=blobdiff_plain;f=rts%2FSchedule.c;h=585ddec0ef852ef4ffb7b72cfde1c32b7355ef5a;hp=46f575eb45a86158fdb44000bf5b69412b700d0a;hb=1525a5819aa3a6eae8d8b05cfe348a2384da0c84;hpb=90d88088ee8b3697ce68f7b1e07506bc4f33687d diff --git a/rts/Schedule.c b/rts/Schedule.c index 46f575e..585ddec 100644 --- a/rts/Schedule.c +++ b/rts/Schedule.c @@ -1,6 +1,6 @@ /* --------------------------------------------------------------------------- * - * (c) The GHC Team, 1998-2005 + * (c) The GHC Team, 1998-2006 * * The scheduler and thread-related functionality * @@ -19,7 +19,6 @@ #include "Schedule.h" #include "StgMiscClosures.h" #include "Interpreter.h" -#include "Exception.h" #include "Printer.h" #include "RtsSignals.h" #include "Sanity.h" @@ -50,6 +49,9 @@ #if defined(mingw32_HOST_OS) #include "win32/IOManager.h" #endif +#include "Trace.h" +#include "RaiseAsync.h" +#include "Threads.h" #ifdef HAVE_SYS_TYPES_H #include @@ -139,23 +141,6 @@ nat recent_activity = ACTIVITY_YES; */ rtsBool sched_state = SCHED_RUNNING; -/* Next thread ID to allocate. - * LOCK: sched_mutex - */ -static StgThreadID next_thread_id = 1; - -/* The smallest stack size that makes any sense is: - * RESERVED_STACK_WORDS (so we can get back from the stack overflow) - * + sizeofW(StgStopFrame) (the stg_stop_thread_info frame) - * + 1 (the closure to enter) - * + 1 (stg_ap_v_ret) - * + 1 (spare slot req'd by stg_ap_v_ret) - * - * A thread with this stack will bomb immediately with a stack - * overflow, which will increase its stack size. - */ -#define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 3) - #if defined(GRAN) StgTSO *CurrentTSO; #endif @@ -187,6 +172,10 @@ rtsTime TimeOfLastYield; rtsBool emitSchedule = rtsTrue; #endif +#if !defined(mingw32_HOST_OS) +#define FORKPROCESS_PRIMOP_SUPPORTED +#endif + /* ----------------------------------------------------------------------------- * static function prototypes * -------------------------------------------------------------------------- */ @@ -232,22 +221,16 @@ static Capability *scheduleDoGC(Capability *cap, Task *task, rtsBool force_major, void (*get_roots)(evac_fn)); -static void unblockThread(Capability *cap, StgTSO *tso); static rtsBool checkBlackHoles(Capability *cap); static void AllRoots(evac_fn evac); static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso); -static void raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception, - rtsBool stop_at_atomically, StgPtr stop_here); - static void deleteThread (Capability *cap, StgTSO *tso); static void deleteAllThreads (Capability *cap); -#ifdef DEBUG -static void printThreadBlockage(StgTSO *tso); -static void printThreadStatus(StgTSO *tso); -void printThreadQueue(StgTSO *tso); +#ifdef FORKPROCESS_PRIMOP_SUPPORTED +static void deleteThread_(Capability *cap, StgTSO *tso); #endif #if defined(PARALLEL_HASKELL) @@ -344,10 +327,9 @@ schedule (Capability *initialCapability, Task *task) // The sched_mutex is *NOT* held // NB. on return, we still hold a capability. 
- IF_DEBUG(scheduler, - sched_belch("### NEW SCHEDULER LOOP (task: %p, cap: %p)", - task, initialCapability); - ); + debugTrace (DEBUG_sched, + "### NEW SCHEDULER LOOP (task: %p, cap: %p)", + task, initialCapability); schedulePreLoop(); @@ -434,7 +416,7 @@ schedule (Capability *initialCapability, Task *task) case SCHED_RUNNING: break; case SCHED_INTERRUPTING: - IF_DEBUG(scheduler, sched_belch("SCHED_INTERRUPTING")); + debugTrace(DEBUG_sched, "SCHED_INTERRUPTING"); #if defined(THREADED_RTS) discardSparksCap(cap); #endif @@ -442,7 +424,7 @@ schedule (Capability *initialCapability, Task *task) cap = scheduleDoGC(cap,task,rtsFalse,GetRoots); break; case SCHED_SHUTTING_DOWN: - IF_DEBUG(scheduler, sched_belch("SCHED_SHUTTING_DOWN")); + debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN"); // If we are a worker, just exit. If we're a bound thread // then we will exit below when we've removed our TSO from // the run queue. @@ -461,9 +443,9 @@ schedule (Capability *initialCapability, Task *task) StgClosure *spark; spark = findSpark(cap); if (spark != NULL) { - IF_DEBUG(scheduler, - sched_belch("turning spark of closure %p into a thread", - (StgClosure *)spark)); + debugTrace(DEBUG_sched, + "turning spark of closure %p into a thread", + (StgClosure *)spark); createSparkThread(cap,spark); } } @@ -552,14 +534,12 @@ schedule (Capability *initialCapability, Task *task) if (bound) { if (bound == task) { - IF_DEBUG(scheduler, - sched_belch("### Running thread %d in bound thread", - t->id)); + debugTrace(DEBUG_sched, + "### Running thread %lu in bound thread", (unsigned long)t->id); // yes, the Haskell thread is bound to the current native thread } else { - IF_DEBUG(scheduler, - sched_belch("### thread %d bound to another OS thread", - t->id)); + debugTrace(DEBUG_sched, + "### thread %lu bound to another OS thread", (unsigned long)t->id); // no, bound to a different Haskell thread: pass to that thread pushOnRunQueue(cap,t); continue; @@ -567,8 +547,8 @@ schedule (Capability *initialCapability, Task *task) } else { // The thread we want to run is unbound. if (task->tso) { - IF_DEBUG(scheduler, - sched_belch("### this OS thread cannot run thread %d", t->id)); + debugTrace(DEBUG_sched, + "### this OS thread cannot run thread %lu", (unsigned long)t->id); // no, the current native thread is bound to a different // Haskell thread, so pass it to any worker thread pushOnRunQueue(cap,t); @@ -591,13 +571,16 @@ schedule (Capability *initialCapability, Task *task) run_thread: - IF_DEBUG(scheduler, sched_belch("-->> running thread %ld %s ...", - (long)t->id, whatNext_strs[t->what_next])); + debugTrace(DEBUG_sched, "-->> running thread %ld %s ...", + (long)t->id, whatNext_strs[t->what_next]); #if defined(PROFILING) startHeapProfTimer(); #endif + // Check for exceptions blocked on this thread + maybePerformBlockedException (cap, t); + // ---------------------------------------------------------------------- // Run the current thread @@ -665,9 +648,9 @@ run_thread: // that task->cap != cap. We better yield this Capability // immediately and return to normaility. 
if (ret == ThreadBlocked) { - IF_DEBUG(scheduler, - sched_belch("--<< thread %d (%s) stopped: blocked\n", - t->id, whatNext_strs[t->what_next])); + debugTrace(DEBUG_sched, + "--<< thread %lu (%s) stopped: blocked", + (unsigned long)t->id, whatNext_strs[t->what_next]); continue; } #endif @@ -683,12 +666,6 @@ run_thread: CCCS = CCS_SYSTEM; #endif -#if defined(THREADED_RTS) - IF_DEBUG(scheduler,debugBelch("sched (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId());); -#elif !defined(GRAN) && !defined(PARALLEL_HASKELL) - IF_DEBUG(scheduler,debugBelch("sched: ");); -#endif - schedulePostRunThread(); ready_to_gc = rtsFalse; @@ -728,8 +705,8 @@ run_thread: } } /* end of while() */ - IF_PAR_DEBUG(verbose, - debugBelch("== Leaving schedule() after having received Finish\n")); + debugTrace(PAR_DEBUG_verbose, + "== Leaving schedule() after having received Finish"); } /* ---------------------------------------------------------------------------- @@ -746,10 +723,10 @@ schedulePreLoop(void) ContinueThread, CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL); - IF_DEBUG(gran, - debugBelch("GRAN: Init CurrentTSO (in schedule) = %p\n", - CurrentTSO); - G_TSO(CurrentTSO, 5)); + debugTrace (DEBUG_gran, + "GRAN: Init CurrentTSO (in schedule) = %p", + CurrentTSO); + IF_DEBUG(gran, G_TSO(CurrentTSO, 5)); if (RtsFlags.GranFlags.Light) { /* Save current time; GranSim Light only */ @@ -811,7 +788,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS, StgTSO *prev, *t, *next; rtsBool pushed_to_all; - IF_DEBUG(scheduler, sched_belch("excess threads on run queue and %d free capabilities, sharing...", n_free_caps)); + debugTrace(DEBUG_sched, "excess threads on run queue and %d free capabilities, sharing...", n_free_caps); i = 0; pushed_to_all = rtsFalse; @@ -835,7 +812,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS, prev->link = t; prev = t; } else { - IF_DEBUG(scheduler, sched_belch("pushing thread %d to capability %d", t->id, free_caps[i]->no)); + debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no); appendToRunQueue(free_caps[i],t); if (t->bound) { t->bound->cap = free_caps[i]; } t->cap = free_caps[i]; @@ -854,7 +831,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS, if (emptySparkPoolCap(free_caps[i])) { spark = findSpark(cap); if (spark != NULL) { - IF_DEBUG(scheduler, sched_belch("pushing spark %p to capability %d", spark, free_caps[i]->no)); + debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no); newSpark(&(free_caps[i]->r), spark); } } @@ -984,7 +961,7 @@ scheduleDetectDeadlock (Capability *cap, Task *task) if (recent_activity != ACTIVITY_INACTIVE) return; #endif - IF_DEBUG(scheduler, sched_belch("deadlocked, forcing major GC...")); + debugTrace(DEBUG_sched, "deadlocked, forcing major GC..."); // Garbage collection can release some new threads due to // either (a) finalizers or (b) threads resurrected because @@ -1003,8 +980,8 @@ scheduleDetectDeadlock (Capability *cap, Task *task) * deadlock. 
*/ if ( anyUserHandlers() ) { - IF_DEBUG(scheduler, - sched_belch("still deadlocked, waiting for signals...")); + debugTrace(DEBUG_sched, + "still deadlocked, waiting for signals..."); awaitUserSignals(); @@ -1027,7 +1004,8 @@ scheduleDetectDeadlock (Capability *cap, Task *task) case BlockedOnBlackHole: case BlockedOnException: case BlockedOnMVar: - raiseAsync(cap, task->tso, (StgClosure *)NonTermination_closure); + throwToSingleThreaded(cap, task->tso, + (StgClosure *)NonTermination_closure); return; default: barf("deadlock: main thread blocked in a strange way"); @@ -1510,10 +1488,10 @@ schedulePostRunThread(void) case ThreadBlocked: # if defined(GRAN) - IF_DEBUG(scheduler, - debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ", - t->id, t, whatNext_strs[t->what_next], t->block_info.closure, - (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure))); + debugTrace(DEBUG_sched, + "--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ", + t->id, t, whatNext_strs[t->what_next], t->block_info.closure, + (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure))); if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure); debugBelch("\n")); @@ -1562,10 +1540,10 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t ) blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE; - IF_DEBUG(scheduler, - debugBelch("--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n", - (long)t->id, whatNext_strs[t->what_next], blocks)); - + debugTrace(DEBUG_sched, + "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n", + (long)t->id, whatNext_strs[t->what_next], blocks); + // don't do this if the nursery is (nearly) full, we'll GC first. if (cap->r.rCurrentNursery->link != NULL || cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop @@ -1622,9 +1600,10 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t ) } } - IF_DEBUG(scheduler, - debugBelch("--<< thread %ld (%s) stopped: HeapOverflow\n", - (long)t->id, whatNext_strs[t->what_next])); + debugTrace(DEBUG_sched, + "--<< thread %ld (%s) stopped: HeapOverflow\n", + (long)t->id, whatNext_strs[t->what_next]); + #if defined(GRAN) ASSERT(!is_on_queue(t,CurrentProc)); #elif defined(PARALLEL_HASKELL) @@ -1650,8 +1629,10 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t ) static void scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t) { - IF_DEBUG(scheduler,debugBelch("--<< thread %ld (%s) stopped, StackOverflow\n", - (long)t->id, whatNext_strs[t->what_next])); + debugTrace (DEBUG_sched, + "--<< thread %ld (%s) stopped, StackOverflow", + (long)t->id, whatNext_strs[t->what_next]); + /* just adjust the stack for this thread, then pop it back * on the run queue. */ @@ -1689,15 +1670,17 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next ) * up the GC thread. getThread will block during a GC until the * GC is finished. 
*/ - IF_DEBUG(scheduler, - if (t->what_next != prev_what_next) { - debugBelch("--<< thread %ld (%s) stopped to switch evaluators\n", - (long)t->id, whatNext_strs[t->what_next]); - } else { - debugBelch("--<< thread %ld (%s) stopped, yielding\n", - (long)t->id, whatNext_strs[t->what_next]); - } - ); +#ifdef DEBUG + if (t->what_next != prev_what_next) { + debugTrace(DEBUG_sched, + "--<< thread %ld (%s) stopped to switch evaluators", + (long)t->id, whatNext_strs[t->what_next]); + } else { + debugTrace(DEBUG_sched, + "--<< thread %ld (%s) stopped, yielding", + (long)t->id, whatNext_strs[t->what_next]); + } +#endif IF_DEBUG(sanity, //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id); @@ -1795,11 +1778,14 @@ scheduleHandleThreadBlocked( StgTSO *t // conc023 +RTS -N2. #endif - IF_DEBUG(scheduler, - debugBelch("--<< thread %d (%s) stopped: ", - t->id, whatNext_strs[t->what_next]); - printThreadBlockage(t); - debugBelch("\n")); +#ifdef DEBUG + if (traceClass(DEBUG_sched)) { + debugTraceBegin("--<< thread %lu (%s) stopped: ", + (unsigned long)t->id, whatNext_strs[t->what_next]); + printThreadBlockage(t); + debugTraceEnd(); + } +#endif /* Only for dumping event to log file ToDo: do I need this in GranSim, too? @@ -1821,8 +1807,8 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t) * We also end up here if the thread kills itself with an * uncaught exception, see Exception.cmm. */ - IF_DEBUG(scheduler,debugBelch("--++ thread %d (%s) finished\n", - t->id, whatNext_strs[t->what_next])); + debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished", + (unsigned long)t->id, whatNext_strs[t->what_next]); #if defined(GRAN) endThread(t, CurrentProc); // clean-up the thread @@ -1942,10 +1928,10 @@ scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED ) // deadlocked. scheduleCheckBlackHoles(&MainCapability); - IF_DEBUG(scheduler, sched_belch("garbage collecting before heap census")); + debugTrace(DEBUG_sched, "garbage collecting before heap census"); GarbageCollect(GetRoots, rtsTrue); - IF_DEBUG(scheduler, sched_belch("performing heap census")); + debugTrace(DEBUG_sched, "performing heap census"); heapCensus(); performHeapProfile = rtsFalse; @@ -1985,14 +1971,14 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, was_waiting = cas(&waiting_for_gc, 0, 1); if (was_waiting) { do { - IF_DEBUG(scheduler, sched_belch("someone else is trying to GC...")); + debugTrace(DEBUG_sched, "someone else is trying to GC..."); if (cap) yieldCapability(&cap,task); } while (waiting_for_gc); return cap; // NOTE: task->cap might have changed here } for (i=0; i < n_capabilities; i++) { - IF_DEBUG(scheduler, sched_belch("ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities)); + debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities); if (cap != &capabilities[i]) { Capability *pcap = &capabilities[i]; // we better hope this task doesn't get migrated to @@ -2024,15 +2010,29 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, next = t->link; } else { next = t->global_link; + + // This is a good place to check for blocked + // exceptions. It might be the case that a thread is + // blocked on delivering an exception to a thread that + // is also blocked - we try to ensure that this + // doesn't happen in throwTo(), but it's too hard (or + // impossible) to close all the race holes, so we + // accept that some might get through and deal with + // them here. 
A GC will always happen at some point, + // even if the system is otherwise deadlocked. + maybePerformBlockedException (&capabilities[0], t); + if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) { if (!stmValidateNestOfTransactions (t -> trec)) { - IF_DEBUG(stm, sched_belch("trec %p found wasting its time", t)); + debugTrace(DEBUG_sched | DEBUG_stm, + "trec %p found wasting its time", t); // strip the stack back to the // ATOMICALLY_FRAME, aborting the (nested) // transaction, and saving the stack of any // partially-evaluated thunks on the heap. - raiseAsync_(&capabilities[0], t, NULL, rtsTrue, NULL); + throwToSingleThreaded_(&capabilities[0], t, + NULL, rtsTrue, NULL); #ifdef REG_R1 ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME); @@ -2064,7 +2064,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, * broadcast on gc_pending_cond afterward. */ #if defined(THREADED_RTS) - IF_DEBUG(scheduler,sched_belch("doing GC")); + debugTrace(DEBUG_sched, "doing GC"); #endif GarbageCollect(get_roots, force_major); @@ -2098,45 +2098,9 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, } /* --------------------------------------------------------------------------- - * rtsSupportsBoundThreads(): is the RTS built to support bound threads? - * used by Control.Concurrent for error checking. - * ------------------------------------------------------------------------- */ - -StgBool -rtsSupportsBoundThreads(void) -{ -#if defined(THREADED_RTS) - return rtsTrue; -#else - return rtsFalse; -#endif -} - -/* --------------------------------------------------------------------------- - * isThreadBound(tso): check whether tso is bound to an OS thread. - * ------------------------------------------------------------------------- */ - -StgBool -isThreadBound(StgTSO* tso USED_IF_THREADS) -{ -#if defined(THREADED_RTS) - return (tso->bound != NULL); -#endif - return rtsFalse; -} - -/* --------------------------------------------------------------------------- * Singleton fork(). Do not copy any running threads. * ------------------------------------------------------------------------- */ -#if !defined(mingw32_HOST_OS) -#define FORKPROCESS_PRIMOP_SUPPORTED -#endif - -#ifdef FORKPROCESS_PRIMOP_SUPPORTED -static void -deleteThread_(Capability *cap, StgTSO *tso); -#endif StgInt forkProcess(HsStablePtr *entry #ifndef FORKPROCESS_PRIMOP_SUPPORTED @@ -2157,7 +2121,7 @@ forkProcess(HsStablePtr *entry } #endif - IF_DEBUG(scheduler,sched_belch("forking!")); + debugTrace(DEBUG_sched, "forking!"); // ToDo: for SMP, we should probably acquire *all* the capabilities cap = rts_lock(); @@ -2242,26 +2206,28 @@ forkProcess(HsStablePtr *entry static void deleteAllThreads ( Capability *cap ) { - StgTSO* t, *next; - IF_DEBUG(scheduler,sched_belch("deleting all threads")); - for (t = all_threads; t != END_TSO_QUEUE; t = next) { - if (t->what_next == ThreadRelocated) { - next = t->link; - } else { - next = t->global_link; - deleteThread(cap,t); - } - } + // NOTE: only safe to call if we own all capabilities. - // The run queue now contains a bunch of ThreadKilled threads. We - // must not throw these away: the main thread(s) will be in there - // somewhere, and the main scheduler loop has to deal with it. - // Also, the run queue is the only thing keeping these threads from - // being GC'd, and we don't want the "main thread has been GC'd" panic. 
+ StgTSO* t, *next; + debugTrace(DEBUG_sched,"deleting all threads"); + for (t = all_threads; t != END_TSO_QUEUE; t = next) { + if (t->what_next == ThreadRelocated) { + next = t->link; + } else { + next = t->global_link; + deleteThread(cap,t); + } + } + + // The run queue now contains a bunch of ThreadKilled threads. We + // must not throw these away: the main thread(s) will be in there + // somewhere, and the main scheduler loop has to deal with it. + // Also, the run queue is the only thing keeping these threads from + // being GC'd, and we don't want the "main thread has been GC'd" panic. #if !defined(THREADED_RTS) - ASSERT(blocked_queue_hd == END_TSO_QUEUE); - ASSERT(sleeping_queue == END_TSO_QUEUE); + ASSERT(blocked_queue_hd == END_TSO_QUEUE); + ASSERT(sleeping_queue == END_TSO_QUEUE); #endif } @@ -2327,17 +2293,19 @@ suspendThread (StgRegTable *reg) task = cap->running_task; tso = cap->r.rCurrentTSO; - IF_DEBUG(scheduler, - sched_belch("thread %d did a safe foreign call", cap->r.rCurrentTSO->id)); + debugTrace(DEBUG_sched, + "thread %lu did a safe foreign call", + (unsigned long)cap->r.rCurrentTSO->id); // XXX this might not be necessary --SDM tso->what_next = ThreadRunGHC; threadPaused(cap,tso); - if(tso->blocked_exceptions == NULL) { + if ((tso->flags & TSO_BLOCKEX) == 0) { tso->why_blocked = BlockedOnCCall; - tso->blocked_exceptions = END_TSO_QUEUE; + tso->flags |= TSO_BLOCKEX; + tso->flags &= ~TSO_INTERRUPTIBLE; } else { tso->why_blocked = BlockedOnCCall_NoUnblockExc; } @@ -2357,7 +2325,7 @@ suspendThread (StgRegTable *reg) /* Preparing to leave the RTS, so ensure there's a native thread/task waiting to take over. */ - IF_DEBUG(scheduler, sched_belch("thread %d: leaving RTS", tso->id)); + debugTrace(DEBUG_sched, "thread %lu: leaving RTS", (unsigned long)tso->id); #endif errno = saved_errno; @@ -2385,11 +2353,11 @@ resumeThread (void *task_) tso = task->suspended_tso; task->suspended_tso = NULL; tso->link = END_TSO_QUEUE; - IF_DEBUG(scheduler, sched_belch("thread %d: re-entering RTS", tso->id)); + debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id); if (tso->why_blocked == BlockedOnCCall) { - awakenBlockedQueue(cap,tso->blocked_exceptions); - tso->blocked_exceptions = NULL; + awakenBlockedExceptionQueue(cap,tso); + tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE); } /* Reset blocking status */ @@ -2408,299 +2376,6 @@ resumeThread (void *task_) } /* --------------------------------------------------------------------------- - * Comparing Thread ids. - * - * This is used from STG land in the implementation of the - * instances of Eq/Ord for ThreadIds. - * ------------------------------------------------------------------------ */ - -int -cmp_thread(StgPtr tso1, StgPtr tso2) -{ - StgThreadID id1 = ((StgTSO *)tso1)->id; - StgThreadID id2 = ((StgTSO *)tso2)->id; - - if (id1 < id2) return (-1); - if (id1 > id2) return 1; - return 0; -} - -/* --------------------------------------------------------------------------- - * Fetching the ThreadID from an StgTSO. - * - * This is used in the implementation of Show for ThreadIds. 
- * ------------------------------------------------------------------------ */ -int -rts_getThreadId(StgPtr tso) -{ - return ((StgTSO *)tso)->id; -} - -#ifdef DEBUG -void -labelThread(StgPtr tso, char *label) -{ - int len; - void *buf; - - /* Caveat: Once set, you can only set the thread name to "" */ - len = strlen(label)+1; - buf = stgMallocBytes(len * sizeof(char), "Schedule.c:labelThread()"); - strncpy(buf,label,len); - /* Update will free the old memory for us */ - updateThreadLabel(((StgTSO *)tso)->id,buf); -} -#endif /* DEBUG */ - -/* --------------------------------------------------------------------------- - Create a new thread. - - The new thread starts with the given stack size. Before the - scheduler can run, however, this thread needs to have a closure - (and possibly some arguments) pushed on its stack. See - pushClosure() in Schedule.h. - - createGenThread() and createIOThread() (in SchedAPI.h) are - convenient packaged versions of this function. - - currently pri (priority) is only used in a GRAN setup -- HWL - ------------------------------------------------------------------------ */ -#if defined(GRAN) -/* currently pri (priority) is only used in a GRAN setup -- HWL */ -StgTSO * -createThread(nat size, StgInt pri) -#else -StgTSO * -createThread(Capability *cap, nat size) -#endif -{ - StgTSO *tso; - nat stack_size; - - /* sched_mutex is *not* required */ - - /* First check whether we should create a thread at all */ -#if defined(PARALLEL_HASKELL) - /* check that no more than RtsFlags.ParFlags.maxThreads threads are created */ - if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads) { - threadsIgnored++; - debugBelch("{createThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)\n", - RtsFlags.ParFlags.maxThreads, advisory_thread_count); - return END_TSO_QUEUE; - } - threadsCreated++; -#endif - -#if defined(GRAN) - ASSERT(!RtsFlags.GranFlags.Light || CurrentProc==0); -#endif - - // ToDo: check whether size = stack_size - TSO_STRUCT_SIZEW - - /* catch ridiculously small stack sizes */ - if (size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) { - size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW; - } - - stack_size = size - TSO_STRUCT_SIZEW; - - tso = (StgTSO *)allocateLocal(cap, size); - TICK_ALLOC_TSO(stack_size, 0); - - SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM); -#if defined(GRAN) - SET_GRAN_HDR(tso, ThisPE); -#endif - - // Always start with the compiled code evaluator - tso->what_next = ThreadRunGHC; - - tso->why_blocked = NotBlocked; - tso->blocked_exceptions = NULL; - tso->flags = TSO_DIRTY; - - tso->saved_errno = 0; - tso->bound = NULL; - tso->cap = cap; - - tso->stack_size = stack_size; - tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize) - - TSO_STRUCT_SIZEW; - tso->sp = (P_)&(tso->stack) + stack_size; - - tso->trec = NO_TREC; - -#ifdef PROFILING - tso->prof.CCCS = CCS_MAIN; -#endif - - /* put a stop frame on the stack */ - tso->sp -= sizeofW(StgStopFrame); - SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM); - tso->link = END_TSO_QUEUE; - - // ToDo: check this -#if defined(GRAN) - /* uses more flexible routine in GranSim */ - insertThread(tso, CurrentProc); -#else - /* In a non-GranSim setup the pushing of a TSO onto the runq is separated - * from its creation - */ -#endif - -#if defined(GRAN) - if (RtsFlags.GranFlags.GranSimStats.Full) - DumpGranEvent(GR_START,tso); -#elif defined(PARALLEL_HASKELL) - if (RtsFlags.ParFlags.ParStats.Full) - DumpGranEvent(GR_STARTQ,tso); - /* HACk to avoid 
SCHEDULE - LastTSO = tso; */ -#endif - - /* Link the new thread on the global thread list. - */ - ACQUIRE_LOCK(&sched_mutex); - tso->id = next_thread_id++; // while we have the mutex - tso->global_link = all_threads; - all_threads = tso; - RELEASE_LOCK(&sched_mutex); - -#if defined(DIST) - tso->dist.priority = MandatoryPriority; //by default that is... -#endif - -#if defined(GRAN) - tso->gran.pri = pri; -# if defined(DEBUG) - tso->gran.magic = TSO_MAGIC; // debugging only -# endif - tso->gran.sparkname = 0; - tso->gran.startedat = CURRENT_TIME; - tso->gran.exported = 0; - tso->gran.basicblocks = 0; - tso->gran.allocs = 0; - tso->gran.exectime = 0; - tso->gran.fetchtime = 0; - tso->gran.fetchcount = 0; - tso->gran.blocktime = 0; - tso->gran.blockcount = 0; - tso->gran.blockedat = 0; - tso->gran.globalsparks = 0; - tso->gran.localsparks = 0; - if (RtsFlags.GranFlags.Light) - tso->gran.clock = Now; /* local clock */ - else - tso->gran.clock = 0; - - IF_DEBUG(gran,printTSO(tso)); -#elif defined(PARALLEL_HASKELL) -# if defined(DEBUG) - tso->par.magic = TSO_MAGIC; // debugging only -# endif - tso->par.sparkname = 0; - tso->par.startedat = CURRENT_TIME; - tso->par.exported = 0; - tso->par.basicblocks = 0; - tso->par.allocs = 0; - tso->par.exectime = 0; - tso->par.fetchtime = 0; - tso->par.fetchcount = 0; - tso->par.blocktime = 0; - tso->par.blockcount = 0; - tso->par.blockedat = 0; - tso->par.globalsparks = 0; - tso->par.localsparks = 0; -#endif - -#if defined(GRAN) - globalGranStats.tot_threads_created++; - globalGranStats.threads_created_on_PE[CurrentProc]++; - globalGranStats.tot_sq_len += spark_queue_len(CurrentProc); - globalGranStats.tot_sq_probes++; -#elif defined(PARALLEL_HASKELL) - // collect parallel global statistics (currently done together with GC stats) - if (RtsFlags.ParFlags.ParStats.Global && - RtsFlags.GcFlags.giveStats > NO_GC_STATS) { - //debugBelch("Creating thread %d @ %11.2f\n", tso->id, usertime()); - globalParStats.tot_threads_created++; - } -#endif - -#if defined(GRAN) - IF_GRAN_DEBUG(pri, - sched_belch("==__ schedule: Created TSO %d (%p);", - CurrentProc, tso, tso->id)); -#elif defined(PARALLEL_HASKELL) - IF_PAR_DEBUG(verbose, - sched_belch("==__ schedule: Created TSO %d (%p); %d threads active", - (long)tso->id, tso, advisory_thread_count)); -#else - IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words", - (long)tso->id, (long)tso->stack_size)); -#endif - return tso; -} - -#if defined(PAR) -/* RFP: - all parallel thread creation calls should fall through the following routine. -*/ -StgTSO * -createThreadFromSpark(rtsSpark spark) -{ StgTSO *tso; - ASSERT(spark != (rtsSpark)NULL); -// JB: TAKE CARE OF THIS COUNTER! BUGGY - if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads) - { threadsIgnored++; - barf("{createSparkThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)", - RtsFlags.ParFlags.maxThreads, advisory_thread_count); - return END_TSO_QUEUE; - } - else - { threadsCreated++; - tso = createThread(RtsFlags.GcFlags.initialStkSize); - if (tso==END_TSO_QUEUE) - barf("createSparkThread: Cannot create TSO"); -#if defined(DIST) - tso->priority = AdvisoryPriority; -#endif - pushClosure(tso,spark); - addToRunQueue(tso); - advisory_thread_count++; // JB: TAKE CARE OF THIS COUNTER! BUGGY - } - return tso; -} -#endif - -/* - Turn a spark into a thread. - ToDo: fix for SMP (needs to acquire SCHED_MUTEX!) 
-*/ -#if 0 -StgTSO * -activateSpark (rtsSpark spark) -{ - StgTSO *tso; - - tso = createSparkThread(spark); - if (RtsFlags.ParFlags.ParStats.Full) { - //ASSERT(run_queue_hd == END_TSO_QUEUE); // I think ... - IF_PAR_DEBUG(verbose, - debugBelch("==^^ activateSpark: turning spark of closure %p (%s) into a thread\n", - (StgClosure *)spark, info_type((StgClosure *)spark))); - } - // ToDo: fwd info on local/global spark to thread -- HWL - // tso->gran.exported = spark->exported; - // tso->gran.locked = !spark->global; - // tso->gran.sparkname = spark->name; - - return tso; -} -#endif - -/* --------------------------------------------------------------------------- * scheduleThread() * * scheduleThread puts a thread on the end of the runnable queue. @@ -2728,12 +2403,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso) if (cpu == cap->no) { appendToRunQueue(cap,tso); } else { - Capability *target_cap = &capabilities[cpu]; - if (tso->bound) { - tso->bound->cap = target_cap; - } - tso->cap = target_cap; - wakeupThreadOnCapability(target_cap,tso); + migrateThreadToCapability_lock(&capabilities[cpu],tso); } #else appendToRunQueue(cap,tso); @@ -2759,7 +2429,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap) appendToRunQueue(cap,tso); - IF_DEBUG(scheduler, sched_belch("new bound thread (%d)", tso->id)); + debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id); #if defined(GRAN) /* GranSim specific init */ @@ -2773,7 +2443,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap) ASSERT(task->stat != NoStatus); ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); - IF_DEBUG(scheduler, sched_belch("bound thread (%d) finished", task->tso->id)); + debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id); return cap; } @@ -2800,7 +2470,7 @@ workerStart(Task *task) // On exit from schedule(), we have a Capability. releaseCapability(cap); - taskStop(task); + workerTaskStop(task); } #endif @@ -2839,9 +2509,6 @@ initScheduler(void) context_switch = 0; sched_state = SCHED_RUNNING; - RtsFlags.ConcFlags.ctxtSwitchTicks = - RtsFlags.ConcFlags.ctxtSwitchTime / TICK_MILLISECS; - #if defined(THREADED_RTS) /* Initialise the mutex and condition variables used by * the scheduler. 
*/ @@ -2881,6 +2548,8 @@ initScheduler(void) } #endif + trace(TRACE_sched, "start: %d capabilities", n_capabilities); + RELEASE_LOCK(&sched_mutex); } @@ -2912,6 +2581,7 @@ exitScheduler( void ) boundTaskExiting(task); stopTaskManager(); } + closeMutex(&sched_mutex); #endif } @@ -2967,7 +2637,8 @@ GetRoots( evac_fn evac ) #endif for (task = cap->suspended_ccalling_tasks; task != NULL; task=task->next) { - IF_DEBUG(scheduler,sched_belch("evac'ing suspended TSO %d", task->suspended_tso->id)); + debugTrace(DEBUG_sched, + "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id); evac((StgClosure **)(void *)&task->suspended_tso); } @@ -3066,18 +2737,25 @@ threadStackOverflow(Capability *cap, StgTSO *tso) StgTSO *dest; IF_DEBUG(sanity,checkTSO(tso)); + + // don't allow throwTo() to modify the blocked_exceptions queue + // while we are moving the TSO: + lockClosure((StgClosure *)tso); + if (tso->stack_size >= tso->max_stack_size) { - IF_DEBUG(gc, - debugBelch("@@ threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)\n", - (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size); - /* If we're debugging, just print out the top of the stack */ - printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size, - tso->sp+64))); + debugTrace(DEBUG_gc, + "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)", + (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size); + IF_DEBUG(gc, + /* If we're debugging, just print out the top of the stack */ + printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size, + tso->sp+64))); - /* Send this thread the StackOverflow exception */ - raiseAsync(cap, tso, (StgClosure *)stackOverflow_closure); - return tso; + // Send this thread the StackOverflow exception + unlockTSO(tso); + throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure); + return tso; } /* Try to double the current stack size. If that takes us over the @@ -3090,7 +2768,9 @@ threadStackOverflow(Capability *cap, StgTSO *tso) new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */ new_stack_size = new_tso_size - TSO_STRUCT_SIZEW; - IF_DEBUG(scheduler, sched_belch("increasing stack size from %ld words to %d.\n", (long)tso->stack_size, new_stack_size)); + debugTrace(DEBUG_sched, + "increasing stack size from %ld words to %d.", + (long)tso->stack_size, new_stack_size); dest = (StgTSO *)allocate(new_tso_size); TICK_ALLOC_TSO(new_stack_size,0); @@ -3124,7 +2804,10 @@ threadStackOverflow(Capability *cap, StgTSO *tso) printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size, tso->sp+64))); - IF_DEBUG(sanity,checkTSO(tso)); + unlockTSO(dest); + unlockTSO(tso); + + IF_DEBUG(sanity,checkTSO(dest)); #if 0 IF_DEBUG(scheduler,printTSO(dest)); #endif @@ -3133,648 +2816,75 @@ threadStackOverflow(Capability *cap, StgTSO *tso) } /* --------------------------------------------------------------------------- - Wake up a queue that was blocked on some resource. + Interrupt execution + - usually called inside a signal handler so it mustn't do anything fancy. 
------------------------------------------------------------------------ */ -#if defined(GRAN) -STATIC_INLINE void -unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node ) -{ -} -#elif defined(PARALLEL_HASKELL) -STATIC_INLINE void -unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node ) -{ - /* write RESUME events to log file and - update blocked and fetch time (depending on type of the orig closure) */ - if (RtsFlags.ParFlags.ParStats.Full) { - DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC, - GR_RESUMEQ, ((StgTSO *)bqe), ((StgTSO *)bqe)->block_info.closure, - 0, 0 /* spark_queue_len(ADVISORY_POOL) */); - if (emptyRunQueue()) - emitSchedule = rtsTrue; - - switch (get_itbl(node)->type) { - case FETCH_ME_BQ: - ((StgTSO *)bqe)->par.fetchtime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat; - break; - case RBH: - case FETCH_ME: - case BLACKHOLE_BQ: - ((StgTSO *)bqe)->par.blocktime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat; - break; -#ifdef DIST - case MVAR: - break; -#endif - default: - barf("{unblockOne}Daq Qagh: unexpected closure in blocking queue"); - } - } -} -#endif - -#if defined(GRAN) -StgBlockingQueueElement * -unblockOne(StgBlockingQueueElement *bqe, StgClosure *node) +void +interruptStgRts(void) { - StgTSO *tso; - PEs node_loc, tso_loc; - - node_loc = where_is(node); // should be lifted out of loop - tso = (StgTSO *)bqe; // wastes an assignment to get the type right - tso_loc = where_is((StgClosure *)tso); - if (IS_LOCAL_TO(PROCS(node),tso_loc)) { // TSO is local - /* !fake_fetch => TSO is on CurrentProc is same as IS_LOCAL_TO */ - ASSERT(CurrentProc!=node_loc || tso_loc==CurrentProc); - CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.lunblocktime; - // insertThread(tso, node_loc); - new_event(tso_loc, tso_loc, CurrentTime[CurrentProc], - ResumeThread, - tso, node, (rtsSpark*)NULL); - tso->link = END_TSO_QUEUE; // overwrite link just to be sure - // len_local++; - // len++; - } else { // TSO is remote (actually should be FMBQ) - CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.mpacktime + - RtsFlags.GranFlags.Costs.gunblocktime + - RtsFlags.GranFlags.Costs.latency; - new_event(tso_loc, CurrentProc, CurrentTime[CurrentProc], - UnblockThread, - tso, node, (rtsSpark*)NULL); - tso->link = END_TSO_QUEUE; // overwrite link just to be sure - // len++; - } - /* the thread-queue-overhead is accounted for in either Resume or UnblockThread */ - IF_GRAN_DEBUG(bq, - debugBelch(" %s TSO %d (%p) [PE %d] (block_info.closure=%p) (next=%p) ,", - (node_loc==tso_loc ? "Local" : "Global"), - tso->id, tso, CurrentProc, tso->block_info.closure, tso->link)); - tso->block_info.closure = NULL; - IF_DEBUG(scheduler,debugBelch("-- Waking up thread %ld (%p)\n", - tso->id, tso)); + sched_state = SCHED_INTERRUPTING; + context_switch = 1; + wakeUpRts(); } -#elif defined(PARALLEL_HASKELL) -StgBlockingQueueElement * -unblockOne(StgBlockingQueueElement *bqe, StgClosure *node) -{ - StgBlockingQueueElement *next; - - switch (get_itbl(bqe)->type) { - case TSO: - ASSERT(((StgTSO *)bqe)->why_blocked != NotBlocked); - /* if it's a TSO just push it onto the run_queue */ - next = bqe->link; - ((StgTSO *)bqe)->link = END_TSO_QUEUE; // debugging? 
- APPEND_TO_RUN_QUEUE((StgTSO *)bqe); - threadRunnable(); - unblockCount(bqe, node); - /* reset blocking status after dumping event */ - ((StgTSO *)bqe)->why_blocked = NotBlocked; - break; - - case BLOCKED_FETCH: - /* if it's a BLOCKED_FETCH put it on the PendingFetches list */ - next = bqe->link; - bqe->link = (StgBlockingQueueElement *)PendingFetches; - PendingFetches = (StgBlockedFetch *)bqe; - break; -# if defined(DEBUG) - /* can ignore this case in a non-debugging setup; - see comments on RBHSave closures above */ - case CONSTR: - /* check that the closure is an RBHSave closure */ - ASSERT(get_itbl((StgClosure *)bqe) == &stg_RBH_Save_0_info || - get_itbl((StgClosure *)bqe) == &stg_RBH_Save_1_info || - get_itbl((StgClosure *)bqe) == &stg_RBH_Save_2_info); - break; - - default: - barf("{unblockOne}Daq Qagh: Unexpected IP (%#lx; %s) in blocking queue at %#lx\n", - get_itbl((StgClosure *)bqe), info_type((StgClosure *)bqe), - (StgClosure *)bqe); -# endif - } - IF_PAR_DEBUG(bq, debugBelch(", %p (%s)\n", bqe, info_type((StgClosure*)bqe))); - return next; -} -#endif +/* ----------------------------------------------------------------------------- + Wake up the RTS + + This function causes at least one OS thread to wake up and run the + scheduler loop. It is invoked when the RTS might be deadlocked, or + an external event has arrived that may need servicing (eg. a + keyboard interrupt). + + In the single-threaded RTS we don't do anything here; we only have + one thread anyway, and the event that caused us to want to wake up + will have interrupted any blocking system call in progress anyway. + -------------------------------------------------------------------------- */ -StgTSO * -unblockOne(Capability *cap, StgTSO *tso) +void +wakeUpRts(void) { - StgTSO *next; - - ASSERT(get_itbl(tso)->type == TSO); - ASSERT(tso->why_blocked != NotBlocked); - - tso->why_blocked = NotBlocked; - next = tso->link; - tso->link = END_TSO_QUEUE; - #if defined(THREADED_RTS) - if (tso->cap == cap || (!tsoLocked(tso) && RtsFlags.ParFlags.wakeupMigrate)) { - // We are waking up this thread on the current Capability, which - // might involve migrating it from the Capability it was last on. - if (tso->bound) { - ASSERT(tso->bound->cap == tso->cap); - tso->bound->cap = cap; - } - tso->cap = cap; - appendToRunQueue(cap,tso); - // we're holding a newly woken thread, make sure we context switch - // quickly so we can migrate it if necessary. - context_switch = 1; - } else { - // we'll try to wake it up on the Capability it was last on. - wakeupThreadOnCapability(tso->cap, tso); - } +#if !defined(mingw32_HOST_OS) + // This forces the IO Manager thread to wakeup, which will + // in turn ensure that some OS thread wakes up and runs the + // scheduler loop, which will cause a GC and deadlock check. + ioManagerWakeup(); #else - appendToRunQueue(cap,tso); - context_switch = 1; + // On Windows this might be safe enough, because we aren't + // in a signal handler. Later we should use the IO Manager, + // though. + prodOneCapability(); +#endif #endif - - IF_DEBUG(scheduler,sched_belch("waking up thread %ld on cap %d", (long)tso->id, tso->cap->no)); - return next; } +/* ----------------------------------------------------------------------------- + * checkBlackHoles() + * + * Check the blackhole_queue for threads that can be woken up. We do + * this periodically: before every GC, and whenever the run queue is + * empty. 
+ * + * An elegant solution might be to just wake up all the blocked + * threads with awakenBlockedQueue occasionally: they'll go back to + * sleep again if the object is still a BLACKHOLE. Unfortunately this + * doesn't give us a way to tell whether we've actually managed to + * wake up any threads, so we would be busy-waiting. + * + * -------------------------------------------------------------------------- */ -#if defined(GRAN) -void -awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node) +static rtsBool +checkBlackHoles (Capability *cap) { - StgBlockingQueueElement *bqe; - PEs node_loc; - nat len = 0; - - IF_GRAN_DEBUG(bq, - debugBelch("##-_ AwBQ for node %p on PE %d @ %ld by TSO %d (%p): \n", \ - node, CurrentProc, CurrentTime[CurrentProc], - CurrentTSO->id, CurrentTSO)); + StgTSO **prev, *t; + rtsBool any_woke_up = rtsFalse; + StgHalfWord type; - node_loc = where_is(node); + // blackhole_queue is global: + ASSERT_LOCK_HELD(&sched_mutex); - ASSERT(q == END_BQ_QUEUE || - get_itbl(q)->type == TSO || // q is either a TSO or an RBHSave - get_itbl(q)->type == CONSTR); // closure (type constructor) - ASSERT(is_unique(node)); - - /* FAKE FETCH: magically copy the node to the tso's proc; - no Fetch necessary because in reality the node should not have been - moved to the other PE in the first place - */ - if (CurrentProc!=node_loc) { - IF_GRAN_DEBUG(bq, - debugBelch("## node %p is on PE %d but CurrentProc is %d (TSO %d); assuming fake fetch and adjusting bitmask (old: %#x)\n", - node, node_loc, CurrentProc, CurrentTSO->id, - // CurrentTSO, where_is(CurrentTSO), - node->header.gran.procs)); - node->header.gran.procs = (node->header.gran.procs) | PE_NUMBER(CurrentProc); - IF_GRAN_DEBUG(bq, - debugBelch("## new bitmask of node %p is %#x\n", - node, node->header.gran.procs)); - if (RtsFlags.GranFlags.GranSimStats.Global) { - globalGranStats.tot_fake_fetches++; - } - } - - bqe = q; - // ToDo: check: ASSERT(CurrentProc==node_loc); - while (get_itbl(bqe)->type==TSO) { // q != END_TSO_QUEUE) { - //next = bqe->link; - /* - bqe points to the current element in the queue - next points to the next element in the queue - */ - //tso = (StgTSO *)bqe; // wastes an assignment to get the type right - //tso_loc = where_is(tso); - len++; - bqe = unblockOne(bqe, node); - } - - /* if this is the BQ of an RBH, we have to put back the info ripped out of - the closure to make room for the anchor of the BQ */ - if (bqe!=END_BQ_QUEUE) { - ASSERT(get_itbl(node)->type == RBH && get_itbl(bqe)->type == CONSTR); - /* - ASSERT((info_ptr==&RBH_Save_0_info) || - (info_ptr==&RBH_Save_1_info) || - (info_ptr==&RBH_Save_2_info)); - */ - /* cf. convertToRBH in RBH.c for writing the RBHSave closure */ - ((StgRBH *)node)->blocking_queue = (StgBlockingQueueElement *)((StgRBHSave *)bqe)->payload[0]; - ((StgRBH *)node)->mut_link = (StgMutClosure *)((StgRBHSave *)bqe)->payload[1]; - - IF_GRAN_DEBUG(bq, - debugBelch("## Filled in RBH_Save for %p (%s) at end of AwBQ\n", - node, info_type(node))); - } - - /* statistics gathering */ - if (RtsFlags.GranFlags.GranSimStats.Global) { - // globalGranStats.tot_bq_processing_time += bq_processing_time; - globalGranStats.tot_bq_len += len; // total length of all bqs awakened - // globalGranStats.tot_bq_len_local += len_local; // same for local TSOs only - globalGranStats.tot_awbq++; // total no. of bqs awakened - } - IF_GRAN_DEBUG(bq, - debugBelch("## BQ Stats of %p: [%d entries] %s\n", - node, len, (bqe!=END_BQ_QUEUE) ? 
"RBH" : "")); -} -#elif defined(PARALLEL_HASKELL) -void -awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node) -{ - StgBlockingQueueElement *bqe; - - IF_PAR_DEBUG(verbose, - debugBelch("##-_ AwBQ for node %p on [%x]: \n", - node, mytid)); -#ifdef DIST - //RFP - if(get_itbl(q)->type == CONSTR || q==END_BQ_QUEUE) { - IF_PAR_DEBUG(verbose, debugBelch("## ... nothing to unblock so lets just return. RFP (BUG?)\n")); - return; - } -#endif - - ASSERT(q == END_BQ_QUEUE || - get_itbl(q)->type == TSO || - get_itbl(q)->type == BLOCKED_FETCH || - get_itbl(q)->type == CONSTR); - - bqe = q; - while (get_itbl(bqe)->type==TSO || - get_itbl(bqe)->type==BLOCKED_FETCH) { - bqe = unblockOne(bqe, node); - } -} - -#else /* !GRAN && !PARALLEL_HASKELL */ - -void -awakenBlockedQueue(Capability *cap, StgTSO *tso) -{ - if (tso == NULL) return; // hack; see bug #1235728, and comments in - // Exception.cmm - while (tso != END_TSO_QUEUE) { - tso = unblockOne(cap,tso); - } -} -#endif - -/* --------------------------------------------------------------------------- - Interrupt execution - - usually called inside a signal handler so it mustn't do anything fancy. - ------------------------------------------------------------------------ */ - -void -interruptStgRts(void) -{ - sched_state = SCHED_INTERRUPTING; - context_switch = 1; -#if defined(THREADED_RTS) - prodAllCapabilities(); -#endif -} - -/* ----------------------------------------------------------------------------- - Unblock a thread - - This is for use when we raise an exception in another thread, which - may be blocked. - This has nothing to do with the UnblockThread event in GranSim. -- HWL - -------------------------------------------------------------------------- */ - -#if defined(GRAN) || defined(PARALLEL_HASKELL) -/* - NB: only the type of the blocking queue is different in GranSim and GUM - the operations on the queue-elements are the same - long live polymorphism! - - Locks: sched_mutex is held upon entry and exit. - -*/ -static void -unblockThread(Capability *cap, StgTSO *tso) -{ - StgBlockingQueueElement *t, **last; - - switch (tso->why_blocked) { - - case NotBlocked: - return; /* not blocked */ - - case BlockedOnSTM: - // Be careful: nothing to do here! We tell the scheduler that the thread - // is runnable and we leave it to the stack-walking code to abort the - // transaction while unwinding the stack. We should perhaps have a debugging - // test to make sure that this really happens and that the 'zombie' transaction - // does not get committed. 
- goto done; - - case BlockedOnMVar: - ASSERT(get_itbl(tso->block_info.closure)->type == MVAR); - { - StgBlockingQueueElement *last_tso = END_BQ_QUEUE; - StgMVar *mvar = (StgMVar *)(tso->block_info.closure); - - last = (StgBlockingQueueElement **)&mvar->head; - for (t = (StgBlockingQueueElement *)mvar->head; - t != END_BQ_QUEUE; - last = &t->link, last_tso = t, t = t->link) { - if (t == (StgBlockingQueueElement *)tso) { - *last = (StgBlockingQueueElement *)tso->link; - if (mvar->tail == tso) { - mvar->tail = (StgTSO *)last_tso; - } - goto done; - } - } - barf("unblockThread (MVAR): TSO not found"); - } - - case BlockedOnBlackHole: - ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ); - { - StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure); - - last = &bq->blocking_queue; - for (t = bq->blocking_queue; - t != END_BQ_QUEUE; - last = &t->link, t = t->link) { - if (t == (StgBlockingQueueElement *)tso) { - *last = (StgBlockingQueueElement *)tso->link; - goto done; - } - } - barf("unblockThread (BLACKHOLE): TSO not found"); - } - - case BlockedOnException: - { - StgTSO *target = tso->block_info.tso; - - ASSERT(get_itbl(target)->type == TSO); - - if (target->what_next == ThreadRelocated) { - target = target->link; - ASSERT(get_itbl(target)->type == TSO); - } - - ASSERT(target->blocked_exceptions != NULL); - - last = (StgBlockingQueueElement **)&target->blocked_exceptions; - for (t = (StgBlockingQueueElement *)target->blocked_exceptions; - t != END_BQ_QUEUE; - last = &t->link, t = t->link) { - ASSERT(get_itbl(t)->type == TSO); - if (t == (StgBlockingQueueElement *)tso) { - *last = (StgBlockingQueueElement *)tso->link; - goto done; - } - } - barf("unblockThread (Exception): TSO not found"); - } - - case BlockedOnRead: - case BlockedOnWrite: -#if defined(mingw32_HOST_OS) - case BlockedOnDoProc: -#endif - { - /* take TSO off blocked_queue */ - StgBlockingQueueElement *prev = NULL; - for (t = (StgBlockingQueueElement *)blocked_queue_hd; t != END_BQ_QUEUE; - prev = t, t = t->link) { - if (t == (StgBlockingQueueElement *)tso) { - if (prev == NULL) { - blocked_queue_hd = (StgTSO *)t->link; - if ((StgBlockingQueueElement *)blocked_queue_tl == t) { - blocked_queue_tl = END_TSO_QUEUE; - } - } else { - prev->link = t->link; - if ((StgBlockingQueueElement *)blocked_queue_tl == t) { - blocked_queue_tl = (StgTSO *)prev; - } - } -#if defined(mingw32_HOST_OS) - /* (Cooperatively) signal that the worker thread should abort - * the request. - */ - abandonWorkRequest(tso->block_info.async_result->reqID); -#endif - goto done; - } - } - barf("unblockThread (I/O): TSO not found"); - } - - case BlockedOnDelay: - { - /* take TSO off sleeping_queue */ - StgBlockingQueueElement *prev = NULL; - for (t = (StgBlockingQueueElement *)sleeping_queue; t != END_BQ_QUEUE; - prev = t, t = t->link) { - if (t == (StgBlockingQueueElement *)tso) { - if (prev == NULL) { - sleeping_queue = (StgTSO *)t->link; - } else { - prev->link = t->link; - } - goto done; - } - } - barf("unblockThread (delay): TSO not found"); - } - - default: - barf("unblockThread"); - } - - done: - tso->link = END_TSO_QUEUE; - tso->why_blocked = NotBlocked; - tso->block_info.closure = NULL; - pushOnRunQueue(cap,tso); -} -#else -static void -unblockThread(Capability *cap, StgTSO *tso) -{ - StgTSO *t, **last; - - /* To avoid locking unnecessarily. */ - if (tso->why_blocked == NotBlocked) { - return; - } - - switch (tso->why_blocked) { - - case BlockedOnSTM: - // Be careful: nothing to do here! 
We tell the scheduler that the thread - // is runnable and we leave it to the stack-walking code to abort the - // transaction while unwinding the stack. We should perhaps have a debugging - // test to make sure that this really happens and that the 'zombie' transaction - // does not get committed. - goto done; - - case BlockedOnMVar: - ASSERT(get_itbl(tso->block_info.closure)->type == MVAR); - { - StgTSO *last_tso = END_TSO_QUEUE; - StgMVar *mvar = (StgMVar *)(tso->block_info.closure); - - last = &mvar->head; - for (t = mvar->head; t != END_TSO_QUEUE; - last = &t->link, last_tso = t, t = t->link) { - if (t == tso) { - *last = tso->link; - if (mvar->tail == tso) { - mvar->tail = last_tso; - } - goto done; - } - } - barf("unblockThread (MVAR): TSO not found"); - } - - case BlockedOnBlackHole: - { - last = &blackhole_queue; - for (t = blackhole_queue; t != END_TSO_QUEUE; - last = &t->link, t = t->link) { - if (t == tso) { - *last = tso->link; - goto done; - } - } - barf("unblockThread (BLACKHOLE): TSO not found"); - } - - case BlockedOnException: - { - StgTSO *target = tso->block_info.tso; - - ASSERT(get_itbl(target)->type == TSO); - - while (target->what_next == ThreadRelocated) { - target = target->link; - ASSERT(get_itbl(target)->type == TSO); - } - - ASSERT(target->blocked_exceptions != NULL); - - last = &target->blocked_exceptions; - for (t = target->blocked_exceptions; t != END_TSO_QUEUE; - last = &t->link, t = t->link) { - ASSERT(get_itbl(t)->type == TSO); - if (t == tso) { - *last = tso->link; - goto done; - } - } - barf("unblockThread (Exception): TSO not found"); - } - -#if !defined(THREADED_RTS) - case BlockedOnRead: - case BlockedOnWrite: -#if defined(mingw32_HOST_OS) - case BlockedOnDoProc: -#endif - { - StgTSO *prev = NULL; - for (t = blocked_queue_hd; t != END_TSO_QUEUE; - prev = t, t = t->link) { - if (t == tso) { - if (prev == NULL) { - blocked_queue_hd = t->link; - if (blocked_queue_tl == t) { - blocked_queue_tl = END_TSO_QUEUE; - } - } else { - prev->link = t->link; - if (blocked_queue_tl == t) { - blocked_queue_tl = prev; - } - } -#if defined(mingw32_HOST_OS) - /* (Cooperatively) signal that the worker thread should abort - * the request. - */ - abandonWorkRequest(tso->block_info.async_result->reqID); -#endif - goto done; - } - } - barf("unblockThread (I/O): TSO not found"); - } - - case BlockedOnDelay: - { - StgTSO *prev = NULL; - for (t = sleeping_queue; t != END_TSO_QUEUE; - prev = t, t = t->link) { - if (t == tso) { - if (prev == NULL) { - sleeping_queue = t->link; - } else { - prev->link = t->link; - } - goto done; - } - } - barf("unblockThread (delay): TSO not found"); - } -#endif - - default: - barf("unblockThread"); - } - - done: - tso->link = END_TSO_QUEUE; - tso->why_blocked = NotBlocked; - tso->block_info.closure = NULL; - appendToRunQueue(cap,tso); - - // We might have just migrated this TSO to our Capability: - if (tso->bound) { - tso->bound->cap = cap; - } - tso->cap = cap; -} -#endif - -/* ----------------------------------------------------------------------------- - * checkBlackHoles() - * - * Check the blackhole_queue for threads that can be woken up. We do - * this periodically: before every GC, and whenever the run queue is - * empty. - * - * An elegant solution might be to just wake up all the blocked - * threads with awakenBlockedQueue occasionally: they'll go back to - * sleep again if the object is still a BLACKHOLE. 
Unfortunately this - * doesn't give us a way to tell whether we've actually managed to - * wake up any threads, so we would be busy-waiting. - * - * -------------------------------------------------------------------------- */ - -static rtsBool -checkBlackHoles (Capability *cap) -{ - StgTSO **prev, *t; - rtsBool any_woke_up = rtsFalse; - StgHalfWord type; - - // blackhole_queue is global: - ASSERT_LOCK_HELD(&sched_mutex); - - IF_DEBUG(scheduler, sched_belch("checking threads blocked on black holes")); + debugTrace(DEBUG_sched, "checking threads blocked on black holes"); // ASSUMES: sched_mutex prev = &blackhole_queue; @@ -3799,263 +2909,6 @@ checkBlackHoles (Capability *cap) } /* ----------------------------------------------------------------------------- - * raiseAsync() - * - * The following function implements the magic for raising an - * asynchronous exception in an existing thread. - * - * We first remove the thread from any queue on which it might be - * blocked. The possible blockages are MVARs and BLACKHOLE_BQs. - * - * We strip the stack down to the innermost CATCH_FRAME, building - * thunks in the heap for all the active computations, so they can - * be restarted if necessary. When we reach a CATCH_FRAME, we build - * an application of the handler to the exception, and push it on - * the top of the stack. - * - * How exactly do we save all the active computations? We create an - * AP_STACK for every UpdateFrame on the stack. Entering one of these - * AP_STACKs pushes everything from the corresponding update frame - * upwards onto the stack. (Actually, it pushes everything up to the - * next update frame plus a pointer to the next AP_STACK object. - * Entering the next AP_STACK object pushes more onto the stack until we - * reach the last AP_STACK object - at which point the stack should look - * exactly as it did when we killed the TSO and we can continue - * execution by entering the closure on top of the stack. - * - * We can also kill a thread entirely - this happens if either (a) the - * exception passed to raiseAsync is NULL, or (b) there's no - * CATCH_FRAME on the stack. In either case, we strip the entire - * stack and replace the thread with a zombie. - * - * ToDo: in THREADED_RTS mode, this function is only safe if either - * (a) we hold all the Capabilities (eg. in GC, or if there is only - * one Capability), or (b) we own the Capability that the TSO is - * currently blocked on or on the run queue of. - * - * -------------------------------------------------------------------------- */ - -void -raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception) -{ - raiseAsync_(cap, tso, exception, rtsFalse, NULL); -} - -void -suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here) -{ - raiseAsync_(cap, tso, NULL, rtsFalse, stop_here); -} - -static void -raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception, - rtsBool stop_at_atomically, StgPtr stop_here) -{ - StgRetInfoTable *info; - StgPtr sp, frame; - nat i; - - // Thread already dead? - if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) { - return; - } - - IF_DEBUG(scheduler, - sched_belch("raising exception in thread %ld.", (long)tso->id)); - - // Remove it from any blocking queues - unblockThread(cap,tso); - - // mark it dirty; we're about to change its stack. - dirtyTSO(tso); - - sp = tso->sp; - - // The stack freezing code assumes there's a closure pointer on - // the top of the stack, so we have to arrange that this is the case... 
- // - if (sp[0] == (W_)&stg_enter_info) { - sp++; - } else { - sp--; - sp[0] = (W_)&stg_dummy_ret_closure; - } - - frame = sp + 1; - while (stop_here == NULL || frame < stop_here) { - - // 1. Let the top of the stack be the "current closure" - // - // 2. Walk up the stack until we find either an UPDATE_FRAME or a - // CATCH_FRAME. - // - // 3. If it's an UPDATE_FRAME, then make an AP_STACK containing the - // current closure applied to the chunk of stack up to (but not - // including) the update frame. This closure becomes the "current - // closure". Go back to step 2. - // - // 4. If it's a CATCH_FRAME, then leave the exception handler on - // top of the stack applied to the exception. - // - // 5. If it's a STOP_FRAME, then kill the thread. - // - // NB: if we pass an ATOMICALLY_FRAME then abort the associated - // transaction - - info = get_ret_itbl((StgClosure *)frame); - - switch (info->i.type) { - - case UPDATE_FRAME: - { - StgAP_STACK * ap; - nat words; - - // First build an AP_STACK consisting of the stack chunk above the - // current update frame, with the top word on the stack as the - // fun field. - // - words = frame - sp - 1; - ap = (StgAP_STACK *)allocateLocal(cap,AP_STACK_sizeW(words)); - - ap->size = words; - ap->fun = (StgClosure *)sp[0]; - sp++; - for(i=0; i < (nat)words; ++i) { - ap->payload[i] = (StgClosure *)*sp++; - } - - SET_HDR(ap,&stg_AP_STACK_info, - ((StgClosure *)frame)->header.prof.ccs /* ToDo */); - TICK_ALLOC_UP_THK(words+1,0); - - IF_DEBUG(scheduler, - debugBelch("sched: Updating "); - printPtr((P_)((StgUpdateFrame *)frame)->updatee); - debugBelch(" with "); - printObj((StgClosure *)ap); - ); - - // Replace the updatee with an indirection - // - // Warning: if we're in a loop, more than one update frame on - // the stack may point to the same object. Be careful not to - // overwrite an IND_OLDGEN in this case, because we'll screw - // up the mutable lists. To be on the safe side, don't - // overwrite any kind of indirection at all. See also - // threadSqueezeStack in GC.c, where we have to make a similar - // check. - // - if (!closure_IND(((StgUpdateFrame *)frame)->updatee)) { - // revert the black hole - UPD_IND_NOLOCK(((StgUpdateFrame *)frame)->updatee, - (StgClosure *)ap); - } - sp += sizeofW(StgUpdateFrame) - 1; - sp[0] = (W_)ap; // push onto stack - frame = sp + 1; - continue; //no need to bump frame - } - - case STOP_FRAME: - // We've stripped the entire stack, the thread is now dead. - tso->what_next = ThreadKilled; - tso->sp = frame + sizeofW(StgStopFrame); - return; - - case CATCH_FRAME: - // If we find a CATCH_FRAME, and we've got an exception to raise, - // then build the THUNK raise(exception), and leave it on - // top of the CATCH_FRAME ready to enter. - // - { -#ifdef PROFILING - StgCatchFrame *cf = (StgCatchFrame *)frame; -#endif - StgThunk *raise; - - if (exception == NULL) break; - - // we've got an exception to raise, so let's pass it to the - // handler in this frame. - // - raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1); - TICK_ALLOC_SE_THK(1,0); - SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs); - raise->payload[0] = exception; - - // throw away the stack from Sp up to the CATCH_FRAME. - // - sp = frame - 1; - - /* Ensure that async excpetions are blocked now, so we don't get - * a surprise exception before we get around to executing the - * handler. 
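	     *
	     * (In this encoding, blocked_exceptions == NULL means "async
	     * exceptions unblocked" and END_TSO_QUEUE means "blocked with
	     * nothing pending" -- hence the NULL test below.  A throwTo()
	     * from another thread that arrives before the handler starts
	     * running is then queued on blocked_exceptions instead of
	     * being delivered.)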
- */ - if (tso->blocked_exceptions == NULL) { - tso->blocked_exceptions = END_TSO_QUEUE; - } - - /* Put the newly-built THUNK on top of the stack, ready to execute - * when the thread restarts. - */ - sp[0] = (W_)raise; - sp[-1] = (W_)&stg_enter_info; - tso->sp = sp-1; - tso->what_next = ThreadRunGHC; - IF_DEBUG(sanity, checkTSO(tso)); - return; - } - - case ATOMICALLY_FRAME: - if (stop_at_atomically) { - ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC); - stmCondemnTransaction(cap, tso -> trec); -#ifdef REG_R1 - tso->sp = frame; -#else - // R1 is not a register: the return convention for IO in - // this case puts the return value on the stack, so we - // need to set up the stack to return to the atomically - // frame properly... - tso->sp = frame - 2; - tso->sp[1] = (StgWord) &stg_NO_FINALIZER_closure; // why not? - tso->sp[0] = (StgWord) &stg_ut_1_0_unreg_info; -#endif - tso->what_next = ThreadRunGHC; - return; - } - // Not stop_at_atomically... fall through and abort the - // transaction. - - case CATCH_RETRY_FRAME: - // IF we find an ATOMICALLY_FRAME then we abort the - // current transaction and propagate the exception. In - // this case (unlike ordinary exceptions) we do not care - // whether the transaction is valid or not because its - // possible validity cannot have caused the exception - // and will not be visible after the abort. - IF_DEBUG(stm, - debugBelch("Found atomically block delivering async exception\n")); - StgTRecHeader *trec = tso -> trec; - StgTRecHeader *outer = stmGetEnclosingTRec(trec); - stmAbortTransaction(cap, trec); - tso -> trec = outer; - break; - - default: - break; - } - - // move on to the next stack frame - frame += stack_frame_sizeW((StgClosure *)frame); - } - - // if we got here, then we stopped at stop_here - ASSERT(stop_here != NULL); -} - -/* ----------------------------------------------------------------------------- Deleting threads This is used for interruption (^C) and forking, and corresponds to @@ -4066,10 +2919,15 @@ raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception, static void deleteThread (Capability *cap, StgTSO *tso) { - if (tso->why_blocked != BlockedOnCCall && - tso->why_blocked != BlockedOnCCall_NoUnblockExc) { - raiseAsync(cap,tso,NULL); - } + // NOTE: must only be called on a TSO that we have exclusive + // access to, because we will call throwToSingleThreaded() below. + // The TSO must be on the run queue of the Capability we own, or + // we must own all Capabilities. 
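+
+  // A sketch of the intended call pattern (cf. deleteAllThreads,
+  // declared above, which is expected to run at shutdown/fork time
+  // when this task holds every Capability; loop shape approximated
+  // from that caller, not copied from it):
+  //
+  //     for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+  //         next = t->global_link;
+  //         deleteThread(cap, t);
+  //     }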
+
+  if (tso->why_blocked != BlockedOnCCall &&
+      tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
+      throwToSingleThreaded(cap,tso,NULL);
+  }
 }

 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
@@ -4146,7 +3004,7 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
	    continue;

	case ATOMICALLY_FRAME:
-	    IF_DEBUG(stm, debugBelch("Found ATOMICALLY_FRAME at %p\n", p));
+	    debugTrace(DEBUG_stm, "found ATOMICALLY_FRAME at %p", p);
	    tso->sp = p;
	    return ATOMICALLY_FRAME;

@@ -4155,7 +3013,7 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
	    return CATCH_FRAME;

	case CATCH_STM_FRAME:
-	    IF_DEBUG(stm, debugBelch("Found CATCH_STM_FRAME at %p\n", p));
+	    debugTrace(DEBUG_stm, "found CATCH_STM_FRAME at %p", p);
	    tso->sp = p;
	    return CATCH_STM_FRAME;

@@ -4201,14 +3059,16 @@ findRetryFrameHelper (StgTSO *tso)
      switch (info->i.type) {

      case ATOMICALLY_FRAME:
-	  IF_DEBUG(stm, debugBelch("Found ATOMICALLY_FRAME at %p during retrry\n", p));
-	  tso->sp = p;
-	  return ATOMICALLY_FRAME;
+	  debugTrace(DEBUG_stm,
+		     "found ATOMICALLY_FRAME at %p during retry", p);
+	  tso->sp = p;
+	  return ATOMICALLY_FRAME;

      case CATCH_RETRY_FRAME:
-	  IF_DEBUG(stm, debugBelch("Found CATCH_RETRY_FRAME at %p during retrry\n", p));
-	  tso->sp = p;
-	  return CATCH_RETRY_FRAME;
+	  debugTrace(DEBUG_stm,
+		     "found CATCH_RETRY_FRAME at %p during retry", p);
+	  tso->sp = p;
+	  return CATCH_RETRY_FRAME;

      case CATCH_STM_FRAME:
      default:
@@ -4240,7 +3100,7 @@ resurrectThreads (StgTSO *threads)
	next = tso->global_link;
	tso->global_link = all_threads;
	all_threads = tso;
-	IF_DEBUG(scheduler, sched_belch("resurrecting thread %d", tso->id));
+	debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);

	// Wake up the thread on the Capability it was last on
	cap = tso->cap;

@@ -4249,13 +3109,16 @@ resurrectThreads (StgTSO *threads)
	case BlockedOnMVar:
	case BlockedOnException:
	    /* Called by GC - sched_mutex lock is currently held. */
-	    raiseAsync(cap, tso,(StgClosure *)BlockedOnDeadMVar_closure);
+	    throwToSingleThreaded(cap, tso,
+				  (StgClosure *)BlockedOnDeadMVar_closure);
	    break;
	case BlockedOnBlackHole:
-	    raiseAsync(cap, tso,(StgClosure *)NonTermination_closure);
+	    throwToSingleThreaded(cap, tso,
+				  (StgClosure *)NonTermination_closure);
	    break;
	case BlockedOnSTM:
-	    raiseAsync(cap, tso,(StgClosure *)BlockedIndefinitely_closure);
+	    throwToSingleThreaded(cap, tso,
+				  (StgClosure *)BlockedIndefinitely_closure);
	    break;
	case NotBlocked:
	    /* This might happen if the thread was blocked on a black hole
@@ -4268,315 +3131,3 @@ resurrectThreads (StgTSO *threads)
	}
    }
}
-
-/* ----------------------------------------------------------------------------
- * Debugging: why is a thread blocked
- * [Also provides useful information when debugging threaded programs
- * at the Haskell source code level, so enable outside of DEBUG. 
--sof 7/02] - ------------------------------------------------------------------------- */ - -#if DEBUG -static void -printThreadBlockage(StgTSO *tso) -{ - switch (tso->why_blocked) { - case BlockedOnRead: - debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd)); - break; - case BlockedOnWrite: - debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd)); - break; -#if defined(mingw32_HOST_OS) - case BlockedOnDoProc: - debugBelch("is blocked on proc (request: %ld)", tso->block_info.async_result->reqID); - break; -#endif - case BlockedOnDelay: - debugBelch("is blocked until %ld", (long)(tso->block_info.target)); - break; - case BlockedOnMVar: - debugBelch("is blocked on an MVar @ %p", tso->block_info.closure); - break; - case BlockedOnException: - debugBelch("is blocked on delivering an exception to thread %d", - tso->block_info.tso->id); - break; - case BlockedOnBlackHole: - debugBelch("is blocked on a black hole"); - break; - case NotBlocked: - debugBelch("is not blocked"); - break; -#if defined(PARALLEL_HASKELL) - case BlockedOnGA: - debugBelch("is blocked on global address; local FM_BQ is %p (%s)", - tso->block_info.closure, info_type(tso->block_info.closure)); - break; - case BlockedOnGA_NoSend: - debugBelch("is blocked on global address (no send); local FM_BQ is %p (%s)", - tso->block_info.closure, info_type(tso->block_info.closure)); - break; -#endif - case BlockedOnCCall: - debugBelch("is blocked on an external call"); - break; - case BlockedOnCCall_NoUnblockExc: - debugBelch("is blocked on an external call (exceptions were already blocked)"); - break; - case BlockedOnSTM: - debugBelch("is blocked on an STM operation"); - break; - default: - barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%d)", - tso->why_blocked, tso->id, tso); - } -} - -void -printThreadStatus(StgTSO *t) -{ - debugBelch("\tthread %4d @ %p ", t->id, (void *)t); - { - void *label = lookupThreadLabel(t->id); - if (label) debugBelch("[\"%s\"] ",(char *)label); - } - if (t->what_next == ThreadRelocated) { - debugBelch("has been relocated...\n"); - } else { - switch (t->what_next) { - case ThreadKilled: - debugBelch("has been killed"); - break; - case ThreadComplete: - debugBelch("has completed"); - break; - default: - printThreadBlockage(t); - } - debugBelch("\n"); - } -} - -void -printAllThreads(void) -{ - StgTSO *t, *next; - nat i; - Capability *cap; - -# if defined(GRAN) - char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN]; - ullong_format_string(TIME_ON_PROC(CurrentProc), - time_string, rtsFalse/*no commas!*/); - - debugBelch("all threads at [%s]:\n", time_string); -# elif defined(PARALLEL_HASKELL) - char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN]; - ullong_format_string(CURRENT_TIME, - time_string, rtsFalse/*no commas!*/); - - debugBelch("all threads at [%s]:\n", time_string); -# else - debugBelch("all threads:\n"); -# endif - - for (i = 0; i < n_capabilities; i++) { - cap = &capabilities[i]; - debugBelch("threads on capability %d:\n", cap->no); - for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->link) { - printThreadStatus(t); - } - } - - debugBelch("other threads:\n"); - for (t = all_threads; t != END_TSO_QUEUE; t = next) { - if (t->why_blocked != NotBlocked) { - printThreadStatus(t); - } - if (t->what_next == ThreadRelocated) { - next = t->link; - } else { - next = t->global_link; - } - } -} - -// useful from gdb -void -printThreadQueue(StgTSO *t) -{ - nat i = 0; - for (; t != END_TSO_QUEUE; t = t->link) { - printThreadStatus(t); - i++; - } - 
debugBelch("%d threads on queue\n", i); -} - -/* - Print a whole blocking queue attached to node (debugging only). -*/ -# if defined(PARALLEL_HASKELL) -void -print_bq (StgClosure *node) -{ - StgBlockingQueueElement *bqe; - StgTSO *tso; - rtsBool end; - - debugBelch("## BQ of closure %p (%s): ", - node, info_type(node)); - - /* should cover all closures that may have a blocking queue */ - ASSERT(get_itbl(node)->type == BLACKHOLE_BQ || - get_itbl(node)->type == FETCH_ME_BQ || - get_itbl(node)->type == RBH || - get_itbl(node)->type == MVAR); - - ASSERT(node!=(StgClosure*)NULL); // sanity check - - print_bqe(((StgBlockingQueue*)node)->blocking_queue); -} - -/* - Print a whole blocking queue starting with the element bqe. -*/ -void -print_bqe (StgBlockingQueueElement *bqe) -{ - rtsBool end; - - /* - NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure; - */ - for (end = (bqe==END_BQ_QUEUE); - !end; // iterate until bqe points to a CONSTR - end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE), - bqe = end ? END_BQ_QUEUE : bqe->link) { - ASSERT(bqe != END_BQ_QUEUE); // sanity check - ASSERT(bqe != (StgBlockingQueueElement *)NULL); // sanity check - /* types of closures that may appear in a blocking queue */ - ASSERT(get_itbl(bqe)->type == TSO || - get_itbl(bqe)->type == BLOCKED_FETCH || - get_itbl(bqe)->type == CONSTR); - /* only BQs of an RBH end with an RBH_Save closure */ - //ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH); - - switch (get_itbl(bqe)->type) { - case TSO: - debugBelch(" TSO %u (%x),", - ((StgTSO *)bqe)->id, ((StgTSO *)bqe)); - break; - case BLOCKED_FETCH: - debugBelch(" BF (node=%p, ga=((%x, %d, %x)),", - ((StgBlockedFetch *)bqe)->node, - ((StgBlockedFetch *)bqe)->ga.payload.gc.gtid, - ((StgBlockedFetch *)bqe)->ga.payload.gc.slot, - ((StgBlockedFetch *)bqe)->ga.weight); - break; - case CONSTR: - debugBelch(" %s (IP %p),", - (get_itbl(bqe) == &stg_RBH_Save_0_info ? "RBH_Save_0" : - get_itbl(bqe) == &stg_RBH_Save_1_info ? "RBH_Save_1" : - get_itbl(bqe) == &stg_RBH_Save_2_info ? "RBH_Save_2" : - "RBH_Save_?"), get_itbl(bqe)); - break; - default: - barf("Unexpected closure type %s in blocking queue", // of %p (%s)", - info_type((StgClosure *)bqe)); // , node, info_type(node)); - break; - } - } /* for */ - debugBelch("\n"); -} -# elif defined(GRAN) -void -print_bq (StgClosure *node) -{ - StgBlockingQueueElement *bqe; - PEs node_loc, tso_loc; - rtsBool end; - - /* should cover all closures that may have a blocking queue */ - ASSERT(get_itbl(node)->type == BLACKHOLE_BQ || - get_itbl(node)->type == FETCH_ME_BQ || - get_itbl(node)->type == RBH); - - ASSERT(node!=(StgClosure*)NULL); // sanity check - node_loc = where_is(node); - - debugBelch("## BQ of closure %p (%s) on [PE %d]: ", - node, info_type(node), node_loc); - - /* - NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure; - */ - for (bqe = ((StgBlockingQueue*)node)->blocking_queue, end = (bqe==END_BQ_QUEUE); - !end; // iterate until bqe points to a CONSTR - end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE), bqe = end ? 
END_BQ_QUEUE : bqe->link) { - ASSERT(bqe != END_BQ_QUEUE); // sanity check - ASSERT(bqe != (StgBlockingQueueElement *)NULL); // sanity check - /* types of closures that may appear in a blocking queue */ - ASSERT(get_itbl(bqe)->type == TSO || - get_itbl(bqe)->type == CONSTR); - /* only BQs of an RBH end with an RBH_Save closure */ - ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH); - - tso_loc = where_is((StgClosure *)bqe); - switch (get_itbl(bqe)->type) { - case TSO: - debugBelch(" TSO %d (%p) on [PE %d],", - ((StgTSO *)bqe)->id, (StgTSO *)bqe, tso_loc); - break; - case CONSTR: - debugBelch(" %s (IP %p),", - (get_itbl(bqe) == &stg_RBH_Save_0_info ? "RBH_Save_0" : - get_itbl(bqe) == &stg_RBH_Save_1_info ? "RBH_Save_1" : - get_itbl(bqe) == &stg_RBH_Save_2_info ? "RBH_Save_2" : - "RBH_Save_?"), get_itbl(bqe)); - break; - default: - barf("Unexpected closure type %s in blocking queue of %p (%s)", - info_type((StgClosure *)bqe), node, info_type(node)); - break; - } - } /* for */ - debugBelch("\n"); -} -# endif - -#if defined(PARALLEL_HASKELL) -static nat -run_queue_len(void) -{ - nat i; - StgTSO *tso; - - for (i=0, tso=run_queue_hd; - tso != END_TSO_QUEUE; - i++, tso=tso->link) { - /* nothing */ - } - - return i; -} -#endif - -void -sched_belch(char *s, ...) -{ - va_list ap; - va_start(ap,s); -#ifdef THREADED_RTS - debugBelch("sched (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId()); -#elif defined(PARALLEL_HASKELL) - debugBelch("== "); -#else - debugBelch("sched: "); -#endif - vdebugBelch(s, ap); - debugBelch("\n"); - va_end(ap); -} - -#endif /* DEBUG */
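
/* -----------------------------------------------------------------------------
 * For reference: the debugTrace idiom that replaces sched_belch throughout
 * this patch.  A minimal sketch, assuming only Trace.h (added to the
 * includes by this commit); the wrapper function and its arguments are
 * hypothetical, not part of the patch.
 * -------------------------------------------------------------------------- */

#include "Trace.h"

static void
example_trace (Task *task, Capability *cap)
{
    // Compiled to nothing unless the RTS is built with -DDEBUG; at
    // runtime the DEBUG_sched class is switched on with +RTS -Ds.
    // Unlike sched_belch(), the debug class is explicit and the
    // trailing newline is supplied by the tracing framework.
    debugTrace(DEBUG_sched, "task %p acquired capability %d",
               task, cap->no);
}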