X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSchedule.c;h=bf39c0ac1436d803ba099733629b2b1e09edc621;hb=13b1fa907fd5d700167cc4da26668fb356d5ecfc;hp=72f6d44a8c8edcce961e5231975851bfa6fef34c;hpb=5d52d9b64c21dcf77849866584744722f8121389;p=ghc-hetmet.git diff --git a/rts/Schedule.c b/rts/Schedule.c index 72f6d44..bf39c0a 100644 --- a/rts/Schedule.c +++ b/rts/Schedule.c @@ -56,6 +56,9 @@ #include #endif +#ifdef TRACING +#include "eventlog/EventLog.h" +#endif /* ----------------------------------------------------------------------------- * Global variables * -------------------------------------------------------------------------- */ @@ -125,7 +128,7 @@ static Capability *schedule (Capability *initialCapability, Task *task); static void schedulePreLoop (void); static void scheduleFindWork (Capability *cap); #if defined(THREADED_RTS) -static void scheduleYield (Capability **pcap, Task *task, rtsBool); +static void scheduleYield (Capability **pcap, Task *task); #endif static void scheduleStartSignalHandlers (Capability *cap); static void scheduleCheckBlockedThreads (Capability *cap); @@ -158,17 +161,6 @@ static void deleteAllThreads (Capability *cap); static void deleteThread_(Capability *cap, StgTSO *tso); #endif -/* ----------------------------------------------------------------------------- - * Putting a thread on the run queue: different scheduling policies - * -------------------------------------------------------------------------- */ - -STATIC_INLINE void -addToRunQueue( Capability *cap, StgTSO *t ) -{ - // this does round-robin scheduling; good for concurrency - appendToRunQueue(cap,t); -} - /* --------------------------------------------------------------------------- Main scheduling loop. @@ -215,7 +207,6 @@ schedule (Capability *initialCapability, Task *task) rtsBool ready_to_gc; #if defined(THREADED_RTS) rtsBool first = rtsTrue; - rtsBool force_yield = rtsFalse; #endif cap = initialCapability; @@ -339,9 +330,7 @@ schedule (Capability *initialCapability, Task *task) // ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); } - yield: - scheduleYield(&cap,task,force_yield); - force_yield = rtsFalse; + scheduleYield(&cap,task); if (emptyRunQueue(cap)) continue; // look for work again #endif @@ -501,19 +490,6 @@ run_thread: traceEventStopThread(cap, t, ret); -#if defined(THREADED_RTS) - // If ret is ThreadBlocked, and this Task is bound to the TSO that - // blocked, we are in limbo - the TSO is now owned by whatever it - // is blocked on, and may in fact already have been woken up, - // perhaps even on a different Capability. It may be the case - // that task->cap != cap. We better yield this Capability - // immediately and return to normaility. 
- if (ret == ThreadBlocked) { - force_yield = rtsTrue; - goto yield; - } -#endif - ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); ASSERT(t->cap == cap); @@ -568,6 +544,30 @@ run_thread: } /* end of while() */ } +/* ----------------------------------------------------------------------------- + * Run queue operations + * -------------------------------------------------------------------------- */ + +void +removeFromRunQueue (Capability *cap, StgTSO *tso) +{ + if (tso->block_info.prev == END_TSO_QUEUE) { + ASSERT(cap->run_queue_hd == tso); + cap->run_queue_hd = tso->_link; + } else { + setTSOLink(cap, tso->block_info.prev, tso->_link); + } + if (tso->_link == END_TSO_QUEUE) { + ASSERT(cap->run_queue_tl == tso); + cap->run_queue_tl = tso->block_info.prev; + } else { + setTSOPrev(cap, tso->_link, tso->block_info.prev); + } + tso->_link = tso->block_info.prev = END_TSO_QUEUE; + + IF_DEBUG(sanity, checkRunQueue(cap)); +} + /* ---------------------------------------------------------------------------- * Setting up the scheduler loop * ------------------------------------------------------------------------- */ @@ -626,23 +626,13 @@ shouldYieldCapability (Capability *cap, Task *task) // and also check the benchmarks in nofib/parallel for regressions. static void -scheduleYield (Capability **pcap, Task *task, rtsBool force_yield) +scheduleYield (Capability **pcap, Task *task) { Capability *cap = *pcap; // if we have work, and we don't need to give up the Capability, continue. // - // The force_yield flag is used when a bound thread blocks. This - // is a particularly tricky situation: the current Task does not - // own the TSO any more, since it is on some queue somewhere, and - // might be woken up or manipulated by another thread at any time. - // The TSO and Task might be migrated to another Capability. - // Certain invariants might be in doubt, such as task->bound->cap - // == cap. We have to yield the current Capability immediately, - // no messing around. - // - if (!force_yield && - !shouldYieldCapability(cap,task) && + if (!shouldYieldCapability(cap,task) && (!emptyRunQueue(cap) || !emptyInbox(cap) || sched_state >= SCHED_INTERRUPTING)) @@ -743,12 +733,14 @@ schedulePushWork(Capability *cap USED_IF_THREADS, || t->bound == task->incall // don't move my bound thread || tsoLocked(t)) { // don't move a locked thread setTSOLink(cap, prev, t); + setTSOPrev(cap, t, prev); prev = t; } else if (i == n_free_caps) { pushed_to_all = rtsTrue; i = 0; // keep one for us setTSOLink(cap, prev, t); + setTSOPrev(cap, t, prev); prev = t; } else { appendToRunQueue(free_caps[i],t); @@ -761,6 +753,8 @@ schedulePushWork(Capability *cap USED_IF_THREADS, } } cap->run_queue_tl = prev; + + IF_DEBUG(sanity, checkRunQueue(cap)); } #ifdef SPARK_PUSHING @@ -1031,6 +1025,10 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t ) blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE; + if (blocks > BLOCKS_PER_MBLOCK) { + barf("allocation of %ld bytes too large (GHC should have complained at compile-time)", (long)cap->r.rHpAlloc); + } + debugTrace(DEBUG_sched, "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n", (long)t->id, what_next_strs[t->what_next], blocks); @@ -1040,10 +1038,8 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t ) cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop // if the nursery has only one block. 
- ACQUIRE_SM_LOCK - bd = allocGroup( blocks ); - RELEASE_SM_LOCK - cap->r.rNursery->n_blocks += blocks; + bd = allocGroup_lock(blocks); + cap->r.rNursery->n_blocks += blocks; // link the new group into the list bd->link = cap->r.rCurrentNursery; @@ -1093,7 +1089,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t ) // context switch flag, and we end up waiting for a GC. // See #1984, and concurrent/should_run/1984 cap->context_switch = 0; - addToRunQueue(cap,t); + appendToRunQueue(cap,t); } else { pushOnRunQueue(cap,t); } @@ -1162,7 +1158,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next ) //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id); checkTSO(t)); - addToRunQueue(cap,t); + appendToRunQueue(cap,t); return rtsFalse; } @@ -1244,23 +1240,23 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t) ASSERT(task->incall->tso == t); if (t->what_next == ThreadComplete) { - if (task->ret) { + if (task->incall->ret) { // NOTE: return val is tso->sp[1] (see StgStartup.hc) - *(task->ret) = (StgClosure *)task->incall->tso->sp[1]; + *(task->incall->ret) = (StgClosure *)task->incall->tso->sp[1]; } - task->stat = Success; + task->incall->stat = Success; } else { - if (task->ret) { - *(task->ret) = NULL; + if (task->incall->ret) { + *(task->incall->ret) = NULL; } if (sched_state >= SCHED_INTERRUPTING) { if (heap_overflow) { - task->stat = HeapExhausted; + task->incall->stat = HeapExhausted; } else { - task->stat = Interrupted; + task->incall->stat = Interrupted; } } else { - task->stat = Killed; + task->incall->stat = Killed; } } #ifdef DEBUG @@ -1542,10 +1538,18 @@ forkProcess(HsStablePtr *entry ACQUIRE_LOCK(&cap->lock); ACQUIRE_LOCK(&cap->running_task->lock); + stopTimer(); // See #4074 + +#if defined(TRACING) + flushEventLog(); // so that child won't inherit dirty file buffers +#endif + pid = fork(); if (pid) { // parent + startTimer(); // #4074 + RELEASE_LOCK(&sched_mutex); RELEASE_LOCK(&cap->lock); RELEASE_LOCK(&cap->running_task->lock); @@ -1562,6 +1566,10 @@ forkProcess(HsStablePtr *entry initMutex(&cap->running_task->lock); #endif +#if defined(TRACING) + abortEventLogging(); // abort eventlog inherited from parent + initEventLogging(); // child starts its own eventlog +#endif // Now, all OS threads except the thread that forked are // stopped. We need to stop all Haskell threads, including // those involved in foreign calls. Also we need to delete @@ -1612,7 +1620,8 @@ forkProcess(HsStablePtr *entry // Wipe our spare workers list, they no longer exist. New // workers will be created if necessary. cap->spare_workers = NULL; - cap->returning_tasks_hd = NULL; + cap->n_spare_workers = 0; + cap->returning_tasks_hd = NULL; cap->returning_tasks_tl = NULL; #endif @@ -1721,13 +1730,17 @@ recoverSuspendedTask (Capability *cap, Task *task) * the whole system. * * The Haskell thread making the C call is put to sleep for the - * duration of the call, on the susepended_ccalling_threads queue. We + * duration of the call, on the suspended_ccalling_threads queue. We * give out a token to the task, which it can use to resume the thread * on return from the C function. + * + * If this is an interruptible C call, this means that the FFI call may be + * unceremoniously terminated and should be scheduled on an + * unbound worker thread. 
* ------------------------------------------------------------------------- */ void * -suspendThread (StgRegTable *reg) +suspendThread (StgRegTable *reg, rtsBool interruptible) { Capability *cap; int saved_errno; @@ -1756,12 +1769,10 @@ suspendThread (StgRegTable *reg) threadPaused(cap,tso); - if ((tso->flags & TSO_BLOCKEX) == 0) { - tso->why_blocked = BlockedOnCCall; - tso->flags |= TSO_BLOCKEX; - tso->flags &= ~TSO_INTERRUPTIBLE; + if (interruptible) { + tso->why_blocked = BlockedOnCCall_Interruptible; } else { - tso->why_blocked = BlockedOnCCall_NoUnblockExc; + tso->why_blocked = BlockedOnCCall; } // Hand back capability @@ -1820,17 +1831,16 @@ resumeThread (void *task_) traceEventRunThread(cap, tso); - if (tso->why_blocked == BlockedOnCCall) { + /* Reset blocking status */ + tso->why_blocked = NotBlocked; + + if ((tso->flags & TSO_BLOCKEX) == 0) { // avoid locking the TSO if we don't have to if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) { - awakenBlockedExceptionQueue(cap,tso); + maybePerformBlockedException(cap,tso); } - tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE); } - /* Reset blocking status */ - tso->why_blocked = NotBlocked; - cap->r.rCurrentTSO = tso; cap->in_haskell = rtsTrue; errno = saved_errno; @@ -1874,8 +1884,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso) if (cpu == cap->no) { appendToRunQueue(cap,tso); } else { - traceEventMigrateThread (cap, tso, capabilities[cpu].no); - wakeupThreadOnCapability(cap, &capabilities[cpu], tso); + migrateThread(cap, tso, &capabilities[cpu]); } #else appendToRunQueue(cap,tso); @@ -1897,8 +1906,8 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap) tso->cap = cap; task->incall->tso = tso; - task->ret = ret; - task->stat = NoStatus; + task->incall->ret = ret; + task->incall->stat = NoStatus; appendToRunQueue(cap,tso); @@ -1907,7 +1916,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap) cap = schedule(cap,task); - ASSERT(task->stat != NoStatus); + ASSERT(task->incall->stat != NoStatus); ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task); debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)id); @@ -2007,12 +2016,7 @@ initScheduler(void) } void -exitScheduler( - rtsBool wait_foreign -#if !defined(THREADED_RTS) - __attribute__((unused)) -#endif -) +exitScheduler (rtsBool wait_foreign USED_IF_THREADS) /* see Capability.c, shutdownCapability() */ { Task *task = NULL; @@ -2342,7 +2346,7 @@ deleteThread (Capability *cap STG_UNUSED, StgTSO *tso) // we must own all Capabilities. if (tso->why_blocked != BlockedOnCCall && - tso->why_blocked != BlockedOnCCall_NoUnblockExc) { + tso->why_blocked != BlockedOnCCall_Interruptible) { throwToSingleThreaded(tso->cap,tso,NULL); } } @@ -2354,9 +2358,9 @@ deleteThread_(Capability *cap, StgTSO *tso) // like deleteThread(), but we delete threads in foreign calls, too. if (tso->why_blocked == BlockedOnCCall || - tso->why_blocked == BlockedOnCCall_NoUnblockExc) { - unblockOne(cap,tso); + tso->why_blocked == BlockedOnCCall_Interruptible) { tso->what_next = ThreadKilled; + appendToRunQueue(tso->cap, tso); } else { deleteThread(cap,tso); } @@ -2561,6 +2565,12 @@ resurrectThreads (StgTSO *threads) * can wake up threads, remember...). */ continue; + case BlockedOnMsgThrowTo: + // This can happen if the target is masking, blocks on a + // black hole, and then is found to be unreachable. In + // this case, we want to let the target wake up and carry + // on, and do nothing to this thread. 
+ continue; default: barf("resurrectThreads: thread blocked in a strange way: %d", tso->why_blocked);
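
The most invasive change in the diff above is that the per-capability run queue becomes doubly linked: schedulePushWork now maintains a back pointer via setTSOPrev everywhere it relinks threads, and the new removeFromRunQueue uses tso->block_info.prev to unlink a thread in constant time whether it sits at the head, the tail, or the middle of the queue. The standalone sketch below models just that unlink logic; the types and names here (node_t, run_queue_t, END, append, remove_from_queue) are illustrative stand-ins, not GHC's StgTSO or Capability structures.

/* Standalone model of the O(1) unlink performed by removeFromRunQueue.
 * Not RTS code: plain structs stand in for StgTSO and the capability's
 * run queue. */
#include <assert.h>
#include <stdio.h>

typedef struct node {
    int          id;
    struct node *link;   /* next thread  (tso->_link in the diff)          */
    struct node *prev;   /* previous one (tso->block_info.prev in the diff) */
} node_t;

#define END ((node_t *)0)    /* stands in for END_TSO_QUEUE */

typedef struct {
    node_t *hd;              /* run_queue_hd */
    node_t *tl;              /* run_queue_tl */
} run_queue_t;

/* Append at the tail, keeping both link and prev consistent, mirroring
 * the setTSOLink/setTSOPrev pairs the diff adds to schedulePushWork. */
static void append(run_queue_t *q, node_t *t)
{
    t->link = END;
    t->prev = q->tl;
    if (q->tl == END) { q->hd = t; } else { q->tl->link = t; }
    q->tl = t;
}

/* Mirror of removeFromRunQueue: unlink t in O(1) using its prev pointer,
 * fixing up hd/tl when t is at either end of the queue. */
static void remove_from_queue(run_queue_t *q, node_t *t)
{
    if (t->prev == END) {
        assert(q->hd == t);
        q->hd = t->link;
    } else {
        t->prev->link = t->link;
    }
    if (t->link == END) {
        assert(q->tl == t);
        q->tl = t->prev;
    } else {
        t->link->prev = t->prev;
    }
    t->link = t->prev = END;
}

int main(void)
{
    run_queue_t q = { END, END };
    node_t a = {1}, b = {2}, c = {3};
    append(&q, &a); append(&q, &b); append(&q, &c);

    remove_from_queue(&q, &b);          /* unlink from the middle */
    for (node_t *t = q.hd; t != END; t = t->link)
        printf("%d ", t->id);           /* prints: 1 3 */
    printf("\n");
    return 0;
}

Storing the back pointer in block_info.prev presumably works because block_info is otherwise unused while a thread is on the run queue; it also explains why the diff has to add a setTSOPrev next to every setTSOLink that rebuilds the queue in schedulePushWork.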
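forkProcess in the diff also gains a flushEventLog() call before fork() "so that child won't inherit dirty file buffers", after which the child aborts the inherited event log and starts a fresh one. The sketch below illustrates the same flush-before-fork rule using plain stdio in place of the RTS event log; it is an analogy under that assumption, not RTS code, and the file name is made up.

/* Minimal illustration of the flush-before-fork rule that motivates the
 * flushEventLog() call in forkProcess.  Plain stdio stands in for GHC's
 * event log. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    FILE *log = fopen("demo.log", "w");   /* hypothetical log file */
    if (log == NULL) return 1;

    fprintf(log, "buffered line written before fork\n");

    /* Without this fflush, the child would inherit a copy of the dirty
     * stdio buffer and the line above would reach the file twice, once
     * from each process, when their buffers are flushed at exit.  The
     * RTS flushes its event log before fork() for the same reason. */
    fflush(log);

    pid_t pid = fork();
    if (pid == 0) {
        /* child: in the RTS this is where the inherited event log is
         * abandoned (abortEventLogging) and a new one is started. */
        fprintf(log, "child line\n");
        fclose(log);
        _exit(0);
    }

    /* parent */
    fprintf(log, "parent line\n");
    waitpid(pid, NULL, 0);
    fclose(log);
    return 0;
}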