X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FSchedule.c;h=01c97fce55b1e11a6d68b53d53e7174f0f5b392d;hb=18dbd4340f1909eceeb320adba8dfa88ea7d407d;hp=fef2795241948a3644680ef5daa8cdea7b649456;hpb=efa41d9d5eada7aa3230a2bd03b97a8b7025ef2e;p=ghc-hetmet.git

diff --git a/ghc/rts/Schedule.c b/ghc/rts/Schedule.c
index fef2795..01c97fc 100644
--- a/ghc/rts/Schedule.c
+++ b/ghc/rts/Schedule.c
@@ -1,5 +1,5 @@
 /* ---------------------------------------------------------------------------
- * $Id: Schedule.c,v 1.123 2002/02/14 07:52:05 sof Exp $
+ * $Id: Schedule.c,v 1.130 2002/02/16 00:30:05 sof Exp $
  *
  * (c) The GHC Team, 1998-2000
  *
@@ -266,38 +266,6 @@ static void sched_belch(char *s, ...);
 Mutex sched_mutex = INIT_MUTEX_VAR;
 Mutex term_mutex = INIT_MUTEX_VAR;
-
-
-
-/* thread_ready_cond: when signalled, a thread has become runnable for a
- * task to execute.
- *
- * In the non-SMP case, it also implies that the thread that is woken up has
- * exclusive access to the RTS and all its data structures (that are not
- * under sched_mutex's control).
- *
- * thread_ready_cond is signalled whenever COND_NO_THREADS_READY doesn't hold.
- *
- */
-Condition thread_ready_cond = INIT_COND_VAR;
-#if 0
-/* For documentation purposes only */
-#define COND_NO_THREADS_READY() (noCapabilities() || EMPTY_RUN_QUEUE())
-#endif
-
-/*
- * To be able to make an informed decision about whether or not
- * to create a new task when making an external call, keep track of
- * the number of tasks currently blocked waiting on thread_ready_cond.
- * (if > 0 => no need for a new task, just unblock an existing one).
- *
- * waitForWork() takes care of keeping it up-to-date; Task.startTask()
- * uses its current value.
- */
-nat rts_n_waiting_tasks = 0;
-
-static void waitForWork(void);
-
 # if defined(SMP)
 static Condition gc_pending_cond = INIT_COND_VAR;
 nat await_death;
@@ -410,36 +378,19 @@ schedule( void )
 # endif
 #endif
   rtsBool was_interrupted = rtsFalse;
-
-#if defined(RTS_SUPPORTS_THREADS)
-schedule_start:
-#endif
-#if defined(RTS_SUPPORTS_THREADS)
   ACQUIRE_LOCK(&sched_mutex);
-#endif

 #if defined(RTS_SUPPORTS_THREADS)
-  /* ToDo: consider SMP support */
-  if ( rts_n_waiting_workers > 0 && noCapabilities() ) {
-    /* (At least) one native thread is waiting to
-     * deposit the result of an external call. So,
-     * be nice and hand over our capability.
-     */
-    yieldCapability(cap);
-    /* Lost our sched_mutex lock, try to re-enter the scheduler. */
-    goto schedule_start;
-  }
-#endif
+  /* Check to see whether there are any worker threads
+     waiting to deposit external call results. If so,
+     yield our capability */
+  yieldToReturningWorker(&sched_mutex, cap);

-#if defined(RTS_SUPPORTS_THREADS)
-  while ( noCapabilities() ) {
-    waitForWork();
-  }
+  waitForWorkCapability(&sched_mutex, &cap, rtsFalse);
 #endif

 #if defined(GRAN)
-
   /* set up first event to get things going */
   /* ToDo: assign costs for system setup and init MainTSO !
   */
   new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
@@ -723,16 +674,19 @@ schedule_start:

     if ( EMPTY_RUN_QUEUE() ) {
       /* Give up our capability */
       releaseCapability(cap);
-      while ( noCapabilities() || EMPTY_RUN_QUEUE() ) {
-        IF_DEBUG(scheduler, sched_belch("thread %d: waiting for work", osThreadId()));
-        waitForWork();
-        IF_DEBUG(scheduler, sched_belch("thread %d: work now available %d %d", osThreadId(), getFreeCapabilities(),EMPTY_RUN_QUEUE()));
+      IF_DEBUG(scheduler, sched_belch("thread %d: waiting for work", osThreadId()));
+      waitForWorkCapability(&sched_mutex, &cap, rtsTrue);
+      IF_DEBUG(scheduler, sched_belch("thread %d: work now available", osThreadId()));
+#if 0
+      while ( EMPTY_RUN_QUEUE() ) {
+        waitForWorkCapability(&sched_mutex, &cap);
+        IF_DEBUG(scheduler, sched_belch("thread %d: work now available", osThreadId()));
       }
+#endif
     }
 #endif

 #if defined(GRAN)
-
   if (RtsFlags.GranFlags.Light)
     GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc
@@ -978,7 +932,7 @@ schedule_start:
           belch("--=^ %d threads, %d sparks on [%#x]",
                 run_queue_len(), spark_queue_len(pool), CURRENT_PROC));

-#if 1
+# if 1
     if (0 && RtsFlags.ParFlags.ParStats.Full &&
         t && LastTSO && t->id != LastTSO->id &&
         LastTSO->why_blocked == NotBlocked &&
@@ -1003,7 +957,7 @@ schedule_start:
           emitSchedule = rtsFalse;
         }

-#endif
+# endif
 #else /* !GRAN && !PAR */

   /* grab a thread from the run queue */
@@ -1377,12 +1331,11 @@ schedule_start:
     }
 #endif

+  if (ready_to_gc
 #ifdef SMP
-  if (ready_to_gc && allFreeCapabilities() )
-#else
-  if (ready_to_gc)
+      && allFreeCapabilities()
 #endif
-    {
+      ) {
     /* everybody back, start the GC.
      * Could do it in this thread, or signal a condition var
      * to do it in another thread.  Either way, we need to
@@ -1391,7 +1344,9 @@ schedule_start:
 #if defined(RTS_SUPPORTS_THREADS)
     IF_DEBUG(scheduler,sched_belch("doing GC"));
 #endif
+    RELEASE_LOCK(&sched_mutex);
     GarbageCollect(GetRoots,rtsFalse);
+    ACQUIRE_LOCK(&sched_mutex);
     ready_to_gc = rtsFalse;
 #ifdef SMP
     broadcastCondition(&gc_pending_cond);
@@ -1475,7 +1430,7 @@ void deleteAllThreads ( void )
  * ------------------------------------------------------------------------- */

 StgInt
-suspendThread( StgRegTable *reg )
+suspendThread( StgRegTable *reg, rtsBool concCall )
 {
   nat tok;
   Capability *cap;
@@ -1504,7 +1459,7 @@ suspendThread( StgRegTable *reg )

   /* Hand back capability */
   releaseCapability(cap);

-#if defined(RTS_SUPPORTS_THREADS) && !defined(SMP)
+#if defined(RTS_SUPPORTS_THREADS)
   /* Preparing to leave the RTS, so ensure there's a native
      thread/task waiting to take over.
@@ -1512,24 +1467,31 @@ suspendThread( StgRegTable *reg )
      for one (i.e., if there's only one Concurrent Haskell thread alive,
      there's no need to create a new task).
   */
-  IF_DEBUG(scheduler, sched_belch("worker thread (%d): leaving RTS\n", tok));
-  startTask(taskStart);
+  IF_DEBUG(scheduler, sched_belch("worker thread (%d): leaving RTS", tok));
+  if (concCall) {
+    startTask(taskStart);
+  }
 #endif

+  /* Other threads _might_ be available for execution; signal this */
   THREAD_RUNNABLE();
   RELEASE_LOCK(&sched_mutex);
   return tok;
 }

 StgRegTable *
-resumeThread( StgInt tok )
+resumeThread( StgInt tok, rtsBool concCall )
 {
   StgTSO *tso, **prev;
   Capability *cap;

 #if defined(RTS_SUPPORTS_THREADS)
-  /* Wait for permission to re-enter the RTS with the result.. */
-  grabReturnCapability(&cap);
+  /* Wait for permission to re-enter the RTS with the result. */
+  if ( concCall ) {
+    grabReturnCapability(&sched_mutex, &cap);
+  } else {
+    grabCapability(&cap);
+  }
 #else
   grabCapability(&cap);
 #endif
@@ -1558,18 +1520,6 @@ resumeThread( StgInt tok )
 }


-#if defined(RTS_SUPPORTS_THREADS)
-static void
-waitForWork()
-{
-  rts_n_waiting_tasks++;
-  waitCondition(&thread_ready_cond, &sched_mutex);
-  rts_n_waiting_tasks--;
-  return;
-}
-#endif
-
-
 /* ---------------------------------------------------------------------------
  * Static functions
  * ------------------------------------------------------------------------ */
@@ -1870,10 +1820,13 @@ activateSpark (rtsSpark spark)
  * on this thread's stack before the scheduler is invoked.
  * ------------------------------------------------------------------------ */

+static void scheduleThread_ (StgTSO* tso, rtsBool createTask);
+
 void
 scheduleThread_(StgTSO *tso
-#if defined(THREADED_RTS)
                , rtsBool createTask
+#if !defined(THREADED_RTS)
+                  STG_UNUSED
 #endif
               )
 {
@@ -1903,11 +1856,12 @@ scheduleThread_(StgTSO *tso

 void scheduleThread(StgTSO* tso)
 {
-#if defined(THREADED_RTS)
+  return scheduleThread_(tso, rtsFalse);
+}
+
+void scheduleExtThread(StgTSO* tso)
+{
   return scheduleThread_(tso, rtsTrue);
-#else
-  return scheduleThread_(tso);
-#endif
 }

 /* ---------------------------------------------------------------------------
@@ -3127,51 +3081,39 @@ raiseAsync(StgTSO *tso, StgClosure *exception)
     StgAP_UPD * ap;

     /* If we find a CATCH_FRAME, and we've got an exception to raise,
-     * then build PAP(handler,exception,realworld#), and leave it on
-     * top of the stack ready to enter.
+     * then build the THUNK raise(exception), and leave it on
+     * top of the CATCH_FRAME ready to enter.
      */
     if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
         StgCatchFrame *cf = (StgCatchFrame *)su;
+        StgClosure *raise;
+
         /* we've got an exception to raise, so let's pass it to the
          * handler in this frame.
          */
-        ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 2);
-        TICK_ALLOC_UPD_PAP(3,0);
-        SET_HDR(ap,&stg_PAP_info,cf->header.prof.ccs);
-
-        ap->n_args = 2;
-        ap->fun = cf->handler;  /* :: Exception -> IO a */
-        ap->payload[0] = exception;
-        ap->payload[1] = ARG_TAG(0); /* realworld token */
-
-        /* throw away the stack from Sp up to and including the
-         * CATCH_FRAME.
-         */
-        sp = (P_)su + sizeofW(StgCatchFrame) - 1;
-        tso->su = cf->link;
-
-        /* Restore the blocked/unblocked state for asynchronous exceptions
-         * at the CATCH_FRAME.
-         *
-         * If exceptions were unblocked at the catch, arrange that they
-         * are unblocked again after executing the handler by pushing an
-         * unblockAsyncExceptions_ret stack frame.
+        raise = (StgClosure *)allocate(sizeofW(StgClosure)+1);
+        TICK_ALLOC_SE_THK(1,0);
+        SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
+        raise->payload[0] = exception;
+
+        /* throw away the stack from Sp up to the CATCH_FRAME.
         */
-        if (!cf->exceptions_blocked) {
-            *(sp--) = (W_)&stg_unblockAsyncExceptionszh_ret_info;
-        }
-
-        /* Ensure that async exceptions are blocked when running the handler.
+        sp = (P_)su - 1;
+
+        /* Ensure that async excpetions are blocked now, so we don't get
+         * a surprise exception before we get around to executing the
+         * handler.
         */
        if (tso->blocked_exceptions == NULL) {
-            tso->blocked_exceptions = END_TSO_QUEUE;
+          tso->blocked_exceptions = END_TSO_QUEUE;
        }
-
-        /* Put the newly-built PAP on top of the stack, ready to execute
+
+        /* Put the newly-built THUNK on top of the stack, ready to execute
         * when the thread restarts.
         */
-        sp[0] = (W_)ap;
+        sp[0] = (W_)raise;
        tso->sp = sp;
+       tso->su = su;
        tso->what_next = ThreadEnterGHC;
        IF_DEBUG(sanity, checkTSO(tso));
        return;
@@ -3688,7 +3630,6 @@ sched_belch(char *s, ...)
 //@subsection Index

 //@index
-//* MainRegTable:: @cindex\s-+MainRegTable
 //* StgMainThread:: @cindex\s-+StgMainThread
 //* awaken_blocked_queue:: @cindex\s-+awaken_blocked_queue
 //* blocked_queue_hd:: @cindex\s-+blocked_queue_hd
@@ -3706,5 +3647,4 @@ sched_belch(char *s, ...)
 //* schedule:: @cindex\s-+schedule
 //* take_off_run_queue:: @cindex\s-+take_off_run_queue
 //* term_mutex:: @cindex\s-+term_mutex
-//* thread_ready_cond:: @cindex\s-+thread_ready_cond
 //@end index
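
The effect of this patch on the external-call path is easiest to see from the caller's side: a blocking foreign call is now bracketed by suspendThread(reg, concCall) and resumeThread(tok, concCall), where concCall tells the RTS whether the call is a genuinely concurrent one, so that another worker task can take over the capability (and a replacement task may be started) while the call blocks. What follows is a minimal sketch of that calling pattern, not code from the patch itself: ext_call() is a hypothetical blocking C function, and the include is assumed to pull in the RTS declarations of suspendThread/resumeThread from a GHC build tree of this vintage.

/* Sketch only: bracketing a blocking external call with the
 * suspendThread()/resumeThread() entry points introduced above.
 * Assumptions: a threaded (RTS_SUPPORTS_THREADS) build, "Rts.h" on the
 * include path, and ext_call() standing in for whatever blocking C
 * function is actually being called.
 */
#include "Rts.h"

extern void ext_call(void);   /* hypothetical blocking foreign function */

StgRegTable *
do_external_call( StgRegTable *reg )
{
    /* Hand back our capability so other worker tasks can run Haskell
     * code while we block; rtsTrue marks this as a concurrent call,
     * letting the RTS start a fresh task to cover the scheduler.
     */
    StgInt tok = suspendThread(reg, rtsTrue);

    ext_call();               /* runs outside the RTS, sched_mutex not held */

    /* Wait for permission to re-enter the RTS, then recover the
     * suspended thread's register table so execution can continue.
     */
    return resumeThread(tok, rtsTrue);
}

Passing rtsFalse instead marks a non-concurrent re-entry, in which case (per the patch) suspendThread does not start a replacement task and resumeThread grabs a capability directly rather than waiting as a returning worker.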