X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FSchedule.c;h=72229d43dd33dade33beced38bf340fe43e0fa72;hb=85126d5203ec5344f6a5b2e77da23d34444e48c6;hp=3371bad9134676ee6767b97778b542d6a8741da7;hpb=0671ef05dd65137d501cb97f0e42be3b78d4004d;p=ghc-hetmet.git

diff --git a/ghc/rts/Schedule.c b/ghc/rts/Schedule.c
index 3371bad..72229d4 100644
--- a/ghc/rts/Schedule.c
+++ b/ghc/rts/Schedule.c
@@ -1,5 +1,5 @@
 /* ---------------------------------------------------------------------------
- * $Id: Schedule.c,v 1.105 2001/11/08 12:46:31 simonmar Exp $
+ * $Id: Schedule.c,v 1.139 2002/04/23 09:56:28 stolz Exp $
 *
 * (c) The GHC Team, 1998-2000
 *
@@ -10,10 +10,11 @@
 *
 * WAY Name CPP flag What's it for
 * --------------------------------------
- * mp GUM PAR Parallel execution on a distributed memory machine
- * s SMP SMP Parallel execution on a shared memory machine
- * mg GranSim GRAN Simulation of parallel execution
- * md GUM/GdH DIST Distributed execution (based on GUM)
+ * mp GUM PAR Parallel execution on a distributed memory machine
+ * s SMP SMP Parallel execution on a shared memory machine
+ * mg GranSim GRAN Simulation of parallel execution
+ * md GUM/GdH DIST Distributed execution (based on GUM)
+ *
 * --------------------------------------------------------------------------*/

//@node Main scheduling code, , ,
@@ -95,6 +96,10 @@
 #include "Stats.h"
 #include "Itimer.h"
 #include "Prelude.h"
+#ifdef PROFILING
+#include "Proftimer.h"
+#include "ProfHeap.h"
+#endif
 #if defined(GRAN) || defined(PAR)
 # include "GranSimRts.h"
 # include "GranSim.h"
@@ -105,41 +110,26 @@
 # include "HLC.h"
 #endif
 #include "Sparks.h"
+#include "Capability.h"
+#include "OSThreads.h"
+#include "Task.h"
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif

#include <stdarg.h>

//@node Variables and Data structures, Prototypes, Includes, Main scheduling code
//@subsection Variables and Data structures

-/* Main threads:
- *
- * These are the threads which clients have requested that we run.
- *
- * In an SMP build, we might have several concurrent clients all
- * waiting for results, and each one will wait on a condition variable
- * until the result is available.
- *
- * In non-SMP, clients are strictly nested: the first client calls
- * into the RTS, which might call out again to C with a _ccall_GC, and
- * eventually re-enter the RTS.
- *
- * Main threads information is kept in a linked list:
- */
-//@cindex StgMainThread
-typedef struct StgMainThread_ {
- StgTSO * tso;
- SchedulerStatus stat;
- StgClosure ** ret;
-#ifdef SMP
- pthread_cond_t wakeup;
-#endif
- struct StgMainThread_ *link;
-} StgMainThread;
-
/* Main thread queue.
 * Locks required: sched_mutex.
 */
-static StgMainThread *main_threads;
+StgMainThread *main_threads;

/* Thread queues.
 * Locks required: sched_mutex.
@@ -150,7 +140,7 @@ StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */
/* rtsTime TimeOfNextEvent, EndOfTimeSlice; now in GranSim.c */

/*
- In GranSim we have a runable and a blocked queue for each processor.
+ In GranSim we have a runnable and a blocked queue for each processor.
 In order to minimise code changes new arrays run_queue_hds/tls
 are created. run_queue_hd is then a short cut (macro) for
 run_queue_hds[CurrentProc] (see GranSim.h).
@@ -177,11 +167,12 @@ StgTSO *sleeping_queue; /* perhaps replace with a hash table? */
 */
StgTSO *all_threads;

-/* Threads suspended in _ccall_GC. 
+/* When a thread performs a safe C call (_ccall_GC, using old + * terminology), it gets put on the suspended_ccalling_threads + * list. Used by the garbage collector. */ static StgTSO *suspended_ccalling_threads; -static void GetRoots(evac_fn); static StgTSO *threadStackOverflow(StgTSO *tso); /* KH: The following two flags are shared memory locations. There is no need @@ -221,15 +212,6 @@ StgThreadID next_thread_id = 1; #define MIN_STACK_WORDS (RESERVED_STACK_WORDS + sizeofW(StgStopFrame) + 2) -/* Free capability list. - * Locks required: sched_mutex. - */ -#ifdef SMP -Capability *free_capabilities; /* Available capabilities for running threads */ -nat n_free_capabilities; /* total number of available capabilities */ -#else -Capability MainCapability; /* for non-SMP, we have one global capability */ -#endif #if defined(GRAN) StgTSO *CurrentTSO; @@ -243,13 +225,6 @@ StgTSO dummy_tso; rtsBool ready_to_gc; -/* All our current task ids, saved in case we need to kill them later. - */ -#ifdef SMP -//@cindex task_ids -task_info *task_ids; -#endif - void addToBlockedQueue ( StgTSO *tso ); static void schedule ( void ); @@ -266,18 +241,19 @@ static void detectBlackHoles ( void ); static void sched_belch(char *s, ...); #endif -#ifdef SMP -//@cindex sched_mutex -//@cindex term_mutex -//@cindex thread_ready_cond -//@cindex gc_pending_cond -pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER; -pthread_mutex_t term_mutex = PTHREAD_MUTEX_INITIALIZER; -pthread_cond_t thread_ready_cond = PTHREAD_COND_INITIALIZER; -pthread_cond_t gc_pending_cond = PTHREAD_COND_INITIALIZER; +#if defined(RTS_SUPPORTS_THREADS) +/* ToDo: carefully document the invariants that go together + * with these synchronisation objects. + */ +Mutex sched_mutex = INIT_MUTEX_VAR; +Mutex term_mutex = INIT_MUTEX_VAR; +# if defined(SMP) +static Condition gc_pending_cond = INIT_COND_VAR; nat await_death; -#endif +# endif + +#endif /* RTS_SUPPORTS_THREADS */ #if defined(PAR) StgTSO *LastTSO; @@ -303,7 +279,7 @@ char *threadReturnCode_strs[] = { }; #endif -#ifdef PAR +#if defined(PAR) StgTSO * createSparkThread(rtsSpark spark); StgTSO * activateSpark (rtsSpark spark); #endif @@ -314,6 +290,18 @@ StgTSO * activateSpark (rtsSpark spark); StgTSO *MainTSO; */ +#if defined(PAR) || defined(RTS_SUPPORTS_THREADS) +static void taskStart(void); +static void +taskStart(void) +{ + schedule(); +} +#endif + + + + //@node Main scheduling loop, Suspend and Resume, Prototypes, Main scheduling code //@subsection Main scheduling loop @@ -374,9 +362,15 @@ schedule( void ) rtsBool was_interrupted = rtsFalse; ACQUIRE_LOCK(&sched_mutex); + +#if defined(RTS_SUPPORTS_THREADS) + waitForWorkCapability(&sched_mutex, &cap, rtsFalse); +#else + /* simply initialise it in the non-threaded case */ + grabCapability(&cap); +#endif #if defined(GRAN) - /* set up first event to get things going */ /* ToDo: assign costs for system setup and init MainTSO ! */ new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc], @@ -411,6 +405,13 @@ schedule( void ) IF_DEBUG(scheduler, printAllThreads()); +#if defined(RTS_SUPPORTS_THREADS) + /* Check to see whether there are any worker threads + waiting to deposit external call results. If so, + yield our capability */ + yieldToReturningWorker(&sched_mutex, &cap); +#endif + /* If we're interrupted (the user pressed ^C, or some other * termination condition occurred), kill all the currently running * threads. @@ -427,7 +428,7 @@ schedule( void ) * should be done more efficiently without a linear scan * of the main threads list, somehow... 
 */
-#ifdef SMP
+#if defined(RTS_SUPPORTS_THREADS)
 {
 StgMainThread *m, **prev;
 prev = &main_threads;
@@ -439,7 +440,11 @@
 }
 *prev = m->link;
 m->stat = Success;
- pthread_cond_broadcast(&m->wakeup);
+ broadcastCondition(&m->wakeup);
+#ifdef DEBUG
+ free(m->tso->label);
+ m->tso->label = NULL;
+#endif
 break;
 case ThreadKilled:
 if (m->ret) *(m->ret) = NULL;
@@ -449,7 +454,11 @@
 } else {
 m->stat = Killed;
 }
- pthread_cond_broadcast(&m->wakeup);
+ broadcastCondition(&m->wakeup);
+#ifdef DEBUG
+ free(m->tso->label);
+ m->tso->label = NULL;
+#endif
 break;
 default:
 break;
@@ -457,7 +466,7 @@
 }
 }

-#else // not SMP
+#else /* not threaded */

 # if defined(PAR)
 /* in GUM do this only on the Main PE */
@@ -469,6 +478,10 @@
 StgMainThread *m = main_threads;
 if (m->tso->what_next == ThreadComplete
 || m->tso->what_next == ThreadKilled) {
+#ifdef DEBUG
+ free(m->tso->label);
+ m->tso->label = NULL;
+#endif
 main_threads = main_threads->link;
 if (m->tso->what_next == ThreadComplete) {
 /* we finished successfully, fill in the return value */
@@ -491,10 +504,13 @@
 /* Top up the run queue from our spark pool. We try to make the
 * number of threads in the run queue equal to the number of
 * free capabilities.
+ *
+ * Disable spark support in SMP for now, non-essential & requires
+ * a little bit of work to make it compile cleanly. -- sof 1/02.
 */
-#if defined(SMP)
+#if 0 /* defined(SMP) */
 {
- nat n = n_free_capabilities;
+ nat n = getFreeCapabilities();
 StgTSO *tso = run_queue_hd;

 /* Count the run queue */
@@ -521,8 +537,8 @@
 /* We need to wake up the other tasks if we just created some
 * work for them.
 */
- if (n_free_capabilities - n > 1) {
- pthread_cond_signal(&thread_ready_cond);
+ if (getFreeCapabilities() - n > 1) {
+ signalCondition( &thread_ready_cond );
 }
 }
#endif // SMP

 /* check for signals each time around the scheduler */
#ifndef mingw32_TARGET_OS
 if (signals_pending()) {
+ RELEASE_LOCK(&sched_mutex); /* ToDo: kill */
 startSignalHandlers();
+ ACQUIRE_LOCK(&sched_mutex);
 }
#endif

@@ -540,11 +558,10 @@
 * ToDo: what if another client comes along & requests another
 * main thread?
 */
- if (blocked_queue_hd != END_TSO_QUEUE || sleeping_queue != END_TSO_QUEUE) {
- awaitEvent(
- (run_queue_hd == END_TSO_QUEUE)
-#ifdef SMP
- && (n_free_capabilities == RtsFlags.ParFlags.nNodes)
+ if ( !EMPTY_QUEUE(blocked_queue_hd) || !EMPTY_QUEUE(sleeping_queue) ) {
+ awaitEvent( EMPTY_RUN_QUEUE()
+#if defined(SMP)
+ && allFreeCapabilities()
#endif
 );
 }
@@ -563,66 +580,151 @@
 * inform all the main threads.
 */
#ifndef PAR
- if (blocked_queue_hd == END_TSO_QUEUE
- && run_queue_hd == END_TSO_QUEUE
- && sleeping_queue == END_TSO_QUEUE
+ if ( EMPTY_THREAD_QUEUES()
+#if defined(RTS_SUPPORTS_THREADS)
+ && EMPTY_QUEUE(suspended_ccalling_threads)
+#endif
#ifdef SMP
- && (n_free_capabilities == RtsFlags.ParFlags.nNodes)
+ && allFreeCapabilities()
#endif
 )
 {
 IF_DEBUG(scheduler, sched_belch("deadlocked, forcing major GC..."));
+#if defined(THREADED_RTS)
+ /* and SMP mode ..? */
+ releaseCapability(cap);
+#endif
+ // Garbage collection can release some new threads due to
+ // either (a) finalizers or (b) threads resurrected because
+ // they are about to be sent BlockedOnDeadMVar. Any threads
+ // thus released will be immediately runnable.
 GarbageCollect(GetRoots,rtsTrue);
- if (blocked_queue_hd == END_TSO_QUEUE
- && run_queue_hd == END_TSO_QUEUE
- && sleeping_queue == END_TSO_QUEUE) {
- IF_DEBUG(scheduler, sched_belch("still deadlocked, checking for black holes..."));
- detectBlackHoles();
- if (run_queue_hd == END_TSO_QUEUE) {
- StgMainThread *m = main_threads;
-#ifdef SMP
- for (; m != NULL; m = m->link) {
- deleteThread(m->tso);
- m->ret = NULL;
- m->stat = Deadlock;
- pthread_cond_broadcast(&m->wakeup);
- }
- main_threads = NULL;
+
+ if ( !EMPTY_RUN_QUEUE() ) { goto not_deadlocked; }
+
+ IF_DEBUG(scheduler,
+ sched_belch("still deadlocked, checking for black holes..."));
+ detectBlackHoles();
+
+ if ( !EMPTY_RUN_QUEUE() ) { goto not_deadlocked; }
+
+#ifndef mingw32_TARGET_OS
+ /* If we have user-installed signal handlers, then wait
+ * for signals to arrive rather than bombing out with a
+ * deadlock.
+ */
+#if defined(RTS_SUPPORTS_THREADS)
+ if ( 0 ) { /* hmm..what to do? Simply stopping to wait for
+ a signal with no runnable threads (or I/O
+ suspended ones) leads nowhere quick.
+ For now, simply shut down when we reach this
+ condition.
+
+ ToDo: define precisely under what conditions
+ the Scheduler should shut down in an MT setting.
+ */
#else
- deleteThread(m->tso);
- m->ret = NULL;
- m->stat = Deadlock;
- main_threads = m->link;
- return;
+ if ( anyUserHandlers() ) {
#endif
+ IF_DEBUG(scheduler,
+ sched_belch("still deadlocked, waiting for signals..."));
+
+ awaitUserSignals();
+
+ // we might be interrupted...
+ if (interrupted) { continue; }
+
+ if (signals_pending()) {
+ RELEASE_LOCK(&sched_mutex);
+ startSignalHandlers();
+ ACQUIRE_LOCK(&sched_mutex);
 }
+ ASSERT(!EMPTY_RUN_QUEUE());
+ goto not_deadlocked;
+ }
+#endif
+
+ /* Probably a real deadlock. Send the current main thread the
+ * Deadlock exception (or in the SMP build, send *all* main
+ * threads the deadlock exception, since none of them can make
+ * progress).
+ */
+ {
+ StgMainThread *m;
+#if defined(RTS_SUPPORTS_THREADS)
+ for (m = main_threads; m != NULL; m = m->link) {
+ switch (m->tso->why_blocked) {
+ case BlockedOnBlackHole:
+ raiseAsync(m->tso, (StgClosure *)NonTermination_closure);
+ break;
+ case BlockedOnException:
+ case BlockedOnMVar:
+ raiseAsync(m->tso, (StgClosure *)Deadlock_closure);
+ break;
+ default:
+ barf("deadlock: main thread blocked in a strange way");
+ }
+ }
+#else
+ m = main_threads;
+ switch (m->tso->why_blocked) {
+ case BlockedOnBlackHole:
+ raiseAsync(m->tso, (StgClosure *)NonTermination_closure);
+ break;
+ case BlockedOnException:
+ case BlockedOnMVar:
+ raiseAsync(m->tso, (StgClosure *)Deadlock_closure);
+ break;
+ default:
+ barf("deadlock: main thread blocked in a strange way");
+ }
+#endif
 }
+
+#if defined(RTS_SUPPORTS_THREADS)
+ /* ToDo: revisit conditions (and mechanism) for shutting
+ down a multi-threaded world */
+ IF_DEBUG(scheduler, sched_belch("all done, i think...shutting down."));
+ shutdownHaskellAndExit(0);
+#endif
 }
+ not_deadlocked:
+
 #elif defined(PAR)
 /* ToDo: add deadlock detection in GUM (similar to SMP) -- HWL */
 #endif

-#ifdef SMP
+#if defined(SMP)
 /* If there's a GC pending, don't do anything until it has
 * completed.
 */
 if (ready_to_gc) {
 IF_DEBUG(scheduler,sched_belch("waiting for GC"));
- pthread_cond_wait(&gc_pending_cond, &sched_mutex);
+ waitCondition( &gc_pending_cond, &sched_mutex );
 }
-
+#endif
+
+#if defined(RTS_SUPPORTS_THREADS)
 /* block until we've got a thread on the run queue and a free
 * capability.
+ * */ - while (run_queue_hd == END_TSO_QUEUE || free_capabilities == NULL) { - IF_DEBUG(scheduler, sched_belch("waiting for work")); - pthread_cond_wait(&thread_ready_cond, &sched_mutex); - IF_DEBUG(scheduler, sched_belch("work now available")); + if ( EMPTY_RUN_QUEUE() ) { + /* Give up our capability */ + releaseCapability(cap); + IF_DEBUG(scheduler, sched_belch("thread %d: waiting for work", osThreadId())); + waitForWorkCapability(&sched_mutex, &cap, rtsTrue); + IF_DEBUG(scheduler, sched_belch("thread %d: work now available", osThreadId())); +#if 0 + while ( EMPTY_RUN_QUEUE() ) { + waitForWorkCapability(&sched_mutex, &cap); + IF_DEBUG(scheduler, sched_belch("thread %d: work now available", osThreadId())); + } +#endif } #endif #if defined(GRAN) - if (RtsFlags.GranFlags.Light) GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc @@ -868,7 +970,7 @@ schedule( void ) belch("--=^ %d threads, %d sparks on [%#x]", run_queue_len(), spark_queue_len(pool), CURRENT_PROC)); -#if 1 +# if 1 if (0 && RtsFlags.ParFlags.ParStats.Full && t && LastTSO && t->id != LastTSO->id && LastTSO->why_blocked == NotBlocked && @@ -893,40 +995,31 @@ schedule( void ) emitSchedule = rtsFalse; } -#endif +# endif #else /* !GRAN && !PAR */ - /* grab a thread from the run queue - */ + /* grab a thread from the run queue */ ASSERT(run_queue_hd != END_TSO_QUEUE); t = POP_RUN_QUEUE(); - // Sanity check the thread we're about to run. This can be // expensive if there is lots of thread switching going on... IF_DEBUG(sanity,checkTSO(t)); - #endif - /* grab a capability - */ -#ifdef SMP - cap = free_capabilities; - free_capabilities = cap->link; - n_free_capabilities--; -#else - cap = &MainCapability; -#endif - cap->r.rCurrentTSO = t; /* context switches are now initiated by the timer signal, unless * the user specified "context switch as often as possible", with * +RTS -C0 */ - if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0 - && (run_queue_hd != END_TSO_QUEUE - || blocked_queue_hd != END_TSO_QUEUE - || sleeping_queue != END_TSO_QUEUE)) + if ( +#ifdef PROFILING + RtsFlags.ProfFlags.profileInterval == 0 || +#endif + (RtsFlags.ConcFlags.ctxtSwitchTicks == 0 + && (run_queue_hd != END_TSO_QUEUE + || blocked_queue_hd != END_TSO_QUEUE + || sleeping_queue != END_TSO_QUEUE))) context_switch = 1; else context_switch = 0; @@ -936,6 +1029,10 @@ schedule( void ) IF_DEBUG(scheduler, sched_belch("-->> Running TSO %ld (%p) %s ...", t->id, t, whatNext_strs[t->what_next])); +#ifdef PROFILING + startHeapProfTimer(); +#endif + /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ /* Run the current thread */ @@ -961,13 +1058,14 @@ schedule( void ) /* Costs for the scheduler are assigned to CCS_SYSTEM */ #ifdef PROFILING + stopHeapProfTimer(); CCCS = CCS_SYSTEM; #endif ACQUIRE_LOCK(&sched_mutex); #ifdef SMP - IF_DEBUG(scheduler,fprintf(stderr,"scheduler (task %ld): ", pthread_self());); + IF_DEBUG(scheduler,fprintf(stderr,"scheduler (task %ld): ", osThreadId());); #elif !defined(GRAN) && !defined(PAR) IF_DEBUG(scheduler,fprintf(stderr,"scheduler: ");); #endif @@ -1255,31 +1353,33 @@ schedule( void ) default: barf("schedule: invalid thread return code %d", (int)ret); } - -#ifdef SMP - cap->link = free_capabilities; - free_capabilities = cap; - n_free_capabilities++; + +#ifdef PROFILING + if (RtsFlags.ProfFlags.profileInterval==0 || performHeapProfile) { + GarbageCollect(GetRoots, rtsTrue); + heapCensus(); + performHeapProfile = rtsFalse; + ready_to_gc = rtsFalse; // we already GC'd + } #endif + if (ready_to_gc 
#ifdef SMP
- if (ready_to_gc && n_free_capabilities == RtsFlags.ParFlags.nNodes)
-#else
- if (ready_to_gc)
+ && allFreeCapabilities()
 #endif
- {
+ ) {
 /* everybody back, start the GC.
 * Could do it in this thread, or signal a condition var
 * to do it in another thread. Either way, we need to
 * broadcast on gc_pending_cond afterward.
 */
-#ifdef SMP
+#if defined(RTS_SUPPORTS_THREADS)
 IF_DEBUG(scheduler,sched_belch("doing GC"));
 #endif
 GarbageCollect(GetRoots,rtsFalse);
 ready_to_gc = rtsFalse;
 #ifdef SMP
- pthread_cond_broadcast(&gc_pending_cond);
+ broadcastCondition(&gc_pending_cond);
 #endif
 #if defined(GRAN)
 /* add a ContinueThread event to continue execution of current thread */
@@ -1311,25 +1411,62 @@
 }

 /* ---------------------------------------------------------------------------
+ * Singleton fork(). Do not copy any running threads.
+ * ------------------------------------------------------------------------- */
+
+StgInt forkProcess(StgTSO* tso) {
+
+#ifndef mingw32_TARGET_OS
+ pid_t pid;
+ StgTSO* t,*next;
+
+ IF_DEBUG(scheduler,sched_belch("forking!"));
+
+ pid = fork();
+ if (pid) { /* parent */
+
+ /* just return the pid */
+
+ } else { /* child */
+ /* wipe all other threads */
+ run_queue_hd = tso;
+ tso->link = END_TSO_QUEUE;
+
+ /* DO NOT TOUCH THE QUEUES directly because most of the code around
+ us is picky about finding the thread still in its queue when
+ handling deleteThread() */
+
+ for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->link;
+ if (t->id != tso->id) {
+ deleteThread(t);
+ }
+ }
+ }
+ return pid;
+#else /* mingw32 */
+ barf("forkProcess#: primop not implemented for mingw32, sorry!");
+ return -1;
+#endif /* mingw32 */
+}
+
+/* ---------------------------------------------------------------------------
 * deleteAllThreads(): kill all the live threads.
 *
 * This is used when we catch a user interrupt (^C), before performing
 * any necessary cleanups and running finalizers.
+ *
+ * Locks: sched_mutex held.
 * ------------------------------------------------------------------------- */

 void deleteAllThreads ( void )
 {
- StgTSO* t;
+ StgTSO* t, *next;

 IF_DEBUG(scheduler,sched_belch("deleting all threads"));
- for (t = run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
+ for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->global_link;
 deleteThread(t);
- }
- for (t = blocked_queue_hd; t != END_TSO_QUEUE; t = t->link) {
- deleteThread(t);
- }
- for (t = sleeping_queue; t != END_TSO_QUEUE; t = t->link) {
- deleteThread(t);
- }
+ }
 run_queue_hd = run_queue_tl = END_TSO_QUEUE;
 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
 sleeping_queue = END_TSO_QUEUE;
@@ -1337,6 +1474,7 @@ void deleteAllThreads ( void )

 /* startThread and insertThread are now in GranSim.c -- HWL */

+
 //@node Suspend and Resume, Run queue code, Main scheduling loop, Main scheduling code
 //@subsection Suspend and Resume

@@ -1356,40 +1494,84 @@ void deleteAllThreads ( void )
 * ------------------------------------------------------------------------- */

 StgInt
-suspendThread( Capability *cap )
+suspendThread( StgRegTable *reg,
+ rtsBool concCall
+#if !defined(RTS_SUPPORTS_THREADS) && !defined(DEBUG)
+ STG_UNUSED
+#endif
+ )
 {
 nat tok;
+ Capability *cap;
+
+ /* assume that *reg is a pointer to the StgRegTable part
+ * of a Capability.
+ */ + cap = (Capability *)((void *)reg - sizeof(StgFunTable)); ACQUIRE_LOCK(&sched_mutex); IF_DEBUG(scheduler, - sched_belch("thread %d did a _ccall_gc", cap->r.rCurrentTSO->id)); + sched_belch("thread %d did a _ccall_gc (is_concurrent: %d)", cap->r.rCurrentTSO->id,concCall)); threadPaused(cap->r.rCurrentTSO); cap->r.rCurrentTSO->link = suspended_ccalling_threads; suspended_ccalling_threads = cap->r.rCurrentTSO; +#if defined(RTS_SUPPORTS_THREADS) + cap->r.rCurrentTSO->why_blocked = BlockedOnCCall; +#endif + /* Use the thread ID as the token; it should be unique */ tok = cap->r.rCurrentTSO->id; -#ifdef SMP - cap->link = free_capabilities; - free_capabilities = cap; - n_free_capabilities++; + /* Hand back capability */ + releaseCapability(cap); + +#if defined(RTS_SUPPORTS_THREADS) + /* Preparing to leave the RTS, so ensure there's a native thread/task + waiting to take over. + + ToDo: optimise this and only create a new task if there's a need + for one (i.e., if there's only one Concurrent Haskell thread alive, + there's no need to create a new task). + */ + IF_DEBUG(scheduler, sched_belch("worker thread (%d): leaving RTS", tok)); + if (concCall) { + startTask(taskStart); + } #endif + /* Other threads _might_ be available for execution; signal this */ + THREAD_RUNNABLE(); RELEASE_LOCK(&sched_mutex); return tok; } -Capability * -resumeThread( StgInt tok ) +StgRegTable * +resumeThread( StgInt tok, + rtsBool concCall +#if !defined(RTS_SUPPORTS_THREADS) + STG_UNUSED +#endif + ) { StgTSO *tso, **prev; Capability *cap; - ACQUIRE_LOCK(&sched_mutex); +#if defined(RTS_SUPPORTS_THREADS) + /* Wait for permission to re-enter the RTS with the result. */ + if ( concCall ) { + ACQUIRE_LOCK(&sched_mutex); + grabReturnCapability(&sched_mutex, &cap); + } else { + grabCapability(&cap); + } +#else + grabCapability(&cap); +#endif + /* Remove the thread off of the suspended list */ prev = &suspended_ccalling_threads; for (tso = suspended_ccalling_threads; tso != END_TSO_QUEUE; @@ -1403,24 +1585,12 @@ resumeThread( StgInt tok ) barf("resumeThread: thread not found"); } tso->link = END_TSO_QUEUE; - -#ifdef SMP - while (free_capabilities == NULL) { - IF_DEBUG(scheduler, sched_belch("waiting to resume")); - pthread_cond_wait(&thread_ready_cond, &sched_mutex); - IF_DEBUG(scheduler, sched_belch("resuming thread %d", tso->id)); - } - cap = free_capabilities; - free_capabilities = cap->link; - n_free_capabilities--; -#else - cap = &MainCapability; -#endif + /* Reset blocking status */ + tso->why_blocked = NotBlocked; cap->r.rCurrentTSO = tso; - RELEASE_LOCK(&sched_mutex); - return cap; + return &cap->r; } @@ -1456,6 +1626,25 @@ int rts_getThreadId(const StgTSO *tso) return tso->id; } +#ifdef DEBUG +void labelThread(StgTSO *tso, char *label) +{ + int len; + void *buf; + + /* Caveat: Once set, you can only set the thread name to "" */ + len = strlen(label)+1; + buf = realloc(tso->label,len); + if (buf == NULL) { + fprintf(stderr,"insufficient memory for labelThread!\n"); + free(tso->label); + tso->label = NULL; + } else + strncpy(buf,label,len); + tso->label = buf; +} +#endif /* DEBUG */ + /* --------------------------------------------------------------------------- Create a new thread. 
@@ -1522,7 +1711,7 @@ createThread_(nat size, rtsBool have_lock) stack_size = size - TSO_STRUCT_SIZEW; tso = (StgTSO *)allocate(size); - TICK_ALLOC_TSO(size-TSO_STRUCT_SIZEW, 0); + TICK_ALLOC_TSO(stack_size, 0); SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM); #if defined(GRAN) @@ -1530,13 +1719,21 @@ createThread_(nat size, rtsBool have_lock) #endif tso->what_next = ThreadEnterGHC; +#ifdef DEBUG + tso->label = NULL; +#endif + /* tso->id needs to be unique. For now we use a heavyweight mutex to * protect the increment operation on next_thread_id. * In future, we could use an atomic increment instead. */ +#ifdef SMP if (!have_lock) { ACQUIRE_LOCK(&sched_mutex); } +#endif tso->id = next_thread_id++; +#ifdef SMP if (!have_lock) { RELEASE_LOCK(&sched_mutex); } +#endif tso->why_blocked = NotBlocked; tso->blocked_exceptions = NULL; @@ -1724,14 +1921,16 @@ activateSpark (rtsSpark spark) * on this thread's stack before the scheduler is invoked. * ------------------------------------------------------------------------ */ +static void scheduleThread_ (StgTSO* tso, rtsBool createTask); + void -scheduleThread(StgTSO *tso) +scheduleThread_(StgTSO *tso + , rtsBool createTask +#if !defined(THREADED_RTS) + STG_UNUSED +#endif + ) { - if (tso==END_TSO_QUEUE){ - schedule(); - return; - } - ACQUIRE_LOCK(&sched_mutex); /* Put the new thread on the head of the runnable queue. The caller @@ -1740,6 +1939,14 @@ scheduleThread(StgTSO *tso) * soon as we release the scheduler lock below. */ PUSH_ON_RUN_QUEUE(tso); +#if defined(THREADED_RTS) + /* If main() is scheduling a thread, don't bother creating a + * new task. + */ + if ( createTask ) { + startTask(taskStart); + } +#endif THREAD_RUNNABLE(); #if 0 @@ -1748,21 +1955,15 @@ scheduleThread(StgTSO *tso) RELEASE_LOCK(&sched_mutex); } -/* --------------------------------------------------------------------------- - * startTasks() - * - * Start up Posix threads to run each of the scheduler tasks. - * I believe the task ids are not needed in the system as defined. - * KH @ 25/10/99 - * ------------------------------------------------------------------------ */ +void scheduleThread(StgTSO* tso) +{ + return scheduleThread_(tso, rtsFalse); +} -#if defined(PAR) || defined(SMP) -void -taskStart(void) /* ( void *arg STG_UNUSED) */ +void scheduleExtThread(StgTSO* tso) { - scheduleThread(END_TSO_QUEUE); + return scheduleThread_(tso, rtsTrue); } -#endif /* --------------------------------------------------------------------------- * initScheduler() @@ -1771,7 +1972,6 @@ taskStart(void) /* ( void *arg STG_UNUSED) */ * queues contained any threads, they'll be garbage collected at the * next pass. * - * This now calls startTasks(), so should only be called once! KH @ 25/10/99 * ------------------------------------------------------------------------ */ #ifdef SMP @@ -1782,19 +1982,10 @@ term_handler(int sig STG_UNUSED) ACQUIRE_LOCK(&term_mutex); await_death--; RELEASE_LOCK(&term_mutex); - pthread_exit(NULL); + shutdownThread(); } #endif -static void -initCapability( Capability *cap ) -{ - cap->f.stgChk0 = (F_)__stg_chk_0; - cap->f.stgChk1 = (F_)__stg_chk_1; - cap->f.stgGCEnter1 = (F_)__stg_gc_enter_1; - cap->f.stgUpdatePAP = (F_)__stg_update_PAP; -} - void initScheduler(void) { @@ -1827,9 +2018,26 @@ initScheduler(void) RtsFlags.ConcFlags.ctxtSwitchTicks = RtsFlags.ConcFlags.ctxtSwitchTime / TICK_MILLISECS; + +#if defined(RTS_SUPPORTS_THREADS) + /* Initialise the mutex and condition variables used by + * the scheduler. 
*/ + initMutex(&sched_mutex); + initMutex(&term_mutex); + + initCondition(&thread_ready_cond); +#endif + +#if defined(SMP) + initCondition(&gc_pending_cond); +#endif + +#if defined(RTS_SUPPORTS_THREADS) + ACQUIRE_LOCK(&sched_mutex); +#endif /* Install the SIGHUP handler */ -#ifdef SMP +#if defined(SMP) { struct sigaction action,oact; @@ -1842,95 +2050,36 @@ initScheduler(void) } #endif -#ifdef SMP - /* Allocate N Capabilities */ - { - nat i; - Capability *cap, *prev; - cap = NULL; - prev = NULL; - for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) { - cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities"); - initCapability(cap); - cap->link = prev; - prev = cap; - } - free_capabilities = cap; - n_free_capabilities = RtsFlags.ParFlags.nNodes; - } - IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Allocated %d capabilities\n", - n_free_capabilities);); -#else - initCapability(&MainCapability); + /* A capability holds the state a native thread needs in + * order to execute STG code. At least one capability is + * floating around (only SMP builds have more than one). + */ + initCapabilities(); + +#if defined(RTS_SUPPORTS_THREADS) + /* start our haskell execution tasks */ +# if defined(SMP) + startTaskManager(RtsFlags.ParFlags.nNodes, taskStart); +# else + startTaskManager(0,taskStart); +# endif #endif -#if defined(SMP) || defined(PAR) +#if /* defined(SMP) ||*/ defined(PAR) initSparkPools(); #endif -} -#ifdef SMP -void -startTasks( void ) -{ - nat i; - int r; - pthread_t tid; - - /* make some space for saving all the thread ids */ - task_ids = stgMallocBytes(RtsFlags.ParFlags.nNodes * sizeof(task_info), - "initScheduler:task_ids"); - - /* and create all the threads */ - for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) { - r = pthread_create(&tid,NULL,taskStart,NULL); - if (r != 0) { - barf("startTasks: Can't create new Posix thread"); - } - task_ids[i].id = tid; - task_ids[i].mut_time = 0.0; - task_ids[i].mut_etime = 0.0; - task_ids[i].gc_time = 0.0; - task_ids[i].gc_etime = 0.0; - task_ids[i].elapsedtimestart = elapsedtime(); - IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Started task: %ld\n",tid);); - } -} +#if defined(RTS_SUPPORTS_THREADS) + RELEASE_LOCK(&sched_mutex); #endif +} + void exitScheduler( void ) { -#ifdef SMP - nat i; - - /* Don't want to use pthread_cancel, since we'd have to install - * these silly exception handlers (pthread_cleanup_{push,pop}) around - * all our locks. - */ -#if 0 - /* Cancel all our tasks */ - for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) { - pthread_cancel(task_ids[i].id); - } - - /* Wait for all the tasks to terminate */ - for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) { - IF_DEBUG(scheduler,fprintf(stderr,"scheduler: waiting for task %ld\n", - task_ids[i].id)); - pthread_join(task_ids[i].id, NULL); - } -#endif - - /* Send 'em all a SIGHUP. That should shut 'em up. 
- */ - await_death = RtsFlags.ParFlags.nNodes; - for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) { - pthread_kill(task_ids[i].id,SIGTERM); - } - while (await_death > 0) { - sched_yield(); - } +#if defined(RTS_SUPPORTS_THREADS) + stopTaskManager(); #endif } @@ -1978,13 +2127,13 @@ finishAllThreads ( void ) { do { while (run_queue_hd != END_TSO_QUEUE) { - waitThread ( run_queue_hd, NULL ); + waitThread ( run_queue_hd, NULL); } while (blocked_queue_hd != END_TSO_QUEUE) { - waitThread ( blocked_queue_hd, NULL ); + waitThread ( blocked_queue_hd, NULL); } while (sleeping_queue != END_TSO_QUEUE) { - waitThread ( blocked_queue_hd, NULL ); + waitThread ( blocked_queue_hd, NULL); } } while (blocked_queue_hd != END_TSO_QUEUE || @@ -1994,31 +2143,62 @@ finishAllThreads ( void ) SchedulerStatus waitThread(StgTSO *tso, /*out*/StgClosure **ret) +{ + IF_DEBUG(scheduler, sched_belch("== scheduler: waiting for thread (%d)\n", tso->id)); +#if defined(THREADED_RTS) + return waitThread_(tso,ret, rtsFalse); +#else + return waitThread_(tso,ret); +#endif +} + +SchedulerStatus +waitThread_(StgTSO *tso, + /*out*/StgClosure **ret +#if defined(THREADED_RTS) + , rtsBool blockWaiting +#endif + ) { StgMainThread *m; SchedulerStatus stat; ACQUIRE_LOCK(&sched_mutex); + IF_DEBUG(scheduler, sched_belch("== scheduler: waiting for thread (%d)\n", tso->id)); m = stgMallocBytes(sizeof(StgMainThread), "waitThread"); m->tso = tso; m->ret = ret; m->stat = NoStatus; -#ifdef SMP - pthread_cond_init(&m->wakeup, NULL); +#if defined(RTS_SUPPORTS_THREADS) + initCondition(&m->wakeup); #endif m->link = main_threads; main_threads = m; - IF_DEBUG(scheduler, fprintf(stderr, "== scheduler: new main thread (%d)\n", - m->tso->id)); + IF_DEBUG(scheduler, sched_belch("== scheduler: new main thread (%d)\n", m->tso->id)); -#ifdef SMP - do { - pthread_cond_wait(&m->wakeup, &sched_mutex); - } while (m->stat == NoStatus); +#if defined(RTS_SUPPORTS_THREADS) + +# if defined(THREADED_RTS) + if (!blockWaiting) { + /* In the threaded case, the OS thread that called main() + * gets to enter the RTS directly without going via another + * task/thread. 
+ */ + RELEASE_LOCK(&sched_mutex); + schedule(); + ASSERT(m->stat != NoStatus); + } else +# endif + { + IF_DEBUG(scheduler, sched_belch("sfoo")); + do { + waitCondition(&m->wakeup, &sched_mutex); + } while (m->stat == NoStatus); + } #elif defined(GRAN) /* GranSim specific init */ CurrentTSO = m->tso; // the TSO to run @@ -2027,21 +2207,25 @@ waitThread(StgTSO *tso, /*out*/StgClosure **ret) schedule(); #else + RELEASE_LOCK(&sched_mutex); schedule(); ASSERT(m->stat != NoStatus); #endif stat = m->stat; -#ifdef SMP - pthread_cond_destroy(&m->wakeup); +#if defined(RTS_SUPPORTS_THREADS) + closeCondition(&m->wakeup); #endif IF_DEBUG(scheduler, fprintf(stderr, "== scheduler: main thread (%d) finished\n", m->tso->id)); free(m); - RELEASE_LOCK(&sched_mutex); +#if defined(THREADED_RTS) + if (blockWaiting) +#endif + RELEASE_LOCK(&sched_mutex); return stat; } @@ -2166,11 +2350,9 @@ take_off_run_queue(StgTSO *tso) { KH @ 25/10/99 */ -static void +void GetRoots(evac_fn evac) { - StgMainThread *m; - #if defined(GRAN) { nat i; @@ -2209,14 +2391,11 @@ GetRoots(evac_fn evac) } #endif - for (m = main_threads; m != NULL; m = m->link) { - evac((StgClosure **)&m->tso); - } if (suspended_ccalling_threads != END_TSO_QUEUE) { evac((StgClosure **)&suspended_ccalling_threads); } -#if defined(SMP) || defined(PAR) || defined(GRAN) +#if defined(PAR) || defined(GRAN) markSparkQueue(evac); #endif } @@ -2239,13 +2418,18 @@ void (*extra_roots)(evac_fn); void performGC(void) { + /* Obligated to hold this lock upon entry */ + ACQUIRE_LOCK(&sched_mutex); GarbageCollect(GetRoots,rtsFalse); + RELEASE_LOCK(&sched_mutex); } void performMajorGC(void) { + ACQUIRE_LOCK(&sched_mutex); GarbageCollect(GetRoots,rtsTrue); + RELEASE_LOCK(&sched_mutex); } static void @@ -2258,8 +2442,10 @@ AllRoots(evac_fn evac) void performGCWithRoots(void (*get_roots)(evac_fn)) { + ACQUIRE_LOCK(&sched_mutex); extra_roots = get_roots; GarbageCollect(AllRoots,rtsFalse); + RELEASE_LOCK(&sched_mutex); } /* ----------------------------------------------------------------------------- @@ -2306,7 +2492,7 @@ threadStackOverflow(StgTSO *tso) IF_DEBUG(scheduler, fprintf(stderr,"== scheduler: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size)); dest = (StgTSO *)allocate(new_tso_size); - TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0); + TICK_ALLOC_TSO(new_stack_size,0); /* copy the TSO block and the old stack into the new area */ memcpy(dest,tso,TSO_STRUCT_SIZE); @@ -2674,13 +2860,15 @@ interruptStgRts(void) NB: only the type of the blocking queue is different in GranSim and GUM the operations on the queue-elements are the same long live polymorphism! + + Locks: sched_mutex is held upon entry and exit. + */ static void unblockThread(StgTSO *tso) { StgBlockingQueueElement *t, **last; - ACQUIRE_LOCK(&sched_mutex); switch (tso->why_blocked) { case NotBlocked: @@ -2802,20 +2990,20 @@ unblockThread(StgTSO *tso) tso->why_blocked = NotBlocked; tso->block_info.closure = NULL; PUSH_ON_RUN_QUEUE(tso); - RELEASE_LOCK(&sched_mutex); } #else static void unblockThread(StgTSO *tso) { StgTSO *t, **last; + + /* To avoid locking unnecessarily. 
+ */
+ if (tso->why_blocked == NotBlocked) {
+ return;
+ }

- ACQUIRE_LOCK(&sched_mutex);
 switch (tso->why_blocked) {

- case NotBlocked:
- return; /* not blocked */
-
 case BlockedOnMVar:
 ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
 {
@@ -2927,7 +3115,6 @@ unblockThread(StgTSO *tso)
 tso->why_blocked = NotBlocked;
 tso->block_info.closure = NULL;
 PUSH_ON_RUN_QUEUE(tso);
- RELEASE_LOCK(&sched_mutex);
 }
 #endif

@@ -2961,6 +3148,8 @@ unblockThread(StgTSO *tso)
 * CATCH_FRAME on the stack. In either case, we strip the entire
 * stack and replace the thread with a zombie.
 *
+ * Locks: sched_mutex held upon entry and exit.
+ *
 * -------------------------------------------------------------------------- */

 void
@@ -2970,6 +3159,16 @@ deleteThread(StgTSO *tso)
 }

 void
+raiseAsyncWithLock(StgTSO *tso, StgClosure *exception)
+{
+ /* When raising async exceptions from contexts where sched_mutex isn't held,
+ use raiseAsyncWithLock(). */
+ ACQUIRE_LOCK(&sched_mutex);
+ raiseAsync(tso,exception);
+ RELEASE_LOCK(&sched_mutex);
+}
+
+void
 raiseAsync(StgTSO *tso, StgClosure *exception)
 {
 StgUpdateFrame* su = tso->su;
@@ -2985,6 +3184,7 @@ raiseAsync(StgTSO *tso, StgClosure *exception)
 /* Remove it from any blocking queues */
 unblockThread(tso);

+ IF_DEBUG(scheduler, sched_belch("raising exception in thread %ld.", tso->id));
 /* The stack freezing code assumes there's a closure pointer on
 * the top of the stack. This isn't always the case with compiled
 * code, so we have to push a dummy closure on the top which just
@@ -3000,51 +3200,41 @@ raiseAsync(StgTSO *tso, StgClosure *exception)
 StgAP_UPD * ap;

 /* If we find a CATCH_FRAME, and we've got an exception to raise,
- * then build PAP(handler,exception,realworld#), and leave it on
- * top of the stack ready to enter.
+ * then build the THUNK raise(exception), and leave it on
+ * top of the CATCH_FRAME ready to enter.
 */
 if (get_itbl(su)->type == CATCH_FRAME && exception != NULL) {
+#ifdef PROFILING
 StgCatchFrame *cf = (StgCatchFrame *)su;
+#endif
+ StgClosure *raise;
+
 /* we've got an exception to raise, so let's pass it to the
 * handler in this frame.
 */
- ap = (StgAP_UPD *)allocate(sizeofW(StgPAP) + 2);
- TICK_ALLOC_UPD_PAP(3,0);
- SET_HDR(ap,&stg_PAP_info,cf->header.prof.ccs);
-
- ap->n_args = 2;
- ap->fun = cf->handler; /* :: Exception -> IO a */
- ap->payload[0] = exception;
- ap->payload[1] = ARG_TAG(0); /* realworld token */
-
- /* throw away the stack from Sp up to and including the
- * CATCH_FRAME.
- */
- sp = (P_)su + sizeofW(StgCatchFrame) - 1;
- tso->su = cf->link;
-
- /* Restore the blocked/unblocked state for asynchronous exceptions
- * at the CATCH_FRAME.
- *
- * If exceptions were unblocked at the catch, arrange that they
- * are unblocked again after executing the handler by pushing an
- * unblockAsyncExceptions_ret stack frame.
+ raise = (StgClosure *)allocate(sizeofW(StgClosure)+1);
+ TICK_ALLOC_SE_THK(1,0);
+ SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
+ raise->payload[0] = exception;
+
+ /* throw away the stack from Sp up to the CATCH_FRAME.
 */
- if (!cf->exceptions_blocked) {
- *(sp--) = (W_)&stg_unblockAsyncExceptionszh_ret_info;
- }
-
- /* Ensure that async exceptions are blocked when running the handler.
+ sp = (P_)su - 1;
+
+ /* Ensure that async exceptions are blocked now, so we don't get
+ * a surprise exception before we get around to executing the
+ * handler.
 */
 if (tso->blocked_exceptions == NULL) {
- tso->blocked_exceptions = END_TSO_QUEUE;
+ tso->blocked_exceptions = END_TSO_QUEUE;
 }
-
- /* Put the newly-built PAP on top of the stack, ready to execute
+
+ /* Put the newly-built THUNK on top of the stack, ready to execute
 * when the thread restarts.
 */
- sp[0] = (W_)ap;
+ sp[0] = (W_)raise;
 tso->sp = sp;
+ tso->su = su;
 tso->what_next = ThreadEnterGHC;
 IF_DEBUG(sanity, checkTSO(tso));
 return;
@@ -3178,6 +3368,8 @@
 up and sent a signal: BlockedOnDeadMVar if the thread was blocked
 on an MVar, or NonTermination if the thread was blocked on a Black
 Hole.
+
+ Locks: sched_mutex is not held upon entry or exit.
 -------------------------------------------------------------------------- */

 void
@@ -3194,6 +3386,7 @@ resurrectThreads( StgTSO *threads )
 switch (tso->why_blocked) {
 case BlockedOnMVar:
 case BlockedOnException:
+ /* Called by GC - sched_mutex lock is currently held. */
 raiseAsync(tso,(StgClosure *)BlockedOnDeadMVar_closure);
 break;
 case BlockedOnBlackHole:
@@ -3218,6 +3411,8 @@
 *
 * This is only done in a deadlock situation in order to avoid
 * performance overhead in the normal case.
+ *
+ * Locks: sched_mutex is held upon entry and exit.
 * -------------------------------------------------------------------------- */

 static void
@@ -3316,6 +3511,11 @@ printThreadBlockage(StgTSO *tso)
 tso->block_info.closure, info_type(tso->block_info.closure));
 break;
 #endif
+#if defined(RTS_SUPPORTS_THREADS)
+ case BlockedOnCCall:
+ fprintf(stderr,"is blocked on an external call");
+ break;
+#endif
 default:
 barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%d)",
 tso->why_blocked, tso->id, tso);
@@ -3360,6 +3560,7 @@ printAllThreads(void)

 for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
 fprintf(stderr, "\tthread %d ", t->id);
+ if (t->label) fprintf(stderr,"[\"%s\"] ",t->label);
 printThreadStatus(t);
 fprintf(stderr,"\n");
 }
@@ -3539,7 +3740,7 @@ sched_belch(char *s, ...)
 va_list ap;
 va_start(ap,s);
 #ifdef SMP
- fprintf(stderr, "scheduler (task %ld): ", pthread_self());
+ fprintf(stderr, "scheduler (task %ld): ", osThreadId());
 #elif defined(PAR)
 fprintf(stderr, "== ");
 #else
@@ -3556,18 +3757,15 @@
 //@subsection Index

 //@index
-//* MainRegTable:: @cindex\s-+MainRegTable
 //* StgMainThread:: @cindex\s-+StgMainThread
 //* awaken_blocked_queue:: @cindex\s-+awaken_blocked_queue
 //* blocked_queue_hd:: @cindex\s-+blocked_queue_hd
 //* blocked_queue_tl:: @cindex\s-+blocked_queue_tl
 //* context_switch:: @cindex\s-+context_switch
 //* createThread:: @cindex\s-+createThread
-//* free_capabilities:: @cindex\s-+free_capabilities
 //* gc_pending_cond:: @cindex\s-+gc_pending_cond
 //* initScheduler:: @cindex\s-+initScheduler
 //* interrupted:: @cindex\s-+interrupted
-//* n_free_capabilities:: @cindex\s-+n_free_capabilities
 //* next_thread_id:: @cindex\s-+next_thread_id
 //* print_bq:: @cindex\s-+print_bq
 //* run_queue_hd:: @cindex\s-+run_queue_hd
@@ -3575,7 +3773,5 @@
 //* sched_mutex:: @cindex\s-+sched_mutex
 //* schedule:: @cindex\s-+schedule
 //* take_off_run_queue:: @cindex\s-+take_off_run_queue
-//* task_ids:: @cindex\s-+task_ids
 //* term_mutex:: @cindex\s-+term_mutex
-//* thread_ready_cond:: @cindex\s-+thread_ready_cond
 //@end index
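
The revision above leans on a few C idioms that are easy to get wrong, so some small stand-alone sketches follow. All type and variable names in the sketches (FunTable, Node, Status, and so on) are hypothetical stand-ins, not the RTS's actual definitions.

First, the Capability recovery in the new suspendThread: the line "cap = (Capability *)((void *)reg - sizeof(StgFunTable));" only works because the StgRegTable sits immediately after the StgFunTable inside a Capability, with no padding between them. Here is a minimal sketch of the same container-of idiom, using offsetof() so the recovery stays correct even if padding appears; the member names are borrowed from the initCapability code this diff removes:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for StgFunTable / StgRegTable: only the
     * member order matters for this demonstration. */
    typedef struct { void *stgChk0, *stgChk1, *stgGCEnter1, *stgUpdatePAP; } FunTable;
    typedef struct { void *rCurrentTSO; } RegTable;

    typedef struct {
        FunTable f;   /* first member ...                              */
        RegTable r;   /* ... so &cap->r lies offsetof(Cap, r) bytes in */
    } Cap;

    /* Recover the enclosing Cap from a pointer to its RegTable member.
     * suspendThread subtracts sizeof(StgFunTable), which assumes there is
     * no padding before the RegTable; offsetof() avoids that bet. */
    static Cap *capOfRegTable(RegTable *reg)
    {
        return (Cap *)((char *)reg - offsetof(Cap, r));
    }

    int main(void)
    {
        Cap cap;
        assert(capOfRegTable(&cap.r) == &cap);
        printf("offsetof(Cap, r) = %zu, sizeof(FunTable) = %zu\n",
               offsetof(Cap, r), sizeof(FunTable));
        return 0;
    }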
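Second, forkProcess and the rewritten deleteAllThreads both walk a thread list with the successor captured before calling deleteThread(), because deleting a TSO can unlink it and invalidate its link field. A toy sketch of that traversal pattern, with a hypothetical Node type standing in for a TSO:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy intrusive list node; TSOs chain the same way via t->link. */
    typedef struct Node {
        int id;
        struct Node *link;
    } Node;

    int main(void)
    {
        Node *all = NULL, *t, *next, *keep;
        int i;

        /* build a list 3 -> 2 -> 1 -> 0 */
        for (i = 0; i < 4; i++) {
            t = malloc(sizeof *t);
            t->id = i;
            t->link = all;
            all = t;
        }
        keep = all->link;   /* survivor, like the tso passed to forkProcess */

        /* Mirror of the forkProcess child loop: read t->link into 'next'
         * BEFORE deleting t, since t is invalid afterwards. */
        for (t = all; t != NULL; t = next) {
            next = t->link;
            if (t != keep)
                free(t);    /* deleteThread(t) analogue */
        }
        keep->link = NULL;  /* survivor now stands alone */

        printf("kept node %d\n", keep->id);
        free(keep);
        return 0;
    }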
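Third, waitThread_ blocks with "do { waitCondition(&m->wakeup, &sched_mutex); } while (m->stat == NoStatus);". The loop, rather than a single wait, matters: condition variables may wake spuriously, so the predicate must be re-tested with the mutex held. A free-standing pthreads sketch of the same protocol (the Status type and variable names are illustrative only); compile with -lpthread:

    #include <pthread.h>
    #include <stdio.h>

    typedef enum { NoStatus, Success } Status;

    static pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wakeup      = PTHREAD_COND_INITIALIZER;
    static Status stat = NoStatus;

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&sched_mutex);
        stat = Success;                   /* m->stat = Success;               */
        pthread_cond_broadcast(&wakeup);  /* broadcastCondition(&m->wakeup);  */
        pthread_mutex_unlock(&sched_mutex);
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;
        pthread_create(&tid, NULL, worker, NULL);

        pthread_mutex_lock(&sched_mutex);
        /* Re-test the predicate after every wakeup: never a bare wait. */
        while (stat == NoStatus)
            pthread_cond_wait(&wakeup, &sched_mutex);
        pthread_mutex_unlock(&sched_mutex);

        pthread_join(tid, NULL);
        printf("main thread finished: stat=%d\n", stat);
        return 0;
    }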
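Finally, the suspendThread/resumeThread pair uses the thread id as a token: suspend pushes the TSO onto suspended_ccalling_threads and hands the id back, and resume searches the list for that id, unlinking the match with a pointer-to-pointer so the head needs no special case. A simplified sketch with a hypothetical TSO struct, omitting capabilities and locking:

    #include <stdio.h>

    typedef struct TSO {
        unsigned id;
        struct TSO *link;
    } TSO;

    static TSO *suspended_ccalling_threads = NULL;  /* END_TSO_QUEUE analogue */

    /* suspendThread analogue: push the TSO, hand out its id as the token. */
    static unsigned suspendTSO(TSO *tso)
    {
        tso->link = suspended_ccalling_threads;
        suspended_ccalling_threads = tso;
        return tso->id;
    }

    /* resumeThread analogue: walk with a pointer-to-pointer so the match
     * can be unlinked without special-casing the list head. */
    static TSO *resumeTSO(unsigned tok)
    {
        TSO **prev, *tso;
        for (prev = &suspended_ccalling_threads; (tso = *prev) != NULL;
             prev = &tso->link) {
            if (tso->id == tok) {
                *prev = tso->link;   /* unlink */
                tso->link = NULL;
                return tso;
            }
        }
        return NULL;   /* the real code barfs: "resumeThread: thread not found" */
    }

    int main(void)
    {
        TSO a = {1, NULL}, b = {2, NULL};
        unsigned ta = suspendTSO(&a), tb = suspendTSO(&b);
        TSO *ra = resumeTSO(ta);
        TSO *rb = resumeTSO(tb);
        printf("resumed %u and %u\n", ra->id, rb->id);
        return 0;
    }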