X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FSchedule.h;h=dc3763d16cc1e6ae4eaaea86d9426379b18b803f;hb=a20ec0ced36bca7cd0594528922dbe31a6186eae;hp=a8a3b01cd39cd6703db12917c89d9fde5798bad1;hpb=837abbff4b92909533932fa16cdd31fc8ab10b12;p=ghc-hetmet.git diff --git a/ghc/rts/Schedule.h b/ghc/rts/Schedule.h index a8a3b01..dc3763d 100644 --- a/ghc/rts/Schedule.h +++ b/ghc/rts/Schedule.h @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: Schedule.h,v 1.15 2000/01/14 14:06:48 hwloidl Exp $ + * $Id: Schedule.h,v 1.45 2004/03/01 14:18:36 simonmar Exp $ * * (c) The GHC Team 1998-1999 * @@ -8,31 +8,18 @@ * * -------------------------------------------------------------------------*/ -//@menu -//* Scheduler Functions:: -//* Scheduler Vars and Data Types:: -//* Some convenient macros:: -//* Index:: -//@end menu +#ifndef __SCHEDULE_H__ +#define __SCHEDULE_H__ +#include "OSThreads.h" -//@node Scheduler Functions, Scheduler Vars and Data Types -//@subsection Scheduler Functions - -//@cindex initScheduler -//@cindex exitScheduler -//@cindex startTasks /* initScheduler(), exitScheduler(), startTasks() * * Called from STG : no * Locks assumed : none */ -void initScheduler( void ); -void exitScheduler( void ); -#ifdef SMP -void startTasks( void ); -#endif +extern void initScheduler ( void ); +extern void exitScheduler ( void ); -//@cindex awakenBlockedQueue /* awakenBlockedQueue() * * Takes a pointer to the beginning of a blocked TSO queue, and @@ -46,10 +33,10 @@ void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node); #elif defined(PAR) void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node); #else -void awakenBlockedQueue(StgTSO *tso); +void awakenBlockedQueue (StgTSO *tso); +void awakenBlockedQueueNoLock (StgTSO *tso); #endif -//@cindex unblockOne /* unblockOne() * * Takes a pointer to the beginning of a blocked TSO queue, and @@ -58,15 +45,12 @@ void awakenBlockedQueue(StgTSO *tso); * Called from STG : yes * Locks assumed : none */ -#if defined(GRAN) -StgTSO *unblockOne(StgTSO *tso, StgClosure *node); -#elif defined(PAR) -StgTSO *unblockOne(StgTSO *tso, StgClosure *node); +#if defined(GRAN) || defined(PAR) +StgBlockingQueueElement *unblockOne(StgBlockingQueueElement *bqe, StgClosure *node); #else StgTSO *unblockOne(StgTSO *tso); #endif -//@cindex raiseAsync /* raiseAsync() * * Raises an exception asynchronously in the specified thread. @@ -75,19 +59,54 @@ StgTSO *unblockOne(StgTSO *tso); * Locks assumed : none */ void raiseAsync(StgTSO *tso, StgClosure *exception); +void raiseAsyncWithLock(StgTSO *tso, StgClosure *exception); -//@cindex awaitEvent -/* awaitEvent() +/* awaitEvent(rtsBool wait) * - * Raises an exception asynchronously in the specified thread. + * Checks for blocked threads that need to be woken. * * Called from STG : NO * Locks assumed : sched_mutex */ void awaitEvent(rtsBool wait); /* In Select.c */ +/* wakeUpSleepingThreads(nat ticks) + * + * Wakes up any sleeping threads whose timers have expired. + * + * Called from STG : NO + * Locks assumed : sched_mutex + */ +rtsBool wakeUpSleepingThreads(nat); /* In Select.c */ + +/* wakeBlockedWorkerThread() + * + * If a worker thread is currently blocked in awaitEvent(), interrupt it. 
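
A minimal sketch, assuming the non-GRAN/PAR prototypes above (and not the actual Schedule.c code): unblockOne() detaches one TSO, makes it runnable and returns the next element of the chain, so the queue-level awakenBlockedQueue() can be thought of as a loop over it.

    static void
    awakenBlockedQueue_sketch (StgTSO *tso)
    {
        /* walk the chain of blocked TSOs; unblockOne() moves each one to
         * the run queue and hands back its successor */
        while (tso != END_TSO_QUEUE) {
            tso = unblockOne(tso);
        }
    }
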
+ * + * Called from STG : NO + * Locks assumed : sched_mutex + */ +void wakeBlockedWorkerThread(void); /* In Select.c */ + +/* resetWorkerWakeupPipeAfterFork() + * + * Notify Select.c that a fork() has occured + * + * Called from STG : NO + * Locks assumed : don't care, but must be called right after fork() + */ +void resetWorkerWakeupPipeAfterFork(void); /* In Select.c */ + +/* GetRoots(evac_fn f) + * + * Call f() for each root known to the scheduler. + * + * Called from STG : NO + * Locks assumed : ???? + */ +void GetRoots(evac_fn); + // ToDo: check whether all fcts below are used in the SMP version, too -//@cindex awaken_blocked_queue #if defined(GRAN) void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node); void unlink_from_bq(StgTSO* tso, StgClosure* node); @@ -103,92 +122,116 @@ void awaken_blocked_queue(StgTSO *q); void initThread(StgTSO *tso, nat stack_size); #endif -// debugging only -#ifdef DEBUG -extern void printThreadBlockage(StgTSO *tso); -#endif -void print_bq (StgClosure *node); - -//@node Scheduler Vars and Data Types, Some convenient macros, Scheduler Functions -//@subsection Scheduler Vars and Data Types - -//@cindex context_switch /* Context switch flag. * Locks required : sched_mutex */ extern nat context_switch; extern rtsBool interrupted; -extern nat ticks_since_select; - -//@cindex Capability -/* Capability type - */ -typedef StgRegTable Capability; - -/* Free capability list. - * Locks required: sched_mutex. - */ -#ifdef SMP -extern Capability *free_capabilities; -extern nat n_free_capabilities; -#else -extern Capability MainRegTable; -#endif +/* In Select.c */ +extern nat timestamp; /* Thread queues. * Locks required : sched_mutex + * + * In GranSim we have one run/blocked_queue per PE. */ +#if defined(GRAN) +// run_queue_hds defined in GranSim.h +#else extern StgTSO *run_queue_hd, *run_queue_tl; extern StgTSO *blocked_queue_hd, *blocked_queue_tl; - -#ifdef SMP -//@cindex sched_mutex -//@cindex thread_ready_cond -//@cindex gc_pending_cond -extern pthread_mutex_t sched_mutex; -extern pthread_cond_t thread_ready_cond; -extern pthread_cond_t gc_pending_cond; +extern StgTSO *sleeping_queue; #endif +/* Linked list of all threads. */ +extern StgTSO *all_threads; -//@cindex task_info -#ifdef SMP -typedef struct { - pthread_t id; - double elapsedtimestart; - double mut_time; - double mut_etime; - double gc_time; - double gc_etime; -} task_info; - -extern task_info *task_ids; +#if defined(RTS_SUPPORTS_THREADS) +/* Schedule.c has detailed info on what these do */ +extern Mutex sched_mutex; +extern Condition returning_worker_cond; +extern nat rts_n_waiting_workers; +extern nat rts_n_waiting_tasks; #endif -#if !defined(GRAN) -extern StgTSO *run_queue_hd, *run_queue_tl; -extern StgTSO *blocked_queue_hd, *blocked_queue_tl; -#endif +StgBool rtsSupportsBoundThreads(void); +StgBool isThreadBound(StgTSO *tso); +StgInt forkProcess(HsStablePtr *entry); -/* Needed by Hugs. - */ +extern SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret); + + +/* Called by shutdown_handler(). */ void interruptStgRts ( void ); -// ?? needed -- HWL void raiseAsync(StgTSO *tso, StgClosure *exception); nat run_queue_len(void); -//@node Some convenient macros, Index, Scheduler Vars and Data Types -//@subsection Some convenient macros +void resurrectThreads( StgTSO * ); + +/* Main threads: + * + * These are the threads which clients have requested that we run. + * + * In a 'threaded' build, each of these corresponds to one bound thread. 
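
A hedged sketch (a hypothetical helper, not the RTS implementation) of what run_queue_len() above amounts to: a walk over the run queue, which the caller is assumed to protect with sched_mutex.

    static nat
    run_queue_len_sketch (void)
    {
        nat n = 0;
        StgTSO *tso;
        /* caller holds sched_mutex while the queue is traversed */
        for (tso = run_queue_hd; tso != END_TSO_QUEUE; tso = tso->link) {
            n++;
        }
        return n;
    }
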
+ * The pointer to the StgMainThread is passed as a parameter to schedule; + * this invocation of schedule will always pass this main thread's + * bound_thread_cond to waitForkWorkCapability; OS-thread-switching + * takes place using passCapability. + * + * In non-threaded builds, clients are strictly nested: the first client calls + * into the RTS, which might call out again to C with a _ccall_GC, and + * eventually re-enter the RTS. + * + * This is non-abstract at the moment because the garbage collector + * treats pointers to TSOs from the main thread list as "weak" - these + * pointers won't prevent a thread from receiving a BlockedOnDeadMVar + * exception. + * + * Main threads information is kept in a linked list: + */ +typedef struct StgMainThread_ { + StgTSO * tso; + SchedulerStatus stat; + StgClosure ** ret; +#if defined(RTS_SUPPORTS_THREADS) +#if defined(THREADED_RTS) + Condition bound_thread_cond; +#else + Condition wakeup; +#endif +#endif + struct StgMainThread_ *prev; + struct StgMainThread_ *link; +} StgMainThread; + +/* Main thread queue. + * Locks required: sched_mutex. + */ +extern StgMainThread *main_threads; + +void printAllThreads(void); +#ifdef COMPILING_SCHEDULER +static void printThreadBlockage(StgTSO *tso); +static void printThreadStatus(StgTSO *tso); +#endif +/* debugging only + */ +#ifdef DEBUG +void print_bq (StgClosure *node); +#endif +#if defined(PAR) +void print_bqe (StgBlockingQueueElement *bqe); +#endif + +void labelThread(StgPtr tso, char *label); /* ----------------------------------------------------------------------------- * Some convenient macros... */ -#define END_TSO_QUEUE ((StgTSO *)(void*)&END_TSO_QUEUE_closure) -#define END_CAF_LIST ((StgCAF *)(void*)&END_TSO_QUEUE_closure) +/* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */ -//@cindex APPEND_TO_RUN_QUEUE /* Add a thread to the end of the run queue. * NOTE: tso->link should be END_TSO_QUEUE before calling this macro. */ @@ -201,7 +244,6 @@ nat run_queue_len(void); } \ run_queue_tl = tso; -//@cindex PUSH_ON_RUN_QUEUE /* Push a thread on the beginning of the run queue. Used for * newly awakened threads, so they get run as soon as possible. */ @@ -212,22 +254,20 @@ nat run_queue_len(void); run_queue_tl = tso; \ } -//@cindex POP_RUN_QUEUE /* Pop the first thread off the runnable queue. */ -#define POP_RUN_QUEUE() \ - ({ StgTSO *t = run_queue_hd; \ - if (t != END_TSO_QUEUE) { \ - run_queue_hd = t->link; \ - t->link = END_TSO_QUEUE; \ +#define POP_RUN_QUEUE(pt) \ + do { StgTSO *__tmp_t = run_queue_hd; \ + if (__tmp_t != END_TSO_QUEUE) { \ + run_queue_hd = __tmp_t->link; \ + __tmp_t->link = END_TSO_QUEUE; \ if (run_queue_hd == END_TSO_QUEUE) { \ run_queue_tl = END_TSO_QUEUE; \ } \ } \ - t; \ - }) + pt = __tmp_t; \ + } while(0) -//@cindex APPEND_TO_BLOCKED_QUEUE /* Add a thread to the end of the blocked queue. */ #define APPEND_TO_BLOCKED_QUEUE(tso) \ @@ -239,41 +279,41 @@ nat run_queue_len(void); } \ blocked_queue_tl = tso; -//@cindex THREAD_RUNNABLE /* Signal that a runnable thread has become available, in * case there are any waiting tasks to execute it. 
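
Usage sketch for the reworked macro above: POP_RUN_QUEUE() is now a plain statement that assigns into its argument (the old version was a GCC statement-expression returning the TSO), so callers look roughly like this (illustrative only, not taken from Schedule.c):

    {
        StgTSO *t;
        POP_RUN_QUEUE(t);        /* t is END_TSO_QUEUE if the queue was empty */
        if (t != END_TSO_QUEUE) {
            /* ... run the thread; if it yields, put it back on the tail.
             * t->link is already END_TSO_QUEUE here, as APPEND_TO_RUN_QUEUE
             * requires. */
            APPEND_TO_RUN_QUEUE(t);
        }
    }
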
*/ -#ifdef SMP +#if defined(RTS_SUPPORTS_THREADS) #define THREAD_RUNNABLE() \ - if (free_capabilities != NULL) { \ - pthread_cond_signal(&thread_ready_cond); \ - } \ + wakeBlockedWorkerThread(); \ context_switch = 1; #else #define THREAD_RUNNABLE() /* nothing */ #endif -//@node Index, , Some convenient macros -//@subsection Index - -//@index -//* APPEND_TO_BLOCKED_QUEUE:: @cindex\s-+APPEND_TO_BLOCKED_QUEUE -//* APPEND_TO_RUN_QUEUE:: @cindex\s-+APPEND_TO_RUN_QUEUE -//* Capability:: @cindex\s-+Capability -//* POP_RUN_QUEUE :: @cindex\s-+POP_RUN_QUEUE -//* PUSH_ON_RUN_QUEUE:: @cindex\s-+PUSH_ON_RUN_QUEUE -//* THREAD_RUNNABLE:: @cindex\s-+THREAD_RUNNABLE -//* awaitEvent:: @cindex\s-+awaitEvent -//* awakenBlockedQueue:: @cindex\s-+awakenBlockedQueue -//* awaken_blocked_queue:: @cindex\s-+awaken_blocked_queue -//* context_switch:: @cindex\s-+context_switch -//* exitScheduler:: @cindex\s-+exitScheduler -//* gc_pending_cond:: @cindex\s-+gc_pending_cond -//* initScheduler:: @cindex\s-+initScheduler -//* raiseAsync:: @cindex\s-+raiseAsync -//* sched_mutex:: @cindex\s-+sched_mutex -//* startTasks:: @cindex\s-+startTasks -//* task_info:: @cindex\s-+task_info -//* thread_ready_cond:: @cindex\s-+thread_ready_cond -//* unblockOne:: @cindex\s-+unblockOne -//@end index +/* Check whether various thread queues are empty + */ +#define EMPTY_QUEUE(q) (q == END_TSO_QUEUE) + +#define EMPTY_RUN_QUEUE() (EMPTY_QUEUE(run_queue_hd)) +#define EMPTY_BLOCKED_QUEUE() (EMPTY_QUEUE(blocked_queue_hd)) +#define EMPTY_SLEEPING_QUEUE() (EMPTY_QUEUE(sleeping_queue)) + +#define EMPTY_THREAD_QUEUES() (EMPTY_RUN_QUEUE() && \ + EMPTY_BLOCKED_QUEUE() && \ + EMPTY_SLEEPING_QUEUE()) + +#if defined(RTS_SUPPORTS_THREADS) +/* If no task is waiting for a capability, + * and if there is work to be done + * or if we need to wait for IO or delay requests, + * spawn a new worker thread. + */ +void +startSchedulerTaskIfNecessary(void); +#endif + +#ifdef DEBUG +extern void sched_belch(char *s, ...); +#endif + +#endif /* __SCHEDULE_H__ */
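
As a closing illustration (a hypothetical helper, not part of this header or of Schedule.c): a typical wake-up path, run with sched_mutex held, combines the queue macros with THREAD_RUNNABLE(), which in RTS_SUPPORTS_THREADS builds pokes a worker blocked in awaitEvent() via wakeBlockedWorkerThread() and requests a context switch.

    static void
    wakeThread_sketch (StgTSO *tso)
    {
        /* caller holds sched_mutex; newly awakened threads go on the front
         * of the run queue so they run as soon as possible */
        PUSH_ON_RUN_QUEUE(tso);
        THREAD_RUNNABLE();
    }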