/* -----------------------------------------------------------------------------
 * $Id: Schedule.h,v 1.45 2004/03/01 14:18:36 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Prototypes for functions in Schedule.c
 * (RTS internal scheduler interface)
 *
 * -------------------------------------------------------------------------*/
11 #ifndef __SCHEDULE_H__
12 #define __SCHEDULE_H__
13 #include "OSThreads.h"
/* initScheduler(), exitScheduler(), startTasks()
 *
 * Called from STG  :  no
 * Locks assumed    :  none
 */
extern void initScheduler ( void );
extern void exitScheduler ( void );
23 /* awakenBlockedQueue()
25 * Takes a pointer to the beginning of a blocked TSO queue, and
26 * wakes up the entire queue.
28 * Called from STG : yes
29 * Locks assumed : none
32 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
34 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
36 void awakenBlockedQueue (StgTSO *tso);
37 void awakenBlockedQueueNoLock (StgTSO *tso);
42 * Takes a pointer to the beginning of a blocked TSO queue, and
43 * removes the first thread, placing it on the runnable queue.
45 * Called from STG : yes
46 * Locks assumed : none
48 #if defined(GRAN) || defined(PAR)
49 StgBlockingQueueElement *unblockOne(StgBlockingQueueElement *bqe, StgClosure *node);
51 StgTSO *unblockOne(StgTSO *tso);
56 * Raises an exception asynchronously in the specified thread.
58 * Called from STG : yes
59 * Locks assumed : none
61 void raiseAsync(StgTSO *tso, StgClosure *exception);
62 void raiseAsyncWithLock(StgTSO *tso, StgClosure *exception);
64 /* awaitEvent(rtsBool wait)
66 * Checks for blocked threads that need to be woken.
68 * Called from STG : NO
69 * Locks assumed : sched_mutex
71 void awaitEvent(rtsBool wait); /* In Select.c */
73 /* wakeUpSleepingThreads(nat ticks)
75 * Wakes up any sleeping threads whose timers have expired.
77 * Called from STG : NO
78 * Locks assumed : sched_mutex
80 rtsBool wakeUpSleepingThreads(nat); /* In Select.c */
/* wakeBlockedWorkerThread()
 *
 * If a worker thread is currently blocked in awaitEvent(), interrupt it.
 *
 * Called from STG  :  NO
 * Locks assumed    :  sched_mutex
 */
void wakeBlockedWorkerThread(void);  /* In Select.c */
/* resetWorkerWakeupPipeAfterFork()
 *
 * Notify Select.c that a fork() has occurred
 *
 * Called from STG  :  NO
 * Locks assumed    :  don't care, but must be called right after fork()
 */
void resetWorkerWakeupPipeAfterFork(void);  /* In Select.c */
100 /* GetRoots(evac_fn f)
102 * Call f() for each root known to the scheduler.
104 * Called from STG : NO
105 * Locks assumed : ????
107 void GetRoots(evac_fn);
109 // ToDo: check whether all fcts below are used in the SMP version, too
111 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
112 void unlink_from_bq(StgTSO* tso, StgClosure* node);
113 void initThread(StgTSO *tso, nat stack_size, StgInt pri);
115 nat run_queue_len(void);
116 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
117 void initThread(StgTSO *tso, nat stack_size);
119 char *info_type(StgClosure *closure); // dummy
120 char *info_type_by_ip(StgInfoTable *ip); // dummy
121 void awaken_blocked_queue(StgTSO *q);
122 void initThread(StgTSO *tso, nat stack_size);
125 /* Context switch flag.
126 * Locks required : sched_mutex
128 extern nat context_switch;
129 extern rtsBool interrupted;
132 extern nat timestamp;
135 * Locks required : sched_mutex
137 * In GranSim we have one run/blocked_queue per PE.
140 // run_queue_hds defined in GranSim.h
142 extern StgTSO *run_queue_hd, *run_queue_tl;
143 extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
144 extern StgTSO *sleeping_queue;
146 /* Linked list of all threads. */
147 extern StgTSO *all_threads;
149 #if defined(RTS_SUPPORTS_THREADS)
150 /* Schedule.c has detailed info on what these do */
151 extern Mutex sched_mutex;
152 extern Condition returning_worker_cond;
153 extern nat rts_n_waiting_workers;
154 extern nat rts_n_waiting_tasks;
157 StgBool rtsSupportsBoundThreads(void);
158 StgBool isThreadBound(StgTSO *tso);
159 StgInt forkProcess(HsStablePtr *entry);
161 extern SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);
164 /* Called by shutdown_handler(). */
165 void interruptStgRts ( void );
167 void raiseAsync(StgTSO *tso, StgClosure *exception);
168 nat run_queue_len(void);
170 void resurrectThreads( StgTSO * );
174 * These are the threads which clients have requested that we run.
176 * In a 'threaded' build, each of these corresponds to one bound thread.
177 * The pointer to the StgMainThread is passed as a parameter to schedule;
178 * this invocation of schedule will always pass this main thread's
179 * bound_thread_cond to waitForkWorkCapability; OS-thread-switching
180 * takes place using passCapability.
182 * In non-threaded builds, clients are strictly nested: the first client calls
183 * into the RTS, which might call out again to C with a _ccall_GC, and
184 * eventually re-enter the RTS.
186 * This is non-abstract at the moment because the garbage collector
187 * treats pointers to TSOs from the main thread list as "weak" - these
188 * pointers won't prevent a thread from receiving a BlockedOnDeadMVar
191 * Main threads information is kept in a linked list:
193 typedef struct StgMainThread_ {
195 SchedulerStatus stat;
197 #if defined(RTS_SUPPORTS_THREADS)
198 #if defined(THREADED_RTS)
199 Condition bound_thread_cond;
204 struct StgMainThread_ *prev;
205 struct StgMainThread_ *link;
208 /* Main thread queue.
209 * Locks required: sched_mutex.
211 extern StgMainThread *main_threads;
213 void printAllThreads(void);
214 #ifdef COMPILING_SCHEDULER
215 static void printThreadBlockage(StgTSO *tso);
216 static void printThreadStatus(StgTSO *tso);
221 void print_bq (StgClosure *node);
224 void print_bqe (StgBlockingQueueElement *bqe);
227 void labelThread(StgPtr tso, char *label);
/* -----------------------------------------------------------------------------
 * Some convenient macros...
 * -------------------------------------------------------------------------*/

/* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
/* Add a thread to the end of the run queue.
 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
 * Wrapped in do/while(0) so the multi-statement body is safe inside an
 * unbraced if/else at the call site.
 */
#define APPEND_TO_RUN_QUEUE(tso)		\
    do {					\
      ASSERT(tso->link == END_TSO_QUEUE);	\
      if (run_queue_hd == END_TSO_QUEUE) {	\
        run_queue_hd = tso;			\
      } else {					\
        run_queue_tl->link = tso;		\
      }						\
      run_queue_tl = tso;			\
    } while (0)
/* Push a thread on the beginning of the run queue. Used for
 * newly awakened threads, so they get run as soon as possible.
 * Wrapped in do/while(0) so the multi-statement body is safe inside an
 * unbraced if/else at the call site.
 */
#define PUSH_ON_RUN_QUEUE(tso)			\
    do {					\
      tso->link = run_queue_hd;			\
      run_queue_hd = tso;			\
      if (run_queue_tl == END_TSO_QUEUE) {	\
        run_queue_tl = tso;			\
      }						\
    } while (0)
/* Pop the first thread off the runnable queue, storing it in pt.
 * Yields END_TSO_QUEUE in pt if the queue is empty; resets
 * run_queue_tl when the last element is removed.
 * (Temporary renamed from the original __tmp_t: identifiers starting
 * with a double underscore are reserved for the implementation.)
 */
#define POP_RUN_QUEUE(pt)				\
  do { StgTSO *pop_tmp_t = run_queue_hd;		\
    if (pop_tmp_t != END_TSO_QUEUE) {			\
      run_queue_hd = pop_tmp_t->link;			\
      pop_tmp_t->link = END_TSO_QUEUE;			\
      if (run_queue_hd == END_TSO_QUEUE) {		\
        run_queue_tl = END_TSO_QUEUE;			\
      }							\
    }							\
    pt = pop_tmp_t;					\
  } while (0)
/* Add a thread to the end of the blocked queue.
 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
 * Wrapped in do/while(0) so the multi-statement body is safe inside an
 * unbraced if/else at the call site.
 */
#define APPEND_TO_BLOCKED_QUEUE(tso)		\
    do {					\
      ASSERT(tso->link == END_TSO_QUEUE);	\
      if (blocked_queue_hd == END_TSO_QUEUE) {	\
        blocked_queue_hd = tso;			\
      } else {					\
        blocked_queue_tl->link = tso;		\
      }						\
      blocked_queue_tl = tso;			\
    } while (0)
/* Signal that a runnable thread has become available, in
 * case there are any waiting tasks to execute it.
 *
 * NOTE(review): the extracted listing shows a continuation backslash
 * after wakeBlockedWorkerThread(), so a second statement was dropped
 * there — confirm against Schedule.c before relying on this macro.
 */
#if defined(RTS_SUPPORTS_THREADS)
#define THREAD_RUNNABLE()		\
  do {					\
    wakeBlockedWorkerThread();		\
  } while (0)
#else
#define THREAD_RUNNABLE()  /* nothing */
#endif
/* Check whether various thread queues are empty.
 * The queue argument is parenthesized so any expression can be passed
 * without precedence surprises.
 */
#define EMPTY_QUEUE(q)          ((q) == END_TSO_QUEUE)

#define EMPTY_RUN_QUEUE()       (EMPTY_QUEUE(run_queue_hd))
#define EMPTY_BLOCKED_QUEUE()   (EMPTY_QUEUE(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE()  (EMPTY_QUEUE(sleeping_queue))

#define EMPTY_THREAD_QUEUES()   (EMPTY_RUN_QUEUE() && \
				 EMPTY_BLOCKED_QUEUE() && \
				 EMPTY_SLEEPING_QUEUE())
#if defined(RTS_SUPPORTS_THREADS)
/* If no task is waiting for a capability,
 * and if there is work to be done
 * or if we need to wait for IO or delay requests,
 * spawn a new worker thread.
 */
void startSchedulerTaskIfNecessary(void);
#endif
/* printf-style scheduler message hook (defined in Schedule.c). */
extern void sched_belch(char *s, ...);
319 #endif /* __SCHEDULE_H__ */