1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.h,v 1.40 2003/10/01 10:49:09 wolfgang Exp $
4 * (c) The GHC Team 1998-1999
6 * Prototypes for functions in Schedule.c
7 * (RTS internal scheduler interface)
9 * -------------------------------------------------------------------------*/
11 #ifndef __SCHEDULE_H__
12 #define __SCHEDULE_H__
13 #include "OSThreads.h"
/* initScheduler(), exitScheduler(), startTasks()
 *
 * Called from STG : no
 * Locks assumed : none
 */
20 extern void initScheduler ( void );
21 extern void exitScheduler ( void );
/* awakenBlockedQueue()
 *
 * Takes a pointer to the beginning of a blocked TSO queue, and
 * wakes up the entire queue.
 *
 * Called from STG : yes
 * Locks assumed : none
 */
32 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
34 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
36 void awakenBlockedQueue(StgTSO *tso);
37 #if defined(RTS_SUPPORTS_THREADS)
38 void awakenBlockedQueueNoLock(StgTSO *tso);
/* unblockOne()
 *
 * Takes a pointer to the beginning of a blocked TSO queue, and
 * removes the first thread, placing it on the runnable queue.
 *
 * Called from STG : yes
 * Locks assumed : none
 */
50 #if defined(GRAN) || defined(PAR)
51 StgBlockingQueueElement *unblockOne(StgBlockingQueueElement *bqe, StgClosure *node);
53 StgTSO *unblockOne(StgTSO *tso);
/* raiseAsync()
 *
 * Raises an exception asynchronously in the specified thread.
 *
 * Called from STG : yes
 * Locks assumed : none
 */
63 void raiseAsync(StgTSO *tso, StgClosure *exception);
64 void raiseAsyncWithLock(StgTSO *tso, StgClosure *exception);
/* awaitEvent(rtsBool wait)
 *
 * Checks for blocked threads that need to be woken.
 *
 * Called from STG : NO
 * Locks assumed : sched_mutex
 */
73 void awaitEvent(rtsBool wait); /* In Select.c */
/* wakeUpSleepingThreads(nat ticks)
 *
 * Wakes up any sleeping threads whose timers have expired.
 *
 * Called from STG : NO
 * Locks assumed : sched_mutex
 */
82 rtsBool wakeUpSleepingThreads(nat); /* In Select.c */
/* wakeBlockedWorkerThread()
 *
 * If a worker thread is currently blocked in awaitEvent(), interrupt it.
 *
 * Called from STG : NO
 * Locks assumed : sched_mutex
 */
91 void wakeBlockedWorkerThread(void); /* In Select.c */
/* GetRoots(evac_fn f)
 *
 * Call f() for each root known to the scheduler.
 *
 * Called from STG : NO
 * Locks assumed : ???? (NOTE(review): unspecified in the original too)
 */
101 void GetRoots(evac_fn);
103 // ToDo: check whether all fcts below are used in the SMP version, too
105 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
106 void unlink_from_bq(StgTSO* tso, StgClosure* node);
107 void initThread(StgTSO *tso, nat stack_size, StgInt pri);
109 nat run_queue_len(void);
110 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
111 void initThread(StgTSO *tso, nat stack_size);
113 char *info_type(StgClosure *closure); // dummy
114 char *info_type_by_ip(StgInfoTable *ip); // dummy
115 void awaken_blocked_queue(StgTSO *q);
116 void initThread(StgTSO *tso, nat stack_size);
/* Context switch flag.
 * Locks required : sched_mutex
 */
122 extern nat context_switch;
123 extern rtsBool interrupted;
126 extern nat timestamp;
/* Thread queues.
 * Locks required : sched_mutex
 *
 * In GranSim we have one run/blocked_queue per PE.
 */
134 // run_queue_hds defined in GranSim.h
136 extern StgTSO *run_queue_hd, *run_queue_tl;
137 extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
138 extern StgTSO *sleeping_queue;
140 /* Linked list of all threads. */
141 extern StgTSO *all_threads;
143 #if defined(RTS_SUPPORTS_THREADS)
144 /* Schedule.c has detailed info on what these do */
145 extern Mutex sched_mutex;
146 extern Condition thread_ready_cond;
147 extern Condition returning_worker_cond;
148 extern nat rts_n_waiting_workers;
149 extern nat rts_n_waiting_tasks;
152 StgBool rtsSupportsBoundThreads(void);
153 StgBool isThreadBound(StgTSO *tso);
154 StgInt forkProcess(StgTSO *tso);
156 extern SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);
159 /* Called by shutdown_handler(). */
160 void interruptStgRts ( void );
162 void raiseAsync(StgTSO *tso, StgClosure *exception);
163 nat run_queue_len(void);
165 void resurrectThreads( StgTSO * );
/* Main threads:
 *
 * These are the threads which clients have requested that we run.
 *
 * In a 'threaded' build, each of these corresponds to one bound thread.
 * The pointer to the StgMainThread is passed as a parameter to schedule;
 * this invocation of schedule will always pass this main thread's
 * bound_thread_cond to waitForkWorkCapability; OS-thread-switching
 * takes place using passCapability.
 *
 * In non-threaded builds, clients are strictly nested: the first client calls
 * into the RTS, which might call out again to C with a _ccall_GC, and
 * eventually re-enter the RTS.
 *
 * This is non-abstract at the moment because the garbage collector
 * treats pointers to TSOs from the main thread list as "weak" - these
 * pointers won't prevent a thread from receiving a BlockedOnDeadMVar
 * exception. (NOTE(review): the tail of this sentence was lost in
 * extraction; reconstructed — verify against the original header.)
 *
 * Main threads information is kept in a linked list:
 */
188 typedef struct StgMainThread_ {
190 SchedulerStatus stat;
192 #if defined(RTS_SUPPORTS_THREADS)
193 #if defined(THREADED_RTS)
194 Condition bound_thread_cond;
199 struct StgMainThread_ *link;
/* Main thread queue.
 * Locks required: sched_mutex.
 */
205 extern StgMainThread *main_threads;
207 void printAllThreads(void);
#ifdef COMPILING_SCHEDULER
/* Debug helpers private to Schedule.c (hence static). */
static void printThreadBlockage(StgTSO *tso);
static void printThreadStatus(StgTSO *tso);
#endif /* COMPILING_SCHEDULER -- NOTE(review): restored missing #endif;
          without it the rest of the header is conditionally compiled away
          and the static prototypes leak into every includer. */
215 void print_bq (StgClosure *node);
218 void print_bqe (StgBlockingQueueElement *bqe);
221 void labelThread(StgPtr tso, char *label);
/* -----------------------------------------------------------------------------
 * Some convenient macros...
 * -------------------------------------------------------------------------*/
227 /* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
/* Add a thread to the end of the run queue.
 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
 */
/* Append tso to the run queue; requires tso->link == END_TSO_QUEUE.
 * (Restored the missing else-branch, closing brace and tail update —
 * as extracted, the macro had unbalanced braces.)
 */
#define APPEND_TO_RUN_QUEUE(tso)                \
    ASSERT(tso->link == END_TSO_QUEUE);         \
    if (run_queue_hd == END_TSO_QUEUE) {        \
      run_queue_hd = tso;                       \
    } else {                                    \
      run_queue_tl->link = tso;                 \
    }                                           \
    run_queue_tl = tso;
/* Push a thread on the beginning of the run queue. Used for
 * newly awakened threads, so they get run as soon as possible.
 */
/* Push tso on the front of the run queue, fixing up the tail pointer
 * when the queue was previously empty. (Restored the missing closing
 * brace of the if-statement.)
 */
#define PUSH_ON_RUN_QUEUE(tso)                  \
    tso->link = run_queue_hd;                   \
    run_queue_hd = tso;                         \
    if (run_queue_tl == END_TSO_QUEUE) {        \
      run_queue_tl = tso;                       \
    }
/* Pop the first thread off the runnable queue.
 */
/* Remove and return the head of the run queue (END_TSO_QUEUE if empty).
 * Uses a GNU C statement expression so the macro yields a value.
 * (Restored the missing closing braces and the trailing "t;" result —
 * as extracted, the statement expression was unterminated.)
 */
#define POP_RUN_QUEUE()                         \
  ({ StgTSO *t = run_queue_hd;                  \
    if (t != END_TSO_QUEUE) {                   \
      run_queue_hd = t->link;                   \
      t->link = END_TSO_QUEUE;                  \
      if (run_queue_hd == END_TSO_QUEUE) {      \
        run_queue_tl = END_TSO_QUEUE;           \
      }                                         \
    }                                           \
    t;                                          \
  })
/* Add a thread to the end of the blocked queue.
 */
/* Append tso to the blocked queue; requires tso->link == END_TSO_QUEUE.
 * (Restored the missing else-branch and closing brace.)
 */
#define APPEND_TO_BLOCKED_QUEUE(tso)            \
    ASSERT(tso->link == END_TSO_QUEUE);         \
    if (blocked_queue_hd == END_TSO_QUEUE) {    \
      blocked_queue_hd = tso;                   \
    } else {                                    \
      blocked_queue_tl->link = tso;             \
    }                                           \
    blocked_queue_tl = tso;
/* Signal that a runnable thread has become available, in
 * case there are any waiting tasks to execute it.
 */
#if defined(RTS_SUPPORTS_THREADS)
/* NOTE(review): the threaded variant originally ended with a line
 * continuation, so at least one trailing statement of this macro was lost
 * in extraction — verify against Schedule.c history before relying on it.
 * Restored the missing #else/#endif so both branches are selectable. */
#define THREAD_RUNNABLE()                       \
    wakeBlockedWorkerThread();
#else
#define THREAD_RUNNABLE() /* nothing */
#endif
/* Check whether various thread queues are empty
 */
/* True iff the given queue head marks an empty queue.
 * The parameter is parenthesized so expressions like EMPTY_QUEUE(p = q)
 * parse as intended (standard macro hygiene; CERT PRE01-C). */
#define EMPTY_QUEUE(q)         ((q) == END_TSO_QUEUE)

#define EMPTY_RUN_QUEUE()      (EMPTY_QUEUE(run_queue_hd))
#define EMPTY_BLOCKED_QUEUE()  (EMPTY_QUEUE(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (EMPTY_QUEUE(sleeping_queue))

/* True iff no thread is runnable, blocked, or sleeping. */
#define EMPTY_THREAD_QUEUES()  (EMPTY_RUN_QUEUE() && \
                                EMPTY_BLOCKED_QUEUE() && \
                                EMPTY_SLEEPING_QUEUE())
#if defined(RTS_SUPPORTS_THREADS)
/* If no task is waiting for a capability,
 * and if there is work to be done
 * or if we need to wait for IO or delay requests,
 * spawn a new worker thread.
 */
void startSchedulerTaskIfNecessary(void);
#endif /* RTS_SUPPORTS_THREADS -- NOTE(review): restored missing #endif and
          the "void" return type that was on a lost line (implicit int is
          invalid in C99). */
309 #endif /* __SCHEDULE_H__ */