1 /* -----------------------------------------------------------------------------
2 * $Id: Schedule.h,v 1.33 2002/04/13 05:33:03 sof Exp $
4 * (c) The GHC Team 1998-1999
6 * Prototypes for functions in Schedule.c
7 * (RTS internal scheduler interface)
9 * -------------------------------------------------------------------------*/
11 #ifndef __SCHEDULE_H__
12 #define __SCHEDULE_H__
13 #include "OSThreads.h"
/* initScheduler(), exitScheduler(), startTasks()
 *
 * Called from STG   : no
 * Locks assumed     : none
 */
20 extern void initScheduler ( void );
21 extern void exitScheduler ( void );
/* awakenBlockedQueue()
 *
 * Takes a pointer to the beginning of a blocked TSO queue, and
 * wakes up the entire queue.
 *
 * Called from STG   : yes
 * Locks assumed     : none
 */
32 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
34 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
36 void awakenBlockedQueue(StgTSO *tso);
/* unblockOne()
 *
 * Takes a pointer to the beginning of a blocked TSO queue, and
 * removes the first thread, placing it on the runnable queue.
 *
 * Called from STG   : yes
 * Locks assumed     : none
 */
47 #if defined(GRAN) || defined(PAR)
48 StgBlockingQueueElement *unblockOne(StgBlockingQueueElement *bqe, StgClosure *node);
50 StgTSO *unblockOne(StgTSO *tso);
/* raiseAsync()
 *
 * Raises an exception asynchronously in the specified thread.
 *
 * Called from STG   : yes
 * Locks assumed     : none
 */
60 void raiseAsync(StgTSO *tso, StgClosure *exception);
61 void raiseAsyncWithLock(StgTSO *tso, StgClosure *exception);
/* awaitEvent() -- in Select.c
 *
 * Raises an exception asynchronously in the specified thread.
 * [NOTE(review): the sentence above appears to describe raiseAsync, not
 *  awaitEvent -- verify the intended description against Select.c]
 *
 * Called from STG   : NO
 * Locks assumed     : sched_mutex
 */
70 void awaitEvent(rtsBool wait); /* In Select.c */
/* wakeUpSleepingThreads(nat ticks)
 *
 * Wakes up any sleeping threads whose timers have expired.
 *
 * Called from STG   : NO
 * Locks assumed     : sched_mutex
 */
79 rtsBool wakeUpSleepingThreads(nat); /* In Select.c */
/* GetRoots(evac_fn f)
 *
 * Call f() for each root known to the scheduler.
 *
 * Called from STG   : NO
 * Locks assumed     : ????
 */
88 void GetRoots(evac_fn);
90 // ToDo: check whether all fcts below are used in the SMP version, too
92 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
93 void unlink_from_bq(StgTSO* tso, StgClosure* node);
94 void initThread(StgTSO *tso, nat stack_size, StgInt pri);
96 nat run_queue_len(void);
97 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
98 void initThread(StgTSO *tso, nat stack_size);
100 char *info_type(StgClosure *closure); // dummy
101 char *info_type_by_ip(StgInfoTable *ip); // dummy
102 void awaken_blocked_queue(StgTSO *q);
103 void initThread(StgTSO *tso, nat stack_size);
/* Context switch flag.
 * Locks required  : sched_mutex
 */
109 extern nat context_switch;
110 extern rtsBool interrupted;
113 extern nat timestamp;
/* Thread queues.
 * Locks required  : sched_mutex
 *
 * In GranSim we have one run/blocked_queue per PE.
 */
121 // run_queue_hds defined in GranSim.h
123 extern StgTSO *run_queue_hd, *run_queue_tl;
124 extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
125 extern StgTSO *sleeping_queue;
127 /* Linked list of all threads. */
128 extern StgTSO *all_threads;
130 #if defined(RTS_SUPPORTS_THREADS)
131 /* Schedule.c has detailed info on what these do */
132 extern Mutex sched_mutex;
133 extern Condition thread_ready_cond;
134 extern Condition returning_worker_cond;
135 extern nat rts_n_waiting_workers;
136 extern nat rts_n_waiting_tasks;
139 StgInt forkProcess(StgTSO *tso);
141 /* Sigh, RTS-internal versions of waitThread(), scheduleThread(), and
142 rts_evalIO() for the use by main() only. ToDo: better. */
143 extern SchedulerStatus waitThread_(StgTSO *tso,
144 /*out*/StgClosure **ret
145 #if defined(THREADED_RTS)
146 , rtsBool blockWaiting
149 extern SchedulerStatus rts_mainEvalIO(HaskellObj p, /*out*/HaskellObj *ret);
152 /* Called by shutdown_handler(). */
153 void interruptStgRts ( void );
155 void raiseAsync(StgTSO *tso, StgClosure *exception);
156 nat run_queue_len(void);
158 void resurrectThreads( StgTSO * );
/* Main threads:
 *
 * These are the threads which clients have requested that we run.
 *
 * In a 'threaded' build, we might have several concurrent clients all
 * waiting for results, and each one will wait on a condition variable
 * until the result is available.
 *
 * In non-SMP, clients are strictly nested: the first client calls
 * into the RTS, which might call out again to C with a _ccall_GC, and
 * eventually re-enter the RTS.
 *
 * This is non-abstract at the moment because the garbage collector
 * treats pointers to TSOs from the main thread list as "weak" - these
 * pointers won't prevent a thread from receiving a BlockedOnDeadMVar
 * exception.
 *
 * Main threads information is kept in a linked list:
 */
179 typedef struct StgMainThread_ {
181 SchedulerStatus stat;
183 #if defined(RTS_SUPPORTS_THREADS)
186 struct StgMainThread_ *link;
/* Main thread queue.
 * Locks required: sched_mutex.
 */
192 extern StgMainThread *main_threads;
197 void printThreadBlockage(StgTSO *tso);
198 void printThreadStatus(StgTSO *tso);
199 void printAllThreads(void);
201 void print_bq (StgClosure *node);
203 void print_bqe (StgBlockingQueueElement *bqe);
/* -----------------------------------------------------------------------------
 * Some convenient macros...
 * -------------------------------------------------------------------------- */
210 /* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
/* Add a thread to the end of the run queue.
 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
 */
/* Append tso to the run queue, maintaining both run_queue_hd and
 * run_queue_tl.  (Restored the else-branch and tail update the macro had
 * lost; without them the if-statement is unbalanced and the tail pointer is
 * never advanced.) */
#define APPEND_TO_RUN_QUEUE(tso)		\
    ASSERT(tso->link == END_TSO_QUEUE);		\
    if (run_queue_hd == END_TSO_QUEUE) {	\
      run_queue_hd = tso;			\
    } else {					\
      run_queue_tl->link = tso;			\
    }						\
    run_queue_tl = tso;
/* Push a thread on the beginning of the run queue.  Used for
 * newly awakened threads, so they get run as soon as possible.
 */
/* Prepend tso to the run queue so it is scheduled next; if the queue was
 * empty, tso also becomes the tail.  (Restored the missing closing brace.) */
#define PUSH_ON_RUN_QUEUE(tso)			\
    tso->link = run_queue_hd;			\
    run_queue_hd = tso;				\
    if (run_queue_tl == END_TSO_QUEUE) {	\
      run_queue_tl = tso;			\
    }
/* Pop the first thread off the runnable queue.
 */
/* Remove and yield the first thread on the run queue; yields END_TSO_QUEUE
 * when the queue is empty.  Uses a GCC statement expression so the macro can
 * return a value.  (Restored the lost closing braces and the result
 * expression.) */
#define POP_RUN_QUEUE()				\
  ({ StgTSO *t = run_queue_hd;			\
    if (t != END_TSO_QUEUE) {			\
      run_queue_hd = t->link;			\
      t->link = END_TSO_QUEUE;			\
      if (run_queue_hd == END_TSO_QUEUE) {	\
	run_queue_tl = END_TSO_QUEUE;		\
      }						\
    }						\
    t;						\
  })
/* Add a thread to the end of the blocked queue.
 */
/* Append tso to the blocked queue, maintaining both blocked_queue_hd and
 * blocked_queue_tl.  (Restored the lost else-branch; without it the
 * if-statement is unbalanced and the head/tail updates both run.) */
#define APPEND_TO_BLOCKED_QUEUE(tso)		\
    ASSERT(tso->link == END_TSO_QUEUE);		\
    if (blocked_queue_hd == END_TSO_QUEUE) {	\
      blocked_queue_hd = tso;			\
    } else {					\
      blocked_queue_tl->link = tso;		\
    }						\
    blocked_queue_tl = tso;
/* Signal that a runnable thread has become available, in
 * case there are any waiting tasks to execute it.
 */
/* In the threaded RTS, nudge a waiting worker via thread_ready_cond when a
 * capability is free; otherwise a no-op.  (Restored the missing closing
 * brace, #else and #endif -- as written the two #defines conflicted.) */
#if defined(RTS_SUPPORTS_THREADS)
#define THREAD_RUNNABLE()			\
  if ( !noCapabilities() ) {			\
    signalCondition(&thread_ready_cond);	\
  }
#else
#define THREAD_RUNNABLE() /* nothing */
#endif
/* Check whether various thread queues are empty.
 */
/* True iff queue q is empty.  The argument is parenthesized so that
 * expression arguments expand safely (macro hygiene). */
#define EMPTY_QUEUE(q)          ((q) == END_TSO_QUEUE)

#define EMPTY_RUN_QUEUE()       (EMPTY_QUEUE(run_queue_hd))
#define EMPTY_BLOCKED_QUEUE()   (EMPTY_QUEUE(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE()  (EMPTY_QUEUE(sleeping_queue))

/* True iff no thread is runnable, blocked, or sleeping. */
#define EMPTY_THREAD_QUEUES()   (EMPTY_RUN_QUEUE() && \
				 EMPTY_BLOCKED_QUEUE() && \
				 EMPTY_SLEEPING_QUEUE())
284 #endif /* __SCHEDULE_H__ */