/* -----------------------------------------------------------------------------
 * (c) The GHC Team 1998-1999
 *
 * Prototypes for functions in Schedule.c
 * (RTS internal scheduler interface)
 *
 * -------------------------------------------------------------------------*/

#ifndef __SCHEDULE_H__
#define __SCHEDULE_H__

#include "OSThreads.h"
/* initScheduler(), exitScheduler(), startTasks()
 *
 * Called from STG : no
 * Locks assumed   : none
 */
extern void initScheduler ( void );
extern void exitScheduler ( void );
/* awakenBlockedQueue()
 *
 * Takes a pointer to the beginning of a blocked TSO queue, and
 * wakes up the entire queue.
 *
 * Called from STG : yes
 * Locks assumed   : none
 */
#if defined(GRAN)
void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
#elif defined(PAR)
void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
#else
void awakenBlockedQueue (StgTSO *tso);
void awakenBlockedQueueNoLock (StgTSO *tso);
#endif
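/* Illustrative sketch (not part of this interface; assumes the ordinary
 * build, i.e. neither GRAN nor PAR): a primitive that has accumulated a
 * queue of blocked TSOs can make all of them runnable with a single call.
 * The variable name 'waiters' is hypothetical.
 *
 *    StgTSO *waiters = ...;         // head of some blocked-TSO queue
 *    awakenBlockedQueue(waiters);   // every thread on the queue is woken
 *
 * awakenBlockedQueueNoLock() is presumably the variant for callers that
 * already hold sched_mutex.
 */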
/* Version of scheduleThread that doesn't take sched_mutex */
void scheduleThreadLocked(StgTSO *tso);
/* unblockOne()
 *
 * Takes a pointer to the beginning of a blocked TSO queue, and
 * removes the first thread, placing it on the runnable queue.
 *
 * Called from STG : yes
 * Locks assumed   : none
 */
#if defined(GRAN) || defined(PAR)
StgBlockingQueueElement *unblockOne(StgBlockingQueueElement *bqe, StgClosure *node);
StgBlockingQueueElement *unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node);
#else
StgTSO *unblockOne(StgTSO *tso);
StgTSO *unblockOneLocked(StgTSO *tso);
#endif
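/* Illustrative sketch (an assumption, not code taken from Schedule.c): in
 * the ordinary build unblockOne() wakes the first thread and returns what is
 * presumably the remainder of the queue, so draining a whole queue one
 * thread at a time would look roughly like:
 *
 *    StgTSO *q = ...;               // head of a blocked-TSO queue
 *    while (q != END_TSO_QUEUE) {
 *        q = unblockOne(q);         // wake one thread, step to the next
 *    }
 */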
/* raiseAsync()
 *
 * Raises an exception asynchronously in the specified thread.
 *
 * Called from STG : yes
 * Locks assumed   : none
 */
void raiseAsync(StgTSO *tso, StgClosure *exception);
void raiseAsyncWithLock(StgTSO *tso, StgClosure *exception);
/* raiseExceptionHelper */
StgWord raiseExceptionHelper (StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper */
StgWord findRetryFrameHelper (StgTSO *tso);
/* awaitEvent(rtsBool wait)
 *
 * Checks for blocked threads that need to be woken.
 *
 * Called from STG : NO
 * Locks assumed   : sched_mutex
 */
void awaitEvent(rtsBool wait); /* In Select.c */
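/* Illustrative sketch (an assumption about the caller, not a quotation from
 * Schedule.c): the scheduler would be expected to call this, with sched_mutex
 * held, when it has nothing runnable but still has threads blocked on I/O or
 * timers; 'wait' controls whether the call blocks or merely polls.
 *
 *    if (EMPTY_RUN_QUEUE()) {
 *        awaitEvent(rtsTrue);       // nothing to run: block until I/O completes
 *    } else {
 *        awaitEvent(rtsFalse);      // work available: just poll
 *    }
 */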
/* wakeUpSleepingThreads(nat ticks)
 *
 * Wakes up any sleeping threads whose timers have expired.
 *
 * Called from STG : NO
 * Locks assumed   : sched_mutex
 */
rtsBool wakeUpSleepingThreads(lnat); /* In Select.c */
/* wakeBlockedWorkerThread()
 *
 * If a worker thread is currently blocked in awaitEvent(), interrupt it.
 *
 * Called from STG : NO
 * Locks assumed   : sched_mutex
 */
void wakeBlockedWorkerThread(void); /* In Select.c */
/* resetWorkerWakeupPipeAfterFork()
 *
 * Notify Select.c that a fork() has occurred.
 *
 * Called from STG : NO
 * Locks assumed   : don't care, but must be called right after fork()
 */
void resetWorkerWakeupPipeAfterFork(void); /* In Select.c */
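/* Illustrative sketch (hypothetical caller): the note above requires the call
 * to be made immediately after fork(), so an RTS fork-handling routine would
 * be expected to do roughly the following in the child process:
 *
 *    pid_t pid = fork();
 *    if (pid == 0) {                         // child
 *        resetWorkerWakeupPipeAfterFork();   // rebuild the worker wakeup pipe
 *        ...
 *    }
 */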
/* GetRoots(evac_fn f)
 *
 * Call f() for each root known to the scheduler.
 *
 * Called from STG : NO
 * Locks assumed   : ????
 */
void GetRoots(evac_fn);
// ToDo: check whether all functions below are used in the SMP version, too
#if defined(GRAN)
void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
void unlink_from_bq(StgTSO* tso, StgClosure* node);
void initThread(StgTSO *tso, nat stack_size, StgInt pri);
#elif defined(PAR)
nat  run_queue_len(void);
void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
void initThread(StgTSO *tso, nat stack_size);
#else
char *info_type(StgClosure *closure);      // dummy
char *info_type_by_ip(StgInfoTable *ip);   // dummy
void awaken_blocked_queue(StgTSO *q);
void initThread(StgTSO *tso, nat stack_size);
#endif
/* Context switch flag.
 * Locks required : sched_mutex
 */
extern int RTS_VAR(context_switch);
extern rtsBool RTS_VAR(interrupted);
/* Flag that tracks whether we have done any execution in this time slice.
 */
#define ACTIVITY_YES      0 /* there has been activity in the current slice */
#define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
#define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
#define ACTIVITY_DONE_GC  3 /* like 2, but we've done a GC too */

extern nat recent_activity;
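/* Illustrative sketch (an assumption about how the flag is driven, not code
 * from the RTS): a periodic timer handler could step the flag through the
 * states above, so that a whole tick passing with no intervening work becomes
 * detectable, while the scheduler resets it to ACTIVITY_YES whenever it runs
 * a thread:
 *
 *    switch (recent_activity) {
 *    case ACTIVITY_YES:      recent_activity = ACTIVITY_MAYBE_NO; break;
 *    case ACTIVITY_MAYBE_NO: recent_activity = ACTIVITY_INACTIVE; break;
 *    default:                break;   // INACTIVE / DONE_GC: nothing further
 *    }
 */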
extern lnat RTS_VAR(timestamp);
/* Thread queues.
 * Locks required : sched_mutex
 *
 * In GranSim we have one run/blocked_queue per PE.
 */
#if defined(GRAN)
// run_queue_hds defined in GranSim.h
#else
extern StgTSO *RTS_VAR(run_queue_hd), *RTS_VAR(run_queue_tl);
extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
extern StgTSO *RTS_VAR(blackhole_queue);
extern StgTSO *RTS_VAR(sleeping_queue);
#endif
/* Linked list of all threads. */
extern StgTSO *RTS_VAR(all_threads);
/* Set to rtsTrue if there are threads on the blackhole_queue, and
 * it is possible that one or more of them may be available to run.
 * This flag is set to rtsFalse after we've checked the queue, and
 * set to rtsTrue just before we run some Haskell code.  It is used
 * to decide whether we should yield the Capability or not.
 */
extern rtsBool blackholes_need_checking;
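/* Illustrative sketch (hypothetical scheduler-loop fragment; the helper name
 * checkBlackHoles() is an assumption) of the protocol described above:
 *
 *    if (blackholes_need_checking) {
 *        blackholes_need_checking = rtsFalse;   // clear before scanning
 *        checkBlackHoles();                     // scan blackhole_queue for runnable threads
 *    }
 *    ...
 *    blackholes_need_checking = rtsTrue;        // about to run some Haskell code
 *    ... run the chosen thread ...
 */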
#if defined(RTS_SUPPORTS_THREADS)
/* Schedule.c has detailed info on what these do */
extern Mutex     RTS_VAR(sched_mutex);
extern Condition RTS_VAR(returning_worker_cond);
extern nat       RTS_VAR(rts_n_waiting_workers);
extern nat       RTS_VAR(rts_n_waiting_tasks);
#endif
StgBool isThreadBound(StgTSO *tso);

extern SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);

/* Called by shutdown_handler(). */
void interruptStgRts ( void );

void raiseAsync(StgTSO *tso, StgClosure *exception);
nat  run_queue_len(void);

void resurrectThreads( StgTSO * );
/* Main threads:
 *
 * These are the threads which clients have requested that we run.
 *
 * In a 'threaded' build, each of these corresponds to one bound thread.
 * The pointer to the StgMainThread is passed as a parameter to schedule;
 * this invocation of schedule will always pass this main thread's
 * bound_thread_cond to waitForWorkCapability; OS-thread-switching
 * takes place using passCapability.
 *
 * In non-threaded builds, clients are strictly nested: the first client calls
 * into the RTS, which might call out again to C with a _ccall_GC, and
 * eventually re-enter the RTS.
 *
 * This is non-abstract at the moment because the garbage collector
 * treats pointers to TSOs from the main thread list as "weak" - these
 * pointers won't prevent a thread from receiving a BlockedOnDeadMVar
 * exception and being killed off.
 *
 * Main thread information is kept in a linked list:
 */
typedef struct StgMainThread_ {
  StgTSO *          tso;
  SchedulerStatus   stat;
  StgClosure **     ret;
#if defined(RTS_SUPPORTS_THREADS)
  Condition         bound_thread_cond;
#endif
  struct StgMainThread_ *prev;
  struct StgMainThread_ *link;
} StgMainThread;
/* Main thread queue.
 * Locks required: sched_mutex.
 */
extern StgMainThread *main_threads;
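/* Illustrative sketch (hypothetical, for exposition only): main_threads is a
 * linked list threaded through the 'link' (and 'prev') fields, so visiting
 * every registered main thread looks roughly like the following; any such
 * traversal must hold sched_mutex, as noted above.
 *
 *    StgMainThread *m;
 *    for (m = main_threads; m != NULL; m = m->link) {
 *        ... inspect m->tso, m->stat ...
 *    }
 */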
void printAllThreads(void);
#ifdef COMPILING_SCHEDULER
static void printThreadBlockage(StgTSO *tso);
static void printThreadStatus(StgTSO *tso);
#endif

void print_bq (StgClosure *node);
void print_bqe (StgBlockingQueueElement *bqe);

void labelThread(StgPtr tso, char *label);
/* -----------------------------------------------------------------------------
 * Some convenient macros...
 */

/* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
/* Add a thread to the end of the run queue.
 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
 */
#define APPEND_TO_RUN_QUEUE(tso)            \
    ASSERT(tso->link == END_TSO_QUEUE);     \
    if (run_queue_hd == END_TSO_QUEUE) {    \
      run_queue_hd = tso;                   \
    } else {                                \
      run_queue_tl->link = tso;             \
    }                                       \
    run_queue_tl = tso;
/* Push a thread on the beginning of the run queue.  Used for
 * newly awakened threads, so they get run as soon as possible.
 */
#define PUSH_ON_RUN_QUEUE(tso)              \
    tso->link = run_queue_hd;               \
    run_queue_hd = tso;                     \
    if (run_queue_tl == END_TSO_QUEUE) {    \
      run_queue_tl = tso;                   \
    }
/* Pop the first thread off the runnable queue.
 */
#define POP_RUN_QUEUE(pt)                       \
  do { StgTSO *__tmp_t = run_queue_hd;          \
    if (__tmp_t != END_TSO_QUEUE) {             \
      run_queue_hd = __tmp_t->link;             \
      __tmp_t->link = END_TSO_QUEUE;            \
      if (run_queue_hd == END_TSO_QUEUE) {      \
        run_queue_tl = END_TSO_QUEUE;           \
      }                                         \
    }                                           \
    pt = __tmp_t;                               \
  } while(0)
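/* Illustrative sketch (hypothetical fragment, not the real scheduler loop)
 * of how these macros fit together:
 *
 *    StgTSO *t;
 *    POP_RUN_QUEUE(t);                  // take the next runnable thread
 *    if (t != END_TSO_QUEUE) {
 *        ... run t for a while ...
 *        APPEND_TO_RUN_QUEUE(t);        // round-robin: back to the tail
 *    }
 *
 * PUSH_ON_RUN_QUEUE() would be used instead of APPEND_TO_RUN_QUEUE() for a
 * freshly awakened thread that should run as soon as possible.
 */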
/* Add a thread to the end of the blocked queue.
 */
#define APPEND_TO_BLOCKED_QUEUE(tso)            \
    ASSERT(tso->link == END_TSO_QUEUE);         \
    if (blocked_queue_hd == END_TSO_QUEUE) {    \
      blocked_queue_hd = tso;                   \
    } else {                                    \
      blocked_queue_tl->link = tso;             \
    }                                           \
    blocked_queue_tl = tso;
/* Check whether various thread queues are empty
 */
#define EMPTY_QUEUE(q)         (q == END_TSO_QUEUE)

#define EMPTY_RUN_QUEUE()      (EMPTY_QUEUE(run_queue_hd))
#define EMPTY_BLOCKED_QUEUE()  (EMPTY_QUEUE(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (EMPTY_QUEUE(sleeping_queue))

#define EMPTY_THREAD_QUEUES()  (EMPTY_RUN_QUEUE() && \
                                EMPTY_BLOCKED_QUEUE() && \
                                EMPTY_SLEEPING_QUEUE())
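/* Illustrative sketch (an assumption about intended use): when every thread
 * queue is empty there is nothing left that could ever become runnable on its
 * own, so a deadlock-detection point in the scheduler would look roughly like:
 *
 *    if (EMPTY_THREAD_QUEUES()) {
 *        // no runnable, blocked or sleeping threads: the system is idle or
 *        // deadlocked; a GC and/or resurrectThreads() would be in order here
 *    }
 */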
#if defined(RTS_SUPPORTS_THREADS)
/* If no task is waiting for a capability,
 * and if there is work to be done
 * or if we need to wait for IO or delay requests,
 * spawn a new worker thread.
 */
void startSchedulerTaskIfNecessary(void);
#endif
extern void sched_belch(char *s, ...)
   GNU_ATTRIBUTE(format (printf, 1, 2));

#endif /* __SCHEDULE_H__ */