/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2005
 *
 * Prototypes for functions in Schedule.c
 * (RTS internal scheduler interface)
 *
 * -------------------------------------------------------------------------*/
#ifndef SCHEDULE_H
#define SCHEDULE_H

#include "OSThreads.h"
#include "Capability.h"
/* initScheduler(), exitScheduler()
 * Called from STG :  no
 * Locks assumed   :  none
 */
void initScheduler (void);
void exitScheduler (rtsBool wait_foreign);
void freeScheduler (void);
// Place a new thread on the run queue of the current Capability
void scheduleThread (Capability *cap, StgTSO *tso);

// Place a new thread on the run queue of a specified Capability
// (cap is the currently owned Capability, cpu is the number of
// the desired Capability).
void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
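/* Illustrative sketch (not part of this interface): a caller that already
 * owns a Capability can build a thread and hand it to the scheduler.
 * createIOThread() is assumed to come from RtsAPI; 'action' stands for an
 * IO () closure obtained elsewhere.
 *
 *     StgTSO *tso = createIOThread(cap, RtsFlags.GcFlags.initialStkSize, action);
 *     scheduleThread(cap, tso);        // run on the current Capability
 *     // ...or pin it to Capability 1 instead:
 *     // scheduleThreadOn(cap, 1, tso);
 */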
/* awakenBlockedQueue()
 *
 * Takes a pointer to the beginning of a blocked TSO queue, and
 * wakes up the entire queue.
 * Called from STG :  yes
 * Locks assumed   :  none
 */
#if defined(GRAN)
void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
#elif defined(PAR)
void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
#else
void awakenBlockedQueue (Capability *cap, StgTSO *tso);
#endif
/* wakeUpRts()
 *
 * Causes an OS thread to wake up and run the scheduler, if necessary.
 */
void wakeUpRts(void);

/* unblockOne()
 *
 * Put the specified thread on the run queue of the given Capability.
 * Called from STG :  yes
 * Locks assumed   :  we own the Capability.
 */
StgTSO * unblockOne (Capability *cap, StgTSO *tso);
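/* Illustrative sketch (assumption about the threaded-RTS wakeup path, not a
 * copy of Schedule.c): a whole chain of blocked TSOs can be drained by
 * repeatedly calling unblockOne(), which returns the next thread in the
 * chain after queueing the current one.
 *
 *     while (tso != END_TSO_QUEUE) {
 *         tso = unblockOne(cap, tso);
 *     }
 */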
/* raiseExceptionHelper */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper */
StgWord findRetryFrameHelper (StgTSO *tso);
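/* Illustrative sketch (assumption): raiseExceptionHelper() walks the stack on
 * behalf of the raise# machinery and reports what kind of frame it stopped
 * at, e.g.
 *
 *     StgWord frame_type = raiseExceptionHelper(&cap->r, tso, exception);
 *     if (frame_type == CATCH_FRAME) {
 *         // enter the handler found on the stack
 *     }
 */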
/* workerStart()
 *
 * Entry point for a new worker task.
 * Called from STG :  NO
 * Locks assumed   :  none
 */
#if defined(THREADED_RTS)
void OSThreadProcAttr workerStart(Task *task);
#endif
#if defined(GRAN)
void    awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
void    unlink_from_bq(StgTSO* tso, StgClosure* node);
void    initThread(StgTSO *tso, nat stack_size, StgInt pri);
#elif defined(PAR)
nat     run_queue_len(void);
void    awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
void    initThread(StgTSO *tso, nat stack_size);
#else
char   *info_type(StgClosure *closure);    // dummy
char   *info_type_by_ip(StgInfoTable *ip); // dummy
void    awaken_blocked_queue(StgTSO *q);
void    initThread(StgTSO *tso, nat stack_size);
#endif
/* The state of the scheduler.  This is used to control the sequence
 * of events during shutdown, and when the runtime is interrupted
 * using ^C.
 */
#define SCHED_RUNNING       0  /* running as normal */
#define SCHED_INTERRUPTING  1  /* ^C detected, before threads are deleted */
#define SCHED_SHUTTING_DOWN 2  /* final shutdown */
extern rtsBool RTS_VAR(sched_state);
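/* Illustrative sketch (assumption, not the actual Schedule.c code): the
 * scheduler loop consults sched_state on every iteration, e.g.
 *
 *     if (sched_state >= SCHED_INTERRUPTING) {
 *         // stop creating new work; once the run queues drain, move on to
 *         // SCHED_SHUTTING_DOWN and let exitScheduler() finish the job
 *     }
 */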
/*
 * flag that tracks whether we have done any execution in this time slice.
 */
#define ACTIVITY_YES      0 /* there has been activity in the current slice */
#define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
#define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
#define ACTIVITY_DONE_GC  3 /* like 2, but we've done a GC too */
/* Recent activity flag.
 * Locks required  : Transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic.  Transition from
 * INACTIVE to DONE_GC happens under sched_mutex.  No lock required
 * to set it to ACTIVITY_YES.
 */
extern nat recent_activity;
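/* Illustrative sketch (assumption about how the timer uses these values, not
 * a copy of Timer.c): each tick ratchets the flag one step towards idle, and
 * running Haskell code resets it.
 *
 *     // in the timer signal handler:
 *     if (recent_activity == ACTIVITY_YES) {
 *         recent_activity = ACTIVITY_MAYBE_NO;
 *     } else if (recent_activity == ACTIVITY_MAYBE_NO) {
 *         recent_activity = ACTIVITY_INACTIVE;  // a full slice with no work
 *     }
 *     // just before running Haskell code:
 *     recent_activity = ACTIVITY_YES;
 */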
/* Thread queues.
 * Locks required  : sched_mutex
 *
 * In GranSim we have one run/blocked_queue per PE.
 */
#if defined(GRAN)
// run_queue_hds defined in GranSim.h
#else
extern  StgTSO *RTS_VAR(blackhole_queue);
#if !defined(THREADED_RTS)
extern  StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
extern  StgTSO *RTS_VAR(sleeping_queue);
#endif
#endif
/* Set to rtsTrue if there are threads on the blackhole_queue, and
 * it is possible that one or more of them may be available to run.
 * This flag is set to rtsFalse after we've checked the queue, and
 * set to rtsTrue just before we run some Haskell code.  It is used
 * to decide whether we should yield the Capability or not.
 * Locks required  : none (see scheduleCheckBlackHoles()).
 */
extern rtsBool blackholes_need_checking;
#if defined(THREADED_RTS)
extern Mutex RTS_VAR(sched_mutex);
#endif
SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);

/* Called by shutdown_handler(). */
void interruptStgRts (void);

nat  run_queue_len (void);

void resurrectThreads (StgTSO *);
void performPendingThrowTos (StgTSO *);

void printAllThreads(void);
/* debugging only
 */
#ifdef DEBUG
void print_bq (StgClosure *node);
#endif
#if defined(PAR)
void print_bqe (StgBlockingQueueElement *bqe);
#endif
/* -----------------------------------------------------------------------------
 * Some convenient macros/inline functions...
 */

#if !IN_STG_CODE

/* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
/* Add a thread to the end of the run queue.
 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
 * ASSUMES: cap->running_task is the current task.
 */
INLINE_HEADER void
appendToRunQueue (Capability *cap, StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_hd = tso;
    } else {
        setTSOLink(cap, cap->run_queue_tl, tso);
    }
    cap->run_queue_tl = tso;
}
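/* Usage sketch (assumption): scheduleThread() above is, in essence, a thin
 * wrapper over this helper once the Capability is owned:
 *
 *     // add 'tso' at the back so already-runnable threads keep their turn
 *     appendToRunQueue(cap, tso);
 */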
/* Push a thread on the beginning of the run queue.  Used for
 * newly awakened threads, so they get run as soon as possible.
 * ASSUMES: cap->running_task is the current task.
 */
INLINE_HEADER void
pushOnRunQueue (Capability *cap, StgTSO *tso)
{
    setTSOLink(cap, tso, cap->run_queue_hd);
    cap->run_queue_hd = tso;
    if (cap->run_queue_tl == END_TSO_QUEUE) {
        cap->run_queue_tl = tso;
    }
}
/* Pop the first thread off the runnable queue.
 */
INLINE_HEADER StgTSO *
popRunQueue (Capability *cap)
{
    StgTSO *t = cap->run_queue_hd;
    ASSERT(t != END_TSO_QUEUE);
    cap->run_queue_hd = t->_link;
    t->_link = END_TSO_QUEUE; // no write barrier req'd
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_tl = END_TSO_QUEUE;
    }
    return t;
}
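/* Illustrative sketch (assumption, condensed from what the scheduler loop in
 * Schedule.c does with these helpers): pick the next thread, run it, and
 * requeue it if it merely yielded.
 *
 *     while (!emptyRunQueue(cap)) {
 *         StgTSO *t = popRunQueue(cap);
 *         ret = ... run t ...;             // actual mechanism elided
 *         if (ret == ThreadYielding) {
 *             appendToRunQueue(cap, t);    // back of the queue: round-robin
 *         }
 *     }
 */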
/* Add a thread to the end of the blocked queue.
 */
#if !defined(THREADED_RTS)
INLINE_HEADER void
appendToBlockedQueue(StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (blocked_queue_hd == END_TSO_QUEUE) {
        blocked_queue_hd = tso;
    } else {
        setTSOLink(&MainCapability, blocked_queue_tl, tso);
    }
    blocked_queue_tl = tso;
}
#endif
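/* Illustrative sketch (assumption): in the non-threaded RTS, a thread that
 * blocks on I/O is parked here until awaitEvent() notices that its file
 * descriptor is ready.
 *
 *     t->why_blocked   = BlockedOnRead;
 *     t->block_info.fd = fd;
 *     appendToBlockedQueue(t);
 */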
#if defined(THREADED_RTS)
// Assumes: my_cap is owned by the current Task.  We hold
// other_cap->lock, but we do not necessarily own other_cap; another
// Task may be running on it.
INLINE_HEADER void
appendToWakeupQueue (Capability *my_cap, Capability *other_cap, StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (other_cap->wakeup_queue_hd == END_TSO_QUEUE) {
        other_cap->wakeup_queue_hd = tso;
    } else {
        // my_cap is passed to setTSOLink() because it may need to
        // write to the mutable list.
        setTSOLink(my_cap, other_cap->wakeup_queue_tl, tso);
    }
    other_cap->wakeup_queue_tl = tso;
}
#endif
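/* Illustrative sketch (assumption): to hand a newly-woken thread to a
 * Capability we do not own, take that Capability's lock around the queue
 * update and then prod it so it notices the new work.
 *
 *     ACQUIRE_LOCK(&other_cap->lock);
 *     appendToWakeupQueue(my_cap, other_cap, tso);
 *     RELEASE_LOCK(&other_cap->lock);
 *     // then signal other_cap (mechanism elided), or wakeUpRts() if the
 *     // scheduler itself may be asleep
 */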
/* Check whether various thread queues are empty
 */
INLINE_HEADER rtsBool
emptyQueue (StgTSO *q)
{
    return (q == END_TSO_QUEUE);
}

INLINE_HEADER rtsBool
emptyRunQueue(Capability *cap)
{
    return emptyQueue(cap->run_queue_hd);
}
#if defined(THREADED_RTS)
INLINE_HEADER rtsBool
emptyWakeupQueue(Capability *cap)
{
    return emptyQueue(cap->wakeup_queue_hd);
}
#endif

#if !defined(THREADED_RTS)
#define EMPTY_BLOCKED_QUEUE()  (emptyQueue(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif
INLINE_HEADER rtsBool
emptyThreadQueues(Capability *cap)
{
    return emptyRunQueue(cap)
#if !defined(THREADED_RTS)
        && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
#endif
    ;
}
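/* Illustrative sketch (assumption): the deadlock/idle check in the scheduler
 * is gated on this predicate, roughly:
 *
 *     if (emptyThreadQueues(cap) && recent_activity == ACTIVITY_INACTIVE) {
 *         // nothing runnable and a whole slice passed idle: try a GC in the
 *         // hope of resurrecting blocked threads
 *     }
 */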
#endif /* !IN_STG_CODE */

#endif /* SCHEDULE_H */