/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2005
 *
 * Prototypes for functions in Schedule.c
 * (RTS internal scheduler interface)
 *
 * -------------------------------------------------------------------------*/

#ifndef SCHEDULE_H
#define SCHEDULE_H

#include "rts/OSThreads.h"
#include "Capability.h"
#include "Trace.h"

/* initScheduler(), exitScheduler()
 * Called from STG :  no
 * Locks assumed   :  none
 */
void initScheduler (void);
void exitScheduler (rtsBool wait_foreign);
void freeScheduler (void);
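
/* Illustration only (not part of the RTS API): a minimal sketch of the
 * assumed call order for the declarations above.  The helper name is
 * hypothetical, and the wait_foreign argument is assumed to mean "wait
 * for outstanding foreign calls before shutting down".
 */
#if 0
static void scheduler_lifecycle_sketch (void)
{
    initScheduler();          /* once, during RTS startup                 */
    /* ... run the program ... */
    exitScheduler(rtsTrue);   /* begin shutdown, waiting on foreign calls */
    freeScheduler();          /* release scheduler resources              */
}
#endif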

// Place a new thread on the run queue of the current Capability
void scheduleThread (Capability *cap, StgTSO *tso);

// Place a new thread on the run queue of a specified Capability
// (cap is the currently owned Capability, cpu is the number of
// the desired Capability).
void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
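
/* Illustration only: scheduling a thread on the current Capability vs.
 * pinning it to a particular Capability.  The helper is hypothetical and
 * "tso" is assumed to be an already-initialised thread (e.g. one built
 * by createThread() in Threads.c); the Capability number 1 is arbitrary.
 */
#if 0
static void schedule_sketch (Capability *cap, StgTSO *tso)
{
    scheduleThread(cap, tso);          /* run on the Capability we own */
    /* scheduleThreadOn(cap, 1, tso);     or pin it to Capability 1    */
}
#endif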

/* Causes an OS thread to wake up and run the scheduler, if necessary. */
#if defined(THREADED_RTS)
void wakeUpRts (void);
#endif

/* raiseExceptionHelper */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper */
StgWord findRetryFrameHelper (StgTSO *tso);

/* workerStart()
 * Entry point for a new worker task.
 * Called from STG :  NO
 * Locks assumed   :  none
 */
#if defined(THREADED_RTS)
void OSThreadProcAttr workerStart(Task *task);
#endif

/* The state of the scheduler.  This is used to control the sequence
 * of events during shutdown, and when the runtime is interrupted
 * using ^C.
 */
#define SCHED_RUNNING       0  /* running as normal */
#define SCHED_INTERRUPTING  1  /* ^C detected, before threads are deleted */
#define SCHED_SHUTTING_DOWN 2  /* final shutdown */

extern volatile StgWord sched_state;
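
/* Illustration only: sched_state only ever advances RUNNING ->
 * INTERRUPTING -> SHUTTING_DOWN, so scheduler code can poll it with a
 * simple comparison.  The helper name is hypothetical.
 */
#if 0
static void interrupt_check_sketch (void)
{
    if (sched_state >= SCHED_INTERRUPTING) {
        /* stop handing out work, delete threads, and eventually move to
         * SCHED_SHUTTING_DOWN */
    }
}
#endif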

/*
 * Flag that tracks whether we have done any execution in this time slice.
 */
#define ACTIVITY_YES      0  /* there has been activity in the current slice */
#define ACTIVITY_MAYBE_NO 1  /* no activity in the current slice */
#define ACTIVITY_INACTIVE 2  /* a complete slice has passed with no activity */
#define ACTIVITY_DONE_GC  3  /* like ACTIVITY_INACTIVE, but we've done a GC too */

/* Recent activity flag.
 * Locks required  : Transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic.  Transition from
 * INACTIVE to DONE_GC happens under sched_mutex.  No lock required
 * to set it to ACTIVITY_YES.
 */
extern volatile StgWord recent_activity;
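
/* Illustration only: a simplified sketch of the transitions described
 * above, assumed to run from the timer signal handler.  The helper name
 * is hypothetical; the real handler also counts idle ticks before
 * declaring inactivity.
 */
#if 0
static void timer_tick_sketch (void)
{
    switch (recent_activity) {
    case ACTIVITY_YES:
        recent_activity = ACTIVITY_MAYBE_NO;   /* start a fresh slice  */
        break;
    case ACTIVITY_MAYBE_NO:
        recent_activity = ACTIVITY_INACTIVE;   /* a whole idle slice   */
        break;
    default:
        break;                                 /* INACTIVE / DONE_GC   */
    }
}
#endif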

/* Thread queues.
 * Locks required  : sched_mutex
 *
 * In GranSim we have one run/blocked_queue per PE.
 */
extern StgTSO *blackhole_queue;
#if !defined(THREADED_RTS)
extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
extern StgTSO *sleeping_queue;
#endif

/* Set to rtsTrue if there are threads on the blackhole_queue, and
 * it is possible that one or more of them may be available to run.
 * This flag is set to rtsFalse after we've checked the queue, and
 * set to rtsTrue just before we run some Haskell code.  It is used
 * to decide whether we should yield the Capability or not.
 * Locks required  : none (see scheduleCheckBlackHoles()).
 */
extern rtsBool blackholes_need_checking;
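
/* Illustration only: the check/reset protocol described above, with a
 * hypothetical helper name; the scan itself is scheduleCheckBlackHoles()
 * (see the comment above).
 */
#if 0
static void blackhole_check_sketch (void)
{
    if (blackholes_need_checking) {
        blackholes_need_checking = rtsFalse;   /* cleared after the check */
        /* scan blackhole_queue for threads that may now be runnable */
    }
    /* ... and just before running Haskell code again: */
    blackholes_need_checking = rtsTrue;
}
#endif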

extern rtsBool heap_overflow;

#if defined(THREADED_RTS)
extern Mutex sched_mutex;
#endif

/* Called by shutdown_handler(). */
void interruptStgRts (void);

void resurrectThreads (StgTSO *);
void performPendingThrowTos (StgTSO *);

/* -----------------------------------------------------------------------------
 * Some convenient macros/inline functions...
 */

#if !IN_STG_CODE

/* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */

/* Add a thread to the end of the run queue.
 * NOTE: tso->link should be END_TSO_QUEUE before calling this function.
 * ASSUMES: cap->running_task is the current task.
 */
INLINE_HEADER void
appendToRunQueue (Capability *cap, StgTSO *tso);

INLINE_HEADER void
appendToRunQueue (Capability *cap, StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_hd = tso;
    } else {
        setTSOLink(cap, cap->run_queue_tl, tso);
    }
    cap->run_queue_tl = tso;
    traceSchedEvent (cap, EVENT_THREAD_RUNNABLE, tso, 0);
}

/* Push a thread on the beginning of the run queue.
 * ASSUMES: cap->running_task is the current task.
 */
INLINE_HEADER void
pushOnRunQueue (Capability *cap, StgTSO *tso)
{
    setTSOLink(cap, tso, cap->run_queue_hd);
    cap->run_queue_hd = tso;
    if (cap->run_queue_tl == END_TSO_QUEUE) {
        cap->run_queue_tl = tso;
    }
}

/* Pop the first thread off the runnable queue.
 */
INLINE_HEADER StgTSO *
popRunQueue (Capability *cap)
{
    StgTSO *t = cap->run_queue_hd;
    ASSERT(t != END_TSO_QUEUE);
    cap->run_queue_hd = t->_link;
    t->_link = END_TSO_QUEUE; // no write barrier req'd
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_tl = END_TSO_QUEUE;
    }
    return t;
}
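
/* Illustration only: appendToRunQueue() is FIFO, pushOnRunQueue() is
 * LIFO, and popRunQueue() always takes the head.  The helper and its
 * arguments are hypothetical.
 */
#if 0
static void run_queue_sketch (Capability *cap, StgTSO *a, StgTSO *b)
{
    StgTSO *next;
    appendToRunQueue(cap, a);     /* run queue: a        */
    pushOnRunQueue(cap, b);       /* run queue: b, a     */
    next = popRunQueue(cap);      /* next == b; queue: a */
}
#endif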

/* Add a thread to the end of the blocked queue.
 */
#if !defined(THREADED_RTS)
INLINE_HEADER void
appendToBlockedQueue(StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (blocked_queue_hd == END_TSO_QUEUE) {
        blocked_queue_hd = tso;
    } else {
        setTSOLink(&MainCapability, blocked_queue_tl, tso);
    }
    blocked_queue_tl = tso;
}
#endif

#if defined(THREADED_RTS)
// Assumes: my_cap is owned by the current Task.  We hold
// other_cap->lock, but we do not necessarily own other_cap; another
// Task may be running on it.
INLINE_HEADER void
appendToWakeupQueue (Capability *my_cap, Capability *other_cap, StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (other_cap->wakeup_queue_hd == END_TSO_QUEUE) {
        other_cap->wakeup_queue_hd = tso;
    } else {
        // my_cap is passed to setTSOLink() because it may need to
        // write to the mutable list.
        setTSOLink(my_cap, other_cap->wakeup_queue_tl, tso);
    }
    other_cap->wakeup_queue_tl = tso;
}
#endif
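
/* Illustration only: a sketch of the locking assumption stated above,
 * using a hypothetical helper.  ACQUIRE_LOCK/RELEASE_LOCK are the mutex
 * wrappers from rts/OSThreads.h; we hold other_cap->lock while
 * appending, but never take ownership of other_cap itself.
 */
#if 0
static void wakeup_sketch (Capability *my_cap, Capability *other_cap, StgTSO *tso)
{
    ACQUIRE_LOCK(&other_cap->lock);
    appendToWakeupQueue(my_cap, other_cap, tso);
    RELEASE_LOCK(&other_cap->lock);
}
#endif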

/* Check whether various thread queues are empty
 */
INLINE_HEADER rtsBool
emptyQueue (StgTSO *q)
{
    return (q == END_TSO_QUEUE);
}

INLINE_HEADER rtsBool
emptyRunQueue(Capability *cap)
{
    return emptyQueue(cap->run_queue_hd);
}

#if defined(THREADED_RTS)
INLINE_HEADER rtsBool
emptyWakeupQueue(Capability *cap)
{
    return emptyQueue(cap->wakeup_queue_hd);
}
#endif

#if !defined(THREADED_RTS)
#define EMPTY_BLOCKED_QUEUE()  (emptyQueue(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif

INLINE_HEADER rtsBool
emptyThreadQueues(Capability *cap)
{
    return emptyRunQueue(cap)
#if !defined(THREADED_RTS)
        && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
#endif
    ;
}

#endif /* !IN_STG_CODE */

#endif /* SCHEDULE_H */