1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2005
5 * Prototypes for functions in Schedule.c
6 * (RTS internal scheduler interface)
8 * -------------------------------------------------------------------------*/
#ifndef SCHEDULE_H
#define SCHEDULE_H

#include "rts/OSThreads.h"
#include "Capability.h"
#include "eventlog/EventLog.h"
/* initScheduler(), exitScheduler(), freeScheduler()
 * Bring the scheduler up at RTS startup / tear it down at shutdown.
 * Called from STG :  no
 * Locks assumed   :  none
 */
void initScheduler (void);
// wait_foreign: presumably, whether to wait for outstanding foreign
// calls to complete before shutting down — TODO confirm in Schedule.c.
void exitScheduler (rtsBool wait_foreign);
void freeScheduler (void);
// Place a new thread on the run queue of the current Capability.
// Locks assumed: the caller owns `cap`.
void scheduleThread (Capability *cap, StgTSO *tso);

// Place a new thread on the run queue of a specified Capability
// (cap is the currently owned Capability, cpu is the number of
// the desired Capability).
void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
/* wakeUpRts()
 *
 * Causes an OS thread to wake up and run the scheduler, if necessary.
 */
#if defined(THREADED_RTS)
void wakeUpRts (void);
#endif
/* raiseExceptionHelper: helper used when raising an asynchronous or
 * synchronous exception in `tso`; returns an StgWord status/frame word
 * (exact encoding defined in Schedule.c). */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper: STM support — locates the enclosing retry
 * frame on tso's stack (see the STM implementation for the returned
 * frame-type codes). */
StgWord findRetryFrameHelper (StgTSO *tso);
/* workerStart()
 * Entry point for a new worker task (an OS thread).
 * Called from STG :  NO
 * Locks assumed   :  none
 */
#if defined(THREADED_RTS)
void OSThreadProcAttr workerStart(Task *task);
#endif
/* The state of the scheduler.  This is used to control the sequence
 * of events during shutdown, and when the runtime is interrupted
 * (e.g. by ^C).  Only ever advances RUNNING -> INTERRUPTING ->
 * SHUTTING_DOWN.
 */
#define SCHED_RUNNING 0 /* running as normal */
#define SCHED_INTERRUPTING 1 /* ^C detected, before threads are deleted */
#define SCHED_SHUTTING_DOWN 2 /* final shutdown */

extern volatile StgWord sched_state;
/* recent_activity:
 * flag that tracks whether we have done any execution in this time slice.
 */
#define ACTIVITY_YES 0 /* there has been activity in the current slice */
#define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
#define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
#define ACTIVITY_DONE_GC 3 /* like 2, but we've done a GC too */

/* Recent activity flag.
 * Locks required  : Transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic.  Transition from
 * INACTIVE to DONE_GC happens under sched_mutex.  No lock required
 * to set it to ACTIVITY_YES.
 */
extern volatile StgWord recent_activity;
84 * Locks required : sched_mutex
86 * In GranSim we have one run/blocked_queue per PE.
88 extern StgTSO *blackhole_queue;
89 #if !defined(THREADED_RTS)
90 extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
91 extern StgTSO *sleeping_queue;
/* Set to rtsTrue if there are threads on the blackhole_queue, and
 * it is possible that one or more of them may be available to run.
 * This flag is set to rtsFalse after we've checked the queue, and
 * set to rtsTrue just before we run some Haskell code. It is used
 * to decide whether we should yield the Capability or not.
 * Locks required : none (see scheduleCheckBlackHoles()).
 */
extern rtsBool blackholes_need_checking;

/* NOTE(review): presumably set when the heap limit is exceeded so the
 * scheduler can initiate an orderly shutdown — confirm in Schedule.c. */
extern rtsBool heap_overflow;
/* Protects scheduler-global state (only exists in the threaded RTS). */
#if defined(THREADED_RTS)
extern Mutex sched_mutex;
#endif
/* Called by shutdown_handler(). */
void interruptStgRts (void);

/* NOTE(review): presumably revives threads found unreachable by the GC
 * so they can receive a BlockedIndefinitely exception — confirm against
 * the definition in Schedule.c. */
void resurrectThreads (StgTSO *);
void performPendingThrowTos (StgTSO *);
115 /* -----------------------------------------------------------------------------
116 * Some convenient macros/inline functions...
121 /* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
123 /* Add a thread to the end of the run queue.
124 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
125 * ASSUMES: cap->running_task is the current task.
128 appendToRunQueue (Capability *cap, StgTSO *tso)
130 ASSERT(tso->_link == END_TSO_QUEUE);
131 if (cap->run_queue_hd == END_TSO_QUEUE) {
132 cap->run_queue_hd = tso;
134 setTSOLink(cap, cap->run_queue_tl, tso);
136 cap->run_queue_tl = tso;
137 postEvent (cap, EVENT_THREAD_RUNNABLE, tso->id, 0);
140 /* Push a thread on the beginning of the run queue.
141 * ASSUMES: cap->running_task is the current task.
144 pushOnRunQueue (Capability *cap, StgTSO *tso)
146 setTSOLink(cap, tso, cap->run_queue_hd);
147 cap->run_queue_hd = tso;
148 if (cap->run_queue_tl == END_TSO_QUEUE) {
149 cap->run_queue_tl = tso;
153 /* Pop the first thread off the runnable queue.
155 INLINE_HEADER StgTSO *
156 popRunQueue (Capability *cap)
158 StgTSO *t = cap->run_queue_hd;
159 ASSERT(t != END_TSO_QUEUE);
160 cap->run_queue_hd = t->_link;
161 t->_link = END_TSO_QUEUE; // no write barrier req'd
162 if (cap->run_queue_hd == END_TSO_QUEUE) {
163 cap->run_queue_tl = END_TSO_QUEUE;
168 /* Add a thread to the end of the blocked queue.
170 #if !defined(THREADED_RTS)
172 appendToBlockedQueue(StgTSO *tso)
174 ASSERT(tso->_link == END_TSO_QUEUE);
175 if (blocked_queue_hd == END_TSO_QUEUE) {
176 blocked_queue_hd = tso;
178 setTSOLink(&MainCapability, blocked_queue_tl, tso);
180 blocked_queue_tl = tso;
#if defined(THREADED_RTS)
/* Add a thread to the tail of another Capability's wakeup queue.
 * Assumes: my_cap is owned by the current Task.  We hold
 * other_cap->lock, but we do not necessarily own other_cap; another
 * Task may be running on it.
 */
INLINE_HEADER void
appendToWakeupQueue (Capability *my_cap, Capability *other_cap, StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (other_cap->wakeup_queue_hd == END_TSO_QUEUE) {
        other_cap->wakeup_queue_hd = tso;
    } else {
        // my_cap is passed to setTSOLink() because it may need to
        // write to the mutable list.
        setTSOLink(my_cap, other_cap->wakeup_queue_tl, tso);
    }
    other_cap->wakeup_queue_tl = tso;
}
#endif
203 /* Check whether various thread queues are empty
205 INLINE_HEADER rtsBool
206 emptyQueue (StgTSO *q)
208 return (q == END_TSO_QUEUE);
211 INLINE_HEADER rtsBool
212 emptyRunQueue(Capability *cap)
214 return emptyQueue(cap->run_queue_hd);
#if defined(THREADED_RTS)
/* True if cap's wakeup queue is empty (threaded RTS only). */
INLINE_HEADER rtsBool
emptyWakeupQueue(Capability *cap)
{
    return emptyQueue(cap->wakeup_queue_hd);
}
#endif
/* Global blocked/sleeping queues exist only in the non-threaded RTS. */
#if !defined(THREADED_RTS)
#define EMPTY_BLOCKED_QUEUE()  (emptyQueue(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif
230 INLINE_HEADER rtsBool
231 emptyThreadQueues(Capability *cap)
233 return emptyRunQueue(cap)
234 #if !defined(THREADED_RTS)
235 && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
240 #endif /* !IN_STG_CODE */
242 #endif /* SCHEDULE_H */