1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2005
5 * Prototypes for functions in Schedule.c
6 * (RTS internal scheduler interface)
8 * -------------------------------------------------------------------------*/
#ifndef SCHEDULE_H
#define SCHEDULE_H

#include "OSThreads.h"
#include "Capability.h"
/* initScheduler(), exitScheduler()
 * Called from STG : no
 * Locks assumed : none
 */
void initScheduler (void);
void exitScheduler (void);

// Place a new thread on the run queue of the current Capability
void scheduleThread (Capability *cap, StgTSO *tso);

// Place a new thread on the run queue of a specified Capability
// (cap is the currently owned Capability, cpu is the number of
// the desired Capability).
void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
31 /* awakenBlockedQueue()
33 * Takes a pointer to the beginning of a blocked TSO queue, and
34 * wakes up the entire queue.
35 * Called from STG : yes
36 * Locks assumed : none
39 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
41 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
43 void awakenBlockedQueue (Capability *cap, StgTSO *tso);
48 * Causes an OS thread to wake up and run the scheduler, if necessary.
54 * Put the specified thread on the run queue of the given Capability.
55 * Called from STG : yes
56 * Locks assumed : we own the Capability.
58 StgTSO * unblockOne(Capability *cap, StgTSO *tso);
/* raiseAsync()
 *
 * Raises an exception asynchronously in the specified thread.
 * Called from STG : yes
 * Locks assumed : none
 */
void raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception);

/* suspendComputation()
 *
 * A variant of raiseAsync(), this strips the stack of the specified
 * thread down to the stop_here point, leaving a current closure on
 * top of the stack at [stop_here - 1].
 */
void suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here);

/* raiseExceptionHelper */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper */
StgWord findRetryFrameHelper (StgTSO *tso);

/* GetRoots(evac_fn f)
 *
 * Call f() for each root known to the scheduler.
 * Called from STG : NO
 * Locks assumed : ????  (NOTE(review): locking discipline undocumented
 * here — verify against the definition before relying on it.)
 */
void GetRoots(evac_fn);

/* workerStart()
 *
 * Entry point for a new worker task.
 * Called from STG : NO
 * Locks assumed : none
 */
void workerStart(Task *task);
101 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
102 void unlink_from_bq(StgTSO* tso, StgClosure* node);
103 void initThread(StgTSO *tso, nat stack_size, StgInt pri);
105 nat run_queue_len(void);
106 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
107 void initThread(StgTSO *tso, nat stack_size);
109 char *info_type(StgClosure *closure); // dummy
110 char *info_type_by_ip(StgInfoTable *ip); // dummy
111 void awaken_blocked_queue(StgTSO *q);
112 void initThread(StgTSO *tso, nat stack_size);
/* Context switch flag.
 * Locks required : none (conflicts are harmless)
 */
extern int RTS_VAR(context_switch);

/* The state of the scheduler. This is used to control the sequence
 * of events during shutdown, and when the runtime is interrupted
 * with ^C.
 */
#define SCHED_RUNNING 0 /* running as normal */
#define SCHED_INTERRUPTING 1 /* ^C detected, before threads are deleted */
#define SCHED_SHUTTING_DOWN 2 /* final shutdown */

extern rtsBool RTS_VAR(sched_state);

/* recent_activity
 * flag that tracks whether we have done any execution in this time slice.
 */
#define ACTIVITY_YES 0 /* there has been activity in the current slice */
#define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
#define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
#define ACTIVITY_DONE_GC 3 /* like 2, but we've done a GC too */

/* Recent activity flag.
 * Locks required : Transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic. Transition from
 * INACTIVE to DONE_GC happens under sched_mutex. No lock required
 * to set it to ACTIVITY_YES.
 */
extern nat recent_activity;
/* Thread queues.
 * Locks required : sched_mutex
 *
 * In GranSim we have one run/blocked_queue per PE.
 */
// run_queue_hds defined in GranSim.h
154 extern StgTSO *RTS_VAR(blackhole_queue);
155 #if !defined(THREADED_RTS)
156 extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
157 extern StgTSO *RTS_VAR(sleeping_queue);
/* Linked list of all threads.
 * Locks required : sched_mutex
 */
extern StgTSO *RTS_VAR(all_threads);

/* Set to rtsTrue if there are threads on the blackhole_queue, and
 * it is possible that one or more of them may be available to run.
 * This flag is set to rtsFalse after we've checked the queue, and
 * set to rtsTrue just before we run some Haskell code. It is used
 * to decide whether we should yield the Capability or not.
 * Locks required : none (see scheduleCheckBlackHoles()).
 */
extern rtsBool blackholes_need_checking;
/* Scheduler lock for the threaded RTS. NOTE(review): the closing #endif
 * was missing in this copy; restored. */
#if defined(THREADED_RTS)
extern Mutex RTS_VAR(sched_mutex);
#endif
/* Is this TSO bound to a particular OS thread? */
StgBool isThreadBound(StgTSO *tso);

SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);

/* Called by shutdown_handler(). */
void interruptStgRts (void);

nat run_queue_len (void);

void resurrectThreads (StgTSO *);

/* Debugging helpers: dump thread / blocking-queue state. */
void printAllThreads(void);

void print_bq (StgClosure *node);

void print_bqe (StgBlockingQueueElement *bqe);

/* Attach a textual label to a thread (debugging/tracing). */
void labelThread(StgPtr tso, char *label);
203 /* -----------------------------------------------------------------------------
204 * Some convenient macros/inline functions...
209 /* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
211 /* Add a thread to the end of the run queue.
212 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
213 * ASSUMES: cap->running_task is the current task.
216 appendToRunQueue (Capability *cap, StgTSO *tso)
218 ASSERT(tso->link == END_TSO_QUEUE);
219 if (cap->run_queue_hd == END_TSO_QUEUE) {
220 cap->run_queue_hd = tso;
222 cap->run_queue_tl->link = tso;
224 cap->run_queue_tl = tso;
227 /* Push a thread on the beginning of the run queue. Used for
228 * newly awakened threads, so they get run as soon as possible.
229 * ASSUMES: cap->running_task is the current task.
232 pushOnRunQueue (Capability *cap, StgTSO *tso)
234 tso->link = cap->run_queue_hd;
235 cap->run_queue_hd = tso;
236 if (cap->run_queue_tl == END_TSO_QUEUE) {
237 cap->run_queue_tl = tso;
241 /* Pop the first thread off the runnable queue.
243 STATIC_INLINE StgTSO *
244 popRunQueue (Capability *cap)
246 StgTSO *t = cap->run_queue_hd;
247 ASSERT(t != END_TSO_QUEUE);
248 cap->run_queue_hd = t->link;
249 t->link = END_TSO_QUEUE;
250 if (cap->run_queue_hd == END_TSO_QUEUE) {
251 cap->run_queue_tl = END_TSO_QUEUE;
256 /* Add a thread to the end of the blocked queue.
258 #if !defined(THREADED_RTS)
260 appendToBlockedQueue(StgTSO *tso)
262 ASSERT(tso->link == END_TSO_QUEUE);
263 if (blocked_queue_hd == END_TSO_QUEUE) {
264 blocked_queue_hd = tso;
266 blocked_queue_tl->link = tso;
268 blocked_queue_tl = tso;
#if defined(THREADED_RTS)
/* Add a thread to the end of this Capability's wakeup queue.
 *
 * NOTE(review): the return-type line, braces, else branch and closing
 * #endif were missing in this copy; restored so the definition is
 * syntactically complete.
 */
STATIC_INLINE void
appendToWakeupQueue (Capability *cap, StgTSO *tso)
{
    ASSERT(tso->link == END_TSO_QUEUE);
    if (cap->wakeup_queue_hd == END_TSO_QUEUE) {
        /* queue was empty: tso becomes both head and tail */
        cap->wakeup_queue_hd = tso;
    } else {
        cap->wakeup_queue_tl->link = tso;
    }
    cap->wakeup_queue_tl = tso;
}
#endif
286 /* Check whether various thread queues are empty
288 STATIC_INLINE rtsBool
289 emptyQueue (StgTSO *q)
291 return (q == END_TSO_QUEUE);
294 STATIC_INLINE rtsBool
295 emptyRunQueue(Capability *cap)
297 return emptyQueue(cap->run_queue_hd);
#if defined(THREADED_RTS)
/* Is this Capability's wakeup queue empty?
 * NOTE(review): braces and closing #endif were missing in this copy;
 * restored. */
STATIC_INLINE rtsBool
emptyWakeupQueue(Capability *cap)
{
    return emptyQueue(cap->wakeup_queue_hd);
}
#endif
/* Emptiness checks for the non-threaded RTS's global wait queues.
 * NOTE(review): the closing #endif was missing in this copy; restored. */
#if !defined(THREADED_RTS)
#define EMPTY_BLOCKED_QUEUE()  (emptyQueue(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif
313 STATIC_INLINE rtsBool
314 emptyThreadQueues(Capability *cap)
316 return emptyRunQueue(cap)
317 #if !defined(THREADED_RTS)
318 && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
323 #endif /* !IN_STG_CODE */
326 dirtyTSO (StgTSO *tso)
328 tso->flags |= TSO_DIRTY;
331 #endif /* SCHEDULE_H */