1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2005
5 * Prototypes for functions in Schedule.c
6 * (RTS internal scheduler interface)
8 * -------------------------------------------------------------------------*/
13 #include "OSThreads.h"
14 #include "Capability.h"
16 /* initScheduler(), exitScheduler()
17 * Called from STG : no
18 * Locks assumed : none
20 void initScheduler (void);
21 void exitScheduler (void);
23 // Place a new thread on the run queue of the specified Capability
24 void scheduleThread (Capability *cap, StgTSO *tso);
26 /* awakenBlockedQueue()
28 * Takes a pointer to the beginning of a blocked TSO queue, and
29 * wakes up the entire queue.
30 * Called from STG : yes
31 * Locks assumed : none
34 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
36 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
38 void awakenBlockedQueue (Capability *cap, StgTSO *tso);
43 * Put the specified thread on the run queue of the given Capability.
44 * Called from STG : yes
45 * Locks assumed : we own the Capability.
47 StgTSO * unblockOne(Capability *cap, StgTSO *tso);
51 * Raises an exception asynchronously in the specified thread.
53 * Called from STG : yes
54 * Locks assumed : none
56 void raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception);
58 /* suspendComputation()
60 * A variant of raiseAsync(), this strips the stack of the specified
61 * thread down to the stop_here point, leaving a current closure on
62 * top of the stack at [stop_here - 1].
64 void suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here);
/* raiseExceptionHelper */
/* Implemented in Schedule.c; this header carries only the prototype. */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper */
/* Implemented in Schedule.c; this header carries only the prototype. */
StgWord findRetryFrameHelper (StgTSO *tso);
/* GetRoots(evac_fn f)
 *
 * Call f() for each root known to the scheduler.
 *
 * Called from STG  :  NO
 * Locks assumed    :  ????
 */
void GetRoots(evac_fn);
83 * Entry point for a new worker task.
84 * Called from STG : NO
85 * Locks assumed : none
87 void workerStart(Task *task);
90 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
91 void unlink_from_bq(StgTSO* tso, StgClosure* node);
92 void initThread(StgTSO *tso, nat stack_size, StgInt pri);
94 nat run_queue_len(void);
95 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
96 void initThread(StgTSO *tso, nat stack_size);
98 char *info_type(StgClosure *closure); // dummy
99 char *info_type_by_ip(StgInfoTable *ip); // dummy
100 void awaken_blocked_queue(StgTSO *q);
101 void initThread(StgTSO *tso, nat stack_size);
/* Context switch flag.
 * Locks required  : none (conflicts are harmless)
 */
extern int RTS_VAR(context_switch);
110 * Locks required : none (makes one transition from false->true)
112 extern rtsBool RTS_VAR(interrupted);
115 * Locks required : none (makes one transition from false->true)
117 extern rtsBool shutting_down_scheduler;
/* Recent-activity states: a
 * flag that tracks whether we have done any execution in this time slice.
 */
#define ACTIVITY_YES      0 /* there has been activity in the current slice */
#define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
#define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
#define ACTIVITY_DONE_GC  3 /* like 2, but we've done a GC too */
127 /* Recent activity flag.
128 * Locks required : Transition from MAYBE_NO to INACTIVE
129 * happens in the timer signal, so it is atomic. Trnasition from
130 * INACTIVE to DONE_GC happens under sched_mutex. No lock required
131 * to set it to ACTIVITY_YES.
133 extern nat recent_activity;
136 * Locks required : sched_mutex
138 * In GranSim we have one run/blocked_queue per PE.
141 // run_queue_hds defined in GranSim.h
143 extern StgTSO *RTS_VAR(blackhole_queue);
144 #if !defined(THREADED_RTS)
145 extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
146 extern StgTSO *RTS_VAR(sleeping_queue);
150 /* Linked list of all threads.
151 * Locks required : sched_mutex
153 extern StgTSO *RTS_VAR(all_threads);
155 /* Set to rtsTrue if there are threads on the blackhole_queue, and
156 * it is possible that one or more of them may be available to run.
157 * This flag is set to rtsFalse after we've checked the queue, and
158 * set to rtsTrue just before we run some Haskell code. It is used
159 * to decide whether we should yield the Capability or not.
160 * Locks required : none (see scheduleCheckBlackHoles()).
162 extern rtsBool blackholes_need_checking;
/* The scheduler's big lock; only exists in the threaded RTS. */
#if defined(THREADED_RTS)
extern Mutex RTS_VAR(sched_mutex);
#endif
/* Query whether a thread is bound; implemented in Schedule.c. */
StgBool isThreadBound(StgTSO *tso);

/* Run a computation in a lazy-IO style; result is written through the
 * 'ret' out-parameter, the scheduler status is the return value.
 * NOTE(review): exact semantics live in the RTS API — confirm there. */
SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);
/* Called by shutdown_handler(). */
void interruptStgRts (void);

/* Length of the run queue (also declared earlier in this header with
 * the same prototype, which is legal C). */
nat run_queue_len (void);

/* Revive threads that would otherwise be lost; implemented in
 * Schedule.c.  NOTE(review): confirm the exact contract there. */
void resurrectThreads (StgTSO *);

/* Debugging: dump the state of all threads. */
void printAllThreads(void);

/* Debugging: print the blocked queue hanging off a closure. */
void print_bq (StgClosure *node);

/* Debugging: print a single blocking-queue element. */
void print_bqe (StgBlockingQueueElement *bqe);

/* Attach a textual label to a thread (used for debugging output). */
void labelThread(StgPtr tso, char *label);
192 /* -----------------------------------------------------------------------------
193 * Some convenient macros/inline functions...
198 /* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
200 /* Add a thread to the end of the run queue.
201 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
202 * ASSUMES: cap->running_task is the current task.
205 appendToRunQueue (Capability *cap, StgTSO *tso)
207 ASSERT(tso->link == END_TSO_QUEUE);
208 if (cap->run_queue_hd == END_TSO_QUEUE) {
209 cap->run_queue_hd = tso;
211 cap->run_queue_tl->link = tso;
213 cap->run_queue_tl = tso;
216 /* Push a thread on the beginning of the run queue. Used for
217 * newly awakened threads, so they get run as soon as possible.
218 * ASSUMES: cap->running_task is the current task.
221 pushOnRunQueue (Capability *cap, StgTSO *tso)
223 tso->link = cap->run_queue_hd;
224 cap->run_queue_hd = tso;
225 if (cap->run_queue_tl == END_TSO_QUEUE) {
226 cap->run_queue_tl = tso;
230 /* Pop the first thread off the runnable queue.
232 STATIC_INLINE StgTSO *
233 popRunQueue (Capability *cap)
235 StgTSO *t = cap->run_queue_hd;
236 ASSERT(t != END_TSO_QUEUE);
237 cap->run_queue_hd = t->link;
238 t->link = END_TSO_QUEUE;
239 if (cap->run_queue_hd == END_TSO_QUEUE) {
240 cap->run_queue_tl = END_TSO_QUEUE;
245 /* Add a thread to the end of the blocked queue.
247 #if !defined(THREADED_RTS)
249 appendToBlockedQueue(StgTSO *tso)
251 ASSERT(tso->link == END_TSO_QUEUE);
252 if (blocked_queue_hd == END_TSO_QUEUE) {
253 blocked_queue_hd = tso;
255 blocked_queue_tl->link = tso;
257 blocked_queue_tl = tso;
261 /* Check whether various thread queues are empty
263 STATIC_INLINE rtsBool
264 emptyQueue (StgTSO *q)
266 return (q == END_TSO_QUEUE);
269 STATIC_INLINE rtsBool
270 emptyRunQueue(Capability *cap)
272 return emptyQueue(cap->run_queue_hd);
/* blocked_queue_hd/sleeping_queue only exist in the non-threaded RTS. */
#if !defined(THREADED_RTS)
#define EMPTY_BLOCKED_QUEUE()  (emptyQueue(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif
280 STATIC_INLINE rtsBool
281 emptyThreadQueues(Capability *cap)
283 return emptyRunQueue(cap)
284 #if !defined(THREADED_RTS)
285 && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
291 dirtyTSO (StgTSO *tso)
293 tso->flags |= TSO_DIRTY;
297 void sched_belch(char *s, ...)
298 GNU_ATTRIBUTE(format (printf, 1, 2));
301 #endif /* !IN_STG_CODE */
303 #endif /* SCHEDULE_H */