1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2005
5 * Prototypes for functions in Schedule.c
6 * (RTS internal scheduler interface)
8 * -------------------------------------------------------------------------*/
13 #include "OSThreads.h"
14 #include "Capability.h"
16 /* initScheduler(), exitScheduler()
17 * Called from STG : no
18 * Locks assumed : none
20 void initScheduler (void);
21 void exitScheduler (void);
23 // Place a new thread on the run queue of the specified Capability
24 void scheduleThread (Capability *cap, StgTSO *tso);
26 /* awakenBlockedQueue()
28 * Takes a pointer to the beginning of a blocked TSO queue, and
29 * wakes up the entire queue.
30 * Called from STG : yes
31 * Locks assumed : none
34 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
36 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
38 void awakenBlockedQueue (Capability *cap, StgTSO *tso);
43 * Put the specified thread on the run queue of the given Capability.
44 * Called from STG : yes
45 * Locks assumed : we own the Capability.
47 StgTSO * unblockOne(Capability *cap, StgTSO *tso);
51 * Raises an exception asynchronously in the specified thread.
53 * Called from STG : yes
54 * Locks assumed : none
56 void raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception);
58 /* suspendComputation()
60 * A variant of raiseAsync(), this strips the stack of the specified
61 * thread down to the stop_here point, leaving a current closure on
62 * top of the stack at [stop_here - 1].
64 void suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here);
/* raiseExceptionHelper */
/* Stack-walking helper for raising an exception in tso; returns an StgWord
 * frame-type code — see the definition in Schedule.c for exact semantics. */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper */
/* Helper for the STM machinery: locates the enclosing retry frame on tso's
 * stack — presumably used by the `retry#` primop; see Schedule.c. */
StgWord findRetryFrameHelper (StgTSO *tso);
72 /* GetRoots(evac_fn f)
74 * Call f() for each root known to the scheduler.
76 * Called from STG : NO
77 * Locks assumed : ????
79 void GetRoots(evac_fn);
83 * Entry point for a new worker task.
84 * Called from STG : NO
85 * Locks assumed : none
87 void workerStart(Task *task);
89 // ToDo: check whether all fcts below are used in the SMP version, too
91 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
92 void unlink_from_bq(StgTSO* tso, StgClosure* node);
93 void initThread(StgTSO *tso, nat stack_size, StgInt pri);
95 nat run_queue_len(void);
96 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
97 void initThread(StgTSO *tso, nat stack_size);
99 char *info_type(StgClosure *closure); // dummy
100 char *info_type_by_ip(StgInfoTable *ip); // dummy
101 void awaken_blocked_queue(StgTSO *q);
102 void initThread(StgTSO *tso, nat stack_size);
105 /* Context switch flag.
106 * Locks required : none (conflicts are harmless)
108 extern int RTS_VAR(context_switch);
111 * Locks required : none (makes one transition from false->true)
113 extern rtsBool RTS_VAR(interrupted);
116 * Locks required : none (makes one transition from false->true)
118 extern rtsBool shutting_down_scheduler;
121 * flag that tracks whether we have done any execution in this time slice.
123 #define ACTIVITY_YES 0 /* there has been activity in the current slice */
124 #define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
125 #define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
126 #define ACTIVITY_DONE_GC 3 /* like 2, but we've done a GC too */
128 /* Recent activity flag.
129 * Locks required : Transition from MAYBE_NO to INACTIVE
130 * happens in the timer signal, so it is atomic. Trnasition from
131 * INACTIVE to DONE_GC happens under sched_mutex. No lock required
132 * to set it to ACTIVITY_YES.
134 extern nat recent_activity;
137 * Locks required : sched_mutex
139 * In GranSim we have one run/blocked_queue per PE.
142 // run_queue_hds defined in GranSim.h
144 extern StgTSO *RTS_VAR(blackhole_queue);
145 #if !defined(THREADED_RTS)
146 extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
147 extern StgTSO *RTS_VAR(sleeping_queue);
151 /* Linked list of all threads.
152 * Locks required : sched_mutex
154 extern StgTSO *RTS_VAR(all_threads);
156 /* Set to rtsTrue if there are threads on the blackhole_queue, and
157 * it is possible that one or more of them may be available to run.
158 * This flag is set to rtsFalse after we've checked the queue, and
159 * set to rtsTrue just before we run some Haskell code. It is used
160 * to decide whether we should yield the Capability or not.
161 * Locks required : none (see scheduleCheckBlackHoles()).
163 extern rtsBool blackholes_need_checking;
/* Protects scheduler-global state in the threaded RTS. */
#if defined(THREADED_RTS)
extern Mutex RTS_VAR(sched_mutex);
#endif
/* rtsTrue iff the thread is bound to a particular OS thread —
 * presumably a "bound" TSO; confirm against the definition in Schedule.c. */
StgBool isThreadBound(StgTSO *tso);

/* Run an I/O action to completion — external entry point used by the RTS API. */
SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);

/* Called by shutdown_handler(). */
void interruptStgRts (void);

/* NOTE(review): also declared above in the PAR-only section — the two
 * declarations are identical, so this redundancy is harmless. */
nat run_queue_len (void);

/* Wake up deadlocked threads so they can receive an exception — presumably
 * called at GC time; verify against Schedule.c. */
void resurrectThreads (StgTSO *);

/* Debugging: dump the state of all threads. */
void printAllThreads(void);

/* Debugging: print a blocked queue rooted at a closure. */
void print_bq (StgClosure *node);

/* Debugging: print a blocking-queue element (parallel builds). */
void print_bqe (StgBlockingQueueElement *bqe);

/* Attach a label to a thread, for debugging/profiling output. */
void labelThread(StgPtr tso, char *label);
193 /* -----------------------------------------------------------------------------
194 * Some convenient macros/inline functions...
199 /* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
201 /* Add a thread to the end of the run queue.
202 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
203 * ASSUMES: cap->running_task is the current task.
206 appendToRunQueue (Capability *cap, StgTSO *tso)
208 ASSERT(tso->link == END_TSO_QUEUE);
209 if (cap->run_queue_hd == END_TSO_QUEUE) {
210 cap->run_queue_hd = tso;
212 cap->run_queue_tl->link = tso;
214 cap->run_queue_tl = tso;
217 /* Push a thread on the beginning of the run queue. Used for
218 * newly awakened threads, so they get run as soon as possible.
219 * ASSUMES: cap->running_task is the current task.
222 pushOnRunQueue (Capability *cap, StgTSO *tso)
224 tso->link = cap->run_queue_hd;
225 cap->run_queue_hd = tso;
226 if (cap->run_queue_tl == END_TSO_QUEUE) {
227 cap->run_queue_tl = tso;
231 /* Pop the first thread off the runnable queue.
233 STATIC_INLINE StgTSO *
234 popRunQueue (Capability *cap)
236 StgTSO *t = cap->run_queue_hd;
237 ASSERT(t != END_TSO_QUEUE);
238 cap->run_queue_hd = t->link;
239 t->link = END_TSO_QUEUE;
240 if (cap->run_queue_hd == END_TSO_QUEUE) {
241 cap->run_queue_tl = END_TSO_QUEUE;
246 /* Add a thread to the end of the blocked queue.
248 #if !defined(THREADED_RTS)
250 appendToBlockedQueue(StgTSO *tso)
252 ASSERT(tso->link == END_TSO_QUEUE);
253 if (blocked_queue_hd == END_TSO_QUEUE) {
254 blocked_queue_hd = tso;
256 blocked_queue_tl->link = tso;
258 blocked_queue_tl = tso;
262 /* Check whether various thread queues are empty
264 STATIC_INLINE rtsBool
265 emptyQueue (StgTSO *q)
267 return (q == END_TSO_QUEUE);
270 STATIC_INLINE rtsBool
271 emptyRunQueue(Capability *cap)
273 return emptyQueue(cap->run_queue_hd);
276 #if !defined(THREADED_RTS)
277 #define EMPTY_BLOCKED_QUEUE() (emptyQueue(blocked_queue_hd))
278 #define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
281 STATIC_INLINE rtsBool
282 emptyThreadQueues(Capability *cap)
284 return emptyRunQueue(cap)
285 #if !defined(THREADED_RTS)
286 && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
/* Scheduler debug-output helper; printf-style, with compile-time format
 * checking via the GNU format attribute.
 * NOTE(review): in GHC this declaration is normally guarded by
 * #if defined(DEBUG) — the guard is not visible in this chunk; confirm. */
void sched_belch(char *s, ...)
GNU_ATTRIBUTE(format (printf, 1, 2));
296 #endif /* !IN_STG_CODE */
298 #endif /* SCHEDULE_H */