1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2005
5 * Prototypes for functions in Schedule.c
6 * (RTS internal scheduler interface)
8 * -------------------------------------------------------------------------*/
13 #include "OSThreads.h"
14 #include "Capability.h"
/* initScheduler(), exitScheduler()
 * Called from STG : no
 * Locks assumed : none
 * (NOTE(review): closing comment delimiter restored; it was missing in
 * this copy of the file.)
 */
void initScheduler (void);
void exitScheduler (void);

// Place a new thread on the run queue of the specified Capability.
// NOTE(review): presumably the caller must own `cap` -- confirm against
// Capability.h before relying on this.
void scheduleThread (Capability *cap, StgTSO *tso);
/* awakenBlockedQueue()
 * Takes a pointer to the beginning of a blocked TSO queue, and
 * wakes up the entire queue.
 * Called from STG : yes
 * Locks assumed : none
 *
 * NOTE(review): the three declarations below have conflicting C
 * signatures and cannot all be visible at once.  In the GHC RTS these
 * are the GRAN / PAR / threaded-build variants, normally selected by
 * #if/#elif preprocessor conditionals that appear to have been lost
 * from this copy -- restore the guards before compiling.
 */
void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
void awakenBlockedQueue (Capability *cap, StgTSO *tso);
/* unblockOne()  -- NOTE(review): comment opening reconstructed; the
 * original title line is missing from this copy.
 * Put the specified thread on the run queue of the given Capability.
 * Called from STG : yes
 * Locks assumed : we own the Capability.
 */
StgTSO * unblockOne(Capability *cap, StgTSO *tso);
/* raiseAsync()  -- NOTE(review): comment opening reconstructed.
 * Raises an exception asynchronously in the specified thread.
 * Called from STG : yes
 * Locks assumed : none
 */
void raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception);
/* raiseExceptionHelper -- helper used when raising an exception in a
 * thread; walks/prepares the target TSO's stack.  NOTE(review): exact
 * contract (meaning of the StgWord result) is defined in Schedule.c --
 * confirm there before use. */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper -- STM support: locates the enclosing retry
 * frame on the given thread's stack.  NOTE(review): result encoding
 * defined in Schedule.c -- confirm there. */
StgWord findRetryFrameHelper (StgTSO *tso);
/* GetRoots(evac_fn f)
 * Call f() for each root known to the scheduler.
 * Called from STG : NO
 * Locks assumed : ????
 *   NOTE(review): the locking contract was undocumented in the
 *   original ("????"); verify against Schedule.c before calling
 *   concurrently with the scheduler.
 */
void GetRoots(evac_fn);
/* workerStart()  -- NOTE(review): comment opening reconstructed.
 * Entry point for a new worker task.
 * Called from STG : NO
 * Locks assumed : none
 */
void workerStart(Task *task);
// ToDo: check whether all fcts below are used in the SMP version, too
//
// NOTE(review): awaken_blocked_queue and initThread each appear below
// with several conflicting signatures.  In the GHC RTS these are
// alternatives for the GRAN / PAR / sequential builds, selected by
// preprocessor conditionals that appear to have been stripped from
// this copy; the #if guards must be restored for this header to
// compile.
void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
void unlink_from_bq(StgTSO* tso, StgClosure* node);
void initThread(StgTSO *tso, nat stack_size, StgInt pri);
nat run_queue_len(void);
void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
void initThread(StgTSO *tso, nat stack_size);
char *info_type(StgClosure *closure); // dummy
char *info_type_by_ip(StgInfoTable *ip); // dummy
void awaken_blocked_queue(StgTSO *q);
void initThread(StgTSO *tso, nat stack_size);
/* Context switch flag.
 * Locks required : none (conflicts are harmless)
 */
extern int RTS_VAR(context_switch);

/* Interrupted flag.  -- NOTE(review): title line reconstructed; the
 * original comment opening is missing from this copy.
 * Locks required : none (makes one transition from false->true)
 */
extern rtsBool RTS_VAR(interrupted);

/* Shutting-down flag.  -- NOTE(review): title line reconstructed.
 * Locks required : none (makes one transition from false->true)
 */
extern rtsBool shutting_down_scheduler;
/* Recent-activity states.  -- NOTE(review): comment opening reconstructed.
 * flag that tracks whether we have done any execution in this time slice.
 */
#define ACTIVITY_YES 0 /* there has been activity in the current slice */
#define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
#define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
#define ACTIVITY_DONE_GC 3 /* like 2, but we've done a GC too */

/* Recent activity flag.
 * Locks required : Transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic. Transition from
 * INACTIVE to DONE_GC happens under sched_mutex. No lock required
 * to set it to ACTIVITY_YES.
 */
extern nat recent_activity;
129 * Locks required : sched_mutex
131 * In GranSim we have one run/blocked_queue per PE.
134 // run_queue_hds defined in GranSim.h
136 extern StgTSO *RTS_VAR(blackhole_queue);
137 #if !defined(THREADED_RTS)
138 extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
139 extern StgTSO *RTS_VAR(sleeping_queue);
/* Linked list of all threads.
 * Locks required : sched_mutex
 */
extern StgTSO *RTS_VAR(all_threads);
/* Set to rtsTrue if there are threads on the blackhole_queue, and
 * it is possible that one or more of them may be available to run.
 * This flag is set to rtsFalse after we've checked the queue, and
 * set to rtsTrue just before we run some Haskell code. It is used
 * to decide whether we should yield the Capability or not.
 * Locks required : none (see scheduleCheckBlackHoles()).
 */
extern rtsBool blackholes_need_checking;
#if defined(THREADED_RTS)
/* Protects scheduler-global state shared between Capabilities. */
extern Mutex RTS_VAR(sched_mutex);
#endif
/* bug fix (review): the #if above had no matching #endif in this copy;
 * restored so the declarations that follow are unconditional again. */
/* Is this TSO bound to a particular OS thread? */
StgBool isThreadBound(StgTSO *tso);

/* Run a lazy-IO main computation; result (if any) via *ret. */
SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);

/* Called by shutdown_handler(). */
void interruptStgRts (void);

/* NOTE(review): also declared earlier in this header with an identical
 * signature; the duplicate is harmless in C but likely reflects
 * stripped preprocessor conditionals. */
nat run_queue_len (void);

/* Wake up threads found dead-blocked at the end of GC. */
void resurrectThreads (StgTSO *);

/* Debug dump of every known thread. */
void printAllThreads(void);

/* NOTE(review): print_bq/print_bqe/labelThread are normally debug-only
 * facilities; their #if guards are not visible in this copy -- verify
 * guard pairing against the full file. */
void print_bq (StgClosure *node);
void print_bqe (StgBlockingQueueElement *bqe);
void labelThread(StgPtr tso, char *label);
/* -----------------------------------------------------------------------------
 * Some convenient macros/inline functions...
 * -------------------------------------------------------------------------- */

/* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
193 /* Add a thread to the end of the run queue.
194 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
195 * ASSUMES: cap->running_task is the current task.
198 appendToRunQueue (Capability *cap, StgTSO *tso)
200 ASSERT(tso->link == END_TSO_QUEUE);
201 if (cap->run_queue_hd == END_TSO_QUEUE) {
202 cap->run_queue_hd = tso;
204 cap->run_queue_tl->link = tso;
206 cap->run_queue_tl = tso;
209 /* Push a thread on the beginning of the run queue. Used for
210 * newly awakened threads, so they get run as soon as possible.
211 * ASSUMES: cap->running_task is the current task.
214 pushOnRunQueue (Capability *cap, StgTSO *tso)
216 tso->link = cap->run_queue_hd;
217 cap->run_queue_hd = tso;
218 if (cap->run_queue_tl == END_TSO_QUEUE) {
219 cap->run_queue_tl = tso;
223 /* Pop the first thread off the runnable queue.
225 STATIC_INLINE StgTSO *
226 popRunQueue (Capability *cap)
228 StgTSO *t = cap->run_queue_hd;
229 ASSERT(t != END_TSO_QUEUE);
230 cap->run_queue_hd = t->link;
231 t->link = END_TSO_QUEUE;
232 if (cap->run_queue_hd == END_TSO_QUEUE) {
233 cap->run_queue_tl = END_TSO_QUEUE;
238 /* Add a thread to the end of the blocked queue.
240 #if !defined(THREADED_RTS)
242 appendToBlockedQueue(StgTSO *tso)
244 ASSERT(tso->link == END_TSO_QUEUE);
245 if (blocked_queue_hd == END_TSO_QUEUE) {
246 blocked_queue_hd = tso;
248 blocked_queue_tl->link = tso;
250 blocked_queue_tl = tso;
254 /* Check whether various thread queues are empty
256 STATIC_INLINE rtsBool
257 emptyQueue (StgTSO *q)
259 return (q == END_TSO_QUEUE);
262 STATIC_INLINE rtsBool
263 emptyRunQueue(Capability *cap)
265 return emptyQueue(cap->run_queue_hd);
268 #if !defined(THREADED_RTS)
269 #define EMPTY_BLOCKED_QUEUE() (emptyQueue(blocked_queue_hd))
270 #define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
273 STATIC_INLINE rtsBool
274 emptyThreadQueues(Capability *cap)
276 return emptyRunQueue(cap)
277 #if !defined(THREADED_RTS)
278 && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
/* Debug tracing helper: printf-style message from the scheduler.
 * The GNU_ATTRIBUTE wrapper enables compile-time format checking.
 * NOTE(review): in the GHC RTS this declaration is normally wrapped in
 * a debug-only #if block whose opening is not visible in this copy --
 * verify guard pairing against the full file before compiling.
 */
void sched_belch(char *s, ...)
   GNU_ATTRIBUTE(format (printf, 1, 2));

#endif /* !IN_STG_CODE */

#endif /* SCHEDULE_H */