+/* True iff tso is a bound thread (tied to a particular OS thread).
+ * NOTE(review): inferred from the name — confirm in Schedule.c. */
+StgBool isThreadBound(StgTSO *tso);
+
+/* Run the main computation p lazily; result (if any) written to *ret.
+ * NOTE(review): semantics inferred from the name — confirm at the
+ * definition site. */
+SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);
+
+/* Called by shutdown_handler(). */
+void interruptStgRts (void);
+
+/* Presumably the number of threads on the run queue — verify. */
+nat run_queue_len (void);
+
+void resurrectThreads (StgTSO *);
+
+/* Debug dump of all known threads. */
+void printAllThreads(void);
+
+/* debugging only
+ */
+#ifdef DEBUG
+void print_bq (StgClosure *node);
+#endif
+#if defined(PAR)
+void print_bqe (StgBlockingQueueElement *bqe);
+#endif
+
+/* Attach a textual label to a thread (debugging/tracing aid). */
+void labelThread(StgPtr tso, char *label);
+
+/* -----------------------------------------------------------------------------
+ * Some convenient macros/inline functions...
+ */
+
+#if !IN_STG_CODE
+
+/* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
+
+/* Add a thread to the end of the run queue.
+ * NOTE: tso->link should be END_TSO_QUEUE before calling this function.
+ * ASSUMES: cap->running_task is the current task.
+ */
+STATIC_INLINE void
+appendToRunQueue (Capability *cap, StgTSO *tso)
+{
+    /* Caller must have already unlinked tso from any other queue. */
+    ASSERT(tso->link == END_TSO_QUEUE);
+    if (cap->run_queue_hd == END_TSO_QUEUE) {
+	/* queue was empty: tso becomes the head */
+	cap->run_queue_hd = tso;
+    } else {
+	/* non-empty: link tso after the current tail */
+	cap->run_queue_tl->link = tso;
+    }
+    /* either way, tso is the new tail */
+    cap->run_queue_tl = tso;
+}
+
+/* Push a thread on the beginning of the run queue. Used for
+ * newly awakened threads, so they get run as soon as possible.
+ * ASSUMES: cap->running_task is the current task.
+ */
+STATIC_INLINE void
+pushOnRunQueue (Capability *cap, StgTSO *tso)
+{
+    /* LIFO insert: link tso in front of the current head. */
+    tso->link = cap->run_queue_hd;
+    cap->run_queue_hd = tso;
+    if (cap->run_queue_tl == END_TSO_QUEUE) {
+	/* queue was empty, so tso is also the new tail */
+	cap->run_queue_tl = tso;
+    }
+}
+
+/* Pop the first thread off the runnable queue.
+ */
+STATIC_INLINE StgTSO *
+popRunQueue (Capability *cap)
+{
+    StgTSO *t = cap->run_queue_hd;
+    /* Popping an empty queue is a caller bug. */
+    ASSERT(t != END_TSO_QUEUE);
+    cap->run_queue_hd = t->link;
+    /* Detach t so it can later be linked onto another queue. */
+    t->link = END_TSO_QUEUE;
+    if (cap->run_queue_hd == END_TSO_QUEUE) {
+	/* queue is now empty: reset the tail too */
+	cap->run_queue_tl = END_TSO_QUEUE;
+    }
+    return t;
+}
+
+/* Add a thread to the end of the blocked queue.
+ */
+#if !defined(THREADED_RTS)
+/* Non-threaded RTS only: uses the global blocked_queue_hd/_tl. */
+STATIC_INLINE void
+appendToBlockedQueue(StgTSO *tso)
+{
+    /* Caller must have already unlinked tso from any other queue. */
+    ASSERT(tso->link == END_TSO_QUEUE);
+    if (blocked_queue_hd == END_TSO_QUEUE) {
+	/* queue was empty: tso becomes the head */
+	blocked_queue_hd = tso;
+    } else {
+	blocked_queue_tl->link = tso;
+    }
+    blocked_queue_tl = tso;
+}
+#endif
+
+/* Check whether various thread queues are empty
+ */
+STATIC_INLINE rtsBool
+emptyQueue (StgTSO *q)
+{
+    /* A TSO queue is empty iff its head is the END_TSO_QUEUE sentinel. */
+    return (q == END_TSO_QUEUE);
+}
+
+STATIC_INLINE rtsBool
+emptyRunQueue(Capability *cap)
+{
+    /* True iff cap has no runnable threads queued. */
+    return emptyQueue(cap->run_queue_hd);
+}
+
+#if !defined(THREADED_RTS)
+/* The global blocked/sleeping queues exist only in the non-threaded
+ * RTS (cf. appendToBlockedQueue above), hence the guard. */
+#define EMPTY_BLOCKED_QUEUE() (emptyQueue(blocked_queue_hd))
+#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
+#endif
+
+STATIC_INLINE rtsBool
+emptyThreadQueues(Capability *cap)
+{
+    /* In the non-threaded RTS the global blocked and sleeping queues
+     * must be empty as well, not just cap's run queue. */
+    return emptyRunQueue(cap)
+#if !defined(THREADED_RTS)
+	&& EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
+#endif
+	;
+}
+
+#ifdef DEBUG
+/* Debug-only scheduler logging; the format attribute gives compile-time
+ * printf-style checking of s against the varargs. */
+void sched_belch(char *s, ...)
+   GNU_ATTRIBUTE(format (printf, 1, 2));
+#endif
+
+#endif /* !IN_STG_CODE */
+
+STATIC_INLINE void
+dirtyTSO (StgTSO *tso)
+{
+    /* Mark the TSO as modified. NOTE(review): presumably consumed by
+     * the GC / write-barrier machinery — confirm where TSO_DIRTY is
+     * read before relying on this. */
+    tso->flags |= TSO_DIRTY;
+}
+
+#endif /* SCHEDULE_H */