/* -----------------------------------------------------------------------------
- * $Id: Schedule.h,v 1.33 2002/04/13 05:33:03 sof Exp $
*
* (c) The GHC Team 1998-1999
*
#elif defined(PAR)
void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
#else
-void awakenBlockedQueue(StgTSO *tso);
+void awakenBlockedQueue (StgTSO *tso);
+void awakenBlockedQueueNoLock (StgTSO *tso);
#endif
/* unblockOne()
void raiseAsync(StgTSO *tso, StgClosure *exception);
void raiseAsyncWithLock(StgTSO *tso, StgClosure *exception);
-/* awaitEvent()
+/* raiseExceptionHelper */
+StgWord raiseExceptionHelper (StgTSO *tso, StgClosure *exception);
+
+/* awaitEvent(rtsBool wait)
*
- * Raises an exception asynchronously in the specified thread.
+ * Checks for blocked threads that need to be woken.
*
* Called from STG : NO
* Locks assumed : sched_mutex
*/
rtsBool wakeUpSleepingThreads(nat); /* In Select.c */
+/* wakeBlockedWorkerThread()
+ *
+ * If a worker thread is currently blocked in awaitEvent(), interrupt it.
+ *
+ * Called from STG : NO
+ * Locks assumed : sched_mutex
+ */
+void wakeBlockedWorkerThread(void); /* In Select.c */
+
+/* resetWorkerWakeupPipeAfterFork()
+ *
+ * Notify Select.c that a fork() has occurred
+ *
+ * Called from STG : NO
+ * Locks assumed : don't care, but must be called right after fork()
+ */
+void resetWorkerWakeupPipeAfterFork(void); /* In Select.c */
+
/* GetRoots(evac_fn f)
*
* Call f() for each root known to the scheduler.
/* Context switch flag.
* Locks required : sched_mutex
*/
-extern nat context_switch;
-extern rtsBool interrupted;
+extern nat RTS_VAR(context_switch);
+extern rtsBool RTS_VAR(interrupted);
/* In Select.c */
-extern nat timestamp;
+extern nat RTS_VAR(timestamp);
/* Thread queues.
* Locks required : sched_mutex
#if defined(GRAN)
// run_queue_hds defined in GranSim.h
#else
-extern StgTSO *run_queue_hd, *run_queue_tl;
-extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
-extern StgTSO *sleeping_queue;
+extern StgTSO *RTS_VAR(run_queue_hd), *RTS_VAR(run_queue_tl);
+extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
+extern StgTSO *RTS_VAR(sleeping_queue);
#endif
/* Linked list of all threads. */
-extern StgTSO *all_threads;
+extern StgTSO *RTS_VAR(all_threads);
#if defined(RTS_SUPPORTS_THREADS)
/* Schedule.c has detailed info on what these do */
-extern Mutex sched_mutex;
-extern Condition thread_ready_cond;
-extern Condition returning_worker_cond;
-extern nat rts_n_waiting_workers;
-extern nat rts_n_waiting_tasks;
+extern Mutex RTS_VAR(sched_mutex);
+extern Condition RTS_VAR(returning_worker_cond);
+extern nat RTS_VAR(rts_n_waiting_workers);
+extern nat RTS_VAR(rts_n_waiting_tasks);
#endif
-StgInt forkProcess(StgTSO *tso);
+StgBool rtsSupportsBoundThreads(void);
+StgBool isThreadBound(StgTSO *tso);
-/* Sigh, RTS-internal versions of waitThread(), scheduleThread(), and
- rts_evalIO() for the use by main() only. ToDo: better. */
-extern SchedulerStatus waitThread_(StgTSO *tso,
- /*out*/StgClosure **ret
-#if defined(THREADED_RTS)
- , rtsBool blockWaiting
-#endif
- );
-extern SchedulerStatus rts_mainEvalIO(HaskellObj p, /*out*/HaskellObj *ret);
+extern SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);
/* Called by shutdown_handler(). */
*
* These are the threads which clients have requested that we run.
*
- * In a 'threaded' build, we might have several concurrent clients all
- * waiting for results, and each one will wait on a condition variable
- * until the result is available.
+ * In a 'threaded' build, each of these corresponds to one bound thread.
+ * The pointer to the StgMainThread is passed as a parameter to schedule;
+ * this invocation of schedule will always pass this main thread's
+ * bound_thread_cond to waitForWorkCapability; OS-thread-switching
+ * takes place using passCapability.
*
- * In non-SMP, clients are strictly nested: the first client calls
+ * In non-threaded builds, clients are strictly nested: the first client calls
* into the RTS, which might call out again to C with a _ccall_GC, and
* eventually re-enter the RTS.
*
SchedulerStatus stat;
StgClosure ** ret;
#if defined(RTS_SUPPORTS_THREADS)
+#if defined(THREADED_RTS)
+ Condition bound_thread_cond;
+#else
Condition wakeup;
#endif
+#endif
+ struct StgMainThread_ *prev;
struct StgMainThread_ *link;
} StgMainThread;
*/
extern StgMainThread *main_threads;
+void printAllThreads(void);
+#ifdef COMPILING_SCHEDULER
+static void printThreadBlockage(StgTSO *tso);
+static void printThreadStatus(StgTSO *tso);
+#endif
/* debugging only
*/
#ifdef DEBUG
-void printThreadBlockage(StgTSO *tso);
-void printThreadStatus(StgTSO *tso);
-void printAllThreads(void);
-#endif
void print_bq (StgClosure *node);
+#endif
#if defined(PAR)
void print_bqe (StgBlockingQueueElement *bqe);
#endif
+void labelThread(StgPtr tso, char *label);
+
/* -----------------------------------------------------------------------------
* Some convenient macros...
*/
/* Pop the first thread off the runnable queue.
 */
-#define POP_RUN_QUEUE() \
- ({ StgTSO *t = run_queue_hd; \
- if (t != END_TSO_QUEUE) { \
- run_queue_hd = t->link; \
- t->link = END_TSO_QUEUE; \
+/* POP_RUN_QUEUE(pt): pt receives the popped TSO, or END_TSO_QUEUE when the
+ * run queue is empty.  A do/while(0) statement macro (replacing the old
+ * GNU statement-expression form), so it is safe inside an unbraced if.
+ * The temporary is named tmp_t rather than __tmp_t: identifiers beginning
+ * with a double underscore are reserved for the implementation (C99 7.1.3).
+ */
+#define POP_RUN_QUEUE(pt) \
+ do { StgTSO *tmp_t = run_queue_hd; \
+ if (tmp_t != END_TSO_QUEUE) { \
+ run_queue_hd = tmp_t->link; \
+ tmp_t->link = END_TSO_QUEUE; \
 if (run_queue_hd == END_TSO_QUEUE) { \
 run_queue_tl = END_TSO_QUEUE; \
 } \
 } \
- t; \
- })
+ pt = tmp_t; \
+ } while(0)
/* Add a thread to the end of the blocked queue.
*/
*/
#if defined(RTS_SUPPORTS_THREADS)
/* Notify the RTS that a thread has become runnable: interrupt any worker
 * blocked in awaitEvent() (see wakeBlockedWorkerThread above) and set the
 * context_switch flag.
 * NB: expands to two bare statements (no do/while(0) wrapper), so it must
 * not be used as the sole body of an unbraced if/else.
 */
#define THREAD_RUNNABLE() \
- if ( !noCapabilities() ) { \
- signalCondition(&thread_ready_cond); \
- } \
+ wakeBlockedWorkerThread(); \
context_switch = 1;
#else
/* Non-threaded RTS: no worker to wake. */
#define THREAD_RUNNABLE() /* nothing */
EMPTY_BLOCKED_QUEUE() && \
EMPTY_SLEEPING_QUEUE())
+#if defined(RTS_SUPPORTS_THREADS)
+/* If no task is waiting for a capability,
+ * and if there is work to be done
+ * or if we need to wait for IO or delay requests,
+ * spawn a new worker thread.
+ */
+void
+startSchedulerTaskIfNecessary(void);
+#endif
+
+#ifdef DEBUG
+extern void sched_belch(char *s, ...)
+ GNU_ATTRIBUTE(format (printf, 1, 2));
+#endif
+
#endif /* __SCHEDULE_H__ */