/* -----------------------------------------------------------------------------
- * $Id: Schedule.h,v 1.28 2002/02/13 08:48:07 sof Exp $
*
* (c) The GHC Team 1998-1999
*
* (RTS internal scheduler interface)
*
* -------------------------------------------------------------------------*/
+
#ifndef __SCHEDULE_H__
#define __SCHEDULE_H__
#include "OSThreads.h"
-//@menu
-//* Scheduler Functions::
-//* Scheduler Vars and Data Types::
-//* Some convenient macros::
-//* Index::
-//@end menu
-
-//@node Scheduler Functions, Scheduler Vars and Data Types
-//@subsection Scheduler Functions
-
-//@cindex initScheduler
-//@cindex exitScheduler
/* initScheduler(), exitScheduler(), startTasks()
*
* Called from STG : no
extern void initScheduler ( void );
extern void exitScheduler ( void );
-//@cindex awakenBlockedQueue
/* awakenBlockedQueue()
*
* Takes a pointer to the beginning of a blocked TSO queue, and
#elif defined(PAR)
void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
#else
-void awakenBlockedQueue(StgTSO *tso);
+void awakenBlockedQueue (StgTSO *tso);
+void awakenBlockedQueueNoLock (StgTSO *tso);
#endif
-//@cindex unblockOne
/* unblockOne()
*
* Takes a pointer to the beginning of a blocked TSO queue, and
StgTSO *unblockOne(StgTSO *tso);
#endif
-//@cindex raiseAsync
/* raiseAsync()
*
* Raises an exception asynchronously in the specified thread.
* Locks assumed : none
*/
void raiseAsync(StgTSO *tso, StgClosure *exception);
+void raiseAsyncWithLock(StgTSO *tso, StgClosure *exception);
+
+/* raiseExceptionHelper */
+StgWord raiseExceptionHelper (StgTSO *tso, StgClosure *exception);
-//@cindex awaitEvent
-/* awaitEvent()
+/* findRetryFrameHelper */
+StgWord findRetryFrameHelper (StgTSO *tso);
+
+/* awaitEvent(rtsBool wait)
*
- * Raises an exception asynchronously in the specified thread.
+ * Checks for blocked threads that need to be woken.
*
* Called from STG : NO
* Locks assumed : sched_mutex
*/
rtsBool wakeUpSleepingThreads(nat); /* In Select.c */
+/* wakeBlockedWorkerThread()
+ *
+ * If a worker thread is currently blocked in awaitEvent(), interrupt it.
+ *
+ * Called from STG : NO
+ * Locks assumed : sched_mutex
+ */
+void wakeBlockedWorkerThread(void); /* In Select.c */
+
+/* resetWorkerWakeupPipeAfterFork()
+ *
+ * Notify Select.c that a fork() has occurred
+ *
+ * Called from STG : NO
+ * Locks assumed : don't care, but must be called right after fork()
+ */
+void resetWorkerWakeupPipeAfterFork(void); /* In Select.c */
+
/* GetRoots(evac_fn f)
*
* Call f() for each root known to the scheduler.
void GetRoots(evac_fn);
// ToDo: check whether all fcts below are used in the SMP version, too
-//@cindex awaken_blocked_queue
#if defined(GRAN)
void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
void unlink_from_bq(StgTSO* tso, StgClosure* node);
void initThread(StgTSO *tso, nat stack_size);
#endif
-//@node Scheduler Vars and Data Types, Some convenient macros, Scheduler Functions
-//@subsection Scheduler Vars and Data Types
-
-//@cindex context_switch
/* Context switch flag.
* Locks required : sched_mutex
*/
-extern nat context_switch;
-extern rtsBool interrupted;
+extern int RTS_VAR(context_switch);
+extern rtsBool RTS_VAR(interrupted);
/* In Select.c */
-extern nat timestamp;
+extern nat RTS_VAR(timestamp);
/* Thread queues.
* Locks required : sched_mutex
#if defined(GRAN)
// run_queue_hds defined in GranSim.h
#else
-extern StgTSO *run_queue_hd, *run_queue_tl;
-extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
-extern StgTSO *sleeping_queue;
+extern StgTSO *RTS_VAR(run_queue_hd), *RTS_VAR(run_queue_tl);
+extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
+extern StgTSO *RTS_VAR(sleeping_queue);
#endif
/* Linked list of all threads. */
-extern StgTSO *all_threads;
+extern StgTSO *RTS_VAR(all_threads);
#if defined(RTS_SUPPORTS_THREADS)
/* Schedule.c has detailed info on what these do */
-extern Mutex sched_mutex;
-extern Condition thread_ready_cond;
-extern Condition returning_worker_cond;
-extern nat rts_n_waiting_workers;
-extern nat rts_n_waiting_tasks;
+extern Mutex RTS_VAR(sched_mutex);
+extern Condition RTS_VAR(returning_worker_cond);
+extern nat RTS_VAR(rts_n_waiting_workers);
+extern nat RTS_VAR(rts_n_waiting_tasks);
#endif
+StgBool isThreadBound(StgTSO *tso);
-/* Sigh, RTS-internal versions of waitThread(), scheduleThread(), and
- rts_evalIO() for the use by main() only. ToDo: better. */
-extern SchedulerStatus waitThread_(StgTSO *tso,
- /*out*/StgClosure **ret
-#if defined(THREADED_RTS)
- , rtsBool blockWaiting
-#endif
- );
-extern void scheduleThread_(StgTSO *tso
-#if defined(THREADED_RTS)
- , rtsBool createTask
-#endif
- );
-extern SchedulerStatus rts_mainEvalIO(HaskellObj p, /*out*/HaskellObj *ret);
+extern SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);
/* Called by shutdown_handler(). */
void resurrectThreads( StgTSO * );
-//@node Some convenient macros, Index, Scheduler Vars and Data Types
-//@subsection Some convenient macros
+/* Main threads:
+ *
+ * These are the threads which clients have requested that we run.
+ *
+ * In a 'threaded' build, each of these corresponds to one bound thread.
+ * The pointer to the StgMainThread is passed as a parameter to schedule;
+ * this invocation of schedule will always pass this main thread's
+ * bound_thread_cond to waitForWorkCapability; OS-thread-switching
+ * takes place using passCapability.
+ *
+ * In non-threaded builds, clients are strictly nested: the first client calls
+ * into the RTS, which might call out again to C with a _ccall_GC, and
+ * eventually re-enter the RTS.
+ *
+ * This is non-abstract at the moment because the garbage collector
+ * treats pointers to TSOs from the main thread list as "weak" - these
+ * pointers won't prevent a thread from receiving a BlockedOnDeadMVar
+ * exception.
+ *
+ * Information about main threads is kept in a linked list:
+ */
+/* One record per client-requested (main) thread -- see commentary above.
+ * NOTE(review): field roles below are inferred from this header's own
+ * commentary and the removed waitThread_() signature; confirm in Schedule.c.
+ */
+typedef struct StgMainThread_ {
+  StgTSO * tso;                 /* the TSO the client asked us to run */
+  SchedulerStatus stat;         /* completion status reported back to the client */
+  StgClosure ** ret;            /* out-param slot for the result closure */
+#if defined(RTS_SUPPORTS_THREADS)
+#if defined(THREADED_RTS)
+  Condition bound_thread_cond;  /* per-main-thread condition used when scheduling
+                                   this bound thread (see comment above) */
+#else
+  Condition wakeup;             /* signalled to wake the waiting client thread */
+#endif
+#endif
+  struct StgMainThread_ *prev;  /* doubly-linked main-thread list */
+  struct StgMainThread_ *link;
+} StgMainThread;
+
+/* Main thread queue.
+ * Locks required: sched_mutex.
+ */
+extern StgMainThread *main_threads;
+void printAllThreads(void);
+#ifdef COMPILING_SCHEDULER
+static void printThreadBlockage(StgTSO *tso);
+static void printThreadStatus(StgTSO *tso);
+#endif
/* debugging only
*/
#ifdef DEBUG
-void printThreadBlockage(StgTSO *tso);
-void printThreadStatus(StgTSO *tso);
-void printAllThreads(void);
-#endif
void print_bq (StgClosure *node);
+#endif
#if defined(PAR)
void print_bqe (StgBlockingQueueElement *bqe);
#endif
+void labelThread(StgPtr tso, char *label);
+
/* -----------------------------------------------------------------------------
* Some convenient macros...
*/
/* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
-//@cindex APPEND_TO_RUN_QUEUE
/* Add a thread to the end of the run queue.
* NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
*/
} \
run_queue_tl = tso;
-//@cindex PUSH_ON_RUN_QUEUE
/* Push a thread on the beginning of the run queue. Used for
* newly awakened threads, so they get run as soon as possible.
*/
run_queue_tl = tso; \
}
-//@cindex POP_RUN_QUEUE
/* Pop the first thread off the runnable queue.
*/
-#define POP_RUN_QUEUE() \
- ({ StgTSO *t = run_queue_hd; \
- if (t != END_TSO_QUEUE) { \
- run_queue_hd = t->link; \
- t->link = END_TSO_QUEUE; \
+#define POP_RUN_QUEUE(pt) \
+ do { StgTSO *__tmp_t = run_queue_hd; \
+ if (__tmp_t != END_TSO_QUEUE) { \
+ run_queue_hd = __tmp_t->link; \
+ __tmp_t->link = END_TSO_QUEUE; \
if (run_queue_hd == END_TSO_QUEUE) { \
run_queue_tl = END_TSO_QUEUE; \
} \
} \
- t; \
- })
+ pt = __tmp_t; \
+ } while(0)
-//@cindex APPEND_TO_BLOCKED_QUEUE
/* Add a thread to the end of the blocked queue.
*/
#define APPEND_TO_BLOCKED_QUEUE(tso) \
} \
blocked_queue_tl = tso;
-//@cindex THREAD_RUNNABLE
-/* Signal that a runnable thread has become available, in
- * case there are any waiting tasks to execute it.
+/* Check whether various thread queues are empty
*/
+#define EMPTY_QUEUE(q) (q == END_TSO_QUEUE)
+
+#define EMPTY_RUN_QUEUE() (EMPTY_QUEUE(run_queue_hd))
+#define EMPTY_BLOCKED_QUEUE() (EMPTY_QUEUE(blocked_queue_hd))
+#define EMPTY_SLEEPING_QUEUE() (EMPTY_QUEUE(sleeping_queue))
+
+#define EMPTY_THREAD_QUEUES() (EMPTY_RUN_QUEUE() && \
+ EMPTY_BLOCKED_QUEUE() && \
+ EMPTY_SLEEPING_QUEUE())
+
#if defined(RTS_SUPPORTS_THREADS)
-#define THREAD_RUNNABLE() \
- if ( !noCapabilities() ) { \
- signalCondition(&thread_ready_cond); \
- } \
- context_switch = 1;
-#else
-#define THREAD_RUNNABLE() /* nothing */
+/* If no task is waiting for a capability,
+ * and if there is work to be done
+ * or if we need to wait for IO or delay requests,
+ * spawn a new worker thread.
+ */
+void
+startSchedulerTaskIfNecessary(void);
#endif
-//@cindex EMPTY_RUN_QUEUE
-/* Check whether the run queue is empty i.e. the PE is idle
- */
-#define EMPTY_RUN_QUEUE() (run_queue_hd == END_TSO_QUEUE)
-#define EMPTY_QUEUE(q) (q == END_TSO_QUEUE)
+#ifdef DEBUG
+extern void sched_belch(char *s, ...)
+ GNU_ATTRIBUTE(format (printf, 1, 2));
+#endif
#endif /* __SCHEDULE_H__ */
-
-//@node Index, , Some convenient macros
-//@subsection Index
-
-//@index
-//* APPEND_TO_BLOCKED_QUEUE:: @cindex\s-+APPEND_TO_BLOCKED_QUEUE
-//* APPEND_TO_RUN_QUEUE:: @cindex\s-+APPEND_TO_RUN_QUEUE
-//* POP_RUN_QUEUE :: @cindex\s-+POP_RUN_QUEUE
-//* PUSH_ON_RUN_QUEUE:: @cindex\s-+PUSH_ON_RUN_QUEUE
-//* awaitEvent:: @cindex\s-+awaitEvent
-//* awakenBlockedQueue:: @cindex\s-+awakenBlockedQueue
-//* awaken_blocked_queue:: @cindex\s-+awaken_blocked_queue
-//* context_switch:: @cindex\s-+context_switch
-//* exitScheduler:: @cindex\s-+exitScheduler
-//* gc_pending_cond:: @cindex\s-+gc_pending_cond
-//* initScheduler:: @cindex\s-+initScheduler
-//* raiseAsync:: @cindex\s-+raiseAsync
-//* startTasks:: @cindex\s-+startTasks
-//* unblockOne:: @cindex\s-+unblockOne
-//@end index