void awakenBlockedQueueNoLock (StgTSO *tso);
#endif
+/* Version of scheduleThread that doesn't take sched_mutex */
+void scheduleThreadLocked(StgTSO *tso);
+
/* unblockOne()
 *
 * Takes a pointer to the beginning of a blocked TSO queue, wakes up
 * the first thread on it, and returns the remainder of the queue.
 */
#if defined(GRAN) || defined(PAR)
StgBlockingQueueElement *unblockOne(StgBlockingQueueElement *bqe, StgClosure *node);
+StgBlockingQueueElement *unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node);
#else
StgTSO *unblockOne(StgTSO *tso);
+StgTSO *unblockOneLocked(StgTSO *tso);
#endif
/* raiseAsync() */
/* raiseExceptionHelper */
StgWord raiseExceptionHelper (StgTSO *tso, StgClosure *exception);
+/* findRetryFrameHelper */
+StgWord findRetryFrameHelper (StgTSO *tso);
+
/* awaitEvent(rtsBool wait)
*
* Checks for blocked threads that need to be woken.
* Called from STG : NO
* Locks assumed : sched_mutex
*/
-rtsBool wakeUpSleepingThreads(nat); /* In Select.c */
+rtsBool wakeUpSleepingThreads(lnat); /* In Select.c */
/* wakeBlockedWorkerThread()
*
/* Context switch flag.
* Locks required : sched_mutex
*/
-extern nat RTS_VAR(context_switch);
+extern int RTS_VAR(context_switch);
extern rtsBool RTS_VAR(interrupted);
+/*
+ * flag that tracks whether we have done any execution in this time slice.
+ */
+#define ACTIVITY_YES 0 /* there has been activity in the current slice */
+#define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
+#define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
+#define ACTIVITY_DONE_GC 3 /* like 2, but we've done a GC too */
+extern nat recent_activity;
+
/* In Select.c */
-extern nat RTS_VAR(timestamp);
+extern lnat RTS_VAR(timestamp);
/* Thread queues.
* Locks required : sched_mutex
#else
extern StgTSO *RTS_VAR(run_queue_hd), *RTS_VAR(run_queue_tl);
extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
+extern StgTSO *RTS_VAR(blackhole_queue);
extern StgTSO *RTS_VAR(sleeping_queue);
#endif
/* Linked list of all threads. */
extern StgTSO *RTS_VAR(all_threads);
+/* Set to rtsTrue if there are threads on the blackhole_queue, and
+ * it is possible that one or more of them may be available to run.
+ * This flag is set to rtsFalse after we've checked the queue, and
+ * set to rtsTrue just before we run some Haskell code. It is used
+ * to decide whether we should yield the Capability or not.
+ */
+extern rtsBool blackholes_need_checking;
+
#if defined(RTS_SUPPORTS_THREADS)
/* Schedule.c has detailed info on what these do */
extern Mutex RTS_VAR(sched_mutex);
extern nat RTS_VAR(rts_n_waiting_tasks);
#endif
-StgBool rtsSupportsBoundThreads(void);
StgBool isThreadBound(StgTSO *tso);
extern SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);
SchedulerStatus stat;
StgClosure ** ret;
#if defined(RTS_SUPPORTS_THREADS)
-#if defined(THREADED_RTS)
Condition bound_thread_cond;
-#else
- Condition wakeup;
-#endif
#endif
struct StgMainThread_ *prev;
struct StgMainThread_ *link;
} \
blocked_queue_tl = tso;
-/* Signal that a runnable thread has become available, in
- * case there are any waiting tasks to execute it.
- */
-#if defined(RTS_SUPPORTS_THREADS)
-#define THREAD_RUNNABLE() \
- wakeBlockedWorkerThread(); \
- context_switch = 1;
-#else
-#define THREAD_RUNNABLE() /* nothing */
-#endif
-
/* Check whether various thread queues are empty
*/
/* True iff the given TSO queue is empty.
 * The argument is fully parenthesized so the macro expands correctly
 * even when invoked with a lower-precedence expression (e.g. a
 * comparison or assignment) as the queue operand.
 */
#define EMPTY_QUEUE(q) ((q) == END_TSO_QUEUE)