* Locks assumed : none
*/
void initScheduler (void);
-void exitScheduler (void);
+void exitScheduler (rtsBool wait_foreign);
+void freeScheduler (void);
// Place a new thread on the run queue of the current Capability
void scheduleThread (Capability *cap, StgTSO *tso);
/* findRetryFrameHelper */
StgWord findRetryFrameHelper (StgTSO *tso);
-/* GetRoots(evac_fn f)
- *
- * Call f() for each root known to the scheduler.
- *
- * Called from STG : NO
- * Locks assumed : ????
- */
-void GetRoots(evac_fn);
-
/* workerStart()
*
* Entry point for a new worker task.
#endif
#endif
-/* Linked list of all threads.
- * Locks required : sched_mutex
- */
-extern StgTSO *RTS_VAR(all_threads);
-
/* Set to rtsTrue if there are threads on the blackhole_queue, and
* it is possible that one or more of them may be available to run.
* This flag is set to rtsFalse after we've checked the queue, and
nat run_queue_len (void);
void resurrectThreads (StgTSO *);
+void performPendingThrowTos (StgTSO *);
void printAllThreads(void);
* NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
* ASSUMES: cap->running_task is the current task.
*/
/* appendToRunQueue: link tso onto the TAIL of cap's run queue.
 * Requires tso's link field to be END_TSO_QUEUE on entry (ASSERTed),
 * i.e. tso is not already on a queue.
 * Diff: STATIC_INLINE -> INLINE_HEADER; field renamed link -> _link;
 * the raw tail->link store is replaced by setTSOLink(cap, ...).
 * NOTE(review): setTSOLink presumably applies the GC write barrier —
 * the "no write barrier req'd" remark where _link is still written
 * directly (popRunQueue hunk) suggests this; confirm against
 * setTSOLink's definition before relying on it.
 */
-STATIC_INLINE void
+INLINE_HEADER void
appendToRunQueue (Capability *cap, StgTSO *tso)
{
- ASSERT(tso->link == END_TSO_QUEUE);
+ ASSERT(tso->_link == END_TSO_QUEUE);
if (cap->run_queue_hd == END_TSO_QUEUE) {
cap->run_queue_hd = tso;
} else {
- cap->run_queue_tl->link = tso;
+ setTSOLink(cap, cap->run_queue_tl, tso);
}
cap->run_queue_tl = tso;
}
* newly awakened threads, so they get run as soon as possible.
* ASSUMES: cap->running_task is the current task.
*/
/* pushOnRunQueue: push tso onto the FRONT of cap's run queue so it is
 * scheduled as soon as possible (per the preceding comment about
 * newly awakened threads). Diff: the direct tso->link store becomes
 * setTSOLink(cap, tso, ...). The function's closing brace lies
 * outside this hunk.
 */
-STATIC_INLINE void
+INLINE_HEADER void
pushOnRunQueue (Capability *cap, StgTSO *tso)
{
- tso->link = cap->run_queue_hd;
+ setTSOLink(cap, tso, cap->run_queue_hd);
cap->run_queue_hd = tso;
if (cap->run_queue_tl == END_TSO_QUEUE) {
cap->run_queue_tl = tso;
/* Pop the first thread off the runnable queue.
*/
/* popRunQueue: detach and return the head of cap's run queue; the
 * queue must be non-empty (ASSERTed). Resets the tail pointer when
 * the queue becomes empty. Diff: reads/writes the renamed _link
 * field directly — per the in-line comment, storing END_TSO_QUEUE
 * needs no write barrier (it creates no old-to-new pointer).
 * The function's closing brace lies outside this hunk.
 */
-STATIC_INLINE StgTSO *
+INLINE_HEADER StgTSO *
popRunQueue (Capability *cap)
{
StgTSO *t = cap->run_queue_hd;
ASSERT(t != END_TSO_QUEUE);
- cap->run_queue_hd = t->link;
- t->link = END_TSO_QUEUE;
+ cap->run_queue_hd = t->_link;
+ t->_link = END_TSO_QUEUE; // no write barrier req'd
if (cap->run_queue_hd == END_TSO_QUEUE) {
cap->run_queue_tl = END_TSO_QUEUE;
}
/* Add a thread to the end of the blocked queue.
*/
#if !defined(THREADED_RTS)
/* appendToBlockedQueue: non-THREADED_RTS builds only (see the
 * enclosing #if) — add tso to the tail of the global blocked queue.
 * Diff: the raw tail->link store becomes
 * setTSOLink(&MainCapability, ...); NOTE(review): &MainCapability is
 * presumably correct here because a non-threaded RTS has exactly one
 * capability — confirm against Capability.h.
 */
-STATIC_INLINE void
+INLINE_HEADER void
appendToBlockedQueue(StgTSO *tso)
{
- ASSERT(tso->link == END_TSO_QUEUE);
+ ASSERT(tso->_link == END_TSO_QUEUE);
if (blocked_queue_hd == END_TSO_QUEUE) {
blocked_queue_hd = tso;
} else {
- blocked_queue_tl->link = tso;
+ setTSOLink(&MainCapability, blocked_queue_tl, tso);
}
blocked_queue_tl = tso;
}
#endif
#if defined(THREADED_RTS)
/* appendToWakeupQueue: THREADED_RTS builds only (see the enclosing
 * #if) — add tso to the tail of cap's wakeup queue. Structurally
 * identical to appendToRunQueue but for the wakeup_queue_hd/_tl
 * fields. Diff: link -> _link rename plus setTSOLink() for the
 * barrier-requiring tail update.
 */
-STATIC_INLINE void
+INLINE_HEADER void
appendToWakeupQueue (Capability *cap, StgTSO *tso)
{
- ASSERT(tso->link == END_TSO_QUEUE);
+ ASSERT(tso->_link == END_TSO_QUEUE);
if (cap->wakeup_queue_hd == END_TSO_QUEUE) {
cap->wakeup_queue_hd = tso;
} else {
- cap->wakeup_queue_tl->link = tso;
+ setTSOLink(cap, cap->wakeup_queue_tl, tso);
}
cap->wakeup_queue_tl = tso;
}
/* Check whether various thread queues are empty
*/
/* emptyQueue: true iff the TSO queue headed by q is empty
 * (END_TSO_QUEUE is the list terminator). Diff: linkage macro only,
 * STATIC_INLINE -> INLINE_HEADER; behavior unchanged.
 */
-STATIC_INLINE rtsBool
+INLINE_HEADER rtsBool
emptyQueue (StgTSO *q)
{
return (q == END_TSO_QUEUE);
}
/* emptyRunQueue: true iff cap's run queue has no threads; thin
 * wrapper over emptyQueue on run_queue_hd. Diff: linkage macro
 * change only.
 */
-STATIC_INLINE rtsBool
+INLINE_HEADER rtsBool
emptyRunQueue(Capability *cap)
{
return emptyQueue(cap->run_queue_hd);
}
#if defined(THREADED_RTS)
/* emptyWakeupQueue: THREADED_RTS only — true iff cap's wakeup queue
 * is empty. Diff: linkage macro change only. The closing brace lies
 * outside this hunk.
 */
-STATIC_INLINE rtsBool
+INLINE_HEADER rtsBool
emptyWakeupQueue(Capability *cap)
{
return emptyQueue(cap->wakeup_queue_hd);
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif
/* emptyThreadQueues: true iff no runnable work exists for cap —
 * begins with emptyRunQueue(cap); the remainder of the boolean
 * expression (and the closing brace) lies outside this hunk.
 * Diff: linkage macro change only.
 */
-STATIC_INLINE rtsBool
+INLINE_HEADER rtsBool
emptyThreadQueues(Capability *cap)
{
return emptyRunQueue(cap)
#endif /* !IN_STG_CODE */
/* Diff: dirtyTSO() (sets TSO_DIRTY in tso->flags) is deleted from
 * this header. NOTE(review): its replacement is not visible in this
 * hunk — presumably the dirty-marking now lives with the write
 * barrier machinery (cf. setTSOLink introduced above); confirm that
 * no remaining caller still references dirtyTSO before applying.
 */
-STATIC_INLINE void
-dirtyTSO (StgTSO *tso)
-{
- tso->flags |= TSO_DIRTY;
-}
-
#endif /* SCHEDULE_H */