X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSchedule.h;h=6ed759821290df32dd222a2ab9ad0fc91c6beb0d;hb=a200038f469418fef77d863dc3d1cd0125ec1e82;hp=a4a95f3c34c681ebb494b902fff72af41e05063a;hpb=79c9408712af3ddd6340b0b5785ffde34f830042;p=ghc-hetmet.git

diff --git a/rts/Schedule.h b/rts/Schedule.h
index a4a95f3..6ed7598 100644
--- a/rts/Schedule.h
+++ b/rts/Schedule.h
@@ -70,7 +70,9 @@ StgWord findRetryFrameHelper (StgTSO *tso);
  * Called from STG : NO
  * Locks assumed   : none
  */
-void workerStart(Task *task);
+#if defined(THREADED_RTS)
+void OSThreadProcAttr workerStart(Task *task);
+#endif
 
 #if defined(GRAN)
 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
@@ -133,11 +135,6 @@ extern StgTSO *RTS_VAR(sleeping_queue);
 #endif
 #endif
 
-/* Linked list of all threads.
- * Locks required : sched_mutex
- */
-extern StgTSO *RTS_VAR(all_threads);
-
 /* Set to rtsTrue if there are threads on the blackhole_queue, and
  * it is possible that one or more of them may be available to run.
  * This flag is set to rtsFalse after we've checked the queue, and
@@ -159,6 +156,7 @@ void interruptStgRts (void);
 nat run_queue_len (void);
 
 void resurrectThreads (StgTSO *);
+void performPendingThrowTos (StgTSO *);
 
 void printAllThreads(void);
 
@@ -186,11 +184,11 @@ void print_bqe (StgBlockingQueueElement *bqe);
 INLINE_HEADER void
 appendToRunQueue (Capability *cap, StgTSO *tso)
 {
-    ASSERT(tso->link == END_TSO_QUEUE);
+    ASSERT(tso->_link == END_TSO_QUEUE);
     if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_hd = tso;
     } else {
-       cap->run_queue_tl->link = tso;
+       setTSOLink(cap, cap->run_queue_tl, tso);
     }
     cap->run_queue_tl = tso;
 }
@@ -202,7 +200,7 @@ appendToRunQueue (Capability *cap, StgTSO *tso)
 INLINE_HEADER void
 pushOnRunQueue (Capability *cap, StgTSO *tso)
 {
-    tso->link = cap->run_queue_hd;
+    setTSOLink(cap, tso, cap->run_queue_hd);
     cap->run_queue_hd = tso;
     if (cap->run_queue_tl == END_TSO_QUEUE) {
        cap->run_queue_tl = tso;
@@ -216,8 +214,8 @@ popRunQueue (Capability *cap)
 {
     StgTSO *t = cap->run_queue_hd;
     ASSERT(t != END_TSO_QUEUE);
-    cap->run_queue_hd = t->link;
-    t->link = END_TSO_QUEUE;
+    cap->run_queue_hd = t->_link;
+    t->_link = END_TSO_QUEUE; // no write barrier req'd
     if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_tl = END_TSO_QUEUE;
     }
@@ -230,27 +228,32 @@ popRunQueue (Capability *cap)
 INLINE_HEADER void
 appendToBlockedQueue(StgTSO *tso)
 {
-    ASSERT(tso->link == END_TSO_QUEUE);
+    ASSERT(tso->_link == END_TSO_QUEUE);
     if (blocked_queue_hd == END_TSO_QUEUE) {
        blocked_queue_hd = tso;
     } else {
-       blocked_queue_tl->link = tso;
+       setTSOLink(&MainCapability, blocked_queue_tl, tso);
     }
     blocked_queue_tl = tso;
 }
 #endif
 
 #if defined(THREADED_RTS)
+// Assumes: my_cap is owned by the current Task.  We hold
+// other_cap->lock, but we do not necessarily own other_cap; another
+// Task may be running on it.
 INLINE_HEADER void
-appendToWakeupQueue (Capability *cap, StgTSO *tso)
+appendToWakeupQueue (Capability *my_cap, Capability *other_cap, StgTSO *tso)
 {
-    ASSERT(tso->link == END_TSO_QUEUE);
-    if (cap->wakeup_queue_hd == END_TSO_QUEUE) {
-       cap->wakeup_queue_hd = tso;
+    ASSERT(tso->_link == END_TSO_QUEUE);
+    if (other_cap->wakeup_queue_hd == END_TSO_QUEUE) {
+       other_cap->wakeup_queue_hd = tso;
     } else {
-       cap->wakeup_queue_tl->link = tso;
+       // my_cap is passed to setTSOLink() because it may need to
+       // write to the mutable list.
+       setTSOLink(my_cap, other_cap->wakeup_queue_tl, tso);
     }
-    cap->wakeup_queue_tl = tso;
+    other_cap->wakeup_queue_tl = tso;
 }
 #endif
 
@@ -293,11 +296,5 @@ emptyThreadQueues(Capability *cap)
 
 #endif /* !IN_STG_CODE */
 
-INLINE_HEADER void
-dirtyTSO (StgTSO *tso)
-{
-    tso->flags |= TSO_DIRTY;
-}
-
 #endif /* SCHEDULE_H */
 
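The recurring change in the hunks above is that direct stores to a TSO's link field become calls to setTSOLink(cap, tso, target), and the field itself is renamed to _link. The in-diff comments suggest why: the link field is now guarded by a generational-GC write barrier, so a store into a TSO that may live in an old generation has to be recorded on a capability's mutable list, which is why a Capability is threaded through and why appendToWakeupQueue passes my_cap rather than other_cap. The sketch below only illustrates that pattern under those assumptions; the struct layout, flag name, and recordMutable helper are simplified stand-ins, not the actual GHC RTS definitions at this commit.

/* Write-barrier sketch. Illustrative only, not the GHC RTS code.
 * All names below (TsoSketch, CapSketch, recordMutable, LINK_DIRTY,
 * setLinkSketch) are hypothetical stand-ins used to show the pattern
 * behind setTSOLink(): mark the object dirty and remember it on the
 * capability's mutable list before overwriting a pointer field, so a
 * generational GC can still find old-to-new references.
 */
#include <stdio.h>

#define LINK_DIRTY 0x01            /* "link already recorded" flag */

typedef struct TsoSketch_ {
    struct TsoSketch_ *link;       /* stand-in for the _link field */
    unsigned int flags;
    unsigned int gen;              /* generation the object lives in */
} TsoSketch;

typedef struct {
    TsoSketch *mut_list[256];      /* per-capability list of mutated old objects */
    int        mut_count;
} CapSketch;

/* Remember an old-generation object so the next GC rescans it. */
static void recordMutable(CapSketch *cap, TsoSketch *tso)
{
    if (cap->mut_count < 256)
        cap->mut_list[cap->mut_count++] = tso;
}

/* The barrier: only objects outside the youngest generation need
 * recording, and each needs recording at most once between GCs. */
static void setLinkSketch(CapSketch *cap, TsoSketch *tso, TsoSketch *target)
{
    if (tso->gen > 0 && (tso->flags & LINK_DIRTY) == 0) {
        tso->flags |= LINK_DIRTY;
        recordMutable(cap, tso);
    }
    tso->link = target;
}

int main(void)
{
    CapSketch cap = { {0}, 0 };
    TsoSketch old_tso = { NULL, 0, 1 };    /* lives in an old generation */
    TsoSketch young   = { NULL, 0, 0 };

    setLinkSketch(&cap, &old_tso, &young); /* recorded on mut_list */
    setLinkSketch(&cap, &old_tso, NULL);   /* already dirty: not recorded again */
    printf("mutable list entries: %d\n", cap.mut_count);  /* prints 1 */
    return 0;
}

Under that reading, the store in popRunQueue is marked "no write barrier req'd" presumably because END_TSO_QUEUE is a static end-of-list marker rather than a pointer into a younger generation, so the store cannot create an old-to-new reference.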