#define END_TSO_QUEUE stg_END_TSO_QUEUE_closure
#define END_INVARIANT_CHECK_QUEUE stg_END_INVARIANT_CHECK_QUEUE_closure
-#define dirtyTSO(tso) \
- StgTSO_flags(tso) = StgTSO_flags(tso) | TSO_DIRTY::I32;
-
#define recordMutableCap(p, gen, regs) \
W_ __bd; \
W_ mut_list; \
#define TSO_INTERRUPTIBLE 8
#define TSO_STOPPED_ON_BREAKPOINT 16
+/*
+ * TSO_LINK_DIRTY is set when a TSO's link field is modified
+ */
+#define TSO_LINK_DIRTY 32
+
/* -----------------------------------------------------------------------------
RET_DYN stack frames
-------------------------------------------------------------------------- */
extern void dirty_MUT_VAR(StgRegTable *reg, StgClosure *p);
extern void dirty_MVAR(StgRegTable *reg, StgClosure *p);
-extern void dirty_TSO(StgClosure *tso);
-
#endif /* RTSEXTERNAL_H */
typedef struct StgTSO_ {
StgHeader header;
- struct StgTSO_* link; /* Links threads onto blocking queues */
+ /* The link field, for linking threads together in lists (e.g. the
+ run queue on a Capability.
+ */
+ struct StgTSO_* _link;
+ /*
+ NOTE!!! do not modify _link directly, it is subject to
+ a write barrier for generational GC. Instead use the
+ setTSOLink() function. Exceptions to this rule are:
+
+ * setting the link field to END_TSO_QUEUE
+ * putting a TSO on the blackhole_queue
+ * setting the link field of the currently running TSO, as it
+ will already be dirty.
+ */
+
struct StgTSO_* global_link; /* Links all threads together */
StgWord16 what_next; /* Values defined in Constants.h */
} StgTSO;
/* -----------------------------------------------------------------------------
+ functions
+ -------------------------------------------------------------------------- */
+
+extern void dirty_TSO (Capability *cap, StgTSO *tso);
+extern void setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target);
+
+/* -----------------------------------------------------------------------------
Invariants:
An active thread has the following properties:
closure_field(StgArrWords, words);
closure_payload(StgArrWords, payload);
- closure_field(StgTSO, link);
+ closure_field(StgTSO, _link);
closure_field(StgTSO, global_link);
closure_field(StgTSO, what_next);
closure_field(StgTSO, why_blocked);
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
- StgTSO_link(StgMVar_tail(mvar)) = CurrentTSO;
+ foreign "C" setTSOLink(MyCapability() "ptr", StgMVar_tail(mvar),
+ CurrentTSO);
}
- StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
+ StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
/* actually perform the putMVar for the thread that we just woke up */
tso = StgMVar_head(mvar);
PerformPut(tso,StgMVar_value(mvar));
- dirtyTSO(tso);
+
+ if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability(), tso);
+ }
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(StgMVar_head(mvar),mvar) [];
StgMVar_head(mvar) = tso;
#else
- ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr",
- StgMVar_head(mvar) "ptr") [];
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
/* actually perform the putMVar for the thread that we just woke up */
tso = StgMVar_head(mvar);
PerformPut(tso,StgMVar_value(mvar));
- dirtyTSO(tso);
+ if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability(), tso);
+ }
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(StgMVar_head(mvar) "ptr", mvar "ptr") [];
StgMVar_head(mvar) = tso;
#else
- ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr",
- StgMVar_head(mvar) "ptr") [];
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
- StgTSO_link(StgMVar_tail(mvar)) = CurrentTSO;
+ foreign "C" setTSOLink(MyCapability() "ptr", StgMVar_tail(mvar),
+ CurrentTSO);
}
- StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
+ StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
/* actually perform the takeMVar */
tso = StgMVar_head(mvar);
PerformTake(tso, R2);
- dirtyTSO(tso);
+ if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability(), tso);
+ }
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
StgMVar_head(mvar) = tso;
#else
- ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr") [];
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
/* actually perform the takeMVar */
tso = StgMVar_head(mvar);
PerformTake(tso, R2);
- dirtyTSO(tso);
+ if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability(), tso);
+ }
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
StgMVar_head(mvar) = tso;
#else
- ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr") [];
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
* macro in Schedule.h).
*/
#define APPEND_TO_BLOCKED_QUEUE(tso) \
- ASSERT(StgTSO_link(tso) == END_TSO_QUEUE); \
+ ASSERT(StgTSO__link(tso) == END_TSO_QUEUE); \
if (W_[blocked_queue_hd] == END_TSO_QUEUE) { \
W_[blocked_queue_hd] = tso; \
} else { \
- StgTSO_link(W_[blocked_queue_tl]) = tso; \
+ foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl], tso); \
} \
W_[blocked_queue_tl] = tso;
while:
if (t != END_TSO_QUEUE && StgTSO_block_info(t) < target) {
prev = t;
- t = StgTSO_link(t);
+ t = StgTSO__link(t);
goto while;
}
- StgTSO_link(CurrentTSO) = t;
+ StgTSO__link(CurrentTSO) = t;
if (prev == NULL) {
W_[sleeping_queue] = CurrentTSO;
} else {
- StgTSO_link(prev) = CurrentTSO;
+ foreign "C" setTSOLink(MyCapability() "ptr", prev, CurrentTSO) [];
}
jump stg_block_noregs;
#endif
static void removeFromQueues(Capability *cap, StgTSO *tso);
-static void blockedThrowTo (StgTSO *source, StgTSO *target);
+static void blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target);
static void performBlockedException (Capability *cap,
StgTSO *source, StgTSO *target);
// follow ThreadRelocated links in the target first
while (target->what_next == ThreadRelocated) {
- target = target->link;
+ target = target->_link;
// No, it might be a WHITEHOLE:
// ASSERT(get_itbl(target)->type == TSO);
}
// just moved this TSO.
if (target->what_next == ThreadRelocated) {
unlockTSO(target);
- target = target->link;
+ target = target->_link;
goto retry;
}
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
*out = target;
return THROWTO_BLOCKED;
}
info = lockClosure((StgClosure *)mvar);
if (target->what_next == ThreadRelocated) {
- target = target->link;
+ target = target->_link;
unlockClosure((StgClosure *)mvar,info);
goto retry;
}
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
lockClosure((StgClosure *)target);
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
unlockClosure((StgClosure *)mvar, info);
*out = target;
return THROWTO_BLOCKED; // caller releases TSO
} else {
- removeThreadFromMVarQueue(mvar, target);
+ removeThreadFromMVarQueue(cap, mvar, target);
raiseAsync(cap, target, exception, rtsFalse, NULL);
unblockOne(cap, target);
unlockClosure((StgClosure *)mvar, info);
if (target->flags & TSO_BLOCKEX) {
lockTSO(target);
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
RELEASE_LOCK(&sched_mutex);
*out = target;
return THROWTO_BLOCKED; // caller releases TSO
} else {
- removeThreadFromQueue(&blackhole_queue, target);
+ removeThreadFromQueue(cap, &blackhole_queue, target);
raiseAsync(cap, target, exception, rtsFalse, NULL);
unblockOne(cap, target);
RELEASE_LOCK(&sched_mutex);
goto retry;
}
if (target->what_next == ThreadRelocated) {
- target = target->link;
+ target = target->_link;
unlockTSO(target2);
goto retry;
}
if (target2->what_next == ThreadRelocated) {
- target->block_info.tso = target2->link;
+ target->block_info.tso = target2->_link;
unlockTSO(target2);
goto retry;
}
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
lockTSO(target);
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
unlockTSO(target2);
*out = target;
return THROWTO_BLOCKED;
} else {
- removeThreadFromQueue(&target2->blocked_exceptions, target);
+ removeThreadFromQueue(cap, &target2->blocked_exceptions, target);
raiseAsync(cap, target, exception, rtsFalse, NULL);
unblockOne(cap, target);
unlockTSO(target2);
}
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
*out = target;
return THROWTO_BLOCKED;
} else {
// thread is blocking exceptions, and block on its
// blocked_exception queue.
lockTSO(target);
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
*out = target;
return THROWTO_BLOCKED;
#endif
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
return THROWTO_BLOCKED;
} else {
removeFromQueues(cap,target);
// complex to achieve as there's no single lock on a TSO; see
// throwTo()).
static void
-blockedThrowTo (StgTSO *source, StgTSO *target)
+blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target)
{
debugTrace(DEBUG_sched, "throwTo: blocking on thread %lu", (unsigned long)target->id);
- source->link = target->blocked_exceptions;
+ setTSOLink(cap, source, target->blocked_exceptions);
target->blocked_exceptions = source;
- dirtyTSO(target); // we modified the blocked_exceptions queue
+ dirty_TSO(cap,target); // we modified the blocked_exceptions queue
source->block_info.tso = target;
write_barrier(); // throwTo_exception *must* be visible if BlockedOnException is.
goto done;
case BlockedOnMVar:
- removeThreadFromMVarQueue((StgMVar *)tso->block_info.closure, tso);
+ removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
goto done;
case BlockedOnBlackHole:
- removeThreadFromQueue(&blackhole_queue, tso);
+ removeThreadFromQueue(cap, &blackhole_queue, tso);
goto done;
case BlockedOnException:
// ASSERT(get_itbl(target)->type == TSO);
while (target->what_next == ThreadRelocated) {
- target = target->link;
+ target = target->_link;
}
- removeThreadFromQueue(&target->blocked_exceptions, tso);
+ removeThreadFromQueue(cap, &target->blocked_exceptions, tso);
goto done;
}
#if defined(mingw32_HOST_OS)
case BlockedOnDoProc:
#endif
- removeThreadFromDeQueue(&blocked_queue_hd, &blocked_queue_tl, tso);
+ removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
/* (Cooperatively) signal that the worker thread should abort
* the request.
goto done;
case BlockedOnDelay:
- removeThreadFromQueue(&sleeping_queue, tso);
+ removeThreadFromQueue(cap, &sleeping_queue, tso);
goto done;
#endif
}
done:
- tso->link = END_TSO_QUEUE;
+ tso->_link = END_TSO_QUEUE; // no write barrier reqd
tso->why_blocked = NotBlocked;
tso->block_info.closure = NULL;
appendToRunQueue(cap,tso);
#endif
// mark it dirty; we're about to change its stack.
- dirtyTSO(tso);
+ dirty_TSO(cap, tso);
sp = tso->sp;
#ifdef DEBUG_RETAINER
debugBelch("ThreadRelocated encountered in retainClosure()\n");
#endif
- c = (StgClosure *)((StgTSO *)c)->link;
+ c = (StgClosure *)((StgTSO *)c)->_link;
goto inner_loop;
}
break;
StgPtr stack_end = stack + stack_size;
if (tso->what_next == ThreadRelocated) {
- checkTSO(tso->link);
+ checkTSO(tso->_link);
return;
}
cap->in_haskell = rtsTrue;
- dirtyTSO(t);
+ dirty_TSO(cap,t);
#if defined(THREADED_RTS)
if (recent_activity == ACTIVITY_DONE_GC) {
// Check whether we have more threads on our run queue, or sparks
// in our pool, that we could hand to another Capability.
- if ((emptyRunQueue(cap) || cap->run_queue_hd->link == END_TSO_QUEUE)
+ if ((emptyRunQueue(cap) || cap->run_queue_hd->_link == END_TSO_QUEUE)
&& sparkPoolSizeCap(cap) < 2) {
return;
}
if (cap->run_queue_hd != END_TSO_QUEUE) {
prev = cap->run_queue_hd;
- t = prev->link;
- prev->link = END_TSO_QUEUE;
+ t = prev->_link;
+ prev->_link = END_TSO_QUEUE;
for (; t != END_TSO_QUEUE; t = next) {
- next = t->link;
- t->link = END_TSO_QUEUE;
+ next = t->_link;
+ t->_link = END_TSO_QUEUE;
if (t->what_next == ThreadRelocated
|| t->bound == task // don't move my bound thread
|| tsoLocked(t)) { // don't move a locked thread
- prev->link = t;
+ setTSOLink(cap, prev, t);
prev = t;
} else if (i == n_free_caps) {
pushed_to_all = rtsTrue;
i = 0;
// keep one for us
- prev->link = t;
+ setTSOLink(cap, prev, t);
prev = t;
} else {
debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
cap->run_queue_hd = cap->wakeup_queue_hd;
cap->run_queue_tl = cap->wakeup_queue_tl;
} else {
- cap->run_queue_tl->link = cap->wakeup_queue_hd;
+ setTSOLink(cap, cap->run_queue_tl, cap->wakeup_queue_hd);
cap->run_queue_tl = cap->wakeup_queue_tl;
}
cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE;
IF_DEBUG(sanity,
//debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
checkTSO(t));
- ASSERT(t->link == END_TSO_QUEUE);
+ ASSERT(t->_link == END_TSO_QUEUE);
// Shortcut if we're just switching evaluators: don't bother
// doing stack squeezing (which can be expensive), just run the
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
- next = t->link;
+ next = t->_link;
} else {
next = t->global_link;
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
- next = t->link;
+ next = t->_link;
} else {
next = t->global_link;
// don't allow threads to catch the ThreadKilled
debugTrace(DEBUG_sched,"deleting all threads");
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
- next = t->link;
+ next = t->_link;
} else {
next = t->global_link;
deleteThread(cap,t);
tso = task->suspended_tso;
task->suspended_tso = NULL;
- tso->link = END_TSO_QUEUE;
+ tso->_link = END_TSO_QUEUE; // no write barrier reqd
debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
if (tso->why_blocked == BlockedOnCCall) {
#endif
/* We might have GC'd, mark the TSO dirty again */
- dirtyTSO(tso);
+ dirty_TSO(cap,tso);
IF_DEBUG(sanity, checkTSO(tso));
* dead TSO's stack.
*/
tso->what_next = ThreadRelocated;
- tso->link = dest;
+ setTSOLink(cap,tso,dest);
tso->sp = (P_)&(tso->stack[tso->stack_size]);
tso->why_blocked = NotBlocked;
*prev = t;
any_woke_up = rtsTrue;
} else {
- prev = &t->link;
- t = t->link;
+ prev = &t->_link;
+ t = t->_link;
}
}
INLINE_HEADER void
appendToRunQueue (Capability *cap, StgTSO *tso)
{
- ASSERT(tso->link == END_TSO_QUEUE);
+ ASSERT(tso->_link == END_TSO_QUEUE);
if (cap->run_queue_hd == END_TSO_QUEUE) {
cap->run_queue_hd = tso;
} else {
- cap->run_queue_tl->link = tso;
+ setTSOLink(cap, cap->run_queue_tl, tso);
}
cap->run_queue_tl = tso;
}
INLINE_HEADER void
pushOnRunQueue (Capability *cap, StgTSO *tso)
{
- tso->link = cap->run_queue_hd;
+ setTSOLink(cap, tso, cap->run_queue_hd);
cap->run_queue_hd = tso;
if (cap->run_queue_tl == END_TSO_QUEUE) {
cap->run_queue_tl = tso;
{
StgTSO *t = cap->run_queue_hd;
ASSERT(t != END_TSO_QUEUE);
- cap->run_queue_hd = t->link;
- t->link = END_TSO_QUEUE;
+ cap->run_queue_hd = t->_link;
+ t->_link = END_TSO_QUEUE; // no write barrier req'd
if (cap->run_queue_hd == END_TSO_QUEUE) {
cap->run_queue_tl = END_TSO_QUEUE;
}
INLINE_HEADER void
appendToBlockedQueue(StgTSO *tso)
{
- ASSERT(tso->link == END_TSO_QUEUE);
+ ASSERT(tso->_link == END_TSO_QUEUE);
if (blocked_queue_hd == END_TSO_QUEUE) {
blocked_queue_hd = tso;
} else {
- blocked_queue_tl->link = tso;
+ setTSOLink(&MainCapability, blocked_queue_tl, tso);
}
blocked_queue_tl = tso;
}
INLINE_HEADER void
appendToWakeupQueue (Capability *cap, StgTSO *tso)
{
- ASSERT(tso->link == END_TSO_QUEUE);
+ ASSERT(tso->_link == END_TSO_QUEUE);
if (cap->wakeup_queue_hd == END_TSO_QUEUE) {
cap->wakeup_queue_hd = tso;
} else {
- cap->wakeup_queue_tl->link = tso;
+ setTSOLink(cap, cap->wakeup_queue_tl, tso);
}
cap->wakeup_queue_tl = tso;
}
#endif /* !IN_STG_CODE */
-INLINE_HEADER void
-dirtyTSO (StgTSO *tso)
-{
- tso->flags |= TSO_DIRTY;
-}
-
#endif /* SCHEDULE_H */
#endif
/* Put ourselves on the blackhole queue */
- StgTSO_link(CurrentTSO) = W_[blackhole_queue];
+ StgTSO__link(CurrentTSO) = W_[blackhole_queue];
W_[blackhole_queue] = CurrentTSO;
/* jot down why and on what closure we are blocked */
#endif
/* Put ourselves on the blackhole queue */
- StgTSO_link(CurrentTSO) = W_[blackhole_queue];
+ StgTSO__link(CurrentTSO) = W_[blackhole_queue];
W_[blackhole_queue] = CurrentTSO;
/* jot down why and on what closure we are blocked */
/* put a stop frame on the stack */
tso->sp -= sizeofW(StgStopFrame);
SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
- tso->link = END_TSO_QUEUE;
+ tso->_link = END_TSO_QUEUE;
// ToDo: check this
#if defined(GRAN)
-------------------------------------------------------------------------- */
void
-removeThreadFromQueue (StgTSO **queue, StgTSO *tso)
+removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
{
StgTSO *t, *prev;
prev = NULL;
- for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->link) {
+ for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->_link) {
if (t == tso) {
if (prev) {
- prev->link = t->link;
+ setTSOLink(cap,prev,t->_link);
} else {
- *queue = t->link;
+ *queue = t->_link;
}
return;
}
}
void
-removeThreadFromDeQueue (StgTSO **head, StgTSO **tail, StgTSO *tso)
+removeThreadFromDeQueue (Capability *cap,
+ StgTSO **head, StgTSO **tail, StgTSO *tso)
{
StgTSO *t, *prev;
prev = NULL;
- for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->link) {
+ for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
if (t == tso) {
if (prev) {
- prev->link = t->link;
+ setTSOLink(cap,prev,t->_link);
} else {
- *head = t->link;
+ *head = t->_link;
}
if (*tail == tso) {
if (prev) {
}
void
-removeThreadFromMVarQueue (StgMVar *mvar, StgTSO *tso)
+removeThreadFromMVarQueue (Capability *cap, StgMVar *mvar, StgTSO *tso)
{
- removeThreadFromDeQueue (&mvar->head, &mvar->tail, tso);
+ removeThreadFromDeQueue (cap, &mvar->head, &mvar->tail, tso);
}
/* ----------------------------------------------------------------------------
ASSERT(tso->why_blocked != NotBlocked);
tso->why_blocked = NotBlocked;
- next = tso->link;
- tso->link = END_TSO_QUEUE;
+ next = tso->_link;
+ tso->_link = END_TSO_QUEUE;
#if defined(THREADED_RTS)
if (tso->cap == cap || (!tsoLocked(tso) &&
for (i = 0; i < n_capabilities; i++) {
cap = &capabilities[i];
debugBelch("threads on capability %d:\n", cap->no);
- for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
+ for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
printThreadStatus(t);
}
}
printThreadStatus(t);
}
if (t->what_next == ThreadRelocated) {
- next = t->link;
+ next = t->_link;
} else {
next = t->global_link;
}
printThreadQueue(StgTSO *t)
{
nat i = 0;
- for (; t != END_TSO_QUEUE; t = t->link) {
+ for (; t != END_TSO_QUEUE; t = t->_link) {
printThreadStatus(t);
i++;
}
void awakenBlockedQueue (Capability *cap, StgTSO *tso);
#endif
-void removeThreadFromMVarQueue (StgMVar *mvar, StgTSO *tso);
-void removeThreadFromQueue (StgTSO **queue, StgTSO *tso);
-void removeThreadFromDeQueue (StgTSO **head, StgTSO **tail, StgTSO *tso);
+void removeThreadFromMVarQueue (Capability *cap, StgMVar *mvar, StgTSO *tso);
+void removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso);
+void removeThreadFromDeQueue (Capability *cap, StgTSO **head, StgTSO **tail, StgTSO *tso);
StgBool isThreadBound (StgTSO* tso);
while (sleeping_queue != END_TSO_QUEUE &&
(int)(ticks - sleeping_queue->block_info.target) >= 0) {
tso = sleeping_queue;
- sleeping_queue = tso->link;
+ sleeping_queue = tso->_link;
tso->why_blocked = NotBlocked;
- tso->link = END_TSO_QUEUE;
+ tso->_link = END_TSO_QUEUE;
IF_DEBUG(scheduler,debugBelch("Waking up sleeping thread %lu\n", (unsigned long)tso->id));
// MainCapability: this code is !THREADED_RTS
pushOnRunQueue(&MainCapability,tso);
FD_ZERO(&wfd);
for(tso = blocked_queue_hd; tso != END_TSO_QUEUE; tso = next) {
- next = tso->link;
+ next = tso->_link;
/* On FreeBSD FD_SETSIZE is unsigned. Cast it to signed int
* in order to switch off the 'comparison between signed and
prev = NULL;
if (select_succeeded || unblock_all) {
for(tso = blocked_queue_hd; tso != END_TSO_QUEUE; tso = next) {
- next = tso->link;
+ next = tso->_link;
switch (tso->why_blocked) {
case BlockedOnRead:
ready = unblock_all || FD_ISSET(tso->block_info.fd, &rfd);
if (ready) {
IF_DEBUG(scheduler,debugBelch("Waking up blocked thread %lu\n", (unsigned long)tso->id));
tso->why_blocked = NotBlocked;
- tso->link = END_TSO_QUEUE;
+ tso->_link = END_TSO_QUEUE;
pushOnRunQueue(&MainCapability,tso);
} else {
if (prev == NULL)
blocked_queue_hd = tso;
else
- prev->link = tso;
+ setTSOLink(&MainCapability, prev, tso);
prev = tso;
}
}
if (prev == NULL)
blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
else {
- prev->link = END_TSO_QUEUE;
+ prev->_link = END_TSO_QUEUE;
blocked_queue_tl = prev;
}
}
static StgPtr
thread_TSO (StgTSO *tso)
{
- thread_(&tso->link);
+ thread_(&tso->_link);
thread_(&tso->global_link);
if ( tso->why_blocked == BlockedOnMVar
info = get_itbl(q);
if (info->type == TSO &&
((StgTSO *)q)->what_next == ThreadRelocated) {
- q = (StgClosure *)((StgTSO *)q)->link;
+ q = (StgClosure *)((StgTSO *)q)->_link;
*p = q;
goto loop;
}
/* Deal with redirected TSOs (a TSO that's had its stack enlarged).
*/
if (tso->what_next == ThreadRelocated) {
- q = (StgClosure *)tso->link;
+ q = (StgClosure *)tso->_link;
*p = q;
goto loop;
}
case TSO:
if (((StgTSO *)q)->what_next == ThreadRelocated) {
- p = (StgClosure *)((StgTSO *)q)->link;
+ p = (StgClosure *)((StgTSO *)q)->_link;
continue;
}
return NULL;
ASSERT(get_itbl(t)->type == TSO);
switch (t->what_next) {
case ThreadRelocated:
- next = t->link;
+ next = t->_link;
*prev = next;
continue;
case ThreadKilled:
*/
{
StgTSO **pt;
- for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
+ for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->_link)) {
*pt = (StgTSO *)isAlive((StgClosure *)*pt);
ASSERT(*pt != NULL);
}
flag = rtsFalse;
prev = NULL;
- for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->link) {
+ for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->_link) {
// if the thread is not yet alive...
if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
// if the closure it is blocked on is either (a) a
}
tmp = t;
evacuate((StgClosure **)&tmp);
- if (prev) prev->link = t;
+ if (prev) prev->_link = t;
+ // no write barrier when on the blackhole queue,
+ // because we traverse the whole queue on every GC.
flag = rtsTrue;
}
}
Scavenge a TSO.
-------------------------------------------------------------------------- */
+// Evacuate a TSO's _link field during GC, except when the TSO is
+// blocked on a black hole, in which case the link is treated as a
+// weak pointer (see below).
+STATIC_INLINE void
+scavenge_TSO_link (StgTSO *tso)
+{
+    // We don't always chase the link field: TSOs on the blackhole
+    // queue are not automatically alive, so the link field is a
+    // "weak" pointer in that case.
+    if (tso->why_blocked != BlockedOnBlackHole) {
+        evacuate((StgClosure **)&tso->_link);
+    }
+}
+
static void
scavengeTSO (StgTSO *tso)
{
}
evacuate((StgClosure **)&tso->blocked_exceptions);
- // We don't always chase the link field: TSOs on the blackhole
- // queue are not automatically alive, so the link field is a
- // "weak" pointer in that case.
- if (tso->why_blocked != BlockedOnBlackHole) {
- evacuate((StgClosure **)&tso->link);
- }
-
// scavange current transaction record
evacuate((StgClosure **)&tso->trec);
if (gct->failed_to_evac) {
tso->flags |= TSO_DIRTY;
+ scavenge_TSO_link(tso);
} else {
tso->flags &= ~TSO_DIRTY;
+ scavenge_TSO_link(tso);
+ if (gct->failed_to_evac) {
+ tso->flags |= TSO_LINK_DIRTY;
+ } else {
+ tso->flags &= ~TSO_LINK_DIRTY;
+ }
}
gct->eager_promotion = saved_eager;
case TSO:
{
scavengeTSO((StgTSO*)p);
- gct->failed_to_evac = rtsTrue; // always on the mutable list
break;
}
case TSO:
{
scavengeTSO((StgTSO*)p);
- gct->failed_to_evac = rtsTrue; // always on the mutable list
break;
}
case TSO: {
StgTSO *tso = (StgTSO *)p;
if ((tso->flags & TSO_DIRTY) == 0) {
- // A clean TSO: we don't have to traverse its
- // stack. However, we *do* follow the link field:
- // we don't want to have to mark a TSO dirty just
- // because we put it on a different queue.
- if (tso->why_blocked != BlockedOnBlackHole) {
- evacuate((StgClosure **)&tso->link);
- }
- recordMutableGen_GC((StgClosure *)p,gen);
+ // Must be on the mutable list because its link
+ // field is dirty.
+ ASSERT(tso->flags & TSO_LINK_DIRTY);
+
+ scavenge_TSO_link(tso);
+ if (gct->failed_to_evac) {
+ recordMutableGen_GC((StgClosure *)p,gen);
+ gct->failed_to_evac = rtsFalse;
+ } else {
+ tso->flags &= ~TSO_LINK_DIRTY;
+ }
continue;
}
}
case TSO:
{
StgTSO *tso = (StgTSO *)p;
-
- gct->eager_promotion = rtsFalse;
- scavengeTSO(tso);
- gct->eager_promotion = saved_eager_promotion;
-
- if (gct->failed_to_evac) {
- tso->flags |= TSO_DIRTY;
- } else {
- tso->flags &= ~TSO_DIRTY;
- }
-
- gct->failed_to_evac = rtsTrue; // always on the mutable list
+ scavengeTSO(tso);
p += tso_sizeW(tso);
break;
}
}
}
+// Setting a TSO's link field with a write barrier.
+// It is *not* necessary to call this function when
+//    * setting the link field to END_TSO_QUEUE
+//    * putting a TSO on the blackhole_queue
+//    * setting the link field of the currently running TSO, as it
+//      will already be dirty.
+void
+setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
+{
+    bdescr *bd;
+    // If either dirty flag is set, the TSO has already been recorded
+    // on a mutable list, so we must not record it a second time.
+    if ((tso->flags & (TSO_DIRTY|TSO_LINK_DIRTY)) == 0) {
+        tso->flags |= TSO_LINK_DIRTY;
+        bd = Bdescr((StgPtr)tso);
+        // Only old-generation objects need to go on the mutable list;
+        // gen 0 is scanned in full at every GC anyway.
+        if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
+    }
+    tso->_link = target;
+}
+
+// Mark a TSO as dirty: called before modifying the TSO (e.g. its
+// stack), so that a TSO in an old generation is added to this
+// capability's mutable list the first time it is dirtied between GCs.
+void
+dirty_TSO (Capability *cap, StgTSO *tso)
+{
+    bdescr *bd;
+    // Record on the mutable list only on the clean->dirty transition.
+    if ((tso->flags & TSO_DIRTY) == 0) {
+        tso->flags |= TSO_DIRTY;
+        bd = Bdescr((StgPtr)tso);
+        // Gen-0 objects are always scanned; no mutable-list entry needed.
+        if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
+    }
+}
+
/*
This is the write barrier for MVARs. An MVAR_CLEAN objects is not
on the mutable list; a MVAR_DIRTY is. When written to, a