t->saved_winerror = GetLastError();
#endif
- traceEventStopThread(cap, t, ret);
+ if (ret == ThreadBlocked) {
+ if (t->why_blocked == BlockedOnBlackHole) {
+ StgTSO *owner = blackHoleOwner(t->block_info.bh->bh);
+ traceEventStopThread(cap, t, t->why_blocked + 6,
+ owner != NULL ? owner->id : 0);
+ } else {
+ traceEventStopThread(cap, t, t->why_blocked + 6, 0);
+ }
+ } else {
+ traceEventStopThread(cap, t, ret, 0);
+ }
ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
ASSERT(t->cap == cap);
scheduleProcessInbox (Capability *cap USED_IF_THREADS)
{
#if defined(THREADED_RTS)
- Message *m;
+ Message *m, *next;
+ int r;
while (!emptyInbox(cap)) {
- ACQUIRE_LOCK(&cap->lock);
+ // Do a GC first if the current nursery block is the last one or
+ // we have exceeded the large-object allocation limit --
+ // presumably because executing messages below can allocate
+ // (TODO confirm against executeMessage).
+ if (cap->r.rCurrentNursery->link == NULL ||
+ g0->n_new_large_words >= large_alloc_lim) {
+ scheduleDoGC(cap, cap->running_task, rtsFalse);
+ }
+
+ // don't use a blocking acquire; if the lock is held by
+ // another thread then just carry on. This seems to avoid
+ // getting stuck in a message ping-pong situation with other
+ // processors. We'll check the inbox again later anyway.
+ //
+ // We should really use a more efficient queue data structure
+ // here. The trickiness is that we must ensure a Capability
+ // never goes idle if the inbox is non-empty, which is why we
+ // use cap->lock (cap->lock is released as the last thing
+ // before going idle; see Capability.c:releaseCapability()).
+ r = TRY_ACQUIRE_LOCK(&cap->lock);
+ if (r != 0) return;
+
m = cap->inbox;
- cap->inbox = m->link;
+ // Detach the whole inbox in one step so the lock is held only
+ // briefly; the batch is then executed outside the lock.
+ cap->inbox = (Message*)END_TSO_QUEUE;
+
RELEASE_LOCK(&cap->lock);
- executeMessage(cap, (Message *)m);
+
+ // Drain the detached batch. executeMessage may itself append
+ // new messages to cap->inbox; the outer while picks those up.
+ while (m != (Message*)END_TSO_QUEUE) {
+ next = m->link;
+ executeMessage(cap, m);
+ m = next;
+ }
}
#endif
}
recent_activity = ACTIVITY_YES;
}
+ if (heap_census) {
+ debugTrace(DEBUG_sched, "performing heap census");
+ heapCensus();
+ performHeapProfile = rtsFalse;
+ }
+
#if defined(THREADED_RTS)
if (gc_type == PENDING_GC_PAR)
{
}
#endif
- if (heap_census) {
- debugTrace(DEBUG_sched, "performing heap census");
- heapCensus();
- performHeapProfile = rtsFalse;
- }
-
if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
// GC set the heap_overflow flag, so we should proceed with
// an orderly shutdown now. Ultimately we want the main
task = cap->running_task;
tso = cap->r.rCurrentTSO;
- traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL);
+ traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL, 0);
// XXX this might not be necessary --SDM
tso->what_next = ThreadRunGHC;
void
scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
{
-#if defined(THREADED_RTS)
tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
// move this thread from now on.
+#if defined(THREADED_RTS)
cpu %= RtsFlags.ParFlags.nNodes;
if (cpu == cap->no) {
appendToRunQueue(cap,tso);
#endif
}
+// Evacuate the scheduler's global thread queues so the GC retains
+// them. In the non-threaded RTS, blocked_queue_hd/tl and
+// sleeping_queue are file-scope globals (not reachable via a
+// Capability -- NOTE(review): confirm), so they must be treated as
+// extra roots here. In THREADED_RTS this is a no-op.
+void markScheduler (evac_fn evac USED_IF_NOT_THREADS,
+ void *user USED_IF_NOT_THREADS)
+{
+#if !defined(THREADED_RTS)
+ evac(user, (StgClosure **)(void *)&blocked_queue_hd);
+ evac(user, (StgClosure **)(void *)&blocked_queue_tl);
+ evac(user, (StgClosure **)(void *)&sleeping_queue);
+#endif
+}
+
/* -----------------------------------------------------------------------------
performGC
return CATCH_STM_FRAME;
case UNDERFLOW_FRAME:
+ tso->stackobj->sp = p;
threadStackUnderflow(cap,tso);
p = tso->stackobj->sp;
continue;