X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSchedule.c;h=f5cb5686005d303ae2eedb652a4c9e152e7b4f2e;hb=02c76a04093613960f8016fba07a8a6e59e7cd3b;hp=4343a149cc87aaf5c3dd2656889e9e0b7ed27a05;hpb=784e214dd44eba39f4c34936a27e6cc82948205c;p=ghc-hetmet.git

diff --git a/rts/Schedule.c b/rts/Schedule.c
index 4343a14..f5cb568 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -951,14 +951,38 @@ static void
 scheduleProcessInbox (Capability *cap USED_IF_THREADS)
 {
 #if defined(THREADED_RTS)
-    Message *m;
+    Message *m, *next;
+    int r;
 
     while (!emptyInbox(cap)) {
-        ACQUIRE_LOCK(&cap->lock);
+        if (cap->r.rCurrentNursery->link == NULL ||
+            g0->n_new_large_words >= large_alloc_lim) {
+            scheduleDoGC(cap, cap->running_task, rtsFalse);
+        }
+
+        // don't use a blocking acquire; if the lock is held by
+        // another thread then just carry on. This seems to avoid
+        // getting stuck in a message ping-pong situation with other
+        // processors. We'll check the inbox again later anyway.
+        //
+        // We should really use a more efficient queue data structure
+        // here. The trickiness is that we must ensure a Capability
+        // never goes idle if the inbox is non-empty, which is why we
+        // use cap->lock (cap->lock is released as the last thing
+        // before going idle; see Capability.c:releaseCapability()).
+        r = TRY_ACQUIRE_LOCK(&cap->lock);
+        if (r != 0) return;
+
         m = cap->inbox;
-        cap->inbox = m->link;
+        cap->inbox = (Message*)END_TSO_QUEUE;
+
         RELEASE_LOCK(&cap->lock);
-        executeMessage(cap, (Message *)m);
+
+        while (m != (Message*)END_TSO_QUEUE) {
+            next = m->link;
+            executeMessage(cap, m);
+            m = next;
+        }
     }
 #endif
 }
@@ -1850,9 +1874,9 @@ scheduleThread(Capability *cap, StgTSO *tso)
 void
 scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
 {
-#if defined(THREADED_RTS)
     tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
                               // move this thread from now on.
+#if defined(THREADED_RTS)
     cpu %= RtsFlags.ParFlags.nNodes;
     if (cpu == cap->no) {
         appendToRunQueue(cap,tso);
@@ -2045,6 +2069,16 @@ freeScheduler( void )
 #endif
 }
 
+void markScheduler (evac_fn evac USED_IF_NOT_THREADS,
+                    void *user USED_IF_NOT_THREADS)
+{
+#if !defined(THREADED_RTS)
+    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
+    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
+    evac(user, (StgClosure **)(void *)&sleeping_queue);
+#endif
+}
+
 /* -----------------------------------------------------------------------------
    performGC
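
The heart of the first hunk is a lock-avoidance pattern: rather than popping one message at a time while holding cap->lock, scheduleProcessInbox now try-locks, detaches the whole inbox with a single pointer swap, releases the lock, and only then executes the messages. Below is a minimal standalone C sketch of that pattern using plain pthreads; it is not GHC RTS code, the names msg_t, inbox_t, inbox_post, inbox_drain and handle_msg are invented for illustration, and it drains once rather than looping and checking for pending GC as the patch does.

/* Standalone sketch of the "try-lock, detach, drain outside the lock"
 * pattern from the first hunk.  All identifiers are hypothetical. */

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct msg {
    struct msg *link;
    int         payload;
} msg_t;

typedef struct {
    pthread_mutex_t lock;   /* stands in for cap->lock  */
    msg_t          *head;   /* stands in for cap->inbox */
} inbox_t;

static void handle_msg(msg_t *m)   /* stands in for executeMessage */
{
    printf("message %d\n", m->payload);
}

/* Producer side: push a message under the lock. */
static void inbox_post(inbox_t *ib, msg_t *m)
{
    pthread_mutex_lock(&ib->lock);
    m->link  = ib->head;
    ib->head = m;
    pthread_mutex_unlock(&ib->lock);
}

/* Consumer side: non-blocking drain.  If the lock is contended we give up
 * and let the caller come back later, mirroring TRY_ACQUIRE_LOCK / return
 * in the patch. */
static void inbox_drain(inbox_t *ib)
{
    if (pthread_mutex_trylock(&ib->lock) != 0)
        return;                       /* someone else holds the lock  */

    msg_t *m = ib->head;              /* detach the whole list ...    */
    ib->head = NULL;
    pthread_mutex_unlock(&ib->lock);  /* ... and drop the lock early  */

    while (m != NULL) {               /* process outside the lock     */
        msg_t *next = m->link;
        handle_msg(m);
        free(m);
        m = next;
    }
}

int main(void)
{
    inbox_t ib = { PTHREAD_MUTEX_INITIALIZER, NULL };

    for (int i = 0; i < 3; i++) {
        msg_t *m = malloc(sizeof *m);
        m->payload = i;
        inbox_post(&ib, m);
    }
    inbox_drain(&ib);
    return 0;
}

The swap keeps the critical section down to two memory writes, and running the handlers (executeMessage in the patch, handle_msg here) outside the lock avoids holding cap->lock across arbitrary message processing; the try-lock means a contended capability simply retries on a later pass through the scheduler, which is the ping-pong avoidance the comment in the patch describes.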