X-Git-Url: http://git.megacz.com/?p=ghc-hetmet.git;a=blobdiff_plain;f=rts%2FSchedule.c;h=9636223836296e3a457ed97358bd09724a59bf18;hp=4343a149cc87aaf5c3dd2656889e9e0b7ed27a05;hb=febf1ced754a3996ac1a5877dcded87828560d1c;hpb=784e214dd44eba39f4c34936a27e6cc82948205c

diff --git a/rts/Schedule.c b/rts/Schedule.c
index 4343a14..9636223 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -951,14 +951,38 @@ static void
 scheduleProcessInbox (Capability *cap USED_IF_THREADS)
 {
 #if defined(THREADED_RTS)
-    Message *m;
+    Message *m, *next;
+    int r;
 
     while (!emptyInbox(cap)) {
-        ACQUIRE_LOCK(&cap->lock);
+        if (cap->r.rCurrentNursery->link == NULL ||
+            g0->n_new_large_words >= large_alloc_lim) {
+            scheduleDoGC(cap, cap->running_task, rtsFalse);
+        }
+
+        // don't use a blocking acquire; if the lock is held by
+        // another thread then just carry on. This seems to avoid
+        // getting stuck in a message ping-pong situation with other
+        // processors. We'll check the inbox again later anyway.
+        //
+        // We should really use a more efficient queue data structure
+        // here. The trickiness is that we must ensure a Capability
+        // never goes idle if the inbox is non-empty, which is why we
+        // use cap->lock (cap->lock is released as the last thing
+        // before going idle; see Capability.c:releaseCapability()).
+        r = TRY_ACQUIRE_LOCK(&cap->lock);
+        if (r != 0) return;
+
         m = cap->inbox;
-        cap->inbox = m->link;
+        cap->inbox = (Message*)END_TSO_QUEUE;
+
         RELEASE_LOCK(&cap->lock);
-        executeMessage(cap, (Message *)m);
+
+        while (m != (Message*)END_TSO_QUEUE) {
+            next = m->link;
+            executeMessage(cap, m);
+            m = next;
+        }
     }
 #endif
 }
@@ -1423,6 +1447,12 @@ delete_threads_and_gc:
         recent_activity = ACTIVITY_YES;
     }
 
+    if (heap_census) {
+        debugTrace(DEBUG_sched, "performing heap census");
+        heapCensus();
+        performHeapProfile = rtsFalse;
+    }
+
 #if defined(THREADED_RTS)
     if (gc_type == PENDING_GC_PAR)
     {
@@ -1430,12 +1460,6 @@ delete_threads_and_gc:
         releaseGCThreads(cap);
     }
 #endif
-    if (heap_census) {
-        debugTrace(DEBUG_sched, "performing heap census");
-        heapCensus();
-        performHeapProfile = rtsFalse;
-    }
-
     if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
         // GC set the heap_overflow flag, so we should proceed with
         // an orderly shutdown now. Ultimately we want the main
@@ -1850,9 +1874,9 @@ scheduleThread(Capability *cap, StgTSO *tso)
 void
 scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
 {
-#if defined(THREADED_RTS)
     tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
                               // move this thread from now on.
+#if defined(THREADED_RTS)
     cpu %= RtsFlags.ParFlags.nNodes;
     if (cpu == cap->no) {
         appendToRunQueue(cap,tso);
@@ -2045,6 +2069,16 @@ freeScheduler( void )
 #endif
 }
 
+void markScheduler (evac_fn evac USED_IF_NOT_THREADS,
+                    void *user USED_IF_NOT_THREADS)
+{
+#if !defined(THREADED_RTS)
+    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
+    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
+    evac(user, (StgClosure **)(void *)&sleeping_queue);
+#endif
+}
+
 /* -----------------------------------------------------------------------------
    performGC
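
The new scheduleProcessInbox combines two ideas: a non-blocking TRY_ACQUIRE_LOCK, so a contended lock is simply skipped and retried on a later pass, and detaching the entire inbox while the lock is held so the messages can be executed after it is released. Below is a standalone sketch of the same detach-and-drain pattern, assuming a POSIX mutex and a hypothetical msg type in place of the RTS's Capability lock and Message list; it illustrates the pattern, it is not RTS code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct msg {               /* hypothetical message type */
    int payload;
    struct msg *link;
} msg;

static pthread_mutex_t inbox_lock = PTHREAD_MUTEX_INITIALIZER;
static msg *inbox = NULL;          /* NULL plays the role of END_TSO_QUEUE */

/* Sender side: push one message, briefly holding the lock. */
static void post_message(msg *m)
{
    pthread_mutex_lock(&inbox_lock);
    m->link = inbox;
    inbox = m;
    pthread_mutex_unlock(&inbox_lock);
}

/* Receiver side, mirroring the patched scheduleProcessInbox: use a
 * non-blocking acquire, and if the lock is busy just return; the
 * caller is expected to come back on its next iteration. */
static void process_inbox(void)
{
    msg *m, *next;

    if (pthread_mutex_trylock(&inbox_lock) != 0)
        return;                    /* lock held elsewhere: carry on */

    m = inbox;                     /* detach the whole list ...     */
    inbox = NULL;
    pthread_mutex_unlock(&inbox_lock);

    while (m != NULL) {            /* ... and drain it unlocked     */
        next = m->link;
        printf("executing message %d\n", m->payload);
        free(m);
        m = next;
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        msg *m = malloc(sizeof *m);
        m->payload = i;
        post_message(m);
    }
    process_inbox();               /* drains all three messages */
    return 0;
}

Draining outside the lock keeps the critical section down to two pointer moves, so a sender is never blocked behind message execution.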
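
In the scheduleThreadOn hunk, moving the TSO_LOCKED assignment above the #if means the affinity flag is now recorded in non-threaded builds as well, and cpu %= RtsFlags.ParFlags.nNodes wraps any requested CPU number onto a valid capability index. A tiny illustration of the wrap-around mapping, with a made-up capability count in place of the RTS flag:

#include <stdio.h>

int main(void)
{
    unsigned int n_caps = 4;                 /* stand-in for RtsFlags.ParFlags.nNodes */
    unsigned int requested[] = { 0, 3, 5, 11 };

    for (int i = 0; i < 4; i++)
        printf("cpu %2u -> capability %u\n", /* 5 -> 1, 11 -> 3 */
               requested[i], requested[i] % n_caps);
    return 0;
}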
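
markScheduler, added in the last hunk, registers the scheduler's own queue heads (blocked_queue_hd, blocked_queue_tl, sleeping_queue) as extra GC roots by passing each root's address to an evacuate callback supplied by the collector. A minimal sketch of that callback style of root registration, with a hypothetical obj type and a counting "collector" standing in for StgClosure and the RTS GC:

#include <stdio.h>

typedef struct obj { int live; } obj;              /* hypothetical heap object */
typedef void (*evac_fn)(void *user, obj **root);   /* same shape as the RTS's evac_fn */

/* Module-private roots, analogous to the scheduler's queues. */
static obj *blocked_queue_hd, *blocked_queue_tl, *sleeping_queue;

/* The module hands its roots to the collector; it never needs to know
 * what the collector does with them (mark, copy, update, ...). */
static void mark_scheduler(evac_fn evac, void *user)
{
    evac(user, &blocked_queue_hd);
    evac(user, &blocked_queue_tl);
    evac(user, &sleeping_queue);
}

/* A toy collector callback that just marks whatever it is handed. */
static void mark_one(void *user, obj **root)
{
    int *count = user;
    if (*root != NULL) {
        (*root)->live = 1;
        (*count)++;
    }
}

int main(void)
{
    obj a = {0}, b = {0};
    blocked_queue_hd = &a;
    blocked_queue_tl = &a;      /* head == tail: one-element queue */
    sleeping_queue   = &b;

    int marked = 0;
    mark_scheduler(mark_one, &marked);
    printf("marked %d root slots\n", marked);   /* prints 3 */
    return 0;
}

Under THREADED_RTS the body compiles away, because those blocked/sleeping queues exist only in the non-threaded RTS, where the scheduler itself multiplexes I/O waits.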