diff --git a/rts/Schedule.c b/rts/Schedule.c
index 94aac6c..3f42814 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -969,10 +969,10 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
  * Send pending messages (PARALLEL_HASKELL only)
  * ------------------------------------------------------------------------- */
 
+#if defined(PARALLEL_HASKELL)
 static StgTSO *
 scheduleSendPendingMessages(void)
 {
-#if defined(PARALLEL_HASKELL)
 
 # if defined(PAR) // global Mem.Mgmt., omit for now
     if (PendingFetches != END_BF_QUEUE) {
@@ -985,8 +985,8 @@ scheduleSendPendingMessages(void)
         // packets which have become too old...
         sendOldBuffers();
     }
-#endif
 }
+#endif
 
 /* ----------------------------------------------------------------------------
  * Activate spark threads (PARALLEL_HASKELL only)
@@ -1402,10 +1402,10 @@ scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
 static Capability *
 scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
 {
-    StgTSO *t;
     rtsBool heap_census;
 #ifdef THREADED_RTS
-    static volatile StgWord waiting_for_gc;
+    /* extern static volatile StgWord waiting_for_gc;
+       lives inside capability.c */
     rtsBool was_waiting;
     nat i;
 #endif
@@ -1422,6 +1422,10 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     // the other tasks to sleep and stay asleep.
     //
 
+    /* Other capabilities are prevented from running yet more Haskell
+       threads if waiting_for_gc is set. Tested inside
+       yieldCapability() and releaseCapability() in Capability.c */
+
     was_waiting = cas(&waiting_for_gc, 0, 1);
     if (was_waiting) {
         do {
@@ -1866,7 +1870,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
         appendToRunQueue(cap,tso);
     } else {
-        migrateThreadToCapability_lock(&capabilities[cpu],tso);
+        wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
     }
 #else
     appendToRunQueue(cap,tso);
@@ -1908,7 +1912,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
  * ------------------------------------------------------------------------- */
 
 #if defined(THREADED_RTS)
-void
+void OSThreadProcAttr
 workerStart(Task *task)
 {
     Capability *cap;
@@ -2188,7 +2192,7 @@ static StgTSO *
 threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
 {
     bdescr *bd, *new_bd;
-    lnat new_tso_size_w, tso_size_w;
+    lnat free_w, tso_size_w;
     StgTSO *new_tso;
 
     tso_size_w = tso_sizeW(tso);
@@ -2203,19 +2207,19 @@ threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
     // while we are moving the TSO:
     lockClosure((StgClosure *)tso);
 
-    new_tso_size_w = round_to_mblocks(tso_size_w/2);
-
-    debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
-               tso->id, tso_size_w, new_tso_size_w);
+    // this is the number of words we'll free
+    free_w = round_to_mblocks(tso_size_w/2);
 
     bd = Bdescr((StgPtr)tso);
-    new_bd = splitLargeBlock(bd, new_tso_size_w / BLOCK_SIZE_W);
-    new_bd->free = bd->free;
+    new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
     bd->free = bd->start + TSO_STRUCT_SIZEW;
 
     new_tso = (StgTSO *)new_bd->start;
     memcpy(new_tso,tso,TSO_STRUCT_SIZE);
-    new_tso->stack_size = new_tso_size_w - TSO_STRUCT_SIZEW;
+    new_tso->stack_size = new_bd->free - new_tso->stack;
+
+    debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
+               (long)tso->id, tso_size_w, tso_sizeW(new_tso));
 
     tso->what_next = ThreadRelocated;
     tso->_link = new_tso; // no write barrier reqd: same generation
@@ -2307,8 +2311,6 @@ checkBlackHoles (Capability *cap)
         if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
             IF_DEBUG(sanity,checkTSO(t));
             t = unblockOne(cap, t);
-            // urk, the threads migrate to the current capability
-            // here, but we'd like to keep them on the original one.
             *prev = t;
             any_woke_up = rtsTrue;
         } else {
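
A note on the waiting_for_gc handshake added above: as the new comment says, other
capabilities are prevented from running further Haskell threads while the flag is
set, and the flag is tested inside yieldCapability() and releaseCapability() in
Capability.c. The following is a minimal, self-contained sketch of that pattern
(an illustration only, not RTS code), assuming C11 atomics in place of the RTS's
own cas() on an StgWord:

    /* Sketch of the waiting_for_gc handshake: the first task to flip
     * the flag leads the GC; any later task sees was_waiting != 0 and
     * backs off, yielding its capability until the GC completes. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint waiting_for_gc;

    /* Mirrors was_waiting = cas(&waiting_for_gc, 0, 1): returns the
     * value the flag held before our attempt. */
    static unsigned try_initiate_gc(void)
    {
        unsigned expected = 0;
        atomic_compare_exchange_strong(&waiting_for_gc, &expected, 1);
        return expected;   /* 0 => we initiated; 1 => GC already pending */
    }

    int main(void)
    {
        if (try_initiate_gc()) {
            printf("GC already pending: yield this capability\n");
        } else {
            printf("this task leads the GC\n");
            atomic_store(&waiting_for_gc, 0);  /* GC done: clear the flag */
        }
        return 0;
    }

This is why scheduleDoGC only proceeds to stop the world when was_waiting is zero:
a losing task does not initiate a second collection, it simply loops and yields
until the winner's GC has finished.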