* Send pending messages (PARALLEL_HASKELL only)
* ------------------------------------------------------------------------- */
+#if defined(PARALLEL_HASKELL)
static StgTSO *
scheduleSendPendingMessages(void)
{
-#if defined(PARALLEL_HASKELL)
# if defined(PAR) // global Mem.Mgmt., omit for now
if (PendingFetches != END_BF_QUEUE) {
// packets which have become too old...
sendOldBuffers();
}
-#endif
}
+#endif
/* ----------------------------------------------------------------------------
* Activate spark threads (PARALLEL_HASKELL only)
static Capability *
scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
{
- StgTSO *t;
rtsBool heap_census;
#ifdef THREADED_RTS
- static volatile StgWord waiting_for_gc;
+    /* static volatile StgWord waiting_for_gc;
+       is now defined in Capability.c */
rtsBool was_waiting;
nat i;
#endif
// the other tasks to sleep and stay asleep.
//
+ /* Other capabilities are prevented from running yet more Haskell
+ threads if waiting_for_gc is set. Tested inside
+ yieldCapability() and releaseCapability() in Capability.c */
+
was_waiting = cas(&waiting_for_gc, 0, 1);
if (was_waiting) {
do {
if (cpu == cap->no) {
appendToRunQueue(cap,tso);
} else {
- migrateThreadToCapability_lock(&capabilities[cpu],tso);
+ wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
}
#else
appendToRunQueue(cap,tso);
* ------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
-void
+void OSThreadProcAttr
workerStart(Task *task)
{
Capability *cap;
threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
{
bdescr *bd, *new_bd;
- lnat new_tso_size_w, tso_size_w;
+ lnat free_w, tso_size_w;
StgTSO *new_tso;
tso_size_w = tso_sizeW(tso);
// while we are moving the TSO:
lockClosure((StgClosure *)tso);
- new_tso_size_w = round_to_mblocks(tso_size_w/2);
-
- debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
- tso->id, tso_size_w, new_tso_size_w);
+ // this is the number of words we'll free
+ free_w = round_to_mblocks(tso_size_w/2);
bd = Bdescr((StgPtr)tso);
- new_bd = splitLargeBlock(bd, new_tso_size_w / BLOCK_SIZE_W);
- new_bd->free = bd->free;
+ new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
bd->free = bd->start + TSO_STRUCT_SIZEW;
new_tso = (StgTSO *)new_bd->start;
memcpy(new_tso,tso,TSO_STRUCT_SIZE);
- new_tso->stack_size = new_tso_size_w - TSO_STRUCT_SIZEW;
+ new_tso->stack_size = new_bd->free - new_tso->stack;
+
+ debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
+ (long)tso->id, tso_size_w, tso_sizeW(new_tso));
tso->what_next = ThreadRelocated;
tso->_link = new_tso; // no write barrier reqd: same generation
if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
IF_DEBUG(sanity,checkTSO(t));
t = unblockOne(cap, t);
- // urk, the threads migrate to the current capability
- // here, but we'd like to keep them on the original one.
*prev = t;
any_woke_up = rtsTrue;
} else {