globalWorkToDo (void)
{
return blackholes_need_checking
- || interrupted
+ || sched_state >= SCHED_INTERRUPTING
;
}
#endif
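// The ">=" comparison above relies on the scheduler states forming an
// ordered progression.  A minimal sketch of the ordering this change
// assumes (values are illustrative; the real definitions are not shown
// in this excerpt):

#define SCHED_RUNNING       0  /* normal operation                  */
#define SCHED_INTERRUPTING  1  /* an interrupt has been received    */
#define SCHED_SHUTTING_DOWN 2  /* final shutdown of the scheduler   */

// With this ordering, sched_state >= SCHED_INTERRUPTING holds both
// while interrupting and while shutting down, covering the cases the
// old boolean "interrupted" flag expressed.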
// can't be sure that we have the right capability: the thread
// might be woken up on some other capability, and task->cap
// could change under our feet.
- return (!emptyRunQueue(cap) && cap->run_queue_hd->bound == task);
+ return !emptyRunQueue(cap) && cap->run_queue_hd->bound == task;
} else {
- // A vanilla worker task runs if either (a) there is a
- // lightweight thread at the head of the run queue, or (b)
- // there are sparks to execute, or (c) there is some other
- // global condition to check, such as threads blocked on
- // blackholes.
- return ((!emptyRunQueue(cap) && cap->run_queue_hd->bound == NULL)
- || !emptySparkPoolCap(cap)
- || globalWorkToDo());
+        // A vanilla worker task runs if there is a lightweight
+        // (unbound) thread at the head of the run queue, or if the
+        // run queue is empty and there is other work available:
+        // sparks to execute, or some global condition to check,
+        // such as threads blocked on blackholes.
+        if (emptyRunQueue(cap)) {
+            return !emptySparkPoolCap(cap) || globalWorkToDo();
+        } else {
+            return cap->run_queue_hd->bound == NULL;
+        }
}
}
#endif
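// Note: the rewrite above is not purely cosmetic.  When the run queue
// is non-empty but headed by a *bound* thread, the old expression
// could still return true via the spark-pool or global-work checks,
// whereas the new code returns false, presumably so the capability is
// handed to the bound thread's task rather than kept by this worker.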
-    // is interrupted, we only create a worker task if there
-    // are threads that need to be completed. If the system is
-    // shutting down, we never create a new worker.
-    if (!shutting_down_scheduler) {
+    // is shutting down, we only create a worker task if there
+    // are threads that still need to be completed.
+    if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
IF_DEBUG(scheduler,
sched_belch("starting new worker on capability %d", cap->no));
startWorkerTask(cap, workerStart);
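// The condition a few lines above can be read as a two-case rule.  A
// sketch of an equivalent helper (canStartWorker is hypothetical and
// not part of this patch):

static rtsBool canStartWorker (Capability *cap)
{
    if (sched_state < SCHED_SHUTTING_DOWN) {
        return rtsTrue;         // running or interrupting: OK to start
    }
    // shutting down: start a worker only to finish queued threads
    return !emptyRunQueue(cap);
}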
{
nat i;
- ASSERT(interrupted && shutting_down_scheduler);
+ ASSERT(sched_state == SCHED_SHUTTING_DOWN);
task->cap = cap;
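    // Regarding the assertion above: the single equality test replaces
    // the old pair of flags because, with the ordered scheduler states,
    // SCHED_SHUTTING_DOWN is only reached after SCHED_INTERRUPTING, so
    // "interrupted && shutting down" collapses into
    // sched_state == SCHED_SHUTTING_DOWN.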