*/
rtsBool blackholes_need_checking = rtsFalse;
-/* flag set by signal handler to precipitate a context switch
- * LOCK: none (just an advisory flag)
- */
-int context_switch = 0;
-
/* flag that tracks whether we have done any execution in this time slice.
* LOCK: currently none, perhaps we should lock (but needs to be
* updated in the fast path of the scheduler).
static void scheduleSendPendingMessages(void);
static void scheduleActivateSpark(Capability *cap);
#endif
-static void schedulePostRunThread(StgTSO *t);
+static void schedulePostRunThread(Capability *cap, StgTSO *t);
static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
static void scheduleHandleStackOverflow( Capability *cap, Task *task,
StgTSO *t);
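
For context: this patch relies on a per-Capability flag and a helper that sets
it on every capability, neither of which appears in this excerpt. A minimal
sketch consistent with how they are used below; the exact field placement and
wording are assumptions, not part of this diff:

    /* Capability.h (sketch): the flag moves into the Capability, so
     * each scheduler loop checks and clears only its own copy. */
    struct Capability_ {
        /* ... existing fields ... */
        int context_switch;   /* advisory; unlocked, like the old global */
    };

    /* Capability.c (sketch): request a context switch on every
     * capability, replacing the old global `context_switch = 1`. */
    void
    setContextSwitches (void)
    {
        nat i;
        for (i = 0; i < n_capabilities; i++) {
            capabilities[i].context_switch = 1;
        }
    }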
*/
if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
&& !emptyThreadQueues(cap)) {
- context_switch = 1;
+ cap->context_switch = 1;
}
run_thread:
CCCS = CCS_SYSTEM;
#endif
- schedulePostRunThread(t);
+ schedulePostRunThread(cap,t);
t = threadStackUnderflow(task,t);
* ------------------------------------------------------------------------- */
static void
-schedulePostRunThread (StgTSO *t)
+schedulePostRunThread (Capability *cap, StgTSO *t)
{
// We have to be able to catch transactions that are in an
// infinite loop as a result of seeing an inconsistent view of
// ATOMICALLY_FRAME, aborting the (nested)
// transaction, and saving the stack of any
// partially-evaluated thunks on the heap.
- throwToSingleThreaded_(&capabilities[0], t,
- NULL, rtsTrue, NULL);
+ throwToSingleThreaded_(cap, t, NULL, rtsTrue, NULL);
ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
}
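
The call above delivers no exception (exception = NULL) and stops at the
enclosing ATOMICALLY_FRAME (stop_at_atomically = rtsTrue). For reference, the
prototype is roughly as below; the parameter names and the type of stop_here
are inferred from the call site, not taken from this diff:

    /* RaiseAsync.h (sketch): unwind tso's stack on cap, optionally
     * delivering `exception`; stop at the first ATOMICALLY_FRAME when
     * stop_at_atomically is true, or at `stop_here` if given. */
    void throwToSingleThreaded_ (Capability *cap, StgTSO *tso,
                                 StgClosure *exception,
                                 rtsBool stop_at_atomically,
                                 StgUpdateFrame *stop_here);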
"--<< thread %ld (%s) stopped: HeapOverflow",
(long)t->id, whatNext_strs[t->what_next]);
- if (context_switch) {
+ if (cap->context_switch) {
// Sometimes we miss a context switch, e.g. when calling
// primitives in a tight loop: MAYBE_GC() doesn't check the
// context-switch flag, and we end up waiting for a GC.
// See #1984, and concurrent/should_run/1984
- context_switch = 0;
+ cap->context_switch = 0;
addToRunQueue(cap,t);
} else {
pushOnRunQueue(cap,t);
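
The branch above matters for fairness: addToRunQueue appends, so a thread
whose slice has expired goes to the back and other runnable threads get a
turn first, while pushOnRunQueue prepends, so the same thread resumes
immediately. A simplified, self-contained sketch of the two disciplines
(hypothetical singly-linked queue; the real queue hangs off the Capability):

    #include <stddef.h>

    typedef struct TSO_ { struct TSO_ *link; } TSO;
    typedef struct { TSO *hd; TSO *tl; } RunQueue;

    /* Fair: the yielding thread goes to the back of the queue. */
    static void addToRunQueue_sketch (RunQueue *q, TSO *t)
    {
        t->link = NULL;
        if (q->tl != NULL) { q->tl->link = t; } else { q->hd = t; }
        q->tl = t;
    }

    /* Unfair but cheap: the thread runs again on the next iteration. */
    static void pushOnRunQueue_sketch (RunQueue *q, TSO *t)
    {
        t->link = q->hd;
        q->hd = t;
        if (q->tl == NULL) { q->tl = t; }
    }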
// the CPU because the tick always arrives during GC). This way
// penalises threads that do a lot of allocation, but that seems
// better than the alternative.
- context_switch = 0;
+ cap->context_switch = 0;
/* put the thread back on the run queue. Then, if we're ready to
* GC, check whether this is the last task to stop. If so, wake
return cap; // NOTE: task->cap might have changed here
}
+ setContextSwitches();
for (i=0; i < n_capabilities; i++) {
debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilities (%d/%d)", i, n_capabilities);
if (cap != &capabilities[i]) {
// all the Capabilities, but even so it's a slightly
// unsavoury invariant.
task->cap = pcap;
- context_switch = 1;
waitForReturnCapability(&pcap, task);
if (pcap != &capabilities[i]) {
barf("scheduleDoGC: got the wrong capability");
blackhole_queue = END_TSO_QUEUE;
- context_switch = 0;
sched_state = SCHED_RUNNING;
recent_activity = ACTIVITY_YES;
}
/* Try to double the current stack size. If that takes us over the
- * maximum stack size for this thread, then use the maximum instead.
- * Finally round up so the TSO ends up as a whole number of blocks.
+ * maximum stack size for this thread, then use the maximum instead,
+ * unless we're already at or over the max size and can't raise the
+ * StackOverflow exception (see above), in which case just double
+ * the size. Finally, round up so the TSO ends up as a whole number
+ * of blocks.
*/
- new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+ if (tso->stack_size >= tso->max_stack_size) {
+ new_stack_size = tso->stack_size * 2;
+ } else {
+ new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+ }
new_tso_size = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
TSO_STRUCT_SIZE)/sizeof(W_);
new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
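
A worked example of the new sizing logic, with illustrative numbers (the
helper below is a stand-alone stand-in, not RTS code): a thread with
stack_size = 5000 words and max_stack_size = 8192 grows to
min(10000, 8192) = 8192, while a thread already at 8192 that cannot raise
the StackOverflow exception takes the new branch and doubles to 16384.

    #include <stdio.h>

    /* Stand-in for the sizing logic above (sizes in words, not bytes). */
    static unsigned long new_size (unsigned long stack_size,
                                   unsigned long max_stack_size)
    {
        if (stack_size >= max_stack_size) {
            return stack_size * 2;  /* at/over the max: grow anyway */
        }
        unsigned long doubled = stack_size * 2;
        return doubled < max_stack_size ? doubled : max_stack_size; /* stg_min */
    }

    int main (void)
    {
        printf("%lu\n", new_size(5000, 8192));  /* prints 8192  */
        printf("%lu\n", new_size(8192, 8192));  /* prints 16384 */
        return 0;
    }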
interruptStgRts(void)
{
sched_state = SCHED_INTERRUPTING;
- context_switch = 1;
+ setContextSwitches();
wakeUpRts();
}