*/
rtsBool blackholes_need_checking = rtsFalse;
-/* flag set by signal handler to precipitate a context switch
- * LOCK: none (just an advisory flag)
- */
-int context_switch = 0;
-
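/* Under the new scheme the flag lives in each Capability instead.  A
 * minimal sketch of the field (an assumption: the real declaration is
 * in Capability.h, alongside the rest of the per-Capability scheduler
 * state):
 *
 *     struct Capability_ {
 *         ...
 *         // advisory request for a context switch, checked by this
 *         // Capability's scheduler loop; still unlocked, as before
 *         int context_switch;
 *         ...
 *     };
 */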
/* flag that tracks whether we have done any execution in this time slice.
* LOCK: currently none, perhaps we should lock (but needs to be
 * updated in the fast path of the scheduler).
 */
static void scheduleSendPendingMessages(void);
static void scheduleActivateSpark(Capability *cap);
#endif
-static void schedulePostRunThread(StgTSO *t);
+static void schedulePostRunThread(Capability *cap, StgTSO *t);
static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
static void scheduleHandleStackOverflow( Capability *cap, Task *task,
StgTSO *t);
*/
if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
&& !emptyThreadQueues(cap)) {
- context_switch = 1;
+ cap->context_switch = 1;
}
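/* (ctxtSwitchTicks == 0 corresponds to +RTS -C0: with no timer-driven
 * switching, we request a switch after every iteration so that the
 * other threads on this Capability's run queue still make progress.) */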
run_thread:
CCCS = CCS_SYSTEM;
#endif
- schedulePostRunThread(t);
+ schedulePostRunThread(cap,t);
t = threadStackUnderflow(task,t);
* Send pending messages (PARALLEL_HASKELL only)
* ------------------------------------------------------------------------- */
+#if defined(PARALLEL_HASKELL)
static void
scheduleSendPendingMessages(void)
{
-#if defined(PARALLEL_HASKELL)
# if defined(PAR) // global Mem.Mgmt., omit for now
if (PendingFetches != END_BF_QUEUE) {
// packets which have become too old...
sendOldBuffers();
}
-#endif
}
+#endif
/* ----------------------------------------------------------------------------
* Activate spark threads (PARALLEL_HASKELL only)
* ------------------------------------------------------------------------- */
static void
-schedulePostRunThread (StgTSO *t)
+schedulePostRunThread (Capability *cap, StgTSO *t)
{
// We have to be able to catch transactions that are in an
// infinite loop as a result of seeing an inconsistent view of
// memory.  When that happens we strip the stack back to the
// ATOMICALLY_FRAME, aborting the (nested)
// transaction, and saving the stack of any
// partially-evaluated thunks on the heap.
- throwToSingleThreaded_(&capabilities[0], t,
- NULL, rtsTrue, NULL);
+ throwToSingleThreaded_(cap, t, NULL, rtsTrue, NULL);
ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
}
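/* Threading the current Capability through to the throwTo (instead of
 * the old hard-wired &capabilities[0]) matters under THREADED_RTS: the
 * looping transaction's thread has just been run on this Capability,
 * so the abort must be performed against that same one. */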
"--<< thread %ld (%s) stopped: HeapOverflow",
(long)t->id, whatNext_strs[t->what_next]);
- if (context_switch) {
+ if (cap->context_switch) {
// Sometimes we miss a context switch, e.g. when calling
// primitives in a tight loop, MAYBE_GC() doesn't check the
// context switch flag, and we end up waiting for a GC.
// See #1984, and concurrent/should_run/1984
- context_switch = 0;
+ cap->context_switch = 0;
addToRunQueue(cap,t);
} else {
pushOnRunQueue(cap,t);
// the CPU because the tick always arrives during GC). This way
// penalises threads that do a lot of allocation, but that seems
// better than the alternative.
- context_switch = 0;
+ cap->context_switch = 0;
/* put the thread back on the run queue. Then, if we're ready to
* GC, check whether this is the last task to stop. If so, wake
static Capability *
scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
{
- StgTSO *t;
rtsBool heap_census;
#ifdef THREADED_RTS
/* extern static volatile StgWord waiting_for_gc;
   lives inside capability.c */
return cap; // NOTE: task->cap might have changed here
}
+ setContextSwitches();
for (i=0; i < n_capabilities; i++) {
debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
if (cap != &capabilities[i]) {
// all the Capabilities, but even so it's a slightly
// unsavoury invariant.
task->cap = pcap;
- context_switch = 1;
waitForReturnCapability(&pcap, task);
if (pcap != &capabilities[i]) {
barf("scheduleDoGC: got the wrong capability");
* ------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
-void
+void OSThreadProcAttr
workerStart(Task *task)
{
Capability *cap;
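/* OSThreadProcAttr marks workerStart with the calling convention the
 * OS thread-creation API expects for entry points (an assumption: it
 * expands to something like __stdcall on Windows and to nothing on
 * POSIX platforms). */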
blackhole_queue = END_TSO_QUEUE;
- context_switch = 0;
sched_state = SCHED_RUNNING;
recent_activity = ACTIVITY_YES;
threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
{
bdescr *bd, *new_bd;
- lnat new_tso_size_w, tso_size_w;
+ lnat free_w, tso_size_w;
StgTSO *new_tso;
tso_size_w = tso_sizeW(tso);
// while we are moving the TSO:
lockClosure((StgClosure *)tso);
- new_tso_size_w = round_to_mblocks(tso_size_w/2);
-
- debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
- tso->id, tso_size_w, new_tso_size_w);
+ // this is the number of words we'll free
+ free_w = round_to_mblocks(tso_size_w/2);
bd = Bdescr((StgPtr)tso);
- new_bd = splitLargeBlock(bd, new_tso_size_w / BLOCK_SIZE_W);
- new_bd->free = bd->free;
+ new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
bd->free = bd->start + TSO_STRUCT_SIZEW;
new_tso = (StgTSO *)new_bd->start;
memcpy(new_tso,tso,TSO_STRUCT_SIZE);
- new_tso->stack_size = new_tso_size_w - TSO_STRUCT_SIZEW;
+ new_tso->stack_size = new_bd->free - new_tso->stack;
+
+ debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
+ (long)tso->id, tso_size_w, tso_sizeW(new_tso));
tso->what_next = ThreadRelocated;
tso->_link = new_tso; // no write barrier reqd: same generation
interruptStgRts(void)
{
sched_state = SCHED_INTERRUPTING;
- context_switch = 1;
+ setContextSwitches();
wakeUpRts();
}
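/* The same substitution presumably happens at the other writers of the
 * old global, e.g. the timer tick.  A hypothetical sketch (the real
 * handler lives in Timer.c and keeps its own tick countdown):
 */
#if 0   /* sketch only, not part of this patch */
static void
handle_tick (int unused STG_UNUSED)
{
    if (--ticks_to_ctxt_switch <= 0) {
        ticks_to_ctxt_switch = RtsFlags.ConcFlags.ctxtSwitchTicks;
        setContextSwitches();   /* was: context_switch = 1; */
    }
}
#endif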