* --------------------------------------------------------------------------*/
#include "PosixSource.h"
+#define KEEP_LOCKCLOSURE
#include "Rts.h"
#include "SchedAPI.h"
#include "RtsUtils.h"
#include "ThreadLabels.h"
#include "LdvProfile.h"
#include "Updates.h"
-#ifdef PROFILING
#include "Proftimer.h"
#include "ProfHeap.h"
-#endif
#if defined(GRAN) || defined(PARALLEL_HASKELL)
# include "GranSimRts.h"
# include "GranSim.h"
*/
rtsBool blackholes_need_checking = rtsFalse;
-/* Linked list of all threads.
- * Used for detecting garbage collected threads.
- * LOCK: sched_mutex+capability, or all capabilities
- */
-StgTSO *all_threads = NULL;
-
/* flag set by signal handler to precipitate a context switch
* LOCK: none (just an advisory flag)
*/
#if defined(PAR) || defined(GRAN)
static void scheduleGranParReport(void);
#endif
-static void schedulePostRunThread(void);
+static void schedulePostRunThread(StgTSO *t);
static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
static void scheduleHandleStackOverflow( Capability *cap, Task *task,
StgTSO *t);
static void scheduleHandleThreadBlocked( StgTSO *t );
static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
StgTSO *t );
-static rtsBool scheduleDoHeapProfile(rtsBool ready_to_gc);
+static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc);
static Capability *scheduleDoGC(Capability *cap, Task *task,
rtsBool force_major);
static rtsBool checkBlackHoles(Capability *cap);
static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
+static StgTSO *threadStackUnderflow(Task *task, StgTSO *tso);
static void deleteThread (Capability *cap, StgTSO *tso);
static void deleteAllThreads (Capability *cap);
}
#endif
- cap->r.rCurrentTSO = t;
-
/* context switches are initiated by the timer signal, unless
* the user specified "context switch as often as possible", with
* +RTS -C0
run_thread:
+ // CurrentTSO is the thread to run. t might be different if we
+ // loop back to run_thread, so make sure to set CurrentTSO after
+ // that.
+ cap->r.rCurrentTSO = t;
+
debugTrace(DEBUG_sched, "-->> running thread %ld %s ...",
(long)t->id, whatNext_strs[t->what_next]);
-#if defined(PROFILING)
startHeapProfTimer();
-#endif
// Check for exceptions blocked on this thread
maybePerformBlockedException (cap, t);
prev_what_next = t->what_next;
errno = t->saved_errno;
+#if mingw32_HOST_OS
+ SetLastError(t->saved_winerror);
+#endif
+
cap->in_haskell = rtsTrue;
- dirtyTSO(t);
+ dirty_TSO(cap,t);
- recent_activity = ACTIVITY_YES;
+#if defined(THREADED_RTS)
+ if (recent_activity == ACTIVITY_DONE_GC) {
+ // ACTIVITY_DONE_GC means we turned off the timer signal to
+ // conserve power (see #1623). Re-enable it here.
+ nat prev;
+ prev = xchg((P_)&recent_activity, ACTIVITY_YES);
+ if (prev == ACTIVITY_DONE_GC) {
+ startTimer();
+ }
+ } else {
+ recent_activity = ACTIVITY_YES;
+ }
+#endif
switch (prev_what_next) {
// XXX: possibly bogus for SMP because this thread might already
// be running again, see code below.
t->saved_errno = errno;
+#if mingw32_HOST_OS
+ // Similarly for Windows error code
+ t->saved_winerror = GetLastError();
+#endif
#if defined(THREADED_RTS)
// If ret is ThreadBlocked, and this Task is bound to the TSO that
// ----------------------------------------------------------------------
// Costs for the scheduler are assigned to CCS_SYSTEM
-#if defined(PROFILING)
stopHeapProfTimer();
+#if defined(PROFILING)
CCCS = CCS_SYSTEM;
#endif
- schedulePostRunThread();
+ schedulePostRunThread(t);
+
+ t = threadStackUnderflow(task,t);
ready_to_gc = rtsFalse;
barf("schedule: invalid thread return code %d", (int)ret);
}
- if (scheduleDoHeapProfile(ready_to_gc)) { ready_to_gc = rtsFalse; }
- if (ready_to_gc) {
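+ // GC either because the mutator needs one, or because a heap
+ // census is due; the census itself is taken inside scheduleDoGC().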
+ if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
cap = scheduleDoGC(cap,task,rtsFalse);
}
} /* end of while() */
-
- debugTrace(PAR_DEBUG_verbose,
- "== Leaving schedule() after having received Finish");
}
/* ----------------------------------------------------------------------------
// Check whether we have more threads on our run queue, or sparks
// in our pool, that we could hand to another Capability.
- if ((emptyRunQueue(cap) || cap->run_queue_hd->link == END_TSO_QUEUE)
+ if ((emptyRunQueue(cap) || cap->run_queue_hd->_link == END_TSO_QUEUE)
&& sparkPoolSizeCap(cap) < 2) {
return;
}
if (cap->run_queue_hd != END_TSO_QUEUE) {
prev = cap->run_queue_hd;
- t = prev->link;
- prev->link = END_TSO_QUEUE;
+ t = prev->_link;
+ prev->_link = END_TSO_QUEUE;
for (; t != END_TSO_QUEUE; t = next) {
- next = t->link;
- t->link = END_TSO_QUEUE;
+ next = t->_link;
+ t->_link = END_TSO_QUEUE;
if (t->what_next == ThreadRelocated
|| t->bound == task // don't move my bound thread
|| tsoLocked(t)) { // don't move a locked thread
- prev->link = t;
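+ // setTSOLink() writes prev->_link with the write barrier the
+ // generational GC needs when linking TSOs together.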
+ setTSOLink(cap, prev, t);
prev = t;
} else if (i == n_free_caps) {
pushed_to_all = rtsTrue;
i = 0;
// keep one for us
- prev->link = t;
+ setTSOLink(cap, prev, t);
prev = t;
} else {
debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
static void
scheduleStartSignalHandlers(Capability *cap)
{
- if (signals_pending()) { // safe outside the lock
+ if (RtsFlags.MiscFlags.install_signal_handlers && signals_pending()) {
+ // safe outside the lock
startSignalHandlers(cap);
}
}
cap->run_queue_hd = cap->wakeup_queue_hd;
cap->run_queue_tl = cap->wakeup_queue_tl;
} else {
- cap->run_queue_tl->link = cap->wakeup_queue_hd;
+ setTSOLink(cap, cap->run_queue_tl, cap->wakeup_queue_hd);
cap->run_queue_tl = cap->wakeup_queue_tl;
}
cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE;
cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/);
recent_activity = ACTIVITY_DONE_GC;
+ // disable timer signals (see #1623)
+ stopTimer();
if ( !emptyRunQueue(cap) ) return;
* for signals to arrive rather than bombing out with a
* deadlock.
*/
- if ( anyUserHandlers() ) {
+ if ( RtsFlags.MiscFlags.install_signal_handlers && anyUserHandlers() ) {
debugTrace(DEBUG_sched,
"still deadlocked, waiting for signals...");
// either we have threads to run, or we were interrupted:
ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING);
+
+ return;
}
#endif
case BlockedOnException:
case BlockedOnMVar:
throwToSingleThreaded(cap, task->tso,
- (StgClosure *)NonTermination_closure);
+ (StgClosure *)nonTermination_closure);
return;
default:
barf("deadlock: main thread blocked in a strange way");
* ------------------------------------------------------------------------- */
static void
-schedulePostRunThread(void)
+schedulePostRunThread (StgTSO *t)
{
+ // We have to be able to catch transactions that are in an
+ // infinite loop as a result of seeing an inconsistent view of
+ // memory, e.g.
+ //
+ // atomically $ do
+ // [a,b] <- mapM readTVar [ta,tb]
+ // when (a == b) loop
+ //
+ // and a is never equal to b given a consistent view of memory.
+ //
+ if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
+ if (!stmValidateNestOfTransactions (t -> trec)) {
+ debugTrace(DEBUG_sched | DEBUG_stm,
+ "trec %p found wasting its time", t);
+
+ // strip the stack back to the
+ // ATOMICALLY_FRAME, aborting the (nested)
+ // transaction, and saving the stack of any
+ // partially-evaluated thunks on the heap.
+ throwToSingleThreaded_(&capabilities[0], t,
+ NULL, rtsTrue, NULL);
+
+ ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
+ }
+ }
+
#if defined(PAR)
/* HACK 675: if the last thread didn't yield, make sure to print a
SCHEDULE event to the log file when StgRunning the next thread, even
}
debugTrace(DEBUG_sched,
- "--<< thread %ld (%s) stopped: HeapOverflow\n",
+ "--<< thread %ld (%s) stopped: HeapOverflow",
(long)t->id, whatNext_strs[t->what_next]);
#if defined(GRAN)
}
#endif
- pushOnRunQueue(cap,t);
+ if (context_switch) {
+ // Sometimes we miss a context switch, e.g. when calling
+ // primitives in a tight loop, MAYBE_GC() doesn't check the
+ // context switch flag, and we end up waiting for a GC.
+ // See #1984, and concurrent/should_run/1984
+ context_switch = 0;
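+ // addToRunQueue() lets other runnable threads go first, honouring
+ // the context switch; pushOnRunQueue() below would resume this
+ // thread immediately.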
+ addToRunQueue(cap,t);
+ } else {
+ pushOnRunQueue(cap,t);
+ }
return rtsTrue;
/* actual GC is done at the end of the while loop in schedule() */
}
IF_DEBUG(sanity,
//debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
checkTSO(t));
- ASSERT(t->link == END_TSO_QUEUE);
+ ASSERT(t->_link == END_TSO_QUEUE);
// Shortcut if we're just switching evaluators: don't bother
// doing stack squeezing (which can be expensive), just run the
debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished",
(unsigned long)t->id, whatNext_strs[t->what_next]);
- /* Inform the Hpc that a thread has finished */
- hs_hpc_event("Thread Finished",t);
-
#if defined(GRAN)
endThread(t, CurrentProc); // clean-up the thread
#elif defined(PARALLEL_HASKELL)
// point where we can deal with this. Leaving it on the run
// queue also ensures that the garbage collector knows about
// this thread and its return value (it gets dropped from the
- // all_threads list so there's no other way to find it).
+ // step->threads list so there's no other way to find it).
appendToRunQueue(cap,t);
return rtsFalse;
#else
}
/* -----------------------------------------------------------------------------
- * Perform a heap census, if PROFILING
+ * Perform a heap census
* -------------------------------------------------------------------------- */
static rtsBool
-scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED )
+scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
{
-#if defined(PROFILING)
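+ // The census itself is now taken in scheduleDoGC(); this function
+ // only reports whether one is due.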
// When we have +RTS -i0 and we're heap profiling, do a census at
// every GC. This lets us get repeatable runs for debugging.
if (performHeapProfile ||
(RtsFlags.ProfFlags.profileInterval==0 &&
RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
-
- // checking black holes is necessary before GC, otherwise
- // there may be threads that are unreachable except by the
- // blackhole queue, which the GC will consider to be
- // deadlocked.
- scheduleCheckBlackHoles(&MainCapability);
-
- debugTrace(DEBUG_sched, "garbage collecting before heap census");
- GarbageCollect(rtsTrue);
-
- debugTrace(DEBUG_sched, "performing heap census");
- heapCensus();
-
- performHeapProfile = rtsFalse;
- return rtsTrue; // true <=> we already GC'd
+ return rtsTrue;
+ } else {
+ return rtsFalse;
}
-#endif
- return rtsFalse;
}
/* -----------------------------------------------------------------------------
scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
{
StgTSO *t;
+ rtsBool heap_census;
#ifdef THREADED_RTS
static volatile StgWord waiting_for_gc;
rtsBool was_waiting;
waiting_for_gc = rtsFalse;
#endif
- /* Kick any transactions which are invalid back to their
- * atomically frames. When next scheduled they will try to
- * commit, this commit will fail and they will retry.
- */
- {
- StgTSO *next;
-
- for (t = all_threads; t != END_TSO_QUEUE; t = next) {
- if (t->what_next == ThreadRelocated) {
- next = t->link;
- } else {
- next = t->global_link;
-
- // This is a good place to check for blocked
- // exceptions. It might be the case that a thread is
- // blocked on delivering an exception to a thread that
- // is also blocked - we try to ensure that this
- // doesn't happen in throwTo(), but it's too hard (or
- // impossible) to close all the race holes, so we
- // accept that some might get through and deal with
- // them here. A GC will always happen at some point,
- // even if the system is otherwise deadlocked.
- maybePerformBlockedException (&capabilities[0], t);
-
- if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
- if (!stmValidateNestOfTransactions (t -> trec)) {
- debugTrace(DEBUG_sched | DEBUG_stm,
- "trec %p found wasting its time", t);
-
- // strip the stack back to the
- // ATOMICALLY_FRAME, aborting the (nested)
- // transaction, and saving the stack of any
- // partially-evaluated thunks on the heap.
- throwToSingleThreaded_(&capabilities[0], t,
- NULL, rtsTrue, NULL);
-
-#ifdef REG_R1
- ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
-#endif
- }
- }
- }
- }
- }
-
// so this happens periodically:
if (cap) scheduleCheckBlackHoles(cap);
deleteAllThreads(&capabilities[0]);
sched_state = SCHED_SHUTTING_DOWN;
}
+
+ heap_census = scheduleNeedHeapProfile(rtsTrue);
/* everybody back, start the GC.
* Could do it in this thread, or signal a condition var
#if defined(THREADED_RTS)
debugTrace(DEBUG_sched, "doing GC");
#endif
- GarbageCollect(force_major);
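+ // a heap census should be preceded by a major GC, so that the
+ // census only sees live data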
+ GarbageCollect(force_major || heap_census);
+ if (heap_census) {
+ debugTrace(DEBUG_sched, "performing heap census");
+ heapCensus();
+ performHeapProfile = rtsFalse;
+ }
+
#if defined(THREADED_RTS)
// release our stash of capabilities.
for (i = 0; i < n_capabilities; i++) {
* Singleton fork(). Do not copy any running threads.
* ------------------------------------------------------------------------- */
-StgInt
+pid_t
forkProcess(HsStablePtr *entry
#ifndef FORKPROCESS_PRIMOP_SUPPORTED
STG_UNUSED
pid_t pid;
StgTSO* t,*next;
Capability *cap;
+ nat s;
#if defined(THREADED_RTS)
if (RtsFlags.ParFlags.nNodes > 1) {
// ToDo: for SMP, we should probably acquire *all* the capabilities
cap = rts_lock();
+ // no funny business: hold locks while we fork, otherwise if some
+ // other thread is holding a lock when the fork happens, the data
+ // structure protected by the lock will forever be in an
+ // inconsistent state in the child. See also #1391.
+ ACQUIRE_LOCK(&sched_mutex);
+ ACQUIRE_LOCK(&cap->lock);
+ ACQUIRE_LOCK(&cap->running_task->lock);
+
pid = fork();
if (pid) { // parent
+ RELEASE_LOCK(&sched_mutex);
+ RELEASE_LOCK(&cap->lock);
+ RELEASE_LOCK(&cap->running_task->lock);
+
// just return the pid
rts_unlock(cap);
return pid;
} else { // child
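+ // the locks acquired above are still held in the child; re-initialise
+ // them so the child starts with them in a known, unlocked state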
+#if defined(THREADED_RTS)
+ initMutex(&sched_mutex);
+ initMutex(&cap->lock);
+ initMutex(&cap->running_task->lock);
+#endif
+
// Now, all OS threads except the thread that forked are
// stopped. We need to stop all Haskell threads, including
// those involved in foreign calls. Also we need to delete
// all Tasks, because they correspond to OS threads that are
// now gone.
- for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+ for (s = 0; s < total_steps; s++) {
+ for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
- next = t->link;
+ next = t->_link;
} else {
next = t->global_link;
// don't allow threads to catch the ThreadKilled
// threads may be evaluating thunks that we need later.
deleteThread_(cap,t);
}
+ }
}
// Empty the run queue. It seems tempting to let all the
// don't exist now:
cap->suspended_ccalling_tasks = NULL;
- // Empty the all_threads list. Otherwise, the garbage
+ // Empty the threads lists. Otherwise, the garbage
// collector may attempt to resurrect some of these threads.
- all_threads = END_TSO_QUEUE;
+ for (s = 0; s < total_steps; s++) {
+ all_steps[s].threads = END_TSO_QUEUE;
+ }
// Wipe the task list, except the current Task.
ACQUIRE_LOCK(&sched_mutex);
for (task = all_tasks; task != NULL; task=task->all_link) {
if (task != cap->running_task) {
+#if defined(THREADED_RTS)
+ initMutex(&task->lock); // see #1391
+#endif
discardTask(task);
}
}
cap->returning_tasks_tl = NULL;
#endif
+ // On Unix, all timers are reset in the child, so we need to start
+ // the timer again.
+ initTimer();
+ startTimer();
+
cap = rts_evalStableIO(cap, entry, NULL); // run the action
rts_checkSchedStatus("forkProcess",cap);
// NOTE: only safe to call if we own all capabilities.
StgTSO* t, *next;
+ nat s;
+
debugTrace(DEBUG_sched,"deleting all threads");
- for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+ for (s = 0; s < total_steps; s++) {
+ for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
- next = t->link;
+ next = t->_link;
} else {
next = t->global_link;
deleteThread(cap,t);
}
+ }
}
// The run queue now contains a bunch of ThreadKilled threads. We
suspendThread (StgRegTable *reg)
{
Capability *cap;
- int saved_errno = errno;
+ int saved_errno;
StgTSO *tso;
Task *task;
+#if mingw32_HOST_OS
+ StgWord32 saved_winerror;
+#endif
+
+ saved_errno = errno;
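+ // on Windows, the thread-local OS error code must survive the
+ // foreign call too, just like errno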
+#if mingw32_HOST_OS
+ saved_winerror = GetLastError();
+#endif
/* assume that *reg is a pointer to the StgRegTable part of a Capability.
*/
#endif
errno = saved_errno;
+#if mingw32_HOST_OS
+ SetLastError(saved_winerror);
+#endif
return task;
}
{
StgTSO *tso;
Capability *cap;
- int saved_errno = errno;
Task *task = task_;
+ int saved_errno;
+#if mingw32_HOST_OS
+ StgWord32 saved_winerror;
+#endif
+
+ saved_errno = errno;
+#if mingw32_HOST_OS
+ saved_winerror = GetLastError();
+#endif
cap = task->cap;
// Wait for permission to re-enter the RTS with the result.
tso = task->suspended_tso;
task->suspended_tso = NULL;
- tso->link = END_TSO_QUEUE;
+ tso->_link = END_TSO_QUEUE; // no write barrier reqd
debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
if (tso->why_blocked == BlockedOnCCall) {
cap->r.rCurrentTSO = tso;
cap->in_haskell = rtsTrue;
errno = saved_errno;
+#if mingw32_HOST_OS
+ SetLastError(saved_winerror);
+#endif
/* We might have GC'd, mark the TSO dirty again */
- dirtyTSO(tso);
+ dirty_TSO(cap,tso);
IF_DEBUG(sanity, checkTSO(tso));
#endif
blackhole_queue = END_TSO_QUEUE;
- all_threads = END_TSO_QUEUE;
context_switch = 0;
sched_state = SCHED_RUNNING;
+ recent_activity = ACTIVITY_YES;
#if defined(THREADED_RTS)
/* Initialise the mutex and condition variables used by
}
void
-exitScheduler( void )
+exitScheduler(
+ rtsBool wait_foreign
+#if !defined(THREADED_RTS)
+ __attribute__((unused))
+#endif
+)
+ /* see Capability.c, shutdownCapability() */
{
Task *task = NULL;
nat i;
for (i = 0; i < n_capabilities; i++) {
- shutdownCapability(&capabilities[i], task);
+ shutdownCapability(&capabilities[i], task, wait_foreign);
}
boundTaskExiting(task);
stopTaskManager();
#endif
}
-/* ---------------------------------------------------------------------------
- Where are the roots that we know about?
-
- - all the threads on the runnable queue
- - all the threads on the blocked queue
- - all the threads on the sleeping queue
- - all the thread currently executing a _ccall_GC
- - all the "main threads"
-
- ------------------------------------------------------------------------ */
-
-/* This has to be protected either by the scheduler monitor, or by the
- garbage collection monitor (probably the latter).
- KH @ 25/10/99
-*/
-
-void
-GetRoots( evac_fn evac )
-{
- nat i;
- Capability *cap;
- Task *task;
-
-#if defined(GRAN)
- for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
- if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL)))
- evac((StgClosure **)&run_queue_hds[i]);
- if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL)))
- evac((StgClosure **)&run_queue_tls[i]);
-
- if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL)))
- evac((StgClosure **)&blocked_queue_hds[i]);
- if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL)))
- evac((StgClosure **)&blocked_queue_tls[i]);
- if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL)))
- evac((StgClosure **)&ccalling_threads[i]);
- }
-
- markEventQueue();
-
-#else /* !GRAN */
-
- for (i = 0; i < n_capabilities; i++) {
- cap = &capabilities[i];
- evac((StgClosure **)(void *)&cap->run_queue_hd);
- evac((StgClosure **)(void *)&cap->run_queue_tl);
-#if defined(THREADED_RTS)
- evac((StgClosure **)(void *)&cap->wakeup_queue_hd);
- evac((StgClosure **)(void *)&cap->wakeup_queue_tl);
-#endif
- for (task = cap->suspended_ccalling_tasks; task != NULL;
- task=task->next) {
- debugTrace(DEBUG_sched,
- "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
- evac((StgClosure **)(void *)&task->suspended_tso);
- }
-
- }
-
-
-#if !defined(THREADED_RTS)
- evac((StgClosure **)(void *)&blocked_queue_hd);
- evac((StgClosure **)(void *)&blocked_queue_tl);
- evac((StgClosure **)(void *)&sleeping_queue);
-#endif
-#endif
-
- // evac((StgClosure **)&blackhole_queue);
-
-#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL) || defined(GRAN)
- markSparkQueue(evac);
-#endif
-
-#if defined(RTS_USER_SIGNALS)
- // mark the signal handlers (signals should be already blocked)
- markSignalHandlers(evac);
-#endif
-}
-
/* -----------------------------------------------------------------------------
performGC
// while we are moving the TSO:
lockClosure((StgClosure *)tso);
- if (tso->stack_size >= tso->max_stack_size) {
+ if (tso->stack_size >= tso->max_stack_size && !(tso->flags & TSO_BLOCKEX)) {
+ // NB. never raise a StackOverflow exception if the thread is
+ // inside Control.Exception.block. It is impractical to protect
+ // against stack overflow exceptions, since virtually anything
+ // can raise one (even 'catch'), so this is the only sensible
+ // thing to do here. See bug #767.
debugTrace(DEBUG_gc,
"threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
"increasing stack size from %ld words to %d.",
(long)tso->stack_size, new_stack_size);
- dest = (StgTSO *)allocate(new_tso_size);
+ dest = (StgTSO *)allocateLocal(cap,new_tso_size);
TICK_ALLOC_TSO(new_stack_size,0);
/* copy the TSO block and the old stack into the new area */
* dead TSO's stack.
*/
tso->what_next = ThreadRelocated;
- tso->link = dest;
+ setTSOLink(cap,tso,dest);
tso->sp = (P_)&(tso->stack[tso->stack_size]);
tso->why_blocked = NotBlocked;
return dest;
}
+static StgTSO *
+threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
+{
+ bdescr *bd, *new_bd;
+ lnat new_tso_size_w, tso_size_w;
+ StgTSO *new_tso;
+
+ tso_size_w = tso_sizeW(tso);
+
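+ // only shrink the TSO if it occupies at least a whole megablock and
+ // no more than a quarter of its stack is actually in use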
+ if (tso_size_w < MBLOCK_SIZE_W ||
+ (nat)(tso->stack + tso->stack_size - tso->sp) > tso->stack_size / 4)
+ {
+ return tso;
+ }
+
+ // don't allow throwTo() to modify the blocked_exceptions queue
+ // while we are moving the TSO:
+ lockClosure((StgClosure *)tso);
+
+ new_tso_size_w = round_to_mblocks(tso_size_w/2);
+
+ debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
+ tso->id, tso_size_w, new_tso_size_w);
+
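+ // Split the TSO's block group roughly in half. The stack grows
+ // downwards, so the live stack already sits in the high-address half
+ // (new_bd); only the TSO header needs to be copied there, and the old
+ // header becomes a ThreadRelocated stub pointing at the new TSO.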
+ bd = Bdescr((StgPtr)tso);
+ new_bd = splitLargeBlock(bd, new_tso_size_w / BLOCK_SIZE_W);
+ new_bd->free = bd->free;
+ bd->free = bd->start + TSO_STRUCT_SIZEW;
+
+ new_tso = (StgTSO *)new_bd->start;
+ memcpy(new_tso,tso,TSO_STRUCT_SIZE);
+ new_tso->stack_size = new_tso_size_w - TSO_STRUCT_SIZEW;
+
+ tso->what_next = ThreadRelocated;
+ tso->_link = new_tso; // no write barrier reqd: same generation
+
+ // The TSO attached to this Task may have moved, so update the
+ // pointer to it.
+ if (task->tso == tso) {
+ task->tso = new_tso;
+ }
+
+ unlockTSO(new_tso);
+ unlockTSO(tso);
+
+ IF_DEBUG(sanity,checkTSO(new_tso));
+
+ return new_tso;
+}
+
/* ---------------------------------------------------------------------------
Interrupt execution
- usually called inside a signal handler so it mustn't do anything fancy.
t = blackhole_queue;
while (t != END_TSO_QUEUE) {
ASSERT(t->why_blocked == BlockedOnBlackHole);
- type = get_itbl(t->block_info.closure)->type;
+ type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
IF_DEBUG(sanity,checkTSO(t));
t = unblockOne(cap, t);
*prev = t;
any_woke_up = rtsTrue;
} else {
- prev = &t->link;
- t = t->link;
+ prev = &t->_link;
+ t = t->_link;
}
}
return CATCH_RETRY_FRAME;
case CATCH_STM_FRAME: {
- debugTrace(DEBUG_stm,
- "found CATCH_STM_FRAME at %p during retry", p);
StgTRecHeader *trec = tso -> trec;
StgTRecHeader *outer = stmGetEnclosingTRec(trec);
+ debugTrace(DEBUG_stm,
+ "found CATCH_STM_FRAME at %p during retry", p);
debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer);
stmAbortTransaction(tso -> cap, trec);
stmFreeAbortedTRec(tso -> cap, trec);
{
StgTSO *tso, *next;
Capability *cap;
+ step *step;
for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
next = tso->global_link;
- tso->global_link = all_threads;
- all_threads = tso;
+
+ step = Bdescr((P_)tso)->step;
+ tso->global_link = step->threads;
+ step->threads = tso;
+
debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);
// Wake up the thread on the Capability it was last on
case BlockedOnException:
/* Called by GC - sched_mutex lock is currently held. */
throwToSingleThreaded(cap, tso,
- (StgClosure *)BlockedOnDeadMVar_closure);
+ (StgClosure *)blockedOnDeadMVar_closure);
break;
case BlockedOnBlackHole:
throwToSingleThreaded(cap, tso,
- (StgClosure *)NonTermination_closure);
+ (StgClosure *)nonTermination_closure);
break;
case BlockedOnSTM:
throwToSingleThreaded(cap, tso,
- (StgClosure *)BlockedIndefinitely_closure);
+ (StgClosure *)blockedIndefinitely_closure);
break;
case NotBlocked:
/* This might happen if the thread was blocked on a black hole
}
}
}
+
+/* -----------------------------------------------------------------------------
+ performPendingThrowTos is called after garbage collection, and
+ passed a list of threads that were found to have pending throwTos
+ (tso->blocked_exceptions was not empty), and were blocked.
+ Normally this doesn't happen, because we would deliver the
+ exception directly if the target thread is blocked, but there are
+ small windows where it might occur on a multiprocessor (see
+ throwTo()).
+
+ NB. we must be holding all the capabilities at this point, just
+ like resurrectThreads().
+ -------------------------------------------------------------------------- */
+
+void
+performPendingThrowTos (StgTSO *threads)
+{
+ StgTSO *tso, *next;
+ Capability *cap;
+ step *step;
+
+ for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
+ next = tso->global_link;
+
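+ // re-attach the thread to its step's list of threads (cf.
+ // resurrectThreads() above) before delivering the blocked exception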
+ step = Bdescr((P_)tso)->step;
+ tso->global_link = step->threads;
+ step->threads = tso;
+
+ debugTrace(DEBUG_sched, "performing blocked throwTo to thread %lu", (unsigned long)tso->id);
+
+ cap = tso->cap;
+ maybePerformBlockedException(cap, tso);
+ }
+}