# define STATIC_INLINE static
#endif
-#ifdef THREADED_RTS
-#define USED_WHEN_THREADED_RTS
-#define USED_WHEN_NON_THREADED_RTS STG_UNUSED
-#else
-#define USED_WHEN_THREADED_RTS STG_UNUSED
-#define USED_WHEN_NON_THREADED_RTS
-#endif
-
-#ifdef SMP
-#define USED_WHEN_SMP
-#else
-#define USED_WHEN_SMP STG_UNUSED
-#endif
-
/* -----------------------------------------------------------------------------
* Global variables
* -------------------------------------------------------------------------- */
-/* if this flag is set as well, give up execution
- * LOCK: none (changes once, from false->true)
+/* The state of the scheduler: one of SCHED_RUNNING,
+ * SCHED_INTERRUPTING, SCHED_INTERRUPTED or SCHED_SHUTTING_DOWN.
+ * LOCK: none (only ever advances; see the shutdown sequence below)
  */
-rtsBool interrupted = rtsFalse;
+volatile StgWord sched_state = SCHED_RUNNING;
/* Next thread ID to allocate.
* LOCK: sched_mutex
/*
* This mutex protects most of the global scheduler data in
- * the THREADED_RTS and (inc. SMP) runtime.
+ * the THREADED_RTS runtime.
*/
#if defined(THREADED_RTS)
Mutex sched_mutex;
// scheduler clearer.
//
static void schedulePreLoop (void);
-#if defined(SMP)
+#if defined(THREADED_RTS)
static void schedulePushWork(Capability *cap, Task *task);
#endif
static void scheduleStartSignalHandlers (Capability *cap);
static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
StgTSO *t );
static rtsBool scheduleDoHeapProfile(rtsBool ready_to_gc);
-static void scheduleDoGC(Capability *cap, Task *task, rtsBool force_major);
+static Capability *scheduleDoGC(Capability *cap, Task *task,
+ rtsBool force_major,
+ void (*get_roots)(evac_fn));
static void unblockThread(Capability *cap, StgTSO *tso);
static rtsBool checkBlackHoles(Capability *cap);
rtsBool stop_at_atomically, StgPtr stop_here);
static void deleteThread (Capability *cap, StgTSO *tso);
-static void deleteRunQueue (Capability *cap);
+static void deleteAllThreads (Capability *cap);
#ifdef DEBUG
static void printThreadBlockage(StgTSO *tso);
}
#endif
-#ifdef SMP
+#if defined(THREADED_RTS)
schedulePushWork(cap,task);
#endif
stg_exit(EXIT_FAILURE);
}
+ // The interruption / shutdown sequence.
+ //
+ // In order to cleanly shut down the runtime, we want to:
+ // * make sure that all main threads return to their callers
+ // with the state 'Interrupted'.
+  //    * clean up all OS threads associated with the runtime
+ // * free all memory etc.
+ //
+ // So the sequence for ^C goes like this:
+ //
+ // * ^C handler sets sched_state := SCHED_INTERRUPTING and
+ // arranges for some Capability to wake up
+ //
+ // * all threads in the system are halted, and the zombies are
+ // placed on the run queue for cleaning up. We acquire all
+  //      the capabilities in order to delete the threads; this is
+ // done by scheduleDoGC() for convenience (because GC already
+ // needs to acquire all the capabilities). We can't kill
+ // threads involved in foreign calls.
+ //
+ // * sched_state := SCHED_INTERRUPTED
+ //
+ // * somebody calls shutdownHaskell(), which calls exitScheduler()
+ //
+ // * sched_state := SCHED_SHUTTING_DOWN
+ //
+ // * all workers exit when the run queue on their capability
+ // drains. All main threads will also exit when their TSO
+ // reaches the head of the run queue and they can return.
//
- // Test for interruption. If interrupted==rtsTrue, then either
- // we received a keyboard interrupt (^C), or the scheduler is
- // trying to shut down all the tasks (shutting_down_scheduler) in
- // the threaded RTS.
+ // * eventually all Capabilities will shut down, and the RTS can
+ // exit.
//
- if (interrupted) {
- deleteRunQueue(cap);
-#if defined(SMP)
+  //    * We might be left with threads blocked in foreign calls;
+  //      we should really attempt to kill these somehow (TODO).
+
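The states named in this sequence form a small, monotonically advancing
state machine; their definitions live in Schedule.h rather than in this
patch. A sketch of their likely shape (the exact values are an
assumption, but the ordering matters, because the code below tests
sched_state >= SCHED_INTERRUPTING):

    /* Sketch of the scheduler states (assumed values; see Schedule.h).
     * They must be ordered so that (sched_state >= SCHED_INTERRUPTING)
     * means "interrupting, or later in the shutdown sequence". */
    #define SCHED_RUNNING       0  /* normal operation                */
    #define SCHED_INTERRUPTING  1  /* ^C seen; threads being deleted  */
    #define SCHED_INTERRUPTED   2  /* all threads have been deleted   */
    #define SCHED_SHUTTING_DOWN 3  /* exitScheduler() has been called */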
+ switch (sched_state) {
+ case SCHED_RUNNING:
+ break;
+ case SCHED_INTERRUPTING:
+ IF_DEBUG(scheduler, sched_belch("SCHED_INTERRUPTING"));
+#if defined(THREADED_RTS)
discardSparksCap(cap);
#endif
- if (shutting_down_scheduler) {
- IF_DEBUG(scheduler, sched_belch("shutting down"));
- // If we are a worker, just exit. If we're a bound thread
- // then we will exit below when we've removed our TSO from
- // the run queue.
- if (task->tso == NULL && emptyRunQueue(cap)) {
- return cap;
- }
- } else {
- IF_DEBUG(scheduler, sched_belch("interrupted"));
+ /* scheduleDoGC() deletes all the threads */
+ cap = scheduleDoGC(cap,task,rtsFalse,GetRoots);
+ break;
+ case SCHED_INTERRUPTED:
+ IF_DEBUG(scheduler, sched_belch("SCHED_INTERRUPTED"));
+ break;
+ case SCHED_SHUTTING_DOWN:
+ IF_DEBUG(scheduler, sched_belch("SCHED_SHUTTING_DOWN"));
+ // If we are a worker, just exit. If we're a bound thread
+ // then we will exit below when we've removed our TSO from
+ // the run queue.
+ if (task->tso == NULL && emptyRunQueue(cap)) {
+ return cap;
}
+ break;
+ default:
+	barf("sched_state: %d", (int)sched_state);
}
-#if defined(SMP)
+#if defined(THREADED_RTS)
// If the run queue is empty, take a spark and turn it into a thread.
{
if (emptyRunQueue(cap)) {
}
}
}
-#endif // SMP
+#endif // THREADED_RTS
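The elided body above turns a spark into a thread when the run queue is
empty. A sketch of that step, assuming spark-pool helpers findSpark()
and createSparkThread() (neither appears in this patch, so treat the
names as assumptions):

    if (emptyRunQueue(cap)) {
        StgClosure *spark;
        spark = findSpark(cap);       // try to take a spark from the pool
        if (spark != NULL) {
            IF_DEBUG(scheduler,
                     sched_belch("turning spark of closure %p into a thread",
                                 (StgClosure *)spark));
            createSparkThread(cap, spark); // new TSO to evaluate the spark
        }
    }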
scheduleStartSignalHandlers(cap);
scheduleCheckBlockedThreads(cap);
scheduleDetectDeadlock(cap,task);
+#if defined(THREADED_RTS)
+ cap = task->cap; // reload cap, it might have changed
+#endif
// Normally, the only way we can get here with no threads to
    // run is if a keyboard interrupt was received during 
// as a result of a console event having been delivered.
if ( emptyRunQueue(cap) ) {
#if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
- ASSERT(interrupted);
+ ASSERT(sched_state >= SCHED_INTERRUPTING);
#endif
continue; // nothing to do
}
// ----------------------------------------------------------------------
// Run the current thread
+ ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
+
prev_what_next = t->what_next;
errno = t->saved_errno;
cap->in_haskell = rtsTrue;
+ dirtyTSO(t);
+
recent_activity = ACTIVITY_YES;
switch (prev_what_next) {
// be running again, see code below.
t->saved_errno = errno;
-#ifdef SMP
+#if defined(THREADED_RTS)
// If ret is ThreadBlocked, and this Task is bound to the TSO that
// blocked, we are in limbo - the TSO is now owned by whatever it
// is blocked on, and may in fact already have been woken up,
    // immediately and return to normality.
if (ret == ThreadBlocked) {
IF_DEBUG(scheduler,
- debugBelch("--<< thread %d (%s) stopped: blocked\n",
- t->id, whatNext_strs[t->what_next]));
+		 sched_belch("--<< thread %d (%s) stopped: blocked",
+			     t->id, whatNext_strs[t->what_next]));
continue;
}
#endif
}
if (scheduleDoHeapProfile(ready_to_gc)) { ready_to_gc = rtsFalse; }
- if (ready_to_gc) { scheduleDoGC(cap,task,rtsFalse); }
+ if (ready_to_gc) {
+ cap = scheduleDoGC(cap,task,rtsFalse,GetRoots);
+ }
} /* end of while() */
IF_PAR_DEBUG(verbose,
* Push work to other Capabilities if we have some.
* -------------------------------------------------------------------------- */
-#ifdef SMP
+#if defined(THREADED_RTS)
static void
-schedulePushWork(Capability *cap USED_WHEN_SMP,
- Task *task USED_WHEN_SMP)
+schedulePushWork(Capability *cap USED_IF_THREADS,
+ Task *task USED_IF_THREADS)
{
Capability *free_caps[n_capabilities], *cap0;
nat i, n_free_caps;
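The body of schedulePushWork begins by gathering the Capabilities that
are currently idle. A sketch of that step, reusing the locals declared
above and assuming a tryGrabCapability(cap, task) helper (not shown in
this patch) that atomically claims an idle Capability:

    n_free_caps = 0;
    for (i = 0; i < n_capabilities; i++) {
        cap0 = &capabilities[i];
        if (cap != cap0 && tryGrabCapability(cap0, task)) {
            free_caps[n_free_caps++] = cap0;
        }
    }
    // ...then share out our run queue and spark pool among
    // free_caps[0..n_free_caps-1], releasing each Capability as we go.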
* ------------------------------------------------------------------------- */
static void
-scheduleCheckBlockedThreads(Capability *cap USED_WHEN_NON_THREADED_RTS)
+scheduleCheckBlockedThreads(Capability *cap USED_IF_NOT_THREADS)
{
#if !defined(THREADED_RTS)
//
{
#if defined(PARALLEL_HASKELL)
- // ToDo: add deadlock detection in GUM (similar to SMP) -- HWL
+ // ToDo: add deadlock detection in GUM (similar to THREADED_RTS) -- HWL
return;
#endif
// they are unreachable and will therefore be sent an
// exception. Any threads thus released will be immediately
// runnable.
- scheduleDoGC( cap, task, rtsTrue/*force major GC*/ );
+ cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/, GetRoots);
+
recent_activity = ACTIVITY_DONE_GC;
if ( !emptyRunQueue(cap) ) return;
}
// either we have threads to run, or we were interrupted:
- ASSERT(!emptyRunQueue(cap) || interrupted);
+ ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING);
}
#endif
if (cap->r.rCurrentNursery->u.back != NULL) {
cap->r.rCurrentNursery->u.back->link = bd;
} else {
-#if !defined(SMP)
+#if !defined(THREADED_RTS)
ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
g0s0 == cap->r.rNursery);
#endif
// has tidied up its stack and placed itself on whatever queue
// it needs to be on.
-#if !defined(SMP)
+#if !defined(THREADED_RTS)
ASSERT(t->why_blocked != NotBlocked);
- // This might not be true under SMP: we don't have
+ // This might not be true under THREADED_RTS: we don't have
// exclusive access to this TSO, so someone might have
// woken it up by now. This actually happens: try
// conc023 +RTS -N2.
if (task->ret) {
*(task->ret) = NULL;
}
- if (interrupted) {
+ if (sched_state >= SCHED_INTERRUPTING) {
task->stat = Interrupted;
} else {
task->stat = Killed;
if (performHeapProfile ||
(RtsFlags.ProfFlags.profileInterval==0 &&
RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
+
+    // checking black holes is necessary before GC; otherwise
+ // there may be threads that are unreachable except by the
+ // blackhole queue, which the GC will consider to be
+ // deadlocked.
+ scheduleCheckBlackHoles(&MainCapability);
+
+ IF_DEBUG(scheduler, sched_belch("garbage collecting before heap census"));
GarbageCollect(GetRoots, rtsTrue);
+
+ IF_DEBUG(scheduler, sched_belch("performing heap census"));
heapCensus();
+
performHeapProfile = rtsFalse;
return rtsTrue; // true <=> we already GC'd
}
* Perform a garbage collection if necessary
* -------------------------------------------------------------------------- */
-static void
-scheduleDoGC( Capability *cap, Task *task USED_WHEN_SMP, rtsBool force_major )
+static Capability *
+scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS,
+ rtsBool force_major, void (*get_roots)(evac_fn))
{
StgTSO *t;
-#ifdef SMP
+#ifdef THREADED_RTS
static volatile StgWord waiting_for_gc;
rtsBool was_waiting;
nat i;
#endif
-#ifdef SMP
+#ifdef THREADED_RTS
// In order to GC, there must be no threads running Haskell code.
// Therefore, the GC thread needs to hold *all* the capabilities,
// and release them after the GC has completed.
if (was_waiting) {
do {
IF_DEBUG(scheduler, sched_belch("someone else is trying to GC..."));
- yieldCapability(&cap,task);
+ if (cap) yieldCapability(&cap,task);
} while (waiting_for_gc);
- return;
+ return cap; // NOTE: task->cap might have changed here
}
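The was_waiting test above is one half of a handshake on the static
waiting_for_gc word: the first Task to request a GC claims it, and any
Task arriving later yields its Capability until the GC is over. A sketch
of the claiming side, assuming the RTS's atomic compare-and-swap
primitive cas(ptr, old, new), which returns the previous value:

    // Whoever swaps 0 -> 1 here becomes the GC leader; every other
    // Task sees was_waiting != 0 and takes the yield loop above.
    was_waiting = cas(&waiting_for_gc, 0, 1);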
for (i=0; i < n_capabilities; i++) {
// ATOMICALLY_FRAME, aborting the (nested)
// transaction, and saving the stack of any
// partially-evaluated thunks on the heap.
- raiseAsync_(cap, t, NULL, rtsTrue, NULL);
+ raiseAsync_(&capabilities[0], t, NULL, rtsTrue, NULL);
#ifdef REG_R1
ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
}
// so this happens periodically:
- scheduleCheckBlackHoles(cap);
+ if (cap) scheduleCheckBlackHoles(cap);
IF_DEBUG(scheduler, printAllThreads());
+ /*
+ * We now have all the capabilities; if we're in an interrupting
+ * state, then we should take the opportunity to delete all the
+ * threads in the system.
+ */
+ if (sched_state >= SCHED_INTERRUPTING) {
+ deleteAllThreads(&capabilities[0]);
+ sched_state = SCHED_INTERRUPTED;
+ }
+
/* everybody back, start the GC.
* Could do it in this thread, or signal a condition var
* to do it in another thread. Either way, we need to
#if defined(THREADED_RTS)
IF_DEBUG(scheduler,sched_belch("doing GC"));
#endif
- GarbageCollect(GetRoots, force_major);
+ GarbageCollect(get_roots, force_major);
-#if defined(SMP)
+#if defined(THREADED_RTS)
// release our stash of capabilities.
for (i = 0; i < n_capabilities; i++) {
if (cap != &capabilities[i]) {
releaseCapability(&capabilities[i]);
}
}
- task->cap = cap;
+ if (cap) {
+ task->cap = cap;
+ } else {
+ task->cap = NULL;
+ }
#endif
#if defined(GRAN)
G_EVENTQ(0);
G_CURR_THREADQ(0));
#endif /* GRAN */
+
+ return cap;
}
/* ---------------------------------------------------------------------------
* ------------------------------------------------------------------------- */
StgBool
-isThreadBound(StgTSO* tso USED_WHEN_THREADED_RTS)
+isThreadBound(StgTSO* tso USED_IF_THREADS)
{
#if defined(THREADED_RTS)
return (tso->bound != NULL);
* Singleton fork(). Do not copy any running threads.
* ------------------------------------------------------------------------- */
-#if !defined(mingw32_HOST_OS) && !defined(SMP)
+#if !defined(mingw32_HOST_OS)
#define FORKPROCESS_PRIMOP_SUPPORTED
#endif
#ifdef FORKPROCESS_PRIMOP_SUPPORTED
static void
-deleteThreadImmediately(Capability *cap, StgTSO *tso);
+deleteThread_(Capability *cap, StgTSO *tso);
#endif
StgInt
forkProcess(HsStablePtr *entry
StgTSO* t,*next;
Capability *cap;
+#if defined(THREADED_RTS)
+ if (RtsFlags.ParFlags.nNodes > 1) {
+ errorBelch("forking not supported with +RTS -N<n> greater than 1");
+ stg_exit(EXIT_FAILURE);
+ }
+#endif
+
IF_DEBUG(scheduler,sched_belch("forking!"));
-    // ToDo: for SMP, we should probably acquire *all* the capabilities
+    // ToDo: for THREADED_RTS, we should probably acquire *all* the capabilities
} else { // child
- // delete all threads
- cap->run_queue_hd = END_TSO_QUEUE;
- cap->run_queue_tl = END_TSO_QUEUE;
-
+ // Now, all OS threads except the thread that forked are
+ // stopped. We need to stop all Haskell threads, including
+ // those involved in foreign calls. Also we need to delete
+ // all Tasks, because they correspond to OS threads that are
+ // now gone.
+
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
- next = t->link;
-
- // don't allow threads to catch the ThreadKilled exception
- deleteThreadImmediately(cap,t);
+ next = t->global_link;
+ // don't allow threads to catch the ThreadKilled
+ // exception, but we do want to raiseAsync() because these
+ // threads may be evaluating thunks that we need later.
+ deleteThread_(cap,t);
}
- // wipe the task list
+ // Empty the run queue. It seems tempting to let all the
+ // killed threads stay on the run queue as zombies to be
+ // cleaned up later, but some of them correspond to bound
+ // threads for which the corresponding Task does not exist.
+ cap->run_queue_hd = END_TSO_QUEUE;
+ cap->run_queue_tl = END_TSO_QUEUE;
+
+ // Any suspended C-calling Tasks are no more, their OS threads
+ // don't exist now:
+ cap->suspended_ccalling_tasks = NULL;
+
+ // Empty the all_threads list. Otherwise, the garbage
+ // collector may attempt to resurrect some of these threads.
+ all_threads = END_TSO_QUEUE;
+
+ // Wipe the task list, except the current Task.
ACQUIRE_LOCK(&sched_mutex);
for (task = all_tasks; task != NULL; task=task->all_link) {
- if (task != cap->running_task) discardTask(task);
+ if (task != cap->running_task) {
+ discardTask(task);
+ }
}
RELEASE_LOCK(&sched_mutex);
- cap->suspended_ccalling_tasks = NULL;
-
#if defined(THREADED_RTS)
- // wipe our spare workers list.
+    // Wipe our spare workers list; they no longer exist.  New
+    // workers will be created if necessary.
cap->spare_workers = NULL;
cap->returning_tasks_hd = NULL;
cap->returning_tasks_tl = NULL;
}
/* ---------------------------------------------------------------------------
- * Delete the threads on the run queue of the current capability.
+ * Delete all the threads in the system
* ------------------------------------------------------------------------- */
static void
-deleteRunQueue (Capability *cap)
+deleteAllThreads ( Capability *cap )
{
- StgTSO *t, *next;
- for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = next) {
- ASSERT(t->what_next != ThreadRelocated);
- next = t->link;
- deleteThread(cap, t);
- }
-}
+ StgTSO* t, *next;
+ IF_DEBUG(scheduler,sched_belch("deleting all threads"));
+ for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+ if (t->what_next == ThreadRelocated) {
+ next = t->link;
+ } else {
+ next = t->global_link;
+ deleteThread(cap,t);
+ }
+ }
-/* startThread and insertThread are now in GranSim.c -- HWL */
+ // The run queue now contains a bunch of ThreadKilled threads. We
+ // must not throw these away: the main thread(s) will be in there
+ // somewhere, and the main scheduler loop has to deal with it.
+ // Also, the run queue is the only thing keeping these threads from
+ // being GC'd, and we don't want the "main thread has been GC'd" panic.
+#if !defined(THREADED_RTS)
+ ASSERT(blocked_queue_hd == END_TSO_QUEUE);
+ ASSERT(sleeping_queue == END_TSO_QUEUE);
+#endif
+}
/* -----------------------------------------------------------------------------
Managing the suspended_ccalling_tasks list.
cap->in_haskell = rtsTrue;
errno = saved_errno;
+ /* We might have GC'd, mark the TSO dirty again */
+ dirtyTSO(tso);
+
+ IF_DEBUG(sanity, checkTSO(tso));
+
return &cap->r;
}
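dirtyTSO() is defined elsewhere in the RTS; given the TSO_DIRTY flag
that createThread sets below, its likely shape is the one-liner sketched
here (an assumption, not part of this patch):

    /* Record that the TSO has been mutated since the last GC, so the
     * collector rescans its stack.  TSO_DIRTY is the same flag given
     * to freshly created TSOs below. */
    STATIC_INLINE void
    dirtyTSO (StgTSO *tso)
    {
        tso->flags |= TSO_DIRTY;
    }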
tso->why_blocked = NotBlocked;
tso->blocked_exceptions = NULL;
+ tso->flags = TSO_DIRTY;
tso->saved_errno = 0;
tso->bound = NULL;
all_threads = END_TSO_QUEUE;
context_switch = 0;
- interrupted = 0;
+ sched_state = SCHED_RUNNING;
RtsFlags.ConcFlags.ctxtSwitchTicks =
RtsFlags.ConcFlags.ctxtSwitchTime / TICK_MILLISECS;
/* A capability holds the state a native thread needs in
* order to execute STG code. At least one capability is
- * floating around (only SMP builds have more than one).
+ * floating around (only THREADED_RTS builds have more than one).
*/
initCapabilities();
initTaskManager();
-#if defined(SMP) || defined(PARALLEL_HASKELL)
+#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
initSparkPools();
#endif
-#if defined(SMP)
+#if defined(THREADED_RTS)
/*
* Eagerly start one worker to run each Capability, except for
* Capability 0. The idea is that we're probably going to start a
void
exitScheduler( void )
{
- interrupted = rtsTrue;
- shutting_down_scheduler = rtsTrue;
+ Task *task = NULL;
+
+#if defined(THREADED_RTS)
+ ACQUIRE_LOCK(&sched_mutex);
+ task = newBoundTask();
+ RELEASE_LOCK(&sched_mutex);
+#endif
+
+ // If we haven't killed all the threads yet, do it now.
+ if (sched_state < SCHED_INTERRUPTED) {
+ sched_state = SCHED_INTERRUPTING;
+ scheduleDoGC(NULL,task,rtsFalse,GetRoots);
+ }
+ sched_state = SCHED_SHUTTING_DOWN;
#if defined(THREADED_RTS)
{
- Task *task;
nat i;
- ACQUIRE_LOCK(&sched_mutex);
- task = newBoundTask();
- RELEASE_LOCK(&sched_mutex);
-
for (i = 0; i < n_capabilities; i++) {
shutdownCapability(&capabilities[i], task);
}
for (task = cap->suspended_ccalling_tasks; task != NULL;
task=task->next) {
+ IF_DEBUG(scheduler,sched_belch("evac'ing suspended TSO %d", task->suspended_tso->id));
evac((StgClosure **)&task->suspended_tso);
}
}
#if !defined(THREADED_RTS)
- evac((StgClosure **)&blocked_queue_hd);
- evac((StgClosure **)&blocked_queue_tl);
- evac((StgClosure **)&sleeping_queue);
+ evac((StgClosure **)(void *)&blocked_queue_hd);
+ evac((StgClosure **)(void *)&blocked_queue_tl);
+ evac((StgClosure **)(void *)&sleeping_queue);
#endif
#endif
-  evac((StgClosure **)&blackhole_queue);
+  // The blackhole queue is deliberately no longer treated as a root:
+  // threads reachable only through it should look dead to the GC,
+  // which is how deadlock detection finds them (see the black-hole
+  // check before the heap census above).
+  // evac((StgClosure **)&blackhole_queue);
-#if defined(SMP) || defined(PARALLEL_HASKELL) || defined(GRAN)
+#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL) || defined(GRAN)
markSparkQueue(evac);
#endif
static void (*extra_roots)(evac_fn);
+static void
+performGC_(rtsBool force_major, void (*get_roots)(evac_fn))
+{
+ Task *task = myTask();
+
+ if (task == NULL) {
+ ACQUIRE_LOCK(&sched_mutex);
+ task = newBoundTask();
+ RELEASE_LOCK(&sched_mutex);
+ scheduleDoGC(NULL,task,force_major, get_roots);
+ boundTaskExiting(task);
+ } else {
+ scheduleDoGC(NULL,task,force_major, get_roots);
+ }
+}
+
void
performGC(void)
{
-#ifdef THREADED_RTS
- // ToDo: we have to grab all the capabilities here.
- errorBelch("performGC not supported in threaded RTS (yet)");
- stg_exit(EXIT_FAILURE);
-#endif
- /* Obligated to hold this lock upon entry */
- GarbageCollect(GetRoots,rtsFalse);
+ performGC_(rtsFalse, GetRoots);
}
void
performMajorGC(void)
{
-#ifdef THREADED_RTS
- errorBelch("performMayjorGC not supported in threaded RTS (yet)");
- stg_exit(EXIT_FAILURE);
-#endif
- GarbageCollect(GetRoots,rtsTrue);
+ performGC_(rtsTrue, GetRoots);
}
static void
void
performGCWithRoots(void (*get_roots)(evac_fn))
{
-#ifdef THREADED_RTS
- errorBelch("performGCWithRoots not supported in threaded RTS (yet)");
- stg_exit(EXIT_FAILURE);
-#endif
extra_roots = get_roots;
- GarbageCollect(AllRoots,rtsFalse);
+ performGC_(rtsFalse, AllRoots);
}
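performGCWithRoots() stashes the caller's root-marking function in
extra_roots and hands AllRoots to performGC_. AllRoots itself is outside
this hunk; presumably it just chains the two root sets, along these
lines (a sketch, not the patch's code):

    static void
    AllRoots (evac_fn evac)
    {
        GetRoots(evac);     // the scheduler's own roots
        extra_roots(evac);  // roots registered via performGCWithRoots()
    }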
/* -----------------------------------------------------------------------------
void
interruptStgRts(void)
{
- interrupted = 1;
+ sched_state = SCHED_INTERRUPTING;
context_switch = 1;
#if defined(THREADED_RTS)
prodAllCapabilities();
tso->why_blocked = NotBlocked;
tso->block_info.closure = NULL;
appendToRunQueue(cap,tso);
+
+ // We might have just migrated this TSO to our Capability:
+ if (tso->bound) {
+ tso->bound->cap = cap;
+ }
}
#endif
* CATCH_FRAME on the stack. In either case, we strip the entire
* stack and replace the thread with a zombie.
*
- * ToDo: in SMP mode, this function is only safe if either (a) we hold
- * all the Capabilities (eg. in GC), or (b) we own the Capability that
- * the TSO is currently blocked on or on the run queue of.
+ * ToDo: in THREADED_RTS mode, this function is only safe if either
+ * (a) we hold all the Capabilities (eg. in GC, or if there is only
+ * one Capability), or (b) we own the Capability that the TSO is
+ * currently blocked on or on the run queue of.
*
* -------------------------------------------------------------------------- */
// Remove it from any blocking queues
unblockThread(cap,tso);
+ // mark it dirty; we're about to change its stack.
+ dirtyTSO(tso);
+
sp = tso->sp;
// The stack freezing code assumes there's a closure pointer on
// we've got an exception to raise, so let's pass it to the
// handler in this frame.
//
- raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+MIN_UPD_SIZE);
+ raise = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
TICK_ALLOC_SE_THK(1,0);
SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
raise->payload[0] = exception;
#ifdef FORKPROCESS_PRIMOP_SUPPORTED
static void
-deleteThreadImmediately(Capability *cap, StgTSO *tso)
+deleteThread_(Capability *cap, StgTSO *tso)
{ // for forkProcess only:
- // delete thread without giving it a chance to catch the KillThread exception
+ // like deleteThread(), but we delete threads in foreign calls, too.
- if (tso->what_next == ThreadComplete || tso->what_next == ThreadKilled) {
- return;
- }
-
- if (tso->why_blocked != BlockedOnCCall &&
- tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
- unblockThread(cap,tso);
- }
-
- tso->what_next = ThreadKilled;
+ if (tso->why_blocked == BlockedOnCCall ||
+ tso->why_blocked == BlockedOnCCall_NoUnblockExc) {
+ unblockOne(cap,tso);
+ tso->what_next = ThreadKilled;
+ } else {
+ deleteThread(cap,tso);
+ }
}
#endif
    // thunks which are currently under evaluation.
//
- //
+ // OLD COMMENT (we don't have MIN_UPD_SIZE now):
// LDV profiling: stg_raise_info has THUNK as its closure
// type. Since a THUNK takes at least MIN_UPD_SIZE words in its
    // payload, MIN_UPD_SIZE is more appropriate than 1.  It seems that
// Only create raise_closure if we need to.
if (raise_closure == NULL) {
raise_closure =
- (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+MIN_UPD_SIZE);
+ (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
SET_HDR(raise_closure, &stg_raise_info, CCCS);
raise_closure->payload[0] = exception;
}
va_list ap;
va_start(ap,s);
#ifdef THREADED_RTS
- debugBelch("sched (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId());
+ debugBelch("sched (task %p, pid %d): ", (void *)(unsigned long)(unsigned int)osThreadId(), getpid());
#elif defined(PARALLEL_HASKELL)
debugBelch("== ");
#else