#include "Interpreter.h"
#include "Printer.h"
#include "RtsSignals.h"
-#include "Sanity.h"
+#include "sm/Sanity.h"
#include "Stats.h"
#include "STM.h"
#include "Prelude.h"
static rtsBool checkBlackHoles(Capability *cap);
static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
-static StgTSO *threadStackUnderflow(Task *task, StgTSO *tso);
+static StgTSO *threadStackUnderflow(Capability *cap, Task *task, StgTSO *tso);
static void deleteThread (Capability *cap, StgTSO *tso);
static void deleteAllThreads (Capability *cap);
schedulePostRunThread(cap,t);
if (ret != StackOverflow) {
- t = threadStackUnderflow(task,t);
+ t = threadStackUnderflow(cap,task,t);
}
ready_to_gc = rtsFalse;
{
bdescr *x;
for (x = bd; x < bd + blocks; x++) {
- x->step = cap->r.rNursery;
- x->gen_no = 0;
+ initBdescr(x,cap->r.rNursery);
+ x->free = x->start;
x->flags = 0;
}
}
}
#endif
- IF_DEBUG(sanity,
- //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
- checkTSO(t));
ASSERT(t->_link == END_TSO_QUEUE);
// Shortcut if we're just switching evaluators: don't bother
return rtsTrue;
}
+ IF_DEBUG(sanity,
+ //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
+ checkTSO(t));
+
addToRunQueue(cap,t);
return rtsFalse;
// GC thread each.
waitForGcThreads(cap);
}
+
+#else /* !THREADED_RTS */
+
+ // do this while the other Capabilities stop:
+ if (cap) scheduleCheckBlackHoles(cap);
+
#endif
IF_DEBUG(scheduler, printAllThreads());
initTimer();
startTimer();
+#if defined(THREADED_RTS)
+ cap = ioManagerStartCap(cap);
+#endif
+
cap = rts_evalStableIO(cap, entry, NULL); // run the action
rts_checkSchedStatus("forkProcess",cap);
for (i = 0; i < n_capabilities; i++) {
shutdownCapability(&capabilities[i], task, wait_foreign);
}
- boundTaskExiting(task);
}
#endif
+
+ boundTaskExiting(task);
}
void
// while we are moving the TSO:
lockClosure((StgClosure *)tso);
- if (tso->stack_size >= tso->max_stack_size && !(tso->flags & TSO_BLOCKEX)) {
+ if (tso->stack_size >= tso->max_stack_size
+ && !(tso->flags & TSO_BLOCKEX)) {
// NB. never raise a StackOverflow exception if the thread is
        // inside Control.Exception.block. It is impractical to protect
// against stack overflow exceptions, since virtually anything
// can raise one (even 'catch'), so this is the only sensible
// thing to do here. See bug #767.
+ //
+
+ if (tso->flags & TSO_SQUEEZED) {
+ return tso;
+ }
+ // #3677: In a stack overflow situation, stack squeezing may
+ // reduce the stack size, but we don't know whether it has been
+ // reduced enough for the stack check to succeed if we try
+ // again. Fortunately stack squeezing is idempotent, so all we
+ // need to do is record whether *any* squeezing happened. If we
+ // are at the stack's absolute -K limit, and stack squeezing
+ // happened, then we try running the thread again. The
+ // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
+ // squeezing happened or not.
debugTrace(DEBUG_gc,
"threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
return tso;
}
+
+ // We also want to avoid enlarging the stack if squeezing has
+ // already released some of it. However, we don't want to get into
+    // a pathological situation where a thread has a nearly full stack
+ // (near its current limit, but not near the absolute -K limit),
+ // keeps allocating a little bit, squeezing removes a little bit,
+ // and then it runs again. So to avoid this, if we squeezed *and*
+ // there is still less than BLOCK_SIZE_W words free, then we enlarge
+ // the stack anyway.
+ if ((tso->flags & TSO_SQUEEZED) &&
+ ((W_)(tso->sp - tso->stack) >= BLOCK_SIZE_W)) {
+ unlockTSO(tso);
+ return tso;
+ }
+
/* Try to double the current stack size. If that takes us over the
* maximum stack size for this thread, then use the maximum instead
* (that is, unless we're already at or over the max size and we
"increasing stack size from %ld words to %d.",
(long)tso->stack_size, new_stack_size);
- dest = (StgTSO *)allocateLocal(cap,new_tso_size);
+ dest = (StgTSO *)allocate(cap,new_tso_size);
TICK_ALLOC_TSO(new_stack_size,0);
/* copy the TSO block and the old stack into the new area */
}
static StgTSO *
-threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
+threadStackUnderflow (Capability *cap, Task *task, StgTSO *tso)
{
bdescr *bd, *new_bd;
lnat free_w, tso_size_w;
memcpy(new_tso,tso,TSO_STRUCT_SIZE);
new_tso->stack_size = new_bd->free - new_tso->stack;
+ // The original TSO was dirty and probably on the mutable
+ // list. The new TSO is not yet on the mutable list, so we better
+ // put it there.
+ new_tso->dirty = 0;
+    new_tso->flags &= ~TSO_LINK_DIRTY;
+ dirty_TSO(cap, new_tso);
+
debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
(long)tso->id, tso_size_w, tso_sizeW(new_tso));
// Only create raise_closure if we need to.
if (raise_closure == NULL) {
raise_closure =
- (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
+ (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
SET_HDR(raise_closure, &stg_raise_info, CCCS);
raise_closure->payload[0] = exception;
}