* --------------------------------------------------------------------------*/
#include "PosixSource.h"
+#define KEEP_LOCKCLOSURE
#include "Rts.h"
#include "SchedAPI.h"
#include "RtsUtils.h"
#include "ThreadLabels.h"
#include "LdvProfile.h"
#include "Updates.h"
-#ifdef PROFILING
#include "Proftimer.h"
#include "ProfHeap.h"
-#endif
#if defined(GRAN) || defined(PARALLEL_HASKELL)
# include "GranSimRts.h"
# include "GranSim.h"
static void scheduleHandleThreadBlocked( StgTSO *t );
static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
StgTSO *t );
-static rtsBool scheduleDoHeapProfile(rtsBool ready_to_gc);
+static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc);
static Capability *scheduleDoGC(Capability *cap, Task *task,
rtsBool force_major);
debugTrace(DEBUG_sched, "-->> running thread %ld %s ...",
(long)t->id, whatNext_strs[t->what_next]);
-#if defined(PROFILING)
startHeapProfTimer();
-#endif
// Check for exceptions blocked on this thread
maybePerformBlockedException (cap, t);
dirtyTSO(t);
- recent_activity = ACTIVITY_YES;
+#if defined(THREADED_RTS)
+ if (recent_activity == ACTIVITY_DONE_GC) {
+ // ACTIVITY_DONE_GC means we turned off the timer signal to
+ // conserve power (see #1623). Re-enable it here.
+ nat prev;
+ prev = xchg((P_)&recent_activity, ACTIVITY_YES);
+ if (prev == ACTIVITY_DONE_GC) {
+ startTimer();
+ }
+ } else {
+ recent_activity = ACTIVITY_YES;
+ }
+#endif
switch (prev_what_next) {
// ----------------------------------------------------------------------
// Costs for the scheduler are assigned to CCS_SYSTEM
-#if defined(PROFILING)
stopHeapProfTimer();
+#if defined(PROFILING)
CCCS = CCS_SYSTEM;
#endif
barf("schedule: invalid thread return code %d", (int)ret);
}
- if (scheduleDoHeapProfile(ready_to_gc)) { ready_to_gc = rtsFalse; }
- if (ready_to_gc) {
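+ // The census itself is now taken inside scheduleDoGC(), so here
+ // we only ask whether one is due.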
+ if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
cap = scheduleDoGC(cap,task,rtsFalse);
}
} /* end of while() */
cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/);
recent_activity = ACTIVITY_DONE_GC;
+ // disable timer signals (see #1623)
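+ // (they are switched back on in schedule(), just before we next
+ // run a thread)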
+ stopTimer();
if ( !emptyRunQueue(cap) ) return;
debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished",
(unsigned long)t->id, whatNext_strs[t->what_next]);
- /* Inform the Hpc that a thread has finished */
- hs_hpc_thread_finished_event(t);
-
#if defined(GRAN)
endThread(t, CurrentProc); // clean-up the thread
#elif defined(PARALLEL_HASKELL)
}
/* -----------------------------------------------------------------------------
- * Perform a heap census, if PROFILING
+ * Decide whether a heap census is needed
* -------------------------------------------------------------------------- */
static rtsBool
-scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED )
+scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
{
-#if defined(PROFILING)
// When we have +RTS -i0 and we're heap profiling, do a census at
// every GC. This lets us get repeatable runs for debugging.
if (performHeapProfile ||
(RtsFlags.ProfFlags.profileInterval==0 &&
RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
-
- // checking black holes is necessary before GC, otherwise
- // there may be threads that are unreachable except by the
- // blackhole queue, which the GC will consider to be
- // deadlocked.
- scheduleCheckBlackHoles(&MainCapability);
-
- debugTrace(DEBUG_sched, "garbage collecting before heap census");
- GarbageCollect(rtsTrue);
-
- debugTrace(DEBUG_sched, "performing heap census");
- heapCensus();
-
- performHeapProfile = rtsFalse;
- return rtsTrue; // true <=> we already GC'd
+ return rtsTrue;
+ } else {
+ return rtsFalse;
}
-#endif
- return rtsFalse;
}
/* -----------------------------------------------------------------------------
scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
{
StgTSO *t;
+ rtsBool heap_census;
#ifdef THREADED_RTS
static volatile StgWord waiting_for_gc;
rtsBool was_waiting;
deleteAllThreads(&capabilities[0]);
sched_state = SCHED_SHUTTING_DOWN;
}
+
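+ // Decide up front whether a heap census is due, so that we can
+ // take it immediately after the collection below.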
+ heap_census = scheduleNeedHeapProfile(rtsTrue);
/* everybody back, start the GC.
* Could do it in this thread, or signal a condition var
#if defined(THREADED_RTS)
debugTrace(DEBUG_sched, "doing GC");
#endif
- GarbageCollect(force_major);
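+ // A pending census forces a major collection, matching the old
+ // behaviour of calling GarbageCollect(rtsTrue) before heapCensus().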
+ GarbageCollect(force_major || heap_census);
+ if (heap_census) {
+ debugTrace(DEBUG_sched, "performing heap census");
+ heapCensus();
+ performHeapProfile = rtsFalse;
+ }
+
#if defined(THREADED_RTS)
// release our stash of capabilities.
for (i = 0; i < n_capabilities; i++) {
// ToDo: for SMP, we should probably acquire *all* the capabilities
cap = rts_lock();
+ // no funny business: hold locks while we fork, otherwise if some
+ // other thread is holding a lock when the fork happens, the data
+ // structure protected by the lock will forever be in an
+ // inconsistent state in the child. See also #1391.
+ ACQUIRE_LOCK(&sched_mutex);
+ ACQUIRE_LOCK(&cap->lock);
+ ACQUIRE_LOCK(&cap->running_task->lock);
+
pid = fork();
if (pid) { // parent
+ RELEASE_LOCK(&sched_mutex);
+ RELEASE_LOCK(&cap->lock);
+ RELEASE_LOCK(&cap->running_task->lock);
+
// just return the pid
rts_unlock(cap);
return pid;
} else { // child
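+ // Re-initialise the locks we took before fork(): in the child
+ // they would otherwise remain in whatever state fork() left them
+ // (see #1391).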
+#if defined(THREADED_RTS)
+ initMutex(&sched_mutex);
+ initMutex(&cap->lock);
+ initMutex(&cap->running_task->lock);
+#endif
+
// Now, all OS threads except the thread that forked are
// stopped. We need to stop all Haskell threads, including
// those involved in foreign calls. Also we need to delete
ACQUIRE_LOCK(&sched_mutex);
for (task = all_tasks; task != NULL; task=task->all_link) {
if (task != cap->running_task) {
+#if defined(THREADED_RTS)
+ initMutex(&task->lock); // see #1391
+#endif
discardTask(task);
}
}
// On Unix, all timers are reset in the child, so we need to start
// the timer again.
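+ // initTimer() recreates the ticker's internal state, since
+ // fork() does not preserve interval timers in the child.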
+ initTimer();
startTimer();
cap = rts_evalStableIO(cap, entry, NULL); // run the action
context_switch = 0;
sched_state = SCHED_RUNNING;
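+ // the RTS starts off in the active state (see #1623)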
+ recent_activity = ACTIVITY_YES;
#if defined(THREADED_RTS)
/* Initialise the mutex and condition variables used by
}
void
-exitScheduler( void )
+exitScheduler(
+ rtsBool wait_foreign
+#if !defined(THREADED_RTS)
+ __attribute__((unused))
+#endif
+)
+ /* wait_foreign: whether to wait for outstanding foreign calls to
+    return before shutting down; see Capability.c, shutdownCapability() */
{
Task *task = NULL;
nat i;
for (i = 0; i < n_capabilities; i++) {
- shutdownCapability(&capabilities[i], task);
+ shutdownCapability(&capabilities[i], task, wait_foreign);
}
boundTaskExiting(task);
stopTaskManager();
// while we are moving the TSO:
lockClosure((StgClosure *)tso);
- if (tso->stack_size >= tso->max_stack_size) {
+ if (tso->stack_size >= tso->max_stack_size && !(tso->flags & TSO_BLOCKEX)) {
+ // NB. never raise a StackOverflow exception if the thread is
+ // inside Control.Exceptino.block. It is impractical to protect
+ // against stack overflow exceptions, since virtually anything
+ // can raise one (even 'catch'), so this is the only sensible
+ // thing to do here. See bug #767.
debugTrace(DEBUG_gc,
"threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
"increasing stack size from %ld words to %d.",
(long)tso->stack_size, new_stack_size);
- dest = (StgTSO *)allocate(new_tso_size);
+ dest = (StgTSO *)allocateLocal(cap,new_tso_size);
TICK_ALLOC_TSO(new_stack_size,0);
/* copy the TSO block and the old stack into the new area */
t = blackhole_queue;
while (t != END_TSO_QUEUE) {
ASSERT(t->why_blocked == BlockedOnBlackHole);
- type = get_itbl(t->block_info.closure)->type;
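+ // block_info.closure may be a tagged pointer, so remove the tag
+ // before dereferencing the info table.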
+ type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
IF_DEBUG(sanity,checkTSO(t));
t = unblockOne(cap, t);
return CATCH_RETRY_FRAME;
case CATCH_STM_FRAME: {
- debugTrace(DEBUG_stm,
- "found CATCH_STM_FRAME at %p during retry", p);
StgTRecHeader *trec = tso -> trec;
StgTRecHeader *outer = stmGetEnclosingTRec(trec);
+ debugTrace(DEBUG_stm,
+ "found CATCH_STM_FRAME at %p during retry", p);
debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer);
stmAbortTransaction(tso -> cap, trec);
stmFreeAbortedTRec(tso -> cap, trec);