#if defined(mingw32_HOST_OS)
#include "win32/IOManager.h"
#endif
+#include "Trace.h"
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
// The sched_mutex is *NOT* held
// NB. on return, we still hold a capability.
- IF_DEBUG(scheduler,
- sched_belch("### NEW SCHEDULER LOOP (task: %p, cap: %p)",
- task, initialCapability);
- );
+ debugTrace (DEBUG_sched,
+ "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
+ task, initialCapability);
schedulePreLoop();
// needs to acquire all the capabilities). We can't kill
// threads involved in foreign calls.
//
- // * sched_state := SCHED_INTERRUPTED
- //
// * somebody calls shutdownHaskell(), which calls exitScheduler()
//
// * sched_state := SCHED_SHUTTING_DOWN
case SCHED_RUNNING:
break;
case SCHED_INTERRUPTING:
- IF_DEBUG(scheduler, sched_belch("SCHED_INTERRUPTING"));
+ debugTrace(DEBUG_sched, "SCHED_INTERRUPTING");
#if defined(THREADED_RTS)
discardSparksCap(cap);
#endif
/* scheduleDoGC() deletes all the threads */
cap = scheduleDoGC(cap,task,rtsFalse,GetRoots);
break;
- case SCHED_INTERRUPTED:
- IF_DEBUG(scheduler, sched_belch("SCHED_INTERRUPTED"));
- break;
case SCHED_SHUTTING_DOWN:
- IF_DEBUG(scheduler, sched_belch("SCHED_SHUTTING_DOWN"));
+ debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN");
// If we are a worker, just exit. If we're a bound thread
// then we will exit below when we've removed our TSO from
// the run queue.
StgClosure *spark;
spark = findSpark(cap);
if (spark != NULL) {
- IF_DEBUG(scheduler,
- sched_belch("turning spark of closure %p into a thread",
- (StgClosure *)spark));
+ debugTrace(DEBUG_sched,
+ "turning spark of closure %p into a thread",
+ (StgClosure *)spark);
createSparkThread(cap,spark);
}
}
if (bound) {
if (bound == task) {
- IF_DEBUG(scheduler,
- sched_belch("### Running thread %d in bound thread",
- t->id));
+ debugTrace(DEBUG_sched,
+ "### Running thread %d in bound thread", t->id);
// yes, the Haskell thread is bound to the current native thread
} else {
- IF_DEBUG(scheduler,
- sched_belch("### thread %d bound to another OS thread",
- t->id));
+ debugTrace(DEBUG_sched,
+ "### thread %d bound to another OS thread", t->id);
// no, bound to a different Haskell thread: pass to that thread
pushOnRunQueue(cap,t);
continue;
} else {
// The thread we want to run is unbound.
if (task->tso) {
- IF_DEBUG(scheduler,
- sched_belch("### this OS thread cannot run thread %d", t->id));
+ debugTrace(DEBUG_sched,
+ "### this OS thread cannot run thread %d", t->id);
// no, the current native thread is bound to a different
// Haskell thread, so pass it to any worker thread
pushOnRunQueue(cap,t);
run_thread:
- IF_DEBUG(scheduler, sched_belch("-->> running thread %ld %s ...",
- (long)t->id, whatNext_strs[t->what_next]));
+ debugTrace(DEBUG_sched, "-->> running thread %ld %s ...",
+ (long)t->id, whatNext_strs[t->what_next]);
#if defined(PROFILING)
startHeapProfTimer();
// that task->cap != cap. We better yield this Capability
// immediately and return to normaility.
if (ret == ThreadBlocked) {
- IF_DEBUG(scheduler,
- sched_belch("--<< thread %d (%s) stopped: blocked\n",
- t->id, whatNext_strs[t->what_next]));
+ debugTrace(DEBUG_sched,
+ "--<< thread %d (%s) stopped: blocked",
+ t->id, whatNext_strs[t->what_next]);
continue;
}
#endif
CCCS = CCS_SYSTEM;
#endif
-#if defined(THREADED_RTS)
- IF_DEBUG(scheduler,debugBelch("sched (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId()););
-#elif !defined(GRAN) && !defined(PARALLEL_HASKELL)
- IF_DEBUG(scheduler,debugBelch("sched: "););
-#endif
-
schedulePostRunThread();
ready_to_gc = rtsFalse;
}
} /* end of while() */
- IF_PAR_DEBUG(verbose,
- debugBelch("== Leaving schedule() after having received Finish\n"));
+ debugTrace(PAR_DEBUG_verbose,
+ "== Leaving schedule() after having received Finish");
}
/* ----------------------------------------------------------------------------
ContinueThread,
CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
- IF_DEBUG(gran,
- debugBelch("GRAN: Init CurrentTSO (in schedule) = %p\n",
- CurrentTSO);
- G_TSO(CurrentTSO, 5));
+ debugTrace (DEBUG_gran,
+ "GRAN: Init CurrentTSO (in schedule) = %p",
+ CurrentTSO);
+ IF_DEBUG(gran, G_TSO(CurrentTSO, 5));
if (RtsFlags.GranFlags.Light) {
/* Save current time; GranSim Light only */
StgTSO *prev, *t, *next;
rtsBool pushed_to_all;
- IF_DEBUG(scheduler, sched_belch("excess threads on run queue and %d free capabilities, sharing...", n_free_caps));
+ debugTrace(DEBUG_sched, "excess threads on run queue and %d free capabilities, sharing...", n_free_caps);
i = 0;
pushed_to_all = rtsFalse;
prev->link = t;
prev = t;
} else {
- IF_DEBUG(scheduler, sched_belch("pushing thread %d to capability %d", t->id, free_caps[i]->no));
+ debugTrace(DEBUG_sched, "pushing thread %d to capability %d", t->id, free_caps[i]->no);
appendToRunQueue(free_caps[i],t);
if (t->bound) { t->bound->cap = free_caps[i]; }
t->cap = free_caps[i];
if (emptySparkPoolCap(free_caps[i])) {
spark = findSpark(cap);
if (spark != NULL) {
- IF_DEBUG(scheduler, sched_belch("pushing spark %p to capability %d", spark, free_caps[i]->no));
+ debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
newSpark(&(free_caps[i]->r), spark);
}
}
if (recent_activity != ACTIVITY_INACTIVE) return;
#endif
- IF_DEBUG(scheduler, sched_belch("deadlocked, forcing major GC..."));
+ debugTrace(DEBUG_sched, "deadlocked, forcing major GC...");
// Garbage collection can release some new threads due to
// either (a) finalizers or (b) threads resurrected because
* deadlock.
*/
if ( anyUserHandlers() ) {
- IF_DEBUG(scheduler,
- sched_belch("still deadlocked, waiting for signals..."));
+ debugTrace(DEBUG_sched,
+ "still deadlocked, waiting for signals...");
awaitUserSignals();
case ThreadBlocked:
# if defined(GRAN)
- IF_DEBUG(scheduler,
- debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
- t->id, t, whatNext_strs[t->what_next], t->block_info.closure,
- (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
+	      debugTrace(DEBUG_sched,
+			 "--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
+			 t->id, t, whatNext_strs[t->what_next], t->block_info.closure,
+			 (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
+	      IF_DEBUG(scheduler,
if (t->block_info.closure!=(StgClosure*)NULL)
print_bq(t->block_info.closure);
debugBelch("\n"));
blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
- IF_DEBUG(scheduler,
- debugBelch("--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
- (long)t->id, whatNext_strs[t->what_next], blocks));
-
+ debugTrace(DEBUG_sched,
+ "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
+ (long)t->id, whatNext_strs[t->what_next], blocks);
+
// don't do this if the nursery is (nearly) full, we'll GC first.
if (cap->r.rCurrentNursery->link != NULL ||
cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop
}
}
- IF_DEBUG(scheduler,
- debugBelch("--<< thread %ld (%s) stopped: HeapOverflow\n",
- (long)t->id, whatNext_strs[t->what_next]));
+ debugTrace(DEBUG_sched,
+ "--<< thread %ld (%s) stopped: HeapOverflow\n",
+ (long)t->id, whatNext_strs[t->what_next]);
+
#if defined(GRAN)
ASSERT(!is_on_queue(t,CurrentProc));
#elif defined(PARALLEL_HASKELL)
static void
scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
{
- IF_DEBUG(scheduler,debugBelch("--<< thread %ld (%s) stopped, StackOverflow\n",
- (long)t->id, whatNext_strs[t->what_next]));
+ debugTrace (DEBUG_sched,
+ "--<< thread %ld (%s) stopped, StackOverflow\n",
+ (long)t->id, whatNext_strs[t->what_next]);
+
/* just adjust the stack for this thread, then pop it back
* on the run queue.
*/
* up the GC thread. getThread will block during a GC until the
* GC is finished.
*/
- IF_DEBUG(scheduler,
- if (t->what_next != prev_what_next) {
- debugBelch("--<< thread %ld (%s) stopped to switch evaluators\n",
- (long)t->id, whatNext_strs[t->what_next]);
- } else {
- debugBelch("--<< thread %ld (%s) stopped, yielding\n",
- (long)t->id, whatNext_strs[t->what_next]);
- }
- );
+#ifdef DEBUG
+ if (t->what_next != prev_what_next) {
+ debugTrace(DEBUG_sched,
+ "--<< thread %ld (%s) stopped to switch evaluators\n",
+ (long)t->id, whatNext_strs[t->what_next]);
+ } else {
+ debugTrace(DEBUG_sched,
+ "--<< thread %ld (%s) stopped, yielding\n",
+ (long)t->id, whatNext_strs[t->what_next]);
+ }
+#endif
IF_DEBUG(sanity,
//debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
// conc023 +RTS -N2.
#endif
- IF_DEBUG(scheduler,
- debugBelch("--<< thread %d (%s) stopped: ",
- t->id, whatNext_strs[t->what_next]);
- printThreadBlockage(t);
- debugBelch("\n"));
+#ifdef DEBUG
+ if (traceClass(DEBUG_sched)) {
+ debugTraceBegin("--<< thread %d (%s) stopped: ",
+ t->id, whatNext_strs[t->what_next]);
+ printThreadBlockage(t);
+ debugTraceEnd();
+ }
+#endif
/* Only for dumping event to log file
ToDo: do I need this in GranSim, too?
* We also end up here if the thread kills itself with an
* uncaught exception, see Exception.cmm.
*/
- IF_DEBUG(scheduler,debugBelch("--++ thread %d (%s) finished\n",
- t->id, whatNext_strs[t->what_next]));
+ debugTrace(DEBUG_sched, "--++ thread %d (%s) finished",
+ t->id, whatNext_strs[t->what_next]);
#if defined(GRAN)
endThread(t, CurrentProc); // clean-up the thread
// deadlocked.
scheduleCheckBlackHoles(&MainCapability);
- IF_DEBUG(scheduler, sched_belch("garbage collecting before heap census"));
+ debugTrace(DEBUG_sched, "garbage collecting before heap census");
GarbageCollect(GetRoots, rtsTrue);
- IF_DEBUG(scheduler, sched_belch("performing heap census"));
+ debugTrace(DEBUG_sched, "performing heap census");
heapCensus();
performHeapProfile = rtsFalse;
was_waiting = cas(&waiting_for_gc, 0, 1);
if (was_waiting) {
do {
- IF_DEBUG(scheduler, sched_belch("someone else is trying to GC..."));
+ debugTrace(DEBUG_sched, "someone else is trying to GC...");
if (cap) yieldCapability(&cap,task);
} while (waiting_for_gc);
return cap; // NOTE: task->cap might have changed here
}
for (i=0; i < n_capabilities; i++) {
- IF_DEBUG(scheduler, sched_belch("ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities));
+	debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilities (%d/%d)", i, n_capabilities);
if (cap != &capabilities[i]) {
Capability *pcap = &capabilities[i];
// we better hope this task doesn't get migrated to
next = t->global_link;
if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
if (!stmValidateNestOfTransactions (t -> trec)) {
- IF_DEBUG(stm, sched_belch("trec %p found wasting its time", t));
+ debugTrace(DEBUG_sched | DEBUG_stm,
+ "trec %p found wasting its time", t);
// strip the stack back to the
// ATOMICALLY_FRAME, aborting the (nested)
*/
if (sched_state >= SCHED_INTERRUPTING) {
deleteAllThreads(&capabilities[0]);
- sched_state = SCHED_INTERRUPTED;
+ sched_state = SCHED_SHUTTING_DOWN;
}
/* everybody back, start the GC.
* broadcast on gc_pending_cond afterward.
*/
#if defined(THREADED_RTS)
- IF_DEBUG(scheduler,sched_belch("doing GC"));
+ debugTrace(DEBUG_sched, "doing GC");
#endif
GarbageCollect(get_roots, force_major);
}
#endif
- IF_DEBUG(scheduler,sched_belch("forking!"));
+ debugTrace(DEBUG_sched, "forking!");
// ToDo: for SMP, we should probably acquire *all* the capabilities
cap = rts_lock();
deleteAllThreads ( Capability *cap )
{
StgTSO* t, *next;
- IF_DEBUG(scheduler,sched_belch("deleting all threads"));
+ debugTrace(DEBUG_sched,"deleting all threads");
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
next = t->link;
task = cap->running_task;
tso = cap->r.rCurrentTSO;
- IF_DEBUG(scheduler,
- sched_belch("thread %d did a safe foreign call", cap->r.rCurrentTSO->id));
+ debugTrace(DEBUG_sched,
+ "thread %d did a safe foreign call",
+ cap->r.rCurrentTSO->id);
// XXX this might not be necessary --SDM
tso->what_next = ThreadRunGHC;
/* Preparing to leave the RTS, so ensure there's a native thread/task
waiting to take over.
*/
- IF_DEBUG(scheduler, sched_belch("thread %d: leaving RTS", tso->id));
+ debugTrace(DEBUG_sched, "thread %d: leaving RTS", tso->id);
#endif
errno = saved_errno;
tso = task->suspended_tso;
task->suspended_tso = NULL;
tso->link = END_TSO_QUEUE;
- IF_DEBUG(scheduler, sched_belch("thread %d: re-entering RTS", tso->id));
+ debugTrace(DEBUG_sched, "thread %d: re-entering RTS", tso->id);
if (tso->why_blocked == BlockedOnCCall) {
awakenBlockedQueue(cap,tso->blocked_exceptions);
#endif
#if defined(GRAN)
- IF_GRAN_DEBUG(pri,
- sched_belch("==__ schedule: Created TSO %d (%p);",
- CurrentProc, tso, tso->id));
+ debugTrace(GRAN_DEBUG_pri,
+ "==__ schedule: Created TSO %d (%p);",
+ CurrentProc, tso, tso->id);
#elif defined(PARALLEL_HASKELL)
- IF_PAR_DEBUG(verbose,
- sched_belch("==__ schedule: Created TSO %d (%p); %d threads active",
- (long)tso->id, tso, advisory_thread_count));
+ debugTrace(PAR_DEBUG_verbose,
+ "==__ schedule: Created TSO %d (%p); %d threads active",
+ (long)tso->id, tso, advisory_thread_count);
#else
- IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words",
- (long)tso->id, (long)tso->stack_size));
+ debugTrace(DEBUG_sched,
+ "created thread %ld, stack size = %lx words",
+ (long)tso->id, (long)tso->stack_size);
#endif
return tso;
}
appendToRunQueue(cap,tso);
- IF_DEBUG(scheduler, sched_belch("new bound thread (%d)", tso->id));
+ debugTrace(DEBUG_sched, "new bound thread (%d)", tso->id);
#if defined(GRAN)
/* GranSim specific init */
ASSERT(task->stat != NoStatus);
ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
- IF_DEBUG(scheduler, sched_belch("bound thread (%d) finished", task->tso->id));
+ debugTrace(DEBUG_sched, "bound thread (%d) finished", task->tso->id);
return cap;
}
// On exit from schedule(), we have a Capability.
releaseCapability(cap);
- taskStop(task);
+ workerTaskStop(task);
}
#endif
}
#endif
+ trace(TRACE_sched, "start: %d capabilities", n_capabilities);
+
RELEASE_LOCK(&sched_mutex);
}
#endif
// If we haven't killed all the threads yet, do it now.
- if (sched_state < SCHED_INTERRUPTED) {
+ if (sched_state < SCHED_SHUTTING_DOWN) {
sched_state = SCHED_INTERRUPTING;
scheduleDoGC(NULL,task,rtsFalse,GetRoots);
}
#endif
for (task = cap->suspended_ccalling_tasks; task != NULL;
task=task->next) {
- IF_DEBUG(scheduler,sched_belch("evac'ing suspended TSO %d", task->suspended_tso->id));
+ debugTrace(DEBUG_sched,
+ "evac'ing suspended TSO %d", task->suspended_tso->id);
evac((StgClosure **)(void *)&task->suspended_tso);
}
static void
performGC_(rtsBool force_major, void (*get_roots)(evac_fn))
{
- Task *task = myTask();
-
- if (task == NULL) {
- ACQUIRE_LOCK(&sched_mutex);
- task = newBoundTask();
- RELEASE_LOCK(&sched_mutex);
- scheduleDoGC(NULL,task,force_major, get_roots);
- boundTaskExiting(task);
- } else {
- scheduleDoGC(NULL,task,force_major, get_roots);
- }
+ Task *task;
+ // We must grab a new Task here, because the existing Task may be
+ // associated with a particular Capability, and chained onto the
+ // suspended_ccalling_tasks queue.
+ ACQUIRE_LOCK(&sched_mutex);
+ task = newBoundTask();
+ RELEASE_LOCK(&sched_mutex);
+ scheduleDoGC(NULL,task,force_major, get_roots);
+ boundTaskExiting(task);
}
void
IF_DEBUG(sanity,checkTSO(tso));
if (tso->stack_size >= tso->max_stack_size) {
- IF_DEBUG(gc,
- debugBelch("@@ threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)\n",
- (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
- /* If we're debugging, just print out the top of the stack */
- printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
- tso->sp+64)));
+ debugTrace(DEBUG_gc,
+ "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)\n",
+ (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
+ IF_DEBUG(gc,
+ /* If we're debugging, just print out the top of the stack */
+ printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
+ tso->sp+64)));
/* Send this thread the StackOverflow exception */
raiseAsync(cap, tso, (StgClosure *)stackOverflow_closure);
new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
- IF_DEBUG(scheduler, sched_belch("increasing stack size from %ld words to %d.\n", (long)tso->stack_size, new_stack_size));
+ debugTrace(DEBUG_sched,
+ "increasing stack size from %ld words to %d.\n",
+ (long)tso->stack_size, new_stack_size);
dest = (StgTSO *)allocate(new_tso_size);
TICK_ALLOC_TSO(new_stack_size,0);
(node_loc==tso_loc ? "Local" : "Global"),
tso->id, tso, CurrentProc, tso->block_info.closure, tso->link));
tso->block_info.closure = NULL;
- IF_DEBUG(scheduler,debugBelch("-- Waking up thread %ld (%p)\n",
- tso->id, tso));
+  debugTrace(DEBUG_sched, "-- waking up thread %ld (%p)\n",
+	     tso->id, tso);
}
#elif defined(PARALLEL_HASKELL)
StgBlockingQueueElement *
context_switch = 1;
#endif
- IF_DEBUG(scheduler,sched_belch("waking up thread %ld on cap %d", (long)tso->id, tso->cap->no));
+ debugTrace(DEBUG_sched,
+ "waking up thread %ld on cap %d",
+ (long)tso->id, tso->cap->no);
+
return next;
}
{
sched_state = SCHED_INTERRUPTING;
context_switch = 1;
+ wakeUpRts();
+}
+
+/* -----------------------------------------------------------------------------
+ Wake up the RTS
+
+ This function causes at least one OS thread to wake up and run the
+ scheduler loop. It is invoked when the RTS might be deadlocked, or
+ an external event has arrived that may need servicing (eg. a
+ keyboard interrupt).
+
+ In the single-threaded RTS we don't do anything here; we only have
+ one thread anyway, and the event that caused us to want to wake up
+ will have interrupted any blocking system call in progress anyway.
+ -------------------------------------------------------------------------- */
+
+void
+wakeUpRts(void)
+{
#if defined(THREADED_RTS)
- prodAllCapabilities();
+#if !defined(mingw32_HOST_OS)
+ // This forces the IO Manager thread to wakeup, which will
+ // in turn ensure that some OS thread wakes up and runs the
+ // scheduler loop, which will cause a GC and deadlock check.
+ ioManagerWakeup();
+#else
+ // On Windows this might be safe enough, because we aren't
+ // in a signal handler. Later we should use the IO Manager,
+ // though.
+ prodOneCapability();
+#endif
#endif
}
// blackhole_queue is global:
ASSERT_LOCK_HELD(&sched_mutex);
- IF_DEBUG(scheduler, sched_belch("checking threads blocked on black holes"));
+ debugTrace(DEBUG_sched, "checking threads blocked on black holes");
// ASSUMES: sched_mutex
prev = &blackhole_queue;
return;
}
- IF_DEBUG(scheduler,
- sched_belch("raising exception in thread %ld.", (long)tso->id));
+ debugTrace(DEBUG_sched,
+ "raising exception in thread %ld.", (long)tso->id);
// Remove it from any blocking queues
unblockThread(cap,tso);
((StgClosure *)frame)->header.prof.ccs /* ToDo */);
TICK_ALLOC_UP_THK(words+1,0);
- IF_DEBUG(scheduler,
- debugBelch("sched: Updating ");
- printPtr((P_)((StgUpdateFrame *)frame)->updatee);
- debugBelch(" with ");
- printObj((StgClosure *)ap);
- );
+ //IF_DEBUG(scheduler,
+ // debugBelch("sched: Updating ");
+ // printPtr((P_)((StgUpdateFrame *)frame)->updatee);
+ // debugBelch(" with ");
+ // printObj((StgClosure *)ap);
+ // );
// Replace the updatee with an indirection
//
// whether the transaction is valid or not because its
// possible validity cannot have caused the exception
// and will not be visible after the abort.
- IF_DEBUG(stm,
- debugBelch("Found atomically block delivering async exception\n"));
+ debugTrace(DEBUG_stm,
+ "found atomically block delivering async exception");
+
StgTRecHeader *trec = tso -> trec;
StgTRecHeader *outer = stmGetEnclosingTRec(trec);
stmAbortTransaction(cap, trec);
continue;
case ATOMICALLY_FRAME:
- IF_DEBUG(stm, debugBelch("Found ATOMICALLY_FRAME at %p\n", p));
+ debugTrace(DEBUG_stm, "found ATOMICALLY_FRAME at %p", p);
tso->sp = p;
return ATOMICALLY_FRAME;
return CATCH_FRAME;
case CATCH_STM_FRAME:
- IF_DEBUG(stm, debugBelch("Found CATCH_STM_FRAME at %p\n", p));
+ debugTrace(DEBUG_stm, "found CATCH_STM_FRAME at %p", p);
tso->sp = p;
return CATCH_STM_FRAME;
switch (info->i.type) {
case ATOMICALLY_FRAME:
- IF_DEBUG(stm, debugBelch("Found ATOMICALLY_FRAME at %p during retrry\n", p));
- tso->sp = p;
- return ATOMICALLY_FRAME;
+	    debugTrace(DEBUG_stm,
+		       "found ATOMICALLY_FRAME at %p during retry", p);
+ tso->sp = p;
+ return ATOMICALLY_FRAME;
case CATCH_RETRY_FRAME:
- IF_DEBUG(stm, debugBelch("Found CATCH_RETRY_FRAME at %p during retrry\n", p));
- tso->sp = p;
- return CATCH_RETRY_FRAME;
+	    debugTrace(DEBUG_stm,
+		       "found CATCH_RETRY_FRAME at %p during retry", p);
+ tso->sp = p;
+ return CATCH_RETRY_FRAME;
case CATCH_STM_FRAME:
default:
next = tso->global_link;
tso->global_link = all_threads;
all_threads = tso;
- IF_DEBUG(scheduler, sched_belch("resurrecting thread %d", tso->id));
+ debugTrace(DEBUG_sched, "resurrecting thread %d", tso->id);
// Wake up the thread on the Capability it was last on
cap = tso->cap;
}
#endif
-void
-sched_belch(char *s, ...)
-{
- va_list ap;
- va_start(ap,s);
-#ifdef THREADED_RTS
- debugBelch("sched (task %p): ", (void *)(unsigned long)(unsigned int)osThreadId());
-#elif defined(PARALLEL_HASKELL)
- debugBelch("== ");
-#else
- debugBelch("sched: ");
-#endif
- vdebugBelch(s, ap);
- debugBelch("\n");
- va_end(ap);
-}
-
#endif /* DEBUG */