recent_activity = ACTIVITY_YES;
}
+ if (heap_census) {
+ debugTrace(DEBUG_sched, "performing heap census");
+ heapCensus();
+ performHeapProfile = rtsFalse;
+ }
+
#if defined(THREADED_RTS)
if (gc_type == PENDING_GC_PAR)
{
}
#endif
- if (heap_census) {
- debugTrace(DEBUG_sched, "performing heap census");
- heapCensus();
- performHeapProfile = rtsFalse;
- }
-
if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
// GC set the heap_overflow flag, so we should proceed with
// an orderly shutdown now. Ultimately we want the main
void
scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
{
// NOTE(review): the '-'/'+' prefixes below are leftover unified-diff markers —
// this span is a patch hunk pasted as source, not compilable C. The visible
// intent of the -/+ pair is to move the #if defined(THREADED_RTS) guard below
// the TSO_LOCKED assignment, so the affinity flag is set on the TSO in both
// threaded and non-threaded builds.
-#if defined(THREADED_RTS)
tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
// move this thread from now on.
+#if defined(THREADED_RTS)
// Wrap the requested cpu index into the range of available nodes.
cpu %= RtsFlags.ParFlags.nNodes;
if (cpu == cap->no) {
appendToRunQueue(cap,tso);
// NOTE(review): braces are unbalanced from here down — the branch for
// cpu != cap->no and the #else (non-threaded) arm appear to have been
// elided as diff context. Recover the missing lines from the original
// patch before applying; do not compile this span as-is.
#endif
}
+void markScheduler (evac_fn evac USED_IF_NOT_THREADS,
+ void *user USED_IF_NOT_THREADS)
+{
+#if !defined(THREADED_RTS)
+ evac(user, (StgClosure **)(void *)&blocked_queue_hd);
+ evac(user, (StgClosure **)(void *)&blocked_queue_tl);
+ evac(user, (StgClosure **)(void *)&sleeping_queue);
+#endif
+}
+
/* -----------------------------------------------------------------------------
performGC