#include "ProfHeap.h"
#include "GC.h"
#include "Weak.h"
+#include "EventLog.h"
/* PARALLEL_HASKELL includes go here */
if (running_finalizers) {
errorBelch("error: a C finalizer called back into Haskell.\n"
- " use Foreign.Concurrent.newForeignPtr for Haskell finalizers.");
+ " This was previously allowed, but is disallowed in GHC 6.10.2 and later.\n"
+ " To create finalizers that may call back into Haskll, use\n"
+ " Foreign.Concurrent.newForeignPtr instead of Foreign.newForeignPtr.");
stg_exit(EXIT_FAILURE);
}
}
#endif
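+ // log that we are about to run thread t on this capability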
+ postEvent(cap, EVENT_RUN_THREAD, t->id, 0);
+
switch (prev_what_next) {
case ThreadKilled:
t->saved_winerror = GetLastError();
#endif
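+ // log why the thread stopped; 'ret' carries the stop reason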
+ postEvent(cap, EVENT_STOP_THREAD, t->id, ret);
+
#if defined(THREADED_RTS)
// If ret is ThreadBlocked, and this Task is bound to the TSO that
// blocked, we are in limbo - the TSO is now owned by whatever it
} else {
debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
appendToRunQueue(free_caps[i],t);
+
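+ // log the migration of thread t to free_caps[i]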
+ postEvent(cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
+
if (t->bound) { t->bound->cap = free_caps[i]; }
t->cap = free_caps[i];
i++;
spark = tryStealSpark(cap->sparks);
if (spark != NULL) {
debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
+
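+ // log the steal against free_caps[i], the capability receiving the spark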
+ postEvent(free_caps[i], EVENT_STEAL_SPARK, t->id, cap->no);
+
newSpark(&(free_caps[i]->r), spark);
}
}
"--<< thread %ld (%s) stopped: HeapOverflow",
(long)t->id, whatNext_strs[t->what_next]);
- if (cap->context_switch) {
+ if (cap->r.rHpLim == NULL || cap->context_switch) {
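+ // (a NULL rHpLim means HpLim was zeroed to force this thread back into the scheduler)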
// Sometimes we miss a context switch: when calling
// primitives in a tight loop, MAYBE_GC() doesn't check the
// context-switch flag, so we end up waiting for a GC.
if (gc_type == PENDING_GC_SEQ)
{
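+ // log that this capability requested a sequential (single-threaded) GC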
+ postEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
// single-threaded GC: grab all the capabilities
for (i=0; i < n_capabilities; i++) {
debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
{
// multi-threaded GC: make sure all the Capabilities donate one
// GC thread each.
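+ // log that this capability requested a parallel GC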
+ postEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
waitForGcThreads(cap);
heap_census = scheduleNeedHeapProfile(rtsTrue);
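+ // log GC start/end so profiling tools can measure time spent in GC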
+#if defined(THREADED_RTS)
+ postEvent(cap, EVENT_GC_START, 0, 0);
+ debugTrace(DEBUG_sched, "doing GC");
+ // reset waiting_for_gc *before* GC, so that when the GC threads
+ // emerge they don't immediately re-enter the GC.
+ waiting_for_gc = 0;
+ GarbageCollect(force_major || heap_census, gc_type, cap);
+#else
+ GarbageCollect(force_major || heap_census, 0, cap);
+#endif
+ postEvent(cap, EVENT_GC_END, 0, 0);
+
if (recent_activity == ACTIVITY_INACTIVE && force_major)
{
// We are doing a GC because the system has been idle for a
// timeslice and we need to check for deadlock. Record the
// fact that we've done a GC and turn off the timer signal;
// it will get re-enabled if we run any threads after the GC.
- //
- // Note: this is done before GC, because after GC there might
- // be threads already running (GarbageCollect() releases the
- // GC threads when it completes), so we risk turning off the
- // timer signal when it should really be on.
recent_activity = ACTIVITY_DONE_GC;
stopTimer();
}
+ else
+ {
+ // the GC might have taken long enough for the timer to set
+ // recent_activity = ACTIVITY_INACTIVE, but we aren't
+ // necessarily deadlocked:
+ recent_activity = ACTIVITY_YES;
+ }
#if defined(THREADED_RTS)
- debugTrace(DEBUG_sched, "doing GC");
- // reset waiting_for_gc *before* GC, so that when the GC threads
- // emerge they don't immediately re-enter the GC.
- waiting_for_gc = 0;
- GarbageCollect(force_major || heap_census, gc_type, cap);
-#else
- GarbageCollect(force_major || heap_census, 0, cap);
+ if (gc_type == PENDING_GC_PAR)
+ {
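+ // the parallel GC is finished: release the Capabilities' GC threads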
+ releaseGCThreads(cap);
+ }
#endif
if (heap_census) {
task = cap->running_task;
tso = cap->r.rCurrentTSO;
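+ // the thread is stopping while it makes a safe foreign call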
+ postEvent(cap, EVENT_STOP_THREAD, tso->id, THREAD_SUSPENDED_FOREIGN_CALL);
debugTrace(DEBUG_sched,
"thread %lu did a safe foreign call",
(unsigned long)cap->r.rCurrentTSO->id);
tso = task->suspended_tso;
task->suspended_tso = NULL;
tso->_link = END_TSO_QUEUE; // no write barrier reqd
+
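+ // the foreign call has returned, so the thread runs again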
+ postEvent(cap, EVENT_RUN_THREAD, tso->id, 0);
debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
if (tso->why_blocked == BlockedOnCCall) {
if (cpu == cap->no) {
appendToRunQueue(cap,tso);
} else {
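+ // log the move to capability 'cpu' before waking the thread there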
+ postEvent(cap, EVENT_MIGRATE_THREAD, tso->id, capabilities[cpu].no);
wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
}
#else
cap = task->cap;
RELEASE_LOCK(&task->lock);
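+ // bind this OS thread to a CPU when affinity is requested (+RTS -qa)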
+ if (RtsFlags.ParFlags.setAffinity) {
+ setThreadAffinity(cap->no, n_capabilities);
+ }
+
// set the thread-local pointer to the Task:
taskEnter(task);
}
#endif
- trace(TRACE_sched, "start: %d capabilities", n_capabilities);
-
RELEASE_LOCK(&sched_mutex);
}
{
Task *task = NULL;
- ACQUIRE_LOCK(&sched_mutex);
task = newBoundTask();
- RELEASE_LOCK(&sched_mutex);
// If we haven't killed all the threads yet, do it now.
if (sched_state < SCHED_SHUTTING_DOWN) {
// We must grab a new Task here, because the existing Task may be
// associated with a particular Capability, and chained onto the
// suspended_ccalling_tasks queue.
- ACQUIRE_LOCK(&sched_mutex);
task = newBoundTask();
- RELEASE_LOCK(&sched_mutex);
waitForReturnCapability(&task->cap,task);
scheduleDoGC(task->cap,task,force_major);
prev = &blackhole_queue;
t = blackhole_queue;
while (t != END_TSO_QUEUE) {
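+ // a ThreadRelocated TSO is a forwarding pointer: its _link field
+ // points to the new TSO, so follow it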
+ if (t->what_next == ThreadRelocated) {
+ t = t->_link;
+ continue;
+ }
ASSERT(t->why_blocked == BlockedOnBlackHole);
type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
if (type != BLACKHOLE && type != CAF_BLACKHOLE) {