Make setInlineActivation left-associative
diff --git a/rts/Schedule.c b/rts/Schedule.c
index bb36f9b..cfdb392 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -17,7 +17,7 @@
 #include "Interpreter.h"
 #include "Printer.h"
 #include "RtsSignals.h"
-#include "Sanity.h"
+#include "sm/Sanity.h"
 #include "Stats.h"
 #include "STM.h"
 #include "Prelude.h"
@@ -162,7 +162,7 @@ static Capability *scheduleDoGC(Capability *cap, Task *task,
 static rtsBool checkBlackHoles(Capability *cap);
 static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
-static StgTSO *threadStackUnderflow(Task *task, StgTSO *tso);
+static StgTSO *threadStackUnderflow(Capability *cap, Task *task, StgTSO *tso);
 static void deleteThread (Capability *cap, StgTSO *tso);
 static void deleteAllThreads (Capability *cap);
@@ -468,7 +468,7 @@ run_thread:
     }
 #endif
 
-    traceSchedEvent(cap, EVENT_RUN_THREAD, t, 0);
+    traceEventRunThread(cap, t);
 
     switch (prev_what_next) {
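
This hunk, and the similar traceEventStopThread, traceEventMigrateThread, traceEventStealSpark, traceEventRequestSeqGc/ParGc, and traceEventGcStart/GcEnd hunks below, replace the generic traceSchedEvent call with per-event wrappers. A minimal sketch of how such a wrapper could sit on top of the generic tracer; the real definition is not part of this diff, so the INLINE_HEADER form and placement are assumptions:

    /* Hypothetical wrapper (assumed, for illustration): EVENT_RUN_THREAD
     * carries no extra payload, which is why the old call always passed
     * 0 as its final argument. */
    INLINE_HEADER void
    traceEventRunThread (Capability *cap, StgTSO *t)
    {
        traceSchedEvent(cap, EVENT_RUN_THREAD, t, 0);
    }

A typed wrapper lets call sites drop meaningless arguments (the stray 0 here) and catches payload mistakes at compile time.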
@@ -518,7 +518,7 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif
 
-    traceSchedEvent (cap, EVENT_STOP_THREAD, t, ret);
+    traceEventStopThread(cap, t, ret);
 
 #if defined(THREADED_RTS)
     // If ret is ThreadBlocked, and this Task is bound to the TSO that
@@ -547,7 +547,7 @@ run_thread:
     schedulePostRunThread(cap,t);
 
     if (ret != StackOverflow) {
-        t = threadStackUnderflow(task,t);
+        t = threadStackUnderflow(cap,task,t);
     }
 
     ready_to_gc = rtsFalse;
@@ -778,7 +778,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
         debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
         appendToRunQueue(free_caps[i],t);
 
-        traceSchedEvent (cap, EVENT_MIGRATE_THREAD, t, free_caps[i]->no);
+        traceEventMigrateThread (cap, t, free_caps[i]->no);
 
         if (t->bound) { t->bound->cap = free_caps[i]; }
         t->cap = free_caps[i];
@@ -802,7 +802,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
         if (spark != NULL) {
             debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
 
-            traceSchedEvent(free_caps[i], EVENT_STEAL_SPARK, t, cap->no);
+            traceEventStealSpark(free_caps[i], t, cap->no);
 
             newSpark(&(free_caps[i]->r), spark);
         }
@@ -1118,7 +1118,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
     {
         bdescr *x;
         for (x = bd; x < bd + blocks; x++) {
-            initBdescr(x,cap->r.rNursery);
+            initBdescr(x,g0,g0);
             x->free = x->start;
             x->flags = 0;
         }
@@ -1206,9 +1206,6 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
     }
 #endif
 
-    IF_DEBUG(sanity,
-             //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
-             checkTSO(t));
     ASSERT(t->_link == END_TSO_QUEUE);
 
     // Shortcut if we're just switching evaluators: don't bother
@@ -1218,6 +1215,10 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
         return rtsTrue;
     }
 
+    IF_DEBUG(sanity,
+             //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
+             checkTSO(t));
+
     addToRunQueue(cap,t);
     return rtsFalse;
@@ -1377,7 +1378,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     if (sched_state < SCHED_INTERRUPTING
         && RtsFlags.ParFlags.parGcEnabled
         && N >= RtsFlags.ParFlags.parGcGen
-        && ! oldest_gen->steps[0].mark)
+        && ! oldest_gen->mark)
     {
         gc_type = PENDING_GC_PAR;
     } else {
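
oldest_gen->steps[0].mark collapsing to oldest_gen->mark, together with the step/generation replacements later in this diff, reflects the storage manager losing its per-generation "step" level. A rough before/after sketch of the layout; the field shapes are assumptions for illustration, since the real structs live in the storage-manager headers, not in this file:

    /* before: per-space state lived one level down, on the step */
    typedef struct step_ {
        rtsBool  mark;      /* mark (rather than copy) this space? */
        StgTSO  *threads;   /* all TSOs living in this space */
        /* ... */
    } step;

    typedef struct generation_ {
        struct step_ *steps;    /* steps[0 .. n_steps-1] */
        /* ... */
    } generation;

    /* after: one space per generation, so the fields move up a level */
    typedef struct generation_ {
        rtsBool  mark;      /* was steps[0].mark */
        StgTSO  *threads;   /* was steps[s].threads */
        /* ... */
    } generation;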
@@ -1417,11 +1418,11 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     if (gc_type == PENDING_GC_SEQ)
     {
-        traceSchedEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
+        traceEventRequestSeqGc(cap);
     }
     else
     {
-        traceSchedEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
+        traceEventRequestParGc(cap);
         debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
     }
@@ -1477,8 +1478,8 @@ delete_threads_and_gc:
     heap_census = scheduleNeedHeapProfile(rtsTrue);
 
+    traceEventGcStart(cap);
 #if defined(THREADED_RTS)
-    traceSchedEvent(cap, EVENT_GC_START, 0, 0);
     // reset waiting_for_gc *before* GC, so that when the GC threads
     // emerge they don't immediately re-enter the GC.
     waiting_for_gc = 0;
@@ -1486,7 +1487,7 @@ delete_threads_and_gc:
 #else
     GarbageCollect(force_major || heap_census, 0, cap);
 #endif
-    traceSchedEvent(cap, EVENT_GC_END, 0, 0);
+    traceEventGcEnd(cap);
 
     if (recent_activity == ACTIVITY_INACTIVE && force_major)
     {
@@ -1579,7 +1580,7 @@ forkProcess(HsStablePtr *entry
     pid_t pid;
     StgTSO* t,*next;
     Capability *cap;
-    nat s;
+    nat g;
 
 #if defined(THREADED_RTS)
     if (RtsFlags.ParFlags.nNodes > 1) {
@@ -1627,8 +1628,8 @@ forkProcess(HsStablePtr *entry
         // all Tasks, because they correspond to OS threads that are
         // now gone.
 
-        for (s = 0; s < total_steps; s++) {
-          for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
+        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+          for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
             if (t->what_next == ThreadRelocated) {
                 next = t->_link;
             } else {
@@ -1654,8 +1655,8 @@ forkProcess(HsStablePtr *entry
         // Empty the threads lists.  Otherwise, the garbage
         // collector may attempt to resurrect some of these threads.
-        for (s = 0; s < total_steps; s++) {
-            all_steps[s].threads = END_TSO_QUEUE;
+        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+            generations[g].threads = END_TSO_QUEUE;
         }
 
         // Wipe the task list, except the current Task.
@@ -1709,19 +1710,19 @@ deleteAllThreads ( Capability *cap )
     // NOTE: only safe to call if we own all capabilities.
 
     StgTSO* t, *next;
-    nat s;
+    nat g;
 
     debugTrace(DEBUG_sched,"deleting all threads");
-    for (s = 0; s < total_steps; s++) {
-      for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
-        if (t->what_next == ThreadRelocated) {
-            next = t->_link;
-        } else {
-            next = t->global_link;
-            deleteThread(cap,t);
-        }
-      }
-    }
+    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+      for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
+        if (t->what_next == ThreadRelocated) {
+            next = t->_link;
+        } else {
+            next = t->global_link;
+            deleteThread(cap,t);
+        }
+      }
+    }
 
     // The run queue now contains a bunch of ThreadKilled threads.  We
     // must not throw these away: the main thread(s) will be in there
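
After this change, forkProcess and deleteAllThreads share the same "visit every thread" shape. Factored out as a self-contained sketch (a hypothetical helper, not something this diff introduces):

    /* Walk every TSO in the system via the per-generation thread lists.
     * ThreadRelocated entries are forwarding stubs: follow _link past
     * them instead of visiting.  Read global_link before calling
     * visit(), which may unlink or kill the thread. */
    static void
    forEachThread (Capability *cap, void (*visit)(Capability *, StgTSO *))
    {
        nat g;
        StgTSO *t, *next;
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
                if (t->what_next == ThreadRelocated) {
                    next = t->_link;
                } else {
                    next = t->global_link;
                    visit(cap, t);
                }
            }
        }
    }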
@@ -1805,7 +1806,7 @@ suspendThread (StgRegTable *reg)
     task = cap->running_task;
     tso = cap->r.rCurrentTSO;
 
-    traceSchedEvent(cap, EVENT_STOP_THREAD, tso, THREAD_SUSPENDED_FOREIGN_CALL);
+    traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL);
 
     // XXX this might not be necessary --SDM
     tso->what_next = ThreadRunGHC;
@@ -1868,7 +1869,7 @@ resumeThread (void *task_)
     task->suspended_tso = NULL;
     tso->_link = END_TSO_QUEUE; // no write barrier reqd
 
-    traceSchedEvent(cap, EVENT_RUN_THREAD, tso, tso->what_next);
+    traceEventRunThread(cap, tso);
 
     if (tso->why_blocked == BlockedOnCCall) {
         // avoid locking the TSO if we don't have to
@@ -1924,7 +1925,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
         appendToRunQueue(cap,tso);
     } else {
-        traceSchedEvent (cap, EVENT_MIGRATE_THREAD, tso, capabilities[cpu].no);
+        traceEventMigrateThread (cap, tso, capabilities[cpu].no);
         wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
     }
 #else
@@ -2200,6 +2201,7 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
     //
     if (tso->flags & TSO_SQUEEZED) {
+        unlockTSO(tso);
         return tso;
     }
 
     // #3677: In a stack overflow situation, stack squeezing may
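
The added unlockTSO balances a lock taken earlier in threadStackOverflow: before this fix, the TSO_SQUEEZED early return left the TSO locked. In sketch form, the discipline being restored (assuming the lock is acquired near the top of the function, as the added unlock implies):

    lockTSO(tso);
 
    if (tso->flags & TSO_SQUEEZED) {
        unlockTSO(tso);     /* every early exit must release the lock */
        return tso;
    }
 
    /* ... allocate a larger stack and copy the old one across ... */
 
    unlockTSO(tso);
    return dest;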
@@ -2262,7 +2264,7 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
                "increasing stack size from %ld words to %d.",
                (long)tso->stack_size, new_stack_size);
 
-    dest = (StgTSO *)allocateLocal(cap,new_tso_size);
+    dest = (StgTSO *)allocate(cap,new_tso_size);
     TICK_ALLOC_TSO(new_stack_size,0);
 
     /* copy the TSO block and the old stack into the new area */
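
Both allocateLocal call sites in this file (here and in raiseExceptionHelper below) move to allocate. A plausible reading, stated as an assumption rather than something this diff shows, is that the per-capability allocateLocal was folded into allocate, which now takes the Capability explicitly:

    /* assumed signatures, for illustration only */
    StgPtr allocateLocal (Capability *cap, lnat n);  /* before */
    StgPtr allocate      (Capability *cap, lnat n);  /* after  */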
@@ -2299,7 +2301,7 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
 }
 
 static StgTSO *
-threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
+threadStackUnderflow (Capability *cap, Task *task, StgTSO *tso)
 {
     bdescr *bd, *new_bd;
     lnat free_w, tso_size_w;
@@ -2337,6 +2339,13 @@ threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
     memcpy(new_tso,tso,TSO_STRUCT_SIZE);
     new_tso->stack_size = new_bd->free - new_tso->stack;
 
+    // The original TSO was dirty and probably on the mutable
+    // list. The new TSO is not yet on the mutable list, so we better
+    // put it there.
+    new_tso->dirty = 0;
+    new_tso->flags &= ~TSO_LINK_DIRTY;
+    dirty_TSO(cap, new_tso);
+
     debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
                (long)tso->id, tso_size_w, tso_sizeW(new_tso));
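
The new comment explains why the copied TSO must reach the mutable list; for the how, here is a sketch of what dirty_TSO is presumed to do. The body and the recordClosureMutated call are assumptions, since the real definition lives in the storage manager, not in this diff:

    void
    dirty_TSO (Capability *cap, StgTSO *tso)
    {
        if (tso->dirty == 0) {
            tso->dirty = 1;
            /* queue the TSO on this capability's mutable list so the
             * next minor GC scans it for old-to-new pointers */
            recordClosureMutated(cap, (StgClosure *)tso);
        }
    }

Clearing dirty and TSO_LINK_DIRTY first guarantees the guard fails and the fresh copy really is re-queued, rather than trusting flags inherited via memcpy from the old TSO.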
@@ -2533,7 +2542,7 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
         // Only create raise_closure if we need to.
         if (raise_closure == NULL) {
             raise_closure =
-                (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
+                (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
             SET_HDR(raise_closure, &stg_raise_info, CCCS);
             raise_closure->payload[0] = exception;
         }
@@ -2647,14 +2656,14 @@ resurrectThreads (StgTSO *threads)
 {
     StgTSO *tso, *next;
     Capability *cap;
-    step *step;
+    generation *gen;
 
     for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
         next = tso->global_link;
 
-        step = Bdescr((P_)tso)->step;
-        tso->global_link = step->threads;
-        step->threads = tso;
+        gen = Bdescr((P_)tso)->gen;
+        tso->global_link = gen->threads;
+        gen->threads = tso;
 
         debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);
@@ -2711,7 +2720,7 @@ performPendingThrowTos (StgTSO *threads)
     StgTSO *tso, *next;
     Capability *cap;
     Task *task, *saved_task;;
-    step *step;
+    generation *gen;
 
     task = myTask();
     cap = task->cap;
@@ -2719,9 +2728,9 @@ performPendingThrowTos (StgTSO *threads)
     for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
         next = tso->global_link;
 
-        step = Bdescr((P_)tso)->step;
-        tso->global_link = step->threads;
-        step->threads = tso;
+        gen = Bdescr((P_)tso)->gen;
+        tso->global_link = gen->threads;
+        gen->threads = tso;
 
         debugTrace(DEBUG_sched, "performing blocked throwTo to thread %lu", (unsigned long)tso->id);