projects
/
ghc-hetmet.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Fix build on Windows
[ghc-hetmet.git]
/
rts
/
Capability.c
diff --git
a/rts/Capability.c
b/rts/Capability.c
index
95050ba
..
c37ec4e
100644
(file)
--- a/
rts/Capability.c
+++ b/
rts/Capability.c
@@
-98,7
+98,7
@@
findSpark (Capability *cap)
cap->sparks_converted++;
// Post event for running a spark from capability's own pool.
- traceSchedEvent(cap, EVENT_RUN_SPARK, cap->r.rCurrentTSO, 0);
+ traceEventRunSpark(cap, cap->r.rCurrentTSO);
return spark;
}
@@
-132,8
+132,7
@@
findSpark (Capability *cap)
if (spark != NULL) {
cap->sparks_converted++;
- traceSchedEvent(cap, EVENT_STEAL_SPARK,
- cap->r.rCurrentTSO, robbed->no);
+ traceEventStealSpark(cap, cap->r.rCurrentTSO, robbed->no);
return spark;
}
@@
-213,7
+212,6
@@
initCapability( Capability *cap, nat i )
cap->no = i;
cap->in_haskell = rtsFalse;
- cap->in_gc = rtsFalse;
cap->run_queue_hd = END_TSO_QUEUE;
cap->run_queue_tl = END_TSO_QUEUE;
@@
-253,6
+251,7
@@
initCapability( Capability *cap, nat i )
cap->free_trec_headers = NO_TREC;
cap->transaction_tokens = 0;
cap->context_switch = 0;
+ cap->pinned_object_block = NULL;
}
/* ---------------------------------------------------------------------------
@@
-395,7
+394,10
@@
releaseCapability_ (Capability* cap,
// give this Capability to the appropriate Task.
if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
// Make sure we're not about to try to wake ourselves up
- ASSERT(task != cap->run_queue_hd->bound);
+ // ASSERT(task != cap->run_queue_hd->bound);
+ // assertion is false: in schedule() we force a yield after
+ // ThreadBlocked, but the thread may be back on the run queue
+ // by now.
task = cap->run_queue_hd->bound;
giveCapabilityToTask(cap,task);
return;
@@
-579,9
+581,9
@@
yieldCapability (Capability** pCap, Task *task)
Capability *cap = *pCap;
if (waiting_for_gc == PENDING_GC_PAR) {
- traceSchedEvent(cap, EVENT_GC_START, 0, 0);
+ traceEventGcStart(cap);
gcWorkerThread(cap);
- traceSchedEvent(cap, EVENT_GC_END, 0, 0);
+ traceEventGcEnd(cap);
return;
}
@@
-788,7
+790,7
@@
shutdownCapability (Capability *cap, Task *task, rtsBool safe)
continue;
}
- traceSchedEvent(cap, EVENT_SHUTDOWN, 0, 0);
+ traceEventShutdown(cap);
RELEASE_LOCK(&cap->lock);
break;
}