CurrentTSO = event->tso;
#endif
- IF_DEBUG(scheduler, printAllThreads());
-
#if defined(RTS_SUPPORTS_THREADS)
// Yield the capability to higher-priority tasks if necessary.
//
barf("schedule: invalid what_next field");
}
+#if defined(SMP)
+ // In SMP mode we might return with a different Capability than
+ // the one we started with, if the Haskell thread made a foreign
+ // call. So let's find out what our current Capability is:
+ cap = myCapability();
+#endif
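+ // (Background: a safe foreign call releases the Capability via
+ // suspendThread(), and resumeThread() may hand the task back any
+ // free Capability, not necessarily the one it released.)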
+
// We have run some Haskell code: there might be blackhole-blocked
// threads to wake up now.
if ( blackhole_queue != END_TSO_QUEUE ) {
case ThreadBlocked:
scheduleHandleThreadBlocked(t);
- threadPaused(t);
break;
case ThreadFinished:
// they are unreachable and will therefore be sent an
// exception. Any threads thus released will be immediately
// runnable.
+
GarbageCollect(GetRoots,rtsTrue);
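+ // (The rtsTrue argument forces a major collection; the GC's
+ // resurrectThreads pass is what actually delivers those
+ // exceptions to the unreachable threads.)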
recent_activity = ACTIVITY_DONE_GC;
if ( !EMPTY_RUN_QUEUE() ) return;
debugBelch("--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
(long)t->id, whatNext_strs[t->what_next], blocks));
- // don't do this if it would push us over the
- // alloc_blocks_lim limit; we'll GC first.
- if (alloc_blocks + blocks < alloc_blocks_lim) {
+ // don't do this if the nursery is (nearly) full; we'll GC first.
+ if (cap->r.rCurrentNursery->link != NULL ||
+ cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop
+ // if the nursery has only one block.
- alloc_blocks += blocks;
bd = allocGroup( blocks );
+ cap->r.rNursery->n_blocks += blocks;
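+ // (Each Capability now owns its nursery, so the block count is
+ // kept in cap->r.rNursery rather than via the old global
+ // alloc_blocks counter.)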
// link the new group into the list
bd->link = cap->r.rCurrentNursery;
#if !defined(SMP)
ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
g0s0 == cap->r.rNursery);
- g0s0->blocks = bd;
#endif
cap->r.rNursery->blocks = bd;
}
{
bdescr *x;
for (x = bd; x < bd + blocks; x++) {
- x->step = g0s0;
+ x->step = cap->r.rNursery;
x->gen_no = 0;
x->flags = 0;
}
}
-#if !defined(SMP)
- // don't forget to update the block count in g0s0.
- g0s0->n_blocks += blocks;
-
// This assert can be a killer if the app is doing lots
// of large block allocations.
- ASSERT(countBlocks(g0s0->blocks) == g0s0->n_blocks);
-#endif
+ IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
// now update the nursery to point to the new block
cap->r.rCurrentNursery = bd;
}
}
- /* make all the running tasks block on a condition variable,
- * maybe set context_switch and wait till they all pile in,
- * then have them wait on a GC condition variable.
- */
IF_DEBUG(scheduler,
debugBelch("--<< thread %ld (%s) stopped: HeapOverflow\n",
(long)t->id, whatNext_strs[t->what_next]));
- threadPaused(t);
#if defined(GRAN)
ASSERT(!is_on_queue(t,CurrentProc));
#elif defined(PARALLEL_HASKELL)
/* just adjust the stack for this thread, then pop it back
* on the run queue.
*/
- threadPaused(t);
{
/* enlarge the stack */
StgTSO *new_t = threadStackOverflow(t);
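+ // threadStackOverflow copies t into a larger TSO and leaves the
+ // old one marked ThreadRelocated, with its link pointing at the
+ // copy; callers must be prepared to follow that forwarding pointer.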
return rtsTrue;
}
- threadPaused(t);
-
#if defined(GRAN)
ASSERT(!is_on_queue(t,CurrentProc));
emitSchedule = rtsTrue;
#else /* !GRAN */
- /* don't need to do anything. Either the thread is blocked on
- * I/O, in which case we'll have called addToBlockedQueue
- * previously, or it's blocked on an MVar or Blackhole, in which
- * case it'll be on the relevant queue already.
- */
+
+ // We don't need to do anything. The thread is blocked, and it
+ // has tidied up its stack and placed itself on whatever queue
+ // it needs to be on.
+
+#if !defined(SMP)
ASSERT(t->why_blocked != NotBlocked);
+ // This might not be true under SMP: we don't have
+ // exclusive access to this TSO, so someone might have
+ // woken it up by now. This actually happens: try
+ // conc023 +RTS -N2.
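+ // (e.g. t blocked on an MVar, and another Capability's putMVar
+ // has already woken it and queued it again before we got here.)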
+#endif
+
IF_DEBUG(scheduler,
debugBelch("--<< thread %d (%s) stopped: ",
t->id, whatNext_strs[t->what_next]);
// so this happens periodically:
scheduleCheckBlackHoles();
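+ // (Updating a thunk does not wake the threads blocked on it;
+ // they sit on blackhole_queue until this scan notices that the
+ // closure they are blocked on is no longer a BLACKHOLE.)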
+ IF_DEBUG(scheduler, printAllThreads());
+
/* everybody back, start the GC.
* Could do it in this thread, or signal a condition var
* to do it in another thread. Either way, we need to
StgTSO *t, *next;
IF_DEBUG(scheduler,sched_belch("deleting all threads"));
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
- next = t->global_link;
- deleteThread(t);
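+ // A TSO marked ThreadRelocated is just the forwarding pointer
+ // left behind by threadStackOverflow; follow its link field to
+ // the live TSO instead of trying to delete it.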
+ if (t->what_next == ThreadRelocated) {
+ next = t->link;
+ } else {
+ next = t->global_link;
+ deleteThread(t);
+ }
}
// The run queue now contains a bunch of ThreadKilled threads. We
#endif
#if defined(GRAN)
-static StgBlockingQueueElement *
+StgBlockingQueueElement *
unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
{
StgTSO *tso;
tso->id, tso));
}
#elif defined(PARALLEL_HASKELL)
-static StgBlockingQueueElement *
+StgBlockingQueueElement *
unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
{
StgBlockingQueueElement *next;
}
#else /* !GRAN && !PARALLEL_HASKELL */
-static StgTSO *
+StgTSO *
unblockOneLocked(StgTSO *tso)
{
StgTSO *next;
{
switch (tso->why_blocked) {
case BlockedOnRead:
- debugBelch("is blocked on read from fd %ld", tso->block_info.fd);
+ debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd));
break;
case BlockedOnWrite:
- debugBelch("is blocked on write to fd %ld", tso->block_info.fd);
+ debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd));
break;
#if defined(mingw32_HOST_OS)
case BlockedOnDoProc:
break;
#endif
case BlockedOnDelay:
- debugBelch("is blocked until %ld", tso->block_info.target);
+ debugBelch("is blocked until %ld", (long)(tso->block_info.target));
break;
case BlockedOnMVar:
debugBelch("is blocked on an MVar");
debugBelch("all threads:\n");
# endif
- for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
+ for (t = all_threads; t != END_TSO_QUEUE; ) {
debugBelch("\tthread %d @ %p ", t->id, (void *)t);
#if defined(DEBUG)
{
if (label) debugBelch("[\"%s\"] ",(char *)label);
}
#endif
- printThreadStatus(t);
- debugBelch("\n");
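+ // As in the delete-all-threads loop above: a relocated TSO is a
+ // forwarding pointer, so chase link rather than global_link here.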
+ if (t->what_next == ThreadRelocated) {
+ debugBelch("has been relocated...\n");
+ t = t->link;
+ } else {
+ printThreadStatus(t);
+ debugBelch("\n");
+ t = t->global_link;
+ }
}
}