+static rtsBool
+scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
+{
+ // did the thread ask for a large block?
+ if (cap->r.rHpAlloc > BLOCK_SIZE) {
+ // if so, get one and push it on the front of the nursery.
+ bdescr *bd;
+ lnat blocks;
+
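+ // round the requested size up to a whole number of blocks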
+ blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
+
+ IF_DEBUG(scheduler,
+ debugBelch("--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
+ (long)t->id, whatNext_strs[t->what_next], (long)blocks));
+
+ // don't do this if the nursery is (nearly) full; we'll GC first.
+ if (cap->r.rCurrentNursery->link != NULL ||
+ cap->r.rNursery->n_blocks == 1) { // paranoia to prevent infinite loop
+ // if the nursery has only one block.
+
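+ // allocGroup() allocates from the global block allocator, so we
+ // must hold the storage-manager lock around the call.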
+ ACQUIRE_SM_LOCK
+ bd = allocGroup( blocks );
+ RELEASE_SM_LOCK
+ cap->r.rNursery->n_blocks += blocks;
+
+ // link the new group into the list
+ bd->link = cap->r.rCurrentNursery;
+ bd->u.back = cap->r.rCurrentNursery->u.back;
+ if (cap->r.rCurrentNursery->u.back != NULL) {
+ cap->r.rCurrentNursery->u.back->link = bd;
+ } else {
+#if !defined(SMP)
+ ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
+ g0s0 == cap->r.rNursery);
+#endif
+ cap->r.rNursery->blocks = bd;
+ }
+ cap->r.rCurrentNursery->u.back = bd;
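+ // bd is now doubly-linked in immediately before rCurrentNursery.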
+
+ // initialise it as a nursery block. We initialise the
+ // step, gen_no, and flags field of *every* sub-block in
+ // this large block, because this is easier than making
+ // sure that we always find the block head of a large
+ // block whenever we call Bdescr() (eg. evacuate() and
+ // isAlive() in the GC would both have to do this, at
+ // least).
+ {
+ bdescr *x;
+ for (x = bd; x < bd + blocks; x++) {
+ x->step = cap->r.rNursery;
+ x->gen_no = 0;
+ x->flags = 0;
+ }
+ }
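+ // (Bdescr() maps an address to that block's own descriptor, not
+ // to the head of the group, so every sub-block must look valid.)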
+
+ // This sanity check can be a killer if the app is doing lots
+ // of large block allocations.
+ IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
+
+ // now update the nursery to point to the new block
+ cap->r.rCurrentNursery = bd;
+
+ // we might be unlucky and have another thread get on the
+ // run queue before us and steal the large block, but in that
+ // case the thread will just end up requesting another large
+ // block.
+ pushOnRunQueue(cap,t);
+ return rtsFalse; /* not actually GC'ing */
+ }
+ }
+
+ IF_DEBUG(scheduler,
+ debugBelch("--<< thread %ld (%s) stopped: HeapOverflow\n",
+ (long)t->id, whatNext_strs[t->what_next]));
+#if defined(GRAN)
+ ASSERT(!is_on_queue(t,CurrentProc));
+#elif defined(PARALLEL_HASKELL)
+ /* Currently we emit a DESCHEDULE event before GC in GUM.
+ ToDo: either add separate event to distinguish SYSTEM time from rest
+ or just nuke this DESCHEDULE (and the following SCHEDULE) */
+ if (0 && RtsFlags.ParFlags.ParStats.Full) {
+ DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
+ GR_DESCHEDULE, t, (StgClosure *)NULL, 0, 0);
+ emitSchedule = rtsTrue;
+ }
+#endif
+
+ pushOnRunQueue(cap,t);
+ /* actual GC is done at the end of the while loop in schedule() */
+ return rtsTrue;
+}
+
+/* -----------------------------------------------------------------------------
+ * Handle a thread that returned to the scheduler with ThreadStackOverflow
+ * -------------------------------------------------------------------------- */
+
+static void
+scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
+{
+ IF_DEBUG(scheduler, debugBelch("--<< thread %ld (%s) stopped: StackOverflow\n",
+ (long)t->id, whatNext_strs[t->what_next]));
+ /* just adjust the stack for this thread, then push it back
+ * on the run queue.
+ */
+ {
+ /* enlarge the stack */
+ StgTSO *new_t = threadStackOverflow(cap, t);
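+ // threadStackOverflow() copies the TSO into one with a larger stack.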
+
+ /* This TSO has moved, so update any pointers to it from the
+ * main thread stack. It had better not be on any other queues
+ * (and it shouldn't be).
+ */
+ if (task->tso != NULL) {
+ task->tso = new_t;
+ }
+ pushOnRunQueue(cap,new_t);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Handle a thread that returned to the scheduler with ThreadYielding
+ * -------------------------------------------------------------------------- */
+
+static rtsBool
+scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
+{
+ // Reset the context switch flag. We don't do this just before
+ // running the thread, because that would mean we would lose ticks
+ // during GC, which can lead to unfair scheduling (a thread hogs
+ // the CPU because the tick always arrives during GC). This way
+ // penalises threads that do a lot of allocation, but that seems
+ // better than the alternative.
+ context_switch = 0;
+
+ /* put the thread back on the run queue. Then, if we're ready to
+ * GC, check whether this is the last task to stop. If so, wake
+ * up the GC thread. getThread will block during a GC until the
+ * GC is finished.
+ */
+ IF_DEBUG(scheduler,
+ if (t->what_next != prev_what_next) {
+ debugBelch("--<< thread %ld (%s) stopped to switch evaluators\n",
+ (long)t->id, whatNext_strs[t->what_next]);
+ } else {
+ debugBelch("--<< thread %ld (%s) stopped, yielding\n",
+ (long)t->id, whatNext_strs[t->what_next]);
+ }
+ );
+
+ IF_DEBUG(sanity,
+ //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
+ checkTSO(t));
+ ASSERT(t->link == END_TSO_QUEUE);
+
+ // Shortcut if we're just switching evaluators: don't bother
+ // doing stack squeezing (which can be expensive), just run the
+ // thread.
+ if (t->what_next != prev_what_next) {
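+ // rtsTrue: tell the scheduler to run this thread again immediately.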
+ return rtsTrue;
+ }
+
+#if defined(GRAN)
+ ASSERT(!is_on_queue(t,CurrentProc));
+
+ IF_DEBUG(sanity,
+ //debugBelch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
+ checkThreadQsSanity(rtsTrue));
+
+#endif
+
+ addToRunQueue(cap,t);
+
+#if defined(GRAN)
+ /* add a ContinueThread event to actually process the thread */
+ new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
+ ContinueThread,
+ t, (StgClosure*)NULL, (rtsSpark*)NULL);
+ IF_GRAN_DEBUG(bq,
+ debugBelch("GRAN: eventq and runnableq after adding yielded thread to queue again:\n");
+ G_EVENTQ(0);
+ G_CURR_THREADQ(0));
+#endif
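+ // rtsFalse: the thread is queued; pick the next thread as normal.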
+ return rtsFalse;
+}