+    /* --- tail of the GranSim event-dispatch switch --------------------
+       Each case handles one simulated event and jumps to next_thread to
+       pick up the next event from the event queue.  The head of the
+       switch (and of the enclosing function) lies outside this hunk. */
+    case FetchReply:
+      do_the_fetchreply(event);
+      goto next_thread; /* handle next event in event queue */
+
+    case UnblockThread: /* Move from the blocked queue to the tail of
+                           the runnable queue */
+      do_the_unblock(event);
+      goto next_thread; /* handle next event in event queue */
+
+    case ResumeThread: /* Move from the blocked queue to the tail of */
+                       /* the runnable queue ( i.e. Qu' SImqa'lu') */
+      /* charge the time this TSO spent blocked before restarting it */
+      event->tso->gran.blocktime +=
+        CurrentTime[CurrentProc] - event->tso->gran.blockedat;
+      do_the_startthread(event);
+      goto next_thread; /* handle next event in event queue */
+
+    case StartThread:
+      do_the_startthread(event);
+      goto next_thread; /* handle next event in event queue */
+
+    case MoveThread:
+      do_the_movethread(event);
+      goto next_thread; /* handle next event in event queue */
+
+    case MoveSpark:
+      do_the_movespark(event);
+      goto next_thread; /* handle next event in event queue */
+
+    case FindWork:
+      do_the_findwork(event);
+      goto next_thread; /* handle next event in event queue */
+
+    default:
+      barf("Illegal event type %u\n", event->evttype);
+    } /* switch */
+
+    /* This point was scheduler_loop in the old RTS */
+
+    IF_DEBUG(gran, debugBelch("GRAN: after main switch\n"));
+
+    /* record simulated time and peek at the next event; a next-event
+       time of 0 means "no more events", so event handling is switched
+       off -- HWL HACK */
+    TimeOfLastEvent = CurrentTime[CurrentProc];
+    TimeOfNextEvent = get_time_of_next_event();
+    IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK
+    // CurrentTSO = ThreadQueueHd;
+
+    IF_DEBUG(gran, debugBelch("GRAN: time of next event is: %ld\n",
+                              TimeOfNextEvent));
+
+    if (RtsFlags.GranFlags.Light)
+      GranSimLight_leave_system(event, &ActiveTSO);
+
+    /* the time slice ends time_slice ticks of simulated time from now */
+    EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice;
+
+    IF_DEBUG(gran,
+             debugBelch("GRAN: end of time-slice is %#lx\n", EndOfTimeSlice));
+
+    /* in a GranSim setup the TSO stays on the run queue */
+    t = CurrentTSO;
+    /* Take a thread from the run queue. */
+    POP_RUN_QUEUE(t); // take_off_run_queue(t);
+
+    IF_DEBUG(gran,
+             debugBelch("GRAN: About to run current thread, which is\n");
+             G_TSO(t,5));
+
+    context_switch = 0; // turned on via GranYield, checking events and time slice
+
+    IF_DEBUG(gran,
+             DumpGranEvent(GR_SCHEDULE, t));
+
+    /* mark this (simulated) processor busy until the thread yields/blocks */
+    procStatus[CurrentProc] = Busy;
+}
+#endif // GRAN
+
+/* ----------------------------------------------------------------------------
+ * Send pending messages (PARALLEL_HASKELL only)
+ * ------------------------------------------------------------------------- */
+
+#if defined(PARALLEL_HASKELL)
+/* Flush pending messages for this PE.
+ *
+ * Processes outstanding PendingFetches (global memory management, PAR
+ * only) and, when message buffering is enabled, sends away all message
+ * packets which have become too old.
+ *
+ * Returns: no thread is ever produced here; NULL is returned explicitly.
+ * (BUGFIX(review): the original fell off the end of a non-void function,
+ * which is undefined behaviour if a caller uses the result; it also
+ * declared three locals -- pool, spark, t -- that were never used.)
+ */
+static StgTSO *
+scheduleSendPendingMessages(void)
+{
+# if defined(PAR) // global Mem.Mgmt., omit for now
+  if (PendingFetches != END_BF_QUEUE) {
+    processFetches();
+  }
+# endif
+
+  if (RtsFlags.ParFlags.BufferTime) {
+    // if we use message buffering, we must send away all message
+    // packets which have become too old...
+    sendOldBuffers();
+  }
+
+  return (StgTSO *)NULL; // no thread to hand back
+}
+#endif
+
+/* ----------------------------------------------------------------------------
+ * Activate spark threads (PARALLEL_HASKELL only)
+ * ------------------------------------------------------------------------- */
+
+#if defined(PARALLEL_HASKELL)
+/* Try to turn a pending local spark into a runnable thread.
+ *
+ * Called when the run queue is empty and we want work; on success the
+ * new thread is added to the run queue, from where it is picked up in
+ * the next iteration of the scheduler loop.
+ *
+ * BUGFIX(review): the original declared this function void yet executed
+ * "return rtsTrue/rtsFalse;" (a C constraint violation), and used
+ * pool/spark/tso without declaring them.  The locals are declared below,
+ * mirroring scheduleSendPendingMessages(); the signature stays void so
+ * existing callers are unaffected, and the success/failure outcome is
+ * recorded in comments at each return.
+ */
+static void
+scheduleActivateSpark(void)
+{
+#if defined(SPARKS)
+  StgSparkPool *pool;
+  rtsSpark spark;
+  StgTSO *tso;
+
+  ASSERT(EMPTY_RUN_QUEUE());
+
+  /* :-[ no local threads => look out for local sparks */
+  /* the spark pool for the current PE */
+  pool = &(cap.r.rSparks); // JB: cap = (old) MainCap
+  if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
+      pool->hd < pool->tl) {
+    /*
+     * ToDo: add GC code check that we really have enough heap afterwards!!
+     * Old comment:
+     * If we're here (no runnable threads) and we have pending
+     * sparks, we must have a space problem. Get enough space
+     * to turn one of those pending sparks into a
+     * thread...
+     */
+
+    spark = findSpark(rtsFalse); /* get a spark */
+    if (spark != (rtsSpark) NULL) {
+      tso = createThreadFromSpark(spark); /* turn the spark into a thread */
+      IF_PAR_DEBUG(fish, // schedule,
+          debugBelch("==== schedule: Created TSO %d (%p); %d threads active\n",
+              tso->id, tso, advisory_thread_count));
+
+      if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
+        IF_PAR_DEBUG(fish, // schedule,
+            debugBelch("==^^ failed to create thread from spark @ %lx\n",
+                spark));
+        return; /* failed to generate a thread */
+      } /* otherwise fall through & pick-up new tso */
+    } else {
+      IF_PAR_DEBUG(fish, // schedule,
+          debugBelch("==^^ no local sparks (spark pool contains only NFs: %d)\n",
+              spark_queue_len(pool)));
+      return; /* failed to generate a thread */
+    }
+    return; /* success in generating a thread */
+  } else { /* no more threads permitted or pool empty */
+    return; /* failed to generate a thread */
+  }
+#else
+  return; /* dummy in non-SPARKS setup */
+#endif // SPARKS
+}
+#endif // PARALLEL_HASKELL
+
+/* ----------------------------------------------------------------------------
+ * Get work from a remote node (PARALLEL_HASKELL only)
+ * ------------------------------------------------------------------------- */
+
+#if defined(PARALLEL_HASKELL)
+/* Try to obtain work from a remote PE.
+ *
+ * Without SPARKS (Eden): flush buffers and block waiting for incoming
+ * messages.  With SPARKS (GUM): send a FISH message -- possibly after a
+ * delay, during which incoming messages are still processed -- to go
+ * fishing for remote sparks.
+ *
+ * receivedFinish (out): set when a Finish message arrived while waiting.
+ * Always returns rtsFalse, so the caller re-enters the scheduler loop
+ * and keeps processing messages while the fish is in flight.
+ */
+static rtsBool
+scheduleGetRemoteWork(rtsBool *receivedFinish)
+{
+  ASSERT(EMPTY_RUN_QUEUE());
+
+  if (RtsFlags.ParFlags.BufferTime) {
+    IF_PAR_DEBUG(verbose,
+        debugBelch("...send all pending data,"));
+    {
+      nat i;
+      for (i=1; i<=nPEs; i++)
+        sendImmediately(i); // send all messages away immediately
+    }
+  }
+# ifndef SPARKS
+  //++EDEN++ idle() , i.e. send all buffers, wait for work
+  // suppress fishing in EDEN... just look for incoming messages
+  // (blocking receive)
+  IF_PAR_DEBUG(verbose,
+      debugBelch("...wait for incoming messages...\n"));
+  *receivedFinish = processMessages(); // blocking receive...
+
+  // and reenter scheduling loop after having received something
+  // (return rtsFalse below)
+
+# else /* activate SPARKS machinery */
+/* We get here, if we have no work, tried to activate a local spark, but still
+   have no work. We try to get a remote spark, by sending a FISH message.
+   Thread migration should be added here, and triggered when a sequence of
+   fishes returns without work. */
+  /* NOTE(review): 'delay' was assigned without a declaration; declared
+     here (mid-block declarations are already used below for 'now'). */
+  rtsTime delay =
+    (RtsFlags.ParFlags.fishDelay!=0ll ? RtsFlags.ParFlags.fishDelay : 0ll);
+
+  /* =8-[ no local sparks => look for work on other PEs */
+  /*
+   * We really have absolutely no work. Send out a fish
+   * (there may be some out there already), and wait for
+   * something to arrive. We clearly can't run any threads
+   * until a SCHEDULE or RESUME arrives, and so that's what
+   * we're hoping to see. (Of course, we still have to
+   * respond to other types of messages.)
+   */
+  rtsTime now = msTime() /*CURRENT_TIME*/;
+  IF_PAR_DEBUG(verbose,
+      debugBelch("-- now=%ld\n", now));
+  IF_PAR_DEBUG(fish, // verbose,
+      if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
+          (last_fish_arrived_at!=0 &&
+           last_fish_arrived_at+delay > now)) {
+        debugBelch("--$$ <%llu> delaying FISH until %llu (last fish %llu, delay %llu)\n",
+            now, last_fish_arrived_at+delay,
+            last_fish_arrived_at,
+            delay);
+      });
+
+  if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
+      advisory_thread_count < RtsFlags.ParFlags.maxThreads) { // send a FISH, but when?
+    if (last_fish_arrived_at==0 ||
+        (last_fish_arrived_at+delay <= now)) { // send FISH now!
+      /* outstandingFishes is set in sendFish, processFish;
+         avoid flooding system with fishes via delay */
+      next_fish_to_send_at = 0;
+    } else {
+      /* ToDo: this should be done in the main scheduling loop to avoid the
+         busy wait here; not so bad if fish delay is very small */
+      int iq = 0; // DEBUGGING -- HWL
+      next_fish_to_send_at = last_fish_arrived_at+delay; // remember when to send
+      /* send a fish when ready, but process messages that arrive in the meantime */
+      do {
+        if (PacketsWaiting()) {
+          iq++; // DEBUGGING
+          *receivedFinish = processMessages();
+        }
+        now = msTime();
+      } while (!*receivedFinish && now<next_fish_to_send_at);
+      /* BUGFIX(review): was '||', which only let the loop exit once a
+         Finish message had arrived AND the send time had passed -- so
+         the receivedFinish check below was always true and the delayed
+         FISH was never sent.  With '&&' we poll until either a Finish
+         arrives or it is time to send the fish, as intended. */
+      // JB: This means the fish could become obsolete, if we receive
+      // work. Better check for work again?
+      // last line: while (!receivedFinish || !haveWork || now<...)
+      // next line: if (receivedFinish || haveWork )
+
+      if (*receivedFinish) // no need to send a FISH if we are finishing anyway
+        return rtsFalse;   // NB: this will leave scheduler loop
+                           // immediately after return!
+
+      IF_PAR_DEBUG(fish, // verbose,
+          debugBelch("--$$ <%llu> sent delayed fish (%d processMessages); active/total threads=%d/%d\n",now,iq,run_queue_len(),advisory_thread_count));
+
+    }
+
+    // JB: IMHO, this should all be hidden inside sendFish(...)
+    /* pe = choosePE();
+       sendFish(pe, thisPE, NEW_FISH_AGE, NEW_FISH_HISTORY,
+                NEW_FISH_HUNGER);
+
+       // Global statistics: count no. of fishes
+       if (RtsFlags.ParFlags.ParStats.Global &&
+           RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
+         globalParStats.tot_fish_mess++;
+       }
+    */
+
+    /* delayed fishes must have been sent by now! */
+    next_fish_to_send_at = 0;
+  }
+
+  *receivedFinish = processMessages();
+# endif /* SPARKS */
+
+  return rtsFalse;
+  /* NB: this function always returns rtsFalse, meaning the scheduler
+     loop continues with the next iteration;
+     rationale:
+       return code means success in finding work; we enter this function
+       if there is no local work, thus have to send a fish which takes
+       time until it arrives with work; in the meantime we should process
+       messages in the main loop;
+  */
+}
+#endif // PARALLEL_HASKELL
+
+/* ----------------------------------------------------------------------------
+ * PAR/GRAN: Report stats & debugging info(?)
+ * ------------------------------------------------------------------------- */
+
+#if defined(PAR) || defined(GRAN)
+/* Pop the next thread to run and emit scheduling stats/debug output.
+ *
+ * Assumes a non-empty run queue (asserted).  Pops the head thread into
+ * 't' and, when full parallel stats are on, writes a GR_SCHEDULE event
+ * to the log file for spark-generated threads.
+ *
+ * NOTE(review): 't' and 'pool' carry no local declarations here, so
+ * they must be in scope at file level -- confirm against the rest of
+ * Schedule.c.
+ */
+static void
+scheduleGranParReport(void)
+{
+  ASSERT(run_queue_hd != END_TSO_QUEUE);
+
+  /* Take a thread from the run queue, if we have work */
+  POP_RUN_QUEUE(t); // take_off_run_queue(END_TSO_QUEUE);
+
+  /* If this TSO has got its outport closed in the meantime,
+   * it mustn't be run. Instead, we have to clean it up as if it was finished.
+   * It has to be marked as TH_DEAD for this purpose.
+   * If it is TH_TERM instead, it is supposed to have finished in the normal way.
+
+JB: TODO: investigate wether state change field could be nuked
+    entirely and replaced by the normal tso state (whatnext
+    field). All we want to do is to kill tsos from outside.
+   */
+
+  /* ToDo: write something to the log-file
+  if (RTSflags.ParFlags.granSimStats && !sameThread)
+    DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
+
+  CurrentTSO = t;
+  */
+  /* the spark pool for the current PE */
+  pool = &(cap.r.rSparks); // cap = (old) MainCap
+
+  IF_DEBUG(scheduler,
+      debugBelch("--=^ %d threads, %d sparks on [%#x]\n",
+          run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
+
+  IF_PAR_DEBUG(fish,
+      debugBelch("--=^ %d threads, %d sparks on [%#x]\n",
+          run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
+
+  /* BUGFIX(review): the old condition dereferenced t->par.sparkname
+     before performing the null check on 't'; the guard now comes first. */
+  if (RtsFlags.ParFlags.ParStats.Full &&
+      t &&                               // guard before dereferencing t
+      (t->par.sparkname != (StgInt)0) && // only log spark generated threads
+      (emitSchedule ||                   // forced emit
+       (LastTSO && t->id != LastTSO->id))) {
+    /*
+      we are running a different TSO, so write a schedule event to log file
+      NB: If we use fair scheduling we also have to write a deschedule
+          event for LastTSO; with unfair scheduling we know that the
+          previous tso has blocked whenever we switch to another tso, so
+          we don't need it in GUM for now
+    */
+    IF_PAR_DEBUG(fish, // schedule,
+        debugBelch("____ scheduling spark generated thread %d (%lx) (%lx) via a forced emit\n",t->id,t,t->par.sparkname));
+
+    DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
+        GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
+    emitSchedule = rtsFalse;
+  }
+}
+#endif
+
+/* ----------------------------------------------------------------------------
+ * After running a thread...
+ * ASSUMES: sched_mutex
+ * ------------------------------------------------------------------------- */
+
+static void
+schedulePostRunThread(void)
+{
+#if defined(PAR)
+  /* HACK 675: if the last thread didn't yield, make sure to print a
+     SCHEDULE event to the log file when StgRunning the next thread, even
+     if it is the same one as before */
+  LastTSO = t;
+  TimeOfLastYield = CURRENT_TIME;
+#endif
+
+  /* some statistics gathering in the parallel case */
+
+#if defined(GRAN) || defined(PAR) || defined(EDEN)
+  /* 'ret' is the status the just-run thread came back with; the switch
+     continues past the end of this hunk.
+     NOTE(review): 'ret' and 't' have no local declarations here --
+     presumably file-scope in this vintage of Schedule.c; confirm. */
+  switch (ret) {
+  case HeapOverflow:
+# if defined(GRAN)
+    /* log a deschedule event and count the heap overflow (GranSim) */
+    IF_DEBUG(gran, DumpGranEvent(GR_DESCHEDULE, t));
+    globalGranStats.tot_heapover++;
+# elif defined(PAR)
+    globalParStats.tot_heapover++;
+# endif