+#if defined(RTS_SUPPORTS_THREADS)
+ if ( EMPTY_RUN_QUEUE() ) {
+ continue; // nothing to do
+ }
+#endif
+
+#if defined(GRAN)
+ if (RtsFlags.GranFlags.Light)
+ GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc
+
+ /* adjust time based on time-stamp */
+ if (event->time > CurrentTime[CurrentProc] &&
+ event->evttype != ContinueThread)
+ CurrentTime[CurrentProc] = event->time;
+
+ /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */
+ if (!RtsFlags.GranFlags.Light)
+ handleIdlePEs();
+
+ IF_DEBUG(gran, fprintf(stderr, "GRAN: switch by event-type\n"));
+
+ /* main event dispatcher in GranSim */
+ switch (event->evttype) {
+ /* Should just be continuing execution */
+ case ContinueThread:
+ IF_DEBUG(gran, fprintf(stderr, "GRAN: doing ContinueThread\n"));
+ /* ToDo: check assertion
+ ASSERT(run_queue_hd != (StgTSO*)NULL &&
+ run_queue_hd != END_TSO_QUEUE);
+ */
+ /* Ignore ContinueThreads for fetching threads (if synchronous communication) */
+ if (!RtsFlags.GranFlags.DoAsyncFetch &&
+ procStatus[CurrentProc]==Fetching) {
+ belch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]",
+ CurrentTSO->id, CurrentTSO, CurrentProc);
+ goto next_thread;
+ }
+ /* Ignore ContinueThreads for completed threads */
+ if (CurrentTSO->what_next == ThreadComplete) {
+ belch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)",
+ CurrentTSO->id, CurrentTSO, CurrentProc);
+ goto next_thread;
+ }
+ /* Ignore ContinueThreads for threads that are being migrated */
+ if (PROCS(CurrentTSO)==Nowhere) {
+ belch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)",
+ CurrentTSO->id, CurrentTSO, CurrentProc);
+ goto next_thread;
+ }
+ /* The thread should be at the beginning of the run queue */
+ if (CurrentTSO!=run_queue_hds[CurrentProc]) {
+ belch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread",
+ CurrentTSO->id, CurrentTSO, CurrentProc);
+ break; // run the thread anyway
+ }
+ /*
+ new_event(proc, proc, CurrentTime[proc],
+ FindWork,
+ (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
+ goto next_thread;
+ */ /* Catches superfluous CONTINUEs -- should be unnecessary */
+ break; // now actually run the thread; DaH Qu'vam yImuHbej
+
+ case FetchNode:
+ do_the_fetchnode(event);
+ goto next_thread; /* handle next event in event queue */
+
+ case GlobalBlock:
+ do_the_globalblock(event);
+ goto next_thread; /* handle next event in event queue */
+
+ case FetchReply:
+ do_the_fetchreply(event);
+ goto next_thread; /* handle next event in event queue */
+
+ case UnblockThread: /* Move from the blocked queue to the tail of the run queue */
+ do_the_unblock(event);
+ goto next_thread; /* handle next event in event queue */
+
+ case ResumeThread: /* Move from the blocked queue to the tail of */
+ /* the runnable queue ( i.e. Qu' SImqa'lu') */
+ event->tso->gran.blocktime +=
+ CurrentTime[CurrentProc] - event->tso->gran.blockedat;
+ do_the_startthread(event);
+ goto next_thread; /* handle next event in event queue */
+
+ case StartThread:
+ do_the_startthread(event);
+ goto next_thread; /* handle next event in event queue */
+
+ case MoveThread:
+ do_the_movethread(event);
+ goto next_thread; /* handle next event in event queue */
+
+ case MoveSpark:
+ do_the_movespark(event);
+ goto next_thread; /* handle next event in event queue */
+
+ case FindWork:
+ do_the_findwork(event);
+ goto next_thread; /* handle next event in event queue */
+
+ default:
+ barf("Illegal event type %u\n", event->evttype);
+ } /* switch */
+
+ /* This point was scheduler_loop in the old RTS */
+
+ IF_DEBUG(gran, belch("GRAN: after main switch"));
+
+ TimeOfLastEvent = CurrentTime[CurrentProc];
+ TimeOfNextEvent = get_time_of_next_event();
+ IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK
+ // CurrentTSO = ThreadQueueHd;
+
+ IF_DEBUG(gran, belch("GRAN: time of next event is: %ld",
+ TimeOfNextEvent));
+
+ if (RtsFlags.GranFlags.Light)
+ GranSimLight_leave_system(event, &ActiveTSO);
+
+ EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice;
+
+ IF_DEBUG(gran,
+ belch("GRAN: end of time-slice is %#lx", EndOfTimeSlice));
+
+ /* in a GranSim setup the TSO stays on the run queue */
+ t = CurrentTSO;
+ /* Take a thread from the run queue. */
+ POP_RUN_QUEUE(t); // take_off_run_queue(t);
+
+ IF_DEBUG(gran,
+ fprintf(stderr, "GRAN: About to run current thread, which is\n");
+ G_TSO(t,5));
+
+ context_switch = 0; // turned on via GranYield, checking events and time slice
+
+ IF_DEBUG(gran,
+ DumpGranEvent(GR_SCHEDULE, t));
+
+ procStatus[CurrentProc] = Busy;
+
+#elif defined(PAR)
+ if (PendingFetches != END_BF_QUEUE) {
+ processFetches();
+ }
+
+ /* ToDo: perhaps merge with spark activation above */
+ /* check whether we have local work and send requests if we have none */
+ if (EMPTY_RUN_QUEUE()) { /* no runnable threads */
+ /* :-[ no local threads => look out for local sparks */
+ /* the spark pool for the current PE */
+ pool = &(MainRegTable.rSparks); // generalise to cap = &MainRegTable
+ if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
+ pool->hd < pool->tl) {
+ /*
+ * ToDo: add GC code check that we really have enough heap afterwards!!
+ * Old comment:
+ * If we're here (no runnable threads) and we have pending
+ * sparks, we must have a space problem. Get enough space
+ * to turn one of those pending sparks into a
+ * thread...
+ */
+
+ spark = findSpark(rtsFalse); /* get a spark */
+ if (spark != (rtsSpark) NULL) {
+ tso = activateSpark(spark); /* turn the spark into a thread */
+ IF_PAR_DEBUG(schedule,
+ belch("==== schedule: Created TSO %d (%p); %d threads active",
+ tso->id, tso, advisory_thread_count));
+
+ if (tso==END_TSO_QUEUE) { /* failed to activate spark -> back to loop */
+ belch("==^^ failed to activate spark");
+ goto next_thread;
+ } /* otherwise fall through & pick up new tso */
+ } else {
+ IF_PAR_DEBUG(verbose,
+ belch("==^^ no local sparks (spark pool contains only NFs: %d)",
+ spark_queue_len(pool)));
+ goto next_thread;
+ }
+ }
+
+ /* If we still have no work we need to send a FISH to get a spark
+ from another PE
+ */
+ if (EMPTY_RUN_QUEUE()) {
+ /* =8-[ no local sparks => look for work on other PEs */
+ /*
+ * We really have absolutely no work. Send out a fish
+ * (there may be some out there already), and wait for
+ * something to arrive. We clearly can't run any threads
+ * until a SCHEDULE or RESUME arrives, and so that's what
+ * we're hoping to see. (Of course, we still have to
+ * respond to other types of messages.)
+ */
+ TIME now = msTime() /*CURRENT_TIME*/;
+ IF_PAR_DEBUG(verbose,
+ belch("-- now=%ld", now));
+ IF_PAR_DEBUG(verbose,
+ if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
+ (last_fish_arrived_at!=0 &&
+ last_fish_arrived_at+RtsFlags.ParFlags.fishDelay > now)) {
+ belch("--$$ delaying FISH until %ld (last fish %ld, delay %ld, now %ld)",
+ last_fish_arrived_at+RtsFlags.ParFlags.fishDelay,
+ last_fish_arrived_at,
+ RtsFlags.ParFlags.fishDelay, now);
+ });
+
+ if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
+ (last_fish_arrived_at==0 ||
+ (last_fish_arrived_at+RtsFlags.ParFlags.fishDelay <= now))) {
+ /* outstandingFishes is set in sendFish, processFish;
+ avoid flooding system with fishes via delay */
+ pe = choosePE();
+ sendFish(pe, mytid, NEW_FISH_AGE, NEW_FISH_HISTORY,
+ NEW_FISH_HUNGER);
+
+ // Global statistics: count no. of fishes
+ if (RtsFlags.ParFlags.ParStats.Global &&
+ RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
+ globalParStats.tot_fish_mess++;
+ }
+ }
+
+ receivedFinish = processMessages();
+ goto next_thread;
+ }
+ } else if (PacketsWaiting()) { /* Look for incoming messages */
+ receivedFinish = processMessages();
+ }
+
+ /* Now we are sure that we have some work available */
+ ASSERT(run_queue_hd != END_TSO_QUEUE);
+
+ /* Take a thread from the run queue, if we have work */
+ POP_RUN_QUEUE(t); // take_off_run_queue(END_TSO_QUEUE);
+ IF_DEBUG(sanity,checkTSO(t));
+
+ /* ToDo: write something to the log-file
+ if (RTSflags.ParFlags.granSimStats && !sameThread)
+ DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
+
+ CurrentTSO = t;
+ */
+ /* the spark pool for the current PE */
+ pool = &(MainRegTable.rSparks); // generalise to cap = &MainRegTable
+
+ IF_DEBUG(scheduler,
+ belch("--=^ %d threads, %d sparks on [%#x]",
+ run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
+
+# if 1
+ if (0 && RtsFlags.ParFlags.ParStats.Full &&
+ t && LastTSO && t->id != LastTSO->id &&
+ LastTSO->why_blocked == NotBlocked &&
+ LastTSO->what_next != ThreadComplete) {
+ // if previously scheduled TSO not blocked we have to record the context switch
+ DumpVeryRawGranEvent(TimeOfLastYield, CURRENT_PROC, CURRENT_PROC,
+ GR_DESCHEDULE, LastTSO, (StgClosure *)NULL, 0, 0);
+ }
+
+ if (RtsFlags.ParFlags.ParStats.Full &&
+ (emitSchedule /* forced emit */ ||
+ (t && LastTSO && t->id != LastTSO->id))) {
+ /*
+ we are running a different TSO, so write a schedule event to log file
+ NB: If we use fair scheduling we also have to write a deschedule
+ event for LastTSO; with unfair scheduling we know that the
+ previous tso has blocked whenever we switch to another tso, so
+ we don't need it in GUM for now
+ */
+ DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
+ GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
+ emitSchedule = rtsFalse;
+ }
+
+# endif
+#else /* !GRAN && !PAR */
+
+ // grab a thread from the run queue
+ ASSERT(run_queue_hd != END_TSO_QUEUE);
+ POP_RUN_QUEUE(t);
+
+ // Sanity check the thread we're about to run. This can be
+ // expensive if there is lots of thread switching going on...
+ IF_DEBUG(sanity,checkTSO(t));
+#endif
+
+#ifdef THREADED_RTS
+ {
+ StgMainThread *m = t->main;
+
+ if(m)
+ {
+ if(m == mainThread)
+ {
+ IF_DEBUG(scheduler,
+ sched_belch("### Running thread %d in bound thread", t->id));
+ // yes, the Haskell thread is bound to the current native thread
+ }
+ else
+ {
+ IF_DEBUG(scheduler,
+ sched_belch("### thread %d bound to another OS thread", t->id));
+ // no, bound to a different Haskell thread: pass to that thread
+ PUSH_ON_RUN_QUEUE(t);
+ passCapability(&m->bound_thread_cond);
+ continue;
+ }
+ }
+ else
+ {
+ if(mainThread != NULL)
+ // The thread we want to run is bound.
+ {
+ IF_DEBUG(scheduler,
+ sched_belch("### this OS thread cannot run thread %d", t->id));
+ // no, the current native thread is bound to a different
+ // Haskell thread, so pass it to any worker thread
+ PUSH_ON_RUN_QUEUE(t);
+ passCapabilityToWorker();
+ continue;
+ }
+ }
+ }
+#endif
+
+ cap->r.rCurrentTSO = t;
+
+ /* context switches are now initiated by the timer signal, unless
+ * the user specified "context switch as often as possible", with
+ * +RTS -C0
+ */
+ if ((RtsFlags.ConcFlags.ctxtSwitchTicks == 0
+ && (run_queue_hd != END_TSO_QUEUE
+ || blocked_queue_hd != END_TSO_QUEUE
+ || sleeping_queue != END_TSO_QUEUE)))
+ context_switch = 1;
+
+run_thread:
+
+ RELEASE_LOCK(&sched_mutex);
+
+ IF_DEBUG(scheduler, sched_belch("-->> running thread %ld %s ...",
+ t->id, whatNext_strs[t->what_next]));
+
+#ifdef PROFILING
+ startHeapProfTimer();
+#endif
+
+ /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+ /* Run the current thread
+ */
+ prev_what_next = t->what_next;
+
+ errno = t->saved_errno;
+
+ switch (prev_what_next) {
+
+ case ThreadKilled:
+ case ThreadComplete:
+ /* Thread already finished, return to scheduler. */
+ ret = ThreadFinished;
+ break;
+
+ case ThreadRunGHC:
+ ret = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
+ break;
+
+ case ThreadInterpret:
+ ret = interpretBCO(cap);
+ break;
+
+ default:
+ barf("schedule: invalid what_next field");
+ }
+
+ // The TSO might have moved, so find the new location:
+ t = cap->r.rCurrentTSO;
+
+ // And save the current errno in this thread.
+ t->saved_errno = errno;
+
+ /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+
+ /* Costs for the scheduler are assigned to CCS_SYSTEM */
+#ifdef PROFILING
+ stopHeapProfTimer();
+ CCCS = CCS_SYSTEM;
+#endif
+
+ ACQUIRE_LOCK(&sched_mutex);
+
+#ifdef RTS_SUPPORTS_THREADS
+ IF_DEBUG(scheduler,fprintf(stderr,"sched (task %p): ", osThreadId()););
+#elif !defined(GRAN) && !defined(PAR)
+ IF_DEBUG(scheduler,fprintf(stderr,"sched: "););
+#endif
+
+#if defined(PAR)
+ /* HACK 675: if the last thread didn't yield, make sure to print a
+ SCHEDULE event to the log file when StgRunning the next thread, even
+ if it is the same one as before */
+ LastTSO = t;
+ TimeOfLastYield = CURRENT_TIME;
+#endif
+
+ switch (ret) {
+ case HeapOverflow:
+#if defined(GRAN)
+ IF_DEBUG(gran, DumpGranEvent(GR_DESCHEDULE, t));
+ globalGranStats.tot_heapover++;
+#elif defined(PAR)
+ globalParStats.tot_heapover++;
+#endif
+
+ // did the task ask for a large block?
+ if (cap->r.rHpAlloc > BLOCK_SIZE) {
+ // if so, get one and push it on the front of the nursery.
+ bdescr *bd;
+ nat blocks;
+
+ blocks = (nat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
+
+ IF_DEBUG(scheduler,belch("--<< thread %ld (%s) stopped: requesting a large block (size %d)",
+ t->id, whatNext_strs[t->what_next], blocks));
+
+ // don't do this if it would push us over the
+ // alloc_blocks_lim limit; we'll GC first.
+ if (alloc_blocks + blocks < alloc_blocks_lim) {
+
+ alloc_blocks += blocks;
+ bd = allocGroup( blocks );
+
+ // link the new group into the list
+ bd->link = cap->r.rCurrentNursery;
+ bd->u.back = cap->r.rCurrentNursery->u.back;
+ if (cap->r.rCurrentNursery->u.back != NULL) {
+ cap->r.rCurrentNursery->u.back->link = bd;
+ } else {
+ ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
+ g0s0->blocks == cap->r.rNursery);
+ cap->r.rNursery = g0s0->blocks = bd;
+ }
+ cap->r.rCurrentNursery->u.back = bd;
+
+ // initialise it as a nursery block. We initialise the
+ // step, gen_no, and flags field of *every* sub-block in
+ // this large block, because this is easier than making
+ // sure that we always find the block head of a large
+ // block whenever we call Bdescr() (eg. evacuate() and
+ // isAlive() in the GC would both have to do this, at
+ // least).
+ {
+ bdescr *x;
+ for (x = bd; x < bd + blocks; x++) {
+ x->step = g0s0;
+ x->gen_no = 0;
+ x->flags = 0;
+ }
+ }
+
+ // don't forget to update the block count in g0s0.
+ g0s0->n_blocks += blocks;
+ // This assert can be a killer if the app is doing lots
+ // of large block allocations.
+ ASSERT(countBlocks(g0s0->blocks) == g0s0->n_blocks);
+
+ // now update the nursery to point to the new block
+ cap->r.rCurrentNursery = bd;
+
+ // we might be unlucky and have another thread get on the
+ // run queue before us and steal the large block, but in that
+ // case the thread will just end up requesting another large
+ // block.
+ PUSH_ON_RUN_QUEUE(t);
+ break;
+ }
+ }
+
+ /* make all the running tasks block on a condition variable,
+ * maybe set context_switch and wait till they all pile in,
+ * then have them wait on a GC condition variable.
+ */
+ IF_DEBUG(scheduler,belch("--<< thread %ld (%s) stopped: HeapOverflow",
+ t->id, whatNext_strs[t->what_next]));
+ threadPaused(t);
+#if defined(GRAN)
+ ASSERT(!is_on_queue(t,CurrentProc));
+#elif defined(PAR)
+ /* Currently we emit a DESCHEDULE event before GC in GUM.
+ ToDo: either add separate event to distinguish SYSTEM time from rest
+ or just nuke this DESCHEDULE (and the following SCHEDULE) */
+ if (0 && RtsFlags.ParFlags.ParStats.Full) {
+ DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
+ GR_DESCHEDULE, t, (StgClosure *)NULL, 0, 0);
+ emitSchedule = rtsTrue;
+ }
+#endif
+
+ ready_to_gc = rtsTrue;
+ context_switch = 1; /* stop other threads ASAP */
+ PUSH_ON_RUN_QUEUE(t);
+ /* actual GC is done at the end of the while loop */
+ break;
+
+ case StackOverflow:
+#if defined(GRAN)
+ IF_DEBUG(gran,
+ DumpGranEvent(GR_DESCHEDULE, t));
+ globalGranStats.tot_stackover++;
+#elif defined(PAR)
+ // IF_DEBUG(par,
+ // DumpGranEvent(GR_DESCHEDULE, t);
+ globalParStats.tot_stackover++;
+#endif
+ IF_DEBUG(scheduler,belch("--<< thread %ld (%s) stopped, StackOverflow",
+ t->id, whatNext_strs[t->what_next]));
+ /* just adjust the stack for this thread, then pop it back
+ * on the run queue.
+ */
+ threadPaused(t);
+ {
+ /* enlarge the stack */
+ StgTSO *new_t = threadStackOverflow(t);
+
+ /* This TSO has moved, so update any pointers to it from the
+ * main thread stack. It better not be on any other queues...
+ * (it shouldn't be).
+ */
+ if (t->main != NULL) {
+ t->main->tso = new_t;
+ }
+ PUSH_ON_RUN_QUEUE(new_t);
+ }
+ break;
+
+ case ThreadYielding:
+ // Reset the context switch flag. We don't do this just before
+ // running the thread, because that would mean we would lose ticks
+ // during GC, which can lead to unfair scheduling (a thread hogs
+ // the CPU because the tick always arrives during GC). This way
+ // penalises threads that do a lot of allocation, but that seems
+ // better than the alternative.
+ context_switch = 0;
+
+#if defined(GRAN)
+ IF_DEBUG(gran,
+ DumpGranEvent(GR_DESCHEDULE, t));
+ globalGranStats.tot_yields++;
+#elif defined(PAR)
+ // IF_DEBUG(par,
+ // DumpGranEvent(GR_DESCHEDULE, t);
+ globalParStats.tot_yields++;
+#endif
+ /* put the thread back on the run queue. Then, if we're ready to
+ * GC, check whether this is the last task to stop. If so, wake
+ * up the GC thread. getThread will block during a GC until the
+ * GC is finished.
+ */
+ IF_DEBUG(scheduler,
+ if (t->what_next != prev_what_next) {
+ belch("--<< thread %ld (%s) stopped to switch evaluators",
+ t->id, whatNext_strs[t->what_next]);
+ } else {
+ belch("--<< thread %ld (%s) stopped, yielding",
+ t->id, whatNext_strs[t->what_next]);
+ }
+ );
+
+ IF_DEBUG(sanity,
+ //belch("&& Doing sanity check on yielding TSO %ld.", t->id);
+ checkTSO(t));
+ ASSERT(t->link == END_TSO_QUEUE);
+
+ // Shortcut if we're just switching evaluators: don't bother
+ // doing stack squeezing (which can be expensive), just run the
+ // thread.
+ if (t->what_next != prev_what_next) {
+ goto run_thread;
+ }
+
+ threadPaused(t);
+
+#if defined(GRAN)
+ ASSERT(!is_on_queue(t,CurrentProc));
+
+ IF_DEBUG(sanity,
+ //belch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
+ checkThreadQsSanity(rtsTrue));
+#endif
+
+#if defined(PAR)
+ if (RtsFlags.ParFlags.doFairScheduling) {
+ /* this does round-robin scheduling; good for concurrency */
+ APPEND_TO_RUN_QUEUE(t);
+ } else {
+ /* this does unfair scheduling; good for parallelism */
+ PUSH_ON_RUN_QUEUE(t);
+ }
+#else
+ // this does round-robin scheduling; good for concurrency
+ APPEND_TO_RUN_QUEUE(t);
+#endif
+
+#if defined(GRAN)
+ /* add a ContinueThread event to actually process the thread */
+ new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
+ ContinueThread,
+ t, (StgClosure*)NULL, (rtsSpark*)NULL);
+ IF_GRAN_DEBUG(bq,
+ belch("GRAN: eventq and runnableq after adding yielded thread to queue again:");
+ G_EVENTQ(0);
+ G_CURR_THREADQ(0));
+#endif /* GRAN */
+ break;
+
+ case ThreadBlocked:
+#if defined(GRAN)
+ IF_DEBUG(scheduler,
+ belch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ",
+ t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
+ if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
+
+ // ??? needed; should emit block before
+ IF_DEBUG(gran,
+ DumpGranEvent(GR_DESCHEDULE, t));
+ prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
+ /*
+ ngoq Dogh!
+ ASSERT(procStatus[CurrentProc]==Busy ||
+ ((procStatus[CurrentProc]==Fetching) &&
+ (t->block_info.closure!=(StgClosure*)NULL)));
+ if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
+ !(!RtsFlags.GranFlags.DoAsyncFetch &&
+ procStatus[CurrentProc]==Fetching))
+ procStatus[CurrentProc] = Idle;
+ */
+#elif defined(PAR)
+ IF_DEBUG(scheduler,
+ belch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: ",
+ t->id, t, whatNext_strs[t->what_next], t->block_info.closure));
+ IF_PAR_DEBUG(bq,
+
+ if (t->block_info.closure!=(StgClosure*)NULL)
+ print_bq(t->block_info.closure));
+
+ /* Send a fetch (if BlockedOnGA) and dump event to log file */
+ blockThread(t);
+
+ /* whatever we schedule next, we must log that schedule */
+ emitSchedule = rtsTrue;
+
+#else /* !GRAN && !PAR */
+ /* don't need to do anything. Either the thread is blocked on
+ * I/O, in which case we'll have called addToBlockedQueue
+ * previously, or it's blocked on an MVar or Blackhole, in which
+ * case it'll be on the relevant queue already.
+ */
+ IF_DEBUG(scheduler,
+ fprintf(stderr, "--<< thread %d (%s) stopped: ",
+ t->id, whatNext_strs[t->what_next]);
+ printThreadBlockage(t);
+ fprintf(stderr, "\n"));
+ fflush(stderr);
+
+ /* Only for dumping event to log file
+ ToDo: do I need this in GranSim, too?
+ blockThread(t);
+ */
+#endif
+ threadPaused(t);
+ break;
+
+ case ThreadFinished:
+ /* Need to check whether this was a main thread, and if so, signal
+ * the task that started it with the return value. If we have no
+ * more main threads, we probably need to stop all the tasks until
+ * we get a new one.
+ */
+ /* We also end up here if the thread kills itself with an
+ * uncaught exception, see Exception.hc.
+ */
+ IF_DEBUG(scheduler,belch("--++ thread %d (%s) finished",
+ t->id, whatNext_strs[t->what_next]));
+#if defined(GRAN)
+ endThread(t, CurrentProc); // clean-up the thread
+#elif defined(PAR)
+ /* For now all are advisory -- HWL */
+ //if(t->priority==AdvisoryPriority) ??
+ advisory_thread_count--;
+
+# ifdef DIST
+ if(t->dist.priority==RevalPriority)
+ FinishReval(t);
+# endif
+
+ if (RtsFlags.ParFlags.ParStats.Full &&
+ !RtsFlags.ParFlags.ParStats.Suppressed)
+ DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);
+#endif
+
+ //
+ // Check whether the thread that just completed was a main
+ // thread, and if so return with the result.
+ //
+ // There is an assumption here that all thread completion goes
+ // through this point; we need to make sure that if a thread
+ // ends up in the ThreadKilled state, that it stays on the run
+ // queue so it can be dealt with here.
+ //
+ if (
+#if defined(RTS_SUPPORTS_THREADS)
+ mainThread != NULL
+#else
+ mainThread->tso == t
+#endif
+ )
+ {
+ // We are a bound thread: this must be our thread that just
+ // completed.
+ ASSERT(mainThread->tso == t);
+
+ if (t->what_next == ThreadComplete) {
+ if (mainThread->ret) {
+ // NOTE: return val is tso->sp[1] (see StgStartup.hc)
+ *(mainThread->ret) = (StgClosure *)mainThread->tso->sp[1];
+ }
+ mainThread->stat = Success;
+ } else {
+ if (mainThread->ret) {
+ *(mainThread->ret) = NULL;
+ }
+ if (was_interrupted) {
+ mainThread->stat = Interrupted;
+ } else {
+ mainThread->stat = Killed;
+ }
+ }
+#ifdef DEBUG
+ removeThreadLabel((StgWord)mainThread->tso->id);
+#endif
+ if (mainThread->prev == NULL) {
+ main_threads = mainThread->link;
+ } else {
+ mainThread->prev->link = mainThread->link;
+ }
+ if (mainThread->link != NULL) {
+ // the successor's prev must point at our predecessor, which is
+ // NULL only when this main thread was at the head of the list
+ mainThread->link->prev = mainThread->prev;
+ }
+ releaseCapability(cap);
+ return;
+ }
+
+#ifdef RTS_SUPPORTS_THREADS
+ ASSERT(t->main == NULL);
+#else
+ if (t->main != NULL) {
+ // Must be a main thread that is not the topmost one. Leave
+ // it on the run queue until the stack has unwound to the
+ // point where we can deal with this. Leaving it on the run
+ // queue also ensures that the garbage collector knows about
+ // this thread and its return value (it gets dropped from the
+ // all_threads list so there's no other way to find it).
+ APPEND_TO_RUN_QUEUE(t);
+ }
+#endif
+ break;
+
+ default:
+ barf("schedule: invalid thread return code %d", (int)ret);
+ }
+
+#ifdef PROFILING
+ // When we have +RTS -i0 and we're heap profiling, do a census at
+ // every GC. This lets us get repeatable runs for debugging.
+ if (performHeapProfile ||
+ (RtsFlags.ProfFlags.profileInterval==0 &&
+ RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
+ GarbageCollect(GetRoots, rtsTrue);
+ heapCensus();
+ performHeapProfile = rtsFalse;
+ ready_to_gc = rtsFalse; // we already GC'd
+ }
+#endif
+
+ if (ready_to_gc) {
+ /* everybody back, start the GC.
+ * Could do it in this thread, or signal a condition var
+ * to do it in another thread. Either way, we need to
+ * broadcast on gc_pending_cond afterward.
+ */
+#if defined(RTS_SUPPORTS_THREADS)
+ IF_DEBUG(scheduler,sched_belch("doing GC"));
+#endif
+ GarbageCollect(GetRoots,rtsFalse);
+ ready_to_gc = rtsFalse;
+#if defined(GRAN)
+ /* add a ContinueThread event to continue execution of current thread */
+ new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
+ ContinueThread,
+ t, (StgClosure*)NULL, (rtsSpark*)NULL);
+ IF_GRAN_DEBUG(bq,
+ fprintf(stderr, "GRAN: eventq and runnableq after Garbage collection:\n");
+ G_EVENTQ(0);
+ G_CURR_THREADQ(0));
+#endif /* GRAN */
+ }
+
+#if defined(GRAN)
+ next_thread:
+ IF_GRAN_DEBUG(unused,
+ print_eventq(EventHd));
+
+ event = get_next_event();
+#elif defined(PAR)
+ next_thread:
+ /* ToDo: wait for next message to arrive rather than busy wait */
+#endif /* GRAN || PAR */
+
+ } /* end of while(1) */
+
+ IF_PAR_DEBUG(verbose,
+ belch("== Leaving schedule() after having received Finish"));
+}
+
+/* ---------------------------------------------------------------------------
+ * rtsSupportsBoundThreads(): is the RTS built to support bound threads?
+ * used by Control.Concurrent for error checking.
+ * ------------------------------------------------------------------------- */
+
+StgBool
+rtsSupportsBoundThreads(void)
+{
+#ifdef THREADED_RTS
+ return rtsTrue;
+#else
+ return rtsFalse;
+#endif
+}
+
+/* ---------------------------------------------------------------------------
+ * isThreadBound(tso): check whether tso is bound to an OS thread.
+ * ------------------------------------------------------------------------- */
+
+StgBool
+isThreadBound(StgTSO* tso USED_IN_THREADED_RTS)
+{
+#ifdef THREADED_RTS
+ return (tso->main != NULL);
+#else
+ return rtsFalse;
+#endif
+}
+
+/* ---------------------------------------------------------------------------
+ * Singleton fork(). Do not copy any running threads.
+ * ------------------------------------------------------------------------- */
+
+#ifndef mingw32_TARGET_OS
+#define FORKPROCESS_PRIMOP_SUPPORTED
+#endif
+
+#ifdef FORKPROCESS_PRIMOP_SUPPORTED
+static void
+deleteThreadImmediately(StgTSO *tso);
+#endif
+StgInt
+forkProcess(HsStablePtr *entry
+#ifndef FORKPROCESS_PRIMOP_SUPPORTED
+ STG_UNUSED
+#endif
+ )
+{
+#ifdef FORKPROCESS_PRIMOP_SUPPORTED
+ pid_t pid;
+ StgTSO* t,*next;
+ StgMainThread *m;
+ SchedulerStatus rc;
+
+ IF_DEBUG(scheduler,sched_belch("forking!"));
+ rts_lock(); // This not only acquires sched_mutex, it also
+ // makes sure that no other threads are running
+
+ pid = fork();
+
+ if (pid) { /* parent */
+
+ /* just return the pid */
+ rts_unlock();
+ return pid;
+
+ } else { /* child */
+
+
+ // delete all threads
+ run_queue_hd = run_queue_tl = END_TSO_QUEUE;
+
+ for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->link;
+
+ // don't allow threads to catch the ThreadKilled exception
+ deleteThreadImmediately(t);
+ }
+
+ // wipe the main thread list
+ while((m = main_threads) != NULL) {
+ main_threads = m->link;
+# ifdef THREADED_RTS
+ closeCondition(&m->bound_thread_cond);
+# endif
+ stgFree(m);
+ }
+
+# ifdef RTS_SUPPORTS_THREADS
+ resetTaskManagerAfterFork(); // tell startTask() and friends that
+ startingWorkerThread = rtsFalse; // we have no worker threads any more
+ resetWorkerWakeupPipeAfterFork();
+# endif
+
+ rc = rts_evalStableIO(entry, NULL); // run the action
+ rts_checkSchedStatus("forkProcess",rc);
+
+ rts_unlock();
+
+ hs_exit(); // clean up and exit
+ stg_exit(0);
+ }
+#else /* !FORKPROCESS_PRIMOP_SUPPORTED */
+ barf("forkProcess#: primop not supported, sorry!\n");
+ return -1;
+#endif
+}
+
+/* ---------------------------------------------------------------------------
+ * deleteAllThreads(): kill all the live threads.
+ *
+ * This is used when we catch a user interrupt (^C), before performing
+ * any necessary cleanups and running finalizers.
+ *
+ * Locks: sched_mutex held.
+ * ------------------------------------------------------------------------- */
+
+void
+deleteAllThreads ( void )
+{
+ StgTSO* t, *next;
+ IF_DEBUG(scheduler,sched_belch("deleting all threads"));
+ for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->global_link;
+ deleteThread(t);
+ }
+
+ // The run queue now contains a bunch of ThreadKilled threads. We
+ // must not throw these away: the main thread(s) will be in there
+ // somewhere, and the main scheduler loop has to deal with it.
+ // Also, the run queue is the only thing keeping these threads from
+ // being GC'd, and we don't want the "main thread has been GC'd" panic.
+
+ ASSERT(blocked_queue_hd == END_TSO_QUEUE);
+ ASSERT(sleeping_queue == END_TSO_QUEUE);
+}
+
+/* startThread and insertThread are now in GranSim.c -- HWL */
+
+
+/* ---------------------------------------------------------------------------
+ * Suspending & resuming Haskell threads.
+ *
+ * When making a "safe" call to C (aka _ccall_GC), the task gives back
+ * its capability before calling the C function. This allows another
+ * task to pick up the capability and carry on running Haskell
+ * threads. It also means that if the C call blocks, it won't lock
+ * the whole system.
+ *
+ * The Haskell thread making the C call is put to sleep for the
+ * duration of the call, on the suspended_ccalling_threads queue. We
+ * give out a token to the task, which it can use to resume the thread
+ * on return from the C function.
+ * ------------------------------------------------------------------------- */
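+
+/* A minimal sketch (not itself part of the RTS) of how a safe foreign
+ * call is bracketed by this pair; reg and blocking_c_function are
+ * hypothetical names:
+ *
+ *   StgInt tok = suspendThread(reg);  // give up the Capability
+ *   blocking_c_function();            // may block without stalling the RTS
+ *   reg = resumeThread(tok);          // re-acquire a Capability and the TSO
+ */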
+
+StgInt
+suspendThread( StgRegTable *reg )
+{
+ nat tok;
+ Capability *cap;
+ int saved_errno = errno;
+
+ /* assume that *reg is a pointer to the StgRegTable part
+ * of a Capability.
+ */
+ cap = (Capability *)((void *)((unsigned char*)reg - sizeof(StgFunTable)));
+
+ ACQUIRE_LOCK(&sched_mutex);
+
+ IF_DEBUG(scheduler,
+ sched_belch("thread %d did a _ccall_gc", cap->r.rCurrentTSO->id));
+
+ // XXX this might not be necessary --SDM
+ cap->r.rCurrentTSO->what_next = ThreadRunGHC;
+
+ threadPaused(cap->r.rCurrentTSO);
+ cap->r.rCurrentTSO->link = suspended_ccalling_threads;
+ suspended_ccalling_threads = cap->r.rCurrentTSO;
+
+ if(cap->r.rCurrentTSO->blocked_exceptions == NULL) {
+ cap->r.rCurrentTSO->why_blocked = BlockedOnCCall;
+ cap->r.rCurrentTSO->blocked_exceptions = END_TSO_QUEUE;
+ } else {
+ cap->r.rCurrentTSO->why_blocked = BlockedOnCCall_NoUnblockExc;
+ }
+
+ /* Use the thread ID as the token; it should be unique */
+ tok = cap->r.rCurrentTSO->id;
+
+ /* Hand back capability */
+ releaseCapability(cap);
+
+#if defined(RTS_SUPPORTS_THREADS)
+ /* Preparing to leave the RTS, so ensure there's a native thread/task
+ waiting to take over.
+ */
+ IF_DEBUG(scheduler, sched_belch("worker (token %d): leaving RTS", tok));
+#endif
+
+ /* Other threads _might_ be available for execution; signal this */
+ THREAD_RUNNABLE();
+ RELEASE_LOCK(&sched_mutex);
+
+ errno = saved_errno;
+ return tok;
+}
+
+StgRegTable *
+resumeThread( StgInt tok )
+{
+ StgTSO *tso, **prev;
+ Capability *cap;
+ int saved_errno = errno;
+
+#if defined(RTS_SUPPORTS_THREADS)
+ /* Wait for permission to re-enter the RTS with the result. */
+ ACQUIRE_LOCK(&sched_mutex);
+ waitForReturnCapability(&sched_mutex, &cap);
+
+ IF_DEBUG(scheduler, sched_belch("worker (token %d): re-entering RTS", tok));
+#else
+ grabCapability(&cap);
+#endif
+
+ /* Remove the thread from the suspended list */
+ prev = &suspended_ccalling_threads;
+ for (tso = suspended_ccalling_threads;
+ tso != END_TSO_QUEUE;
+ prev = &tso->link, tso = tso->link) {
+ if (tso->id == (StgThreadID)tok) {
+ *prev = tso->link;
+ break;
+ }
+ }
+ if (tso == END_TSO_QUEUE) {
+ barf("resumeThread: thread not found");
+ }
+ tso->link = END_TSO_QUEUE;
+
+ if(tso->why_blocked == BlockedOnCCall) {
+ awakenBlockedQueueNoLock(tso->blocked_exceptions);
+ tso->blocked_exceptions = NULL;
+ }
+
+ /* Reset blocking status */
+ tso->why_blocked = NotBlocked;
+
+ cap->r.rCurrentTSO = tso;
+ RELEASE_LOCK(&sched_mutex);
+ errno = saved_errno;
+ return &cap->r;
+}
+
+
+/* ---------------------------------------------------------------------------
+ * Static functions
+ * ------------------------------------------------------------------------ */
+static void unblockThread(StgTSO *tso);
+
+/* ---------------------------------------------------------------------------
+ * Comparing Thread ids.
+ *
+ * This is used from STG land in the implementation of the
+ * instances of Eq/Ord for ThreadIds.
+ * ------------------------------------------------------------------------ */
+
+int
+cmp_thread(StgPtr tso1, StgPtr tso2)
+{
+ StgThreadID id1 = ((StgTSO *)tso1)->id;
+ StgThreadID id2 = ((StgTSO *)tso2)->id;
+
+ if (id1 < id2) return (-1);
+ if (id1 > id2) return 1;
+ return 0;
+}
+
+/* ---------------------------------------------------------------------------
+ * Fetching the ThreadID from an StgTSO.
+ *
+ * This is used in the implementation of Show for ThreadIds.
+ * ------------------------------------------------------------------------ */
+int
+rts_getThreadId(StgPtr tso)
+{
+ return ((StgTSO *)tso)->id;
+}
+
+#ifdef DEBUG
+void
+labelThread(StgPtr tso, char *label)
+{
+ int len;
+ void *buf;
+
+ /* Caveat: Once set, you can only set the thread name to "" */
+ len = strlen(label)+1;
+ buf = stgMallocBytes(len * sizeof(char), "Schedule.c:labelThread()");
+ strncpy(buf,label,len);
+ /* Update will free the old memory for us */
+ updateThreadLabel(((StgTSO *)tso)->id,buf);
+}
+#endif /* DEBUG */
+
+/* ---------------------------------------------------------------------------
+ Create a new thread.
+
+ The new thread starts with the given stack size. Before the
+ scheduler can run, however, this thread needs to have a closure
+ (and possibly some arguments) pushed on its stack. See
+ pushClosure() in Schedule.h.
+
+ createGenThread() and createIOThread() (in SchedAPI.h) are
+ convenient packaged versions of this function.
+ ------------------------------------------------------------------------ */
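+
+/* A usage sketch under the non-GRAN signature; `closure' stands for a
+ * suitable StgClosure* and is hypothetical here:
+ *
+ *   StgTSO *tso = createThread(RtsFlags.GcFlags.initialStkSize);
+ *   pushClosure(tso, (W_)closure);  // see pushClosure() in Schedule.h
+ *   scheduleThread(tso);            // make it runnable (see below)
+ */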
+#if defined(GRAN)
+/* currently pri (priority) is only used in a GRAN setup -- HWL */
+StgTSO *
+createThread(nat size, StgInt pri)
+#else
+StgTSO *
+createThread(nat size)
+#endif
+{
+
+ StgTSO *tso;
+ nat stack_size;
+
+ /* First check whether we should create a thread at all */
+#if defined(PAR)
+ /* check that no more than RtsFlags.ParFlags.maxThreads threads are created */
+ if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads) {
+ threadsIgnored++;
+ belch("{createThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)",
+ RtsFlags.ParFlags.maxThreads, advisory_thread_count);
+ return END_TSO_QUEUE;
+ }
+ threadsCreated++;
+#endif
+
+#if defined(GRAN)
+ ASSERT(!RtsFlags.GranFlags.Light || CurrentProc==0);
+#endif
+
+ // ToDo: check whether size = stack_size - TSO_STRUCT_SIZEW
+
+ /* catch ridiculously small stack sizes */
+ if (size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
+ size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
+ }
+
+ stack_size = size - TSO_STRUCT_SIZEW;
+
+ tso = (StgTSO *)allocate(size);
+ TICK_ALLOC_TSO(stack_size, 0);
+
+ SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);
+#if defined(GRAN)
+ SET_GRAN_HDR(tso, ThisPE);
+#endif
+
+ // Always start with the compiled code evaluator
+ tso->what_next = ThreadRunGHC;
+
+ tso->id = next_thread_id++;
+ tso->why_blocked = NotBlocked;
+ tso->blocked_exceptions = NULL;
+
+ tso->saved_errno = 0;
+ tso->main = NULL;
+
+ tso->stack_size = stack_size;
+ tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
+ - TSO_STRUCT_SIZEW;
+ tso->sp = (P_)&(tso->stack) + stack_size;
+
+#ifdef PROFILING
+ tso->prof.CCCS = CCS_MAIN;
+#endif
+
+ /* put a stop frame on the stack */
+ tso->sp -= sizeofW(StgStopFrame);
+ SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
+ tso->link = END_TSO_QUEUE;
+
+ // ToDo: check this
+#if defined(GRAN)
+ /* uses more flexible routine in GranSim */
+ insertThread(tso, CurrentProc);
+#else
+ /* In a non-GranSim setup the pushing of a TSO onto the runq is separated
+ * from its creation
+ */
+#endif
+
+#if defined(GRAN)
+ if (RtsFlags.GranFlags.GranSimStats.Full)
+ DumpGranEvent(GR_START,tso);
+#elif defined(PAR)
+ if (RtsFlags.ParFlags.ParStats.Full)
+ DumpGranEvent(GR_STARTQ,tso);
+ /* HACK to avoid SCHEDULE
+ LastTSO = tso; */
+#endif
+
+ /* Link the new thread on the global thread list.
+ */
+ tso->global_link = all_threads;
+ all_threads = tso;
+
+#if defined(DIST)
+ tso->dist.priority = MandatoryPriority; //by default that is...
+#endif
+
+#if defined(GRAN)
+ tso->gran.pri = pri;
+# if defined(DEBUG)
+ tso->gran.magic = TSO_MAGIC; // debugging only
+# endif
+ tso->gran.sparkname = 0;
+ tso->gran.startedat = CURRENT_TIME;
+ tso->gran.exported = 0;
+ tso->gran.basicblocks = 0;
+ tso->gran.allocs = 0;
+ tso->gran.exectime = 0;
+ tso->gran.fetchtime = 0;
+ tso->gran.fetchcount = 0;
+ tso->gran.blocktime = 0;
+ tso->gran.blockcount = 0;
+ tso->gran.blockedat = 0;
+ tso->gran.globalsparks = 0;
+ tso->gran.localsparks = 0;
+ if (RtsFlags.GranFlags.Light)
+ tso->gran.clock = Now; /* local clock */
+ else
+ tso->gran.clock = 0;
+
+ IF_DEBUG(gran,printTSO(tso));
+#elif defined(PAR)
+# if defined(DEBUG)
+ tso->par.magic = TSO_MAGIC; // debugging only
+# endif
+ tso->par.sparkname = 0;
+ tso->par.startedat = CURRENT_TIME;
+ tso->par.exported = 0;
+ tso->par.basicblocks = 0;
+ tso->par.allocs = 0;
+ tso->par.exectime = 0;
+ tso->par.fetchtime = 0;
+ tso->par.fetchcount = 0;
+ tso->par.blocktime = 0;
+ tso->par.blockcount = 0;
+ tso->par.blockedat = 0;
+ tso->par.globalsparks = 0;
+ tso->par.localsparks = 0;
+#endif
+
+#if defined(GRAN)
+ globalGranStats.tot_threads_created++;
+ globalGranStats.threads_created_on_PE[CurrentProc]++;
+ globalGranStats.tot_sq_len += spark_queue_len(CurrentProc);
+ globalGranStats.tot_sq_probes++;
+#elif defined(PAR)
+ // collect parallel global statistics (currently done together with GC stats)
+ if (RtsFlags.ParFlags.ParStats.Global &&
+ RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
+ //fprintf(stderr, "Creating thread %d @ %11.2f\n", tso->id, usertime());
+ globalParStats.tot_threads_created++;
+ }
+#endif
+
+#if defined(GRAN)
+ IF_GRAN_DEBUG(pri,
+ belch("==__ schedule: Created TSO %d (%p) [PE %d];",
+ tso->id, tso, CurrentProc));
+#elif defined(PAR)
+ IF_PAR_DEBUG(verbose,
+ belch("==__ schedule: Created TSO %d (%p); %d threads active",
+ tso->id, tso, advisory_thread_count));
+#else
+ IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words",
+ tso->id, tso->stack_size));
+#endif
+ return tso;
+}
+
+#if defined(PAR)
+/* RFP:
+ all parallel thread creation calls should fall through the following routine.
+*/
+StgTSO *
+createSparkThread(rtsSpark spark)
+{ StgTSO *tso;
+ ASSERT(spark != (rtsSpark)NULL);
+ if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads)
+ { threadsIgnored++;
+ barf("{createSparkThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)",
+ RtsFlags.ParFlags.maxThreads, advisory_thread_count);
+ return END_TSO_QUEUE;
+ }
+ else
+ { threadsCreated++;
+ tso = createThread(RtsFlags.GcFlags.initialStkSize);
+ if (tso==END_TSO_QUEUE)
+ barf("createSparkThread: Cannot create TSO");
+#if defined(DIST)
+ tso->priority = AdvisoryPriority;
+#endif
+ pushClosure(tso,spark);
+ PUSH_ON_RUN_QUEUE(tso);
+ advisory_thread_count++;
+ }
+ return tso;
+}
+#endif
+
+/*
+ Turn a spark into a thread.
+ ToDo: fix for SMP (needs to acquire SCHED_MUTEX!)
+*/
+#if defined(PAR)
+StgTSO *
+activateSpark (rtsSpark spark)
+{
+ StgTSO *tso;
+
+ tso = createSparkThread(spark);
+ if (RtsFlags.ParFlags.ParStats.Full) {
+ //ASSERT(run_queue_hd == END_TSO_QUEUE); // I think ...
+ IF_PAR_DEBUG(verbose,
+ belch("==^^ activateSpark: turning spark of closure %p (%s) into a thread",
+ (StgClosure *)spark, info_type((StgClosure *)spark)));
+ }
+ // ToDo: fwd info on local/global spark to thread -- HWL
+ // tso->gran.exported = spark->exported;
+ // tso->gran.locked = !spark->global;
+ // tso->gran.sparkname = spark->name;
+
+ return tso;
+}
+#endif
+
+static SchedulerStatus waitThread_(/*out*/StgMainThread* m,
+ Capability *initialCapability);
+
+
+/* ---------------------------------------------------------------------------
+ * scheduleThread()
+ *
+ * scheduleThread puts a thread at the end of the runnable queue.
+ * This will usually be done immediately after a thread is created.
+ * The caller of scheduleThread must create the thread using e.g.
+ * createThread and push an appropriate closure
+ * on this thread's stack before the scheduler is invoked.
+ * ------------------------------------------------------------------------ */
+
+static void scheduleThread_ (StgTSO* tso);
+
+static void
+scheduleThread_(StgTSO *tso)
+{
+ // Precondition: sched_mutex must be held.
+ // The thread goes at the *end* of the run-queue, to avoid possible
+ // starvation of any threads already on the queue.
+ APPEND_TO_RUN_QUEUE(tso);
+ THREAD_RUNNABLE();
+}
+
+void
+scheduleThread(StgTSO* tso)
+{
+ ACQUIRE_LOCK(&sched_mutex);
+ scheduleThread_(tso);
+ RELEASE_LOCK(&sched_mutex);
+}
+
+#if defined(RTS_SUPPORTS_THREADS)
+static Condition bound_cond_cache;
+static int bound_cond_cache_full = 0;
+#endif
+
+
+SchedulerStatus
+scheduleWaitThread(StgTSO* tso, /*[out]*/HaskellObj* ret,
+ Capability *initialCapability)
+{
+ // Precondition: sched_mutex must be held
+ StgMainThread *m;
+
+ m = stgMallocBytes(sizeof(StgMainThread), "waitThread");
+ m->tso = tso;
+ tso->main = m;
+ m->ret = ret;
+ m->stat = NoStatus;
+ m->link = main_threads;
+ m->prev = NULL;
+ if (main_threads != NULL) {
+ main_threads->prev = m;
+ }
+ main_threads = m;
+
+#if defined(RTS_SUPPORTS_THREADS)
+ // Allocating a new condition for each thread is expensive, so we
+ // cache one. This is a pretty feeble hack, but it helps speed up
+ // consecutive call-ins quite a bit.
+ if (bound_cond_cache_full) {
+ m->bound_thread_cond = bound_cond_cache;
+ bound_cond_cache_full = 0;
+ } else {
+ initCondition(&m->bound_thread_cond);
+ }
+#endif
+
+ /* Put the thread on the main-threads list prior to scheduling the TSO.
+ Failure to do so introduces a race condition in the MT case (as
+ identified by Wolfgang Thaller), whereby the new task/OS thread
+ created by scheduleThread_() would complete prior to the thread
+ that spawned it managed to put 'itself' on the main-threads list.
+ The upshot of it all being that the worker thread wouldn't get to
+ signal the completion of its work item for the main thread to
+ see (==> it got stuck waiting.) -- sof 6/02.
+ */
+ IF_DEBUG(scheduler, sched_belch("waiting for thread (%d)", tso->id));
+
+ APPEND_TO_RUN_QUEUE(tso);
+ // NB. Don't call THREAD_RUNNABLE() here, because the thread is
+ // bound and only runnable by *this* OS thread, so waking up other
+ // workers will just slow things down.
+
+ return waitThread_(m, initialCapability);
+}
+
+/* ---------------------------------------------------------------------------
+ * initScheduler()
+ *
+ * Initialise the scheduler. This resets all the queues - if the
+ * queues contained any threads, they'll be garbage collected at the
+ * next pass.
+ *
+ * ------------------------------------------------------------------------ */
+
+void
+initScheduler(void)
+{
+#if defined(GRAN)
+ nat i;
+
+ for (i=0; i<=MAX_PROC; i++) {
+ run_queue_hds[i] = END_TSO_QUEUE;
+ run_queue_tls[i] = END_TSO_QUEUE;
+ blocked_queue_hds[i] = END_TSO_QUEUE;
+ blocked_queue_tls[i] = END_TSO_QUEUE;
+ ccalling_threadss[i] = END_TSO_QUEUE;
+ sleeping_queue = END_TSO_QUEUE;
+ }
+#else
+ run_queue_hd = END_TSO_QUEUE;
+ run_queue_tl = END_TSO_QUEUE;
+ blocked_queue_hd = END_TSO_QUEUE;
+ blocked_queue_tl = END_TSO_QUEUE;
+ sleeping_queue = END_TSO_QUEUE;
+#endif
+
+ suspended_ccalling_threads = END_TSO_QUEUE;
+
+ main_threads = NULL;
+ all_threads = END_TSO_QUEUE;
+
+ context_switch = 0;
+ interrupted = 0;
+
+ RtsFlags.ConcFlags.ctxtSwitchTicks =
+ RtsFlags.ConcFlags.ctxtSwitchTime / TICK_MILLISECS;
+
+#if defined(RTS_SUPPORTS_THREADS)
+ /* Initialise the mutex and condition variables used by
+ * the scheduler. */
+ initMutex(&sched_mutex);
+ initMutex(&term_mutex);
+#endif
+
+ ACQUIRE_LOCK(&sched_mutex);
+
+ /* A capability holds the state a native thread needs in
+ * order to execute STG code. At least one capability is
+ * floating around (only SMP builds have more than one).
+ */
+ initCapabilities();
+
+#if defined(RTS_SUPPORTS_THREADS)
+ /* start our haskell execution tasks */
+ startTaskManager(0,taskStart);
+#endif
+
+#if /* defined(SMP) ||*/ defined(PAR)
+ initSparkPools();
+#endif
+
+ RELEASE_LOCK(&sched_mutex);
+}
+
+void
+exitScheduler( void )
+{
+#if defined(RTS_SUPPORTS_THREADS)
+ stopTaskManager();
+#endif
+ shutting_down_scheduler = rtsTrue;
+}
+
+/* ----------------------------------------------------------------------------
+ Managing the per-task allocation areas.
+
+ Each capability comes with an allocation area. These are
+ fixed-length block lists into which allocation can be done.
+
+ ToDo: no support for two-space collection at the moment???
+ ------------------------------------------------------------------------- */
+
+static
+SchedulerStatus
+waitThread_(StgMainThread* m, Capability *initialCapability)
+{
+ SchedulerStatus stat;
+
+ // Precondition: sched_mutex must be held.
+ IF_DEBUG(scheduler, sched_belch("new main thread (%d)", m->tso->id));
+
+#if defined(GRAN)
+ /* GranSim specific init */
+ CurrentTSO = m->tso; // the TSO to run
+ procStatus[MainProc] = Busy; // status of main PE
+ CurrentProc = MainProc; // PE to run it on
+ schedule(m,initialCapability);
+#else
+ schedule(m,initialCapability);
+ ASSERT(m->stat != NoStatus);
+#endif
+
+ stat = m->stat;
+
+#if defined(RTS_SUPPORTS_THREADS)
+ // Free the condition variable, returning it to the cache if possible.
+ if (!bound_cond_cache_full) {
+ bound_cond_cache = m->bound_thread_cond;
+ bound_cond_cache_full = 1;
+ } else {
+ closeCondition(&m->bound_thread_cond);
+ }
+#endif
+
+ IF_DEBUG(scheduler, sched_belch("main thread (%d) finished", m->tso->id));
+ stgFree(m);
+
+ // Postcondition: sched_mutex still held
+ return stat;
+}
+
+/* ---------------------------------------------------------------------------
+ Where are the roots that we know about?
+
+ - all the threads on the runnable queue
+ - all the threads on the blocked queue
+ - all the threads on the sleeping queue
+ - all the threads currently executing a _ccall_GC
+ - all the "main threads"
+
+ ------------------------------------------------------------------------ */
+
+/* This has to be protected either by the scheduler monitor, or by the
+ garbage collection monitor (probably the latter).
+ KH @ 25/10/99
+*/
+
+void
+GetRoots( evac_fn evac )
+{
+#if defined(GRAN)
+ {
+ nat i;
+ for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
+ if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL)))
+ evac((StgClosure **)&run_queue_hds[i]);
+ if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL)))
+ evac((StgClosure **)&run_queue_tls[i]);
+
+ if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL)))
+ evac((StgClosure **)&blocked_queue_hds[i]);
+ if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL)))
+ evac((StgClosure **)&blocked_queue_tls[i]);
+ if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL)))
+ evac((StgClosure **)&ccalling_threadss[i]);
+ }
+ }
+
+ markEventQueue();
+
+#else /* !GRAN */
+ if (run_queue_hd != END_TSO_QUEUE) {
+ ASSERT(run_queue_tl != END_TSO_QUEUE);
+ evac((StgClosure **)&run_queue_hd);
+ evac((StgClosure **)&run_queue_tl);
+ }
+
+ if (blocked_queue_hd != END_TSO_QUEUE) {
+ ASSERT(blocked_queue_tl != END_TSO_QUEUE);
+ evac((StgClosure **)&blocked_queue_hd);
+ evac((StgClosure **)&blocked_queue_tl);
+ }
+
+ if (sleeping_queue != END_TSO_QUEUE) {
+ evac((StgClosure **)&sleeping_queue);
+ }
+#endif
+
+ if (suspended_ccalling_threads != END_TSO_QUEUE) {
+ evac((StgClosure **)&suspended_ccalling_threads);
+ }
+
+#if defined(PAR) || defined(GRAN)
+ markSparkQueue(evac);
+#endif
+
+#if defined(RTS_USER_SIGNALS)
+ // mark the signal handlers (signals should be already blocked)
+ markSignalHandlers(evac);
+#endif
+}
+
+/* -----------------------------------------------------------------------------
+ performGC
+
+ This is the interface to the garbage collector from Haskell land.
+ We provide this so that external C code can allocate and garbage
+ collect when called from Haskell via _ccall_GC.
+
+ It might be useful to provide an interface whereby the programmer
+ can specify more roots (ToDo).
+
+ This needs to be protected by the GC condition variable above. KH.
+ -------------------------------------------------------------------------- */
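+
+/* Sketch of supplying extra roots from client code; my_roots and
+ * my_global_closure are hypothetical:
+ *
+ *   static void my_roots(evac_fn evac) {
+ *       evac((StgClosure **)&my_global_closure);
+ *   }
+ *   ...
+ *   performGCWithRoots(my_roots);  // GCs with GetRoots plus my_roots
+ */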
+
+static void (*extra_roots)(evac_fn);
+
+void
+performGC(void)
+{
+ /* We grab sched_mutex for the duration of the collection */
+ ACQUIRE_LOCK(&sched_mutex);
+ GarbageCollect(GetRoots,rtsFalse);
+ RELEASE_LOCK(&sched_mutex);
+}
+
+void
+performMajorGC(void)
+{
+ ACQUIRE_LOCK(&sched_mutex);
+ GarbageCollect(GetRoots,rtsTrue);
+ RELEASE_LOCK(&sched_mutex);
+}
+
+static void
+AllRoots(evac_fn evac)
+{
+ GetRoots(evac); // the scheduler's roots
+ extra_roots(evac); // the user's roots
+}
+
+void
+performGCWithRoots(void (*get_roots)(evac_fn))
+{
+ ACQUIRE_LOCK(&sched_mutex);
+ extra_roots = get_roots;
+ GarbageCollect(AllRoots,rtsFalse);
+ RELEASE_LOCK(&sched_mutex);
+}
+
+/* -----------------------------------------------------------------------------
+ Stack overflow
+
+ If the thread has reached its maximum stack size, then raise the
+ StackOverflow exception in the offending thread. Otherwise
+ relocate the TSO into a larger chunk of memory and adjust its stack
+ size appropriately.
+ -------------------------------------------------------------------------- */
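+
+/* Worked example (a sketch): a thread with a 4096-word stack and a
+ * 65536-word max_stack_size grows to min(4096*2, 65536) = 8192 stack
+ * words; TSO_STRUCT_SIZE is added and the total rounded up to whole
+ * blocks (and megablocks) before the TSO is copied into the new space.
+ */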
+
+static StgTSO *
+threadStackOverflow(StgTSO *tso)
+{
+ nat new_stack_size, new_tso_size, stack_words;
+ StgPtr new_sp;
+ StgTSO *dest;
+
+ IF_DEBUG(sanity,checkTSO(tso));
+ if (tso->stack_size >= tso->max_stack_size) {
+
+ IF_DEBUG(gc,
+ belch("@@ threadStackOverflow of TSO %d (%p): stack too large (now %ld; max is %ld)",
+ tso->id, tso, tso->stack_size, tso->max_stack_size);
+ /* If we're debugging, just print out the top of the stack */
+ printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
+ tso->sp+64)));
+
+ /* Send this thread the StackOverflow exception */
+ raiseAsync(tso, (StgClosure *)stackOverflow_closure);
+ return tso;
+ }
+
+ /* Try to double the current stack size. If that takes us over the
+ * maximum stack size for this thread, then use the maximum instead.
+ * Finally round up so the TSO ends up as a whole number of blocks.
+ */
+ new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+ new_tso_size = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) +
+ TSO_STRUCT_SIZE)/sizeof(W_);
+ new_tso_size = round_to_mblocks(new_tso_size); /* Be MBLOCK-friendly */
+ new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
+
+ IF_DEBUG(scheduler, fprintf(stderr,"== sched: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
+
+ dest = (StgTSO *)allocate(new_tso_size);
+ TICK_ALLOC_TSO(new_stack_size,0);
+
+ /* copy the TSO block and the old stack into the new area */
+ memcpy(dest,tso,TSO_STRUCT_SIZE);
+ stack_words = tso->stack + tso->stack_size - tso->sp;
+ new_sp = (P_)dest + new_tso_size - stack_words;
+ memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
+
+ /* relocate the stack pointers... */
+ dest->sp = new_sp;
+ dest->stack_size = new_stack_size;
+
+ /* Mark the old TSO as relocated. We have to check for relocated
+ * TSOs in the garbage collector and any primops that deal with TSOs.
+ *
+ * It's important to set the sp value to just beyond the end
+ * of the stack, so we don't attempt to scavenge any part of the
+ * dead TSO's stack.
+ */
+ tso->what_next = ThreadRelocated;
+ tso->link = dest;
+ tso->sp = (P_)&(tso->stack[tso->stack_size]);
+ tso->why_blocked = NotBlocked;
+ dest->mut_link = NULL;
+
+ IF_PAR_DEBUG(verbose,
+ belch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld",
+ tso->id, tso, tso->stack_size);
+ /* If we're debugging, just print out the top of the stack */
+ printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size,
+ tso->sp+64)));
+
+ IF_DEBUG(sanity,checkTSO(tso));
+#if 0
+ IF_DEBUG(scheduler,printTSO(dest));
+#endif
+
+ return dest;
+}
+
+/* ---------------------------------------------------------------------------
+ Wake up a queue that was blocked on some resource.
+ ------------------------------------------------------------------------ */
+
+#if defined(GRAN)
+STATIC_INLINE void
+unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
+{
+}
+#elif defined(PAR)
+STATIC_INLINE void
+unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
+{
+ /* write RESUME events to log file and
+ update blocked and fetch time (depending on type of the orig closure) */
+ if (RtsFlags.ParFlags.ParStats.Full) {
+ DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
+ GR_RESUMEQ, ((StgTSO *)bqe), ((StgTSO *)bqe)->block_info.closure,
+ 0, 0 /* spark_queue_len(ADVISORY_POOL) */);
+ if (EMPTY_RUN_QUEUE())
+ emitSchedule = rtsTrue;
+
+ switch (get_itbl(node)->type) {
+ case FETCH_ME_BQ:
+ ((StgTSO *)bqe)->par.fetchtime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
+ break;
+ case RBH:
+ case FETCH_ME:
+ case BLACKHOLE_BQ:
+ ((StgTSO *)bqe)->par.blocktime += CURRENT_TIME-((StgTSO *)bqe)->par.blockedat;
+ break;
+#ifdef DIST
+ case MVAR:
+ break;
+#endif
+ default:
+ barf("{unblockOneLocked}Daq Qagh: unexpected closure in blocking queue");
+ }
+ }
+}
+#endif
+
+#if defined(GRAN)
+static StgBlockingQueueElement *
+unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
+{
+ StgTSO *tso;
+ StgBlockingQueueElement *next;
+ PEs node_loc, tso_loc;
+
+ node_loc = where_is(node); // should be lifted out of loop
+ tso = (StgTSO *)bqe; // wastes an assignment to get the type right
+ next = bqe->link; // save the successor now; tso->link is overwritten below
+ tso_loc = where_is((StgClosure *)tso);
+ if (IS_LOCAL_TO(PROCS(node),tso_loc)) { // TSO is local
+ /* !fake_fetch => TSO is on CurrentProc is same as IS_LOCAL_TO */
+ ASSERT(CurrentProc!=node_loc || tso_loc==CurrentProc);
+ CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.lunblocktime;
+ // insertThread(tso, node_loc);
+ new_event(tso_loc, tso_loc, CurrentTime[CurrentProc],
+ ResumeThread,
+ tso, node, (rtsSpark*)NULL);
+ tso->link = END_TSO_QUEUE; // overwrite link just to be sure
+ // len_local++;
+ // len++;
+ } else { // TSO is remote (actually should be FMBQ)
+ CurrentTime[CurrentProc] += RtsFlags.GranFlags.Costs.mpacktime +
+ RtsFlags.GranFlags.Costs.gunblocktime +
+ RtsFlags.GranFlags.Costs.latency;
+ new_event(tso_loc, CurrentProc, CurrentTime[CurrentProc],
+ UnblockThread,
+ tso, node, (rtsSpark*)NULL);
+ tso->link = END_TSO_QUEUE; // overwrite link just to be sure
+ // len++;
+ }
+ /* the thread-queue-overhead is accounted for in either Resume or UnblockThread */
+ IF_GRAN_DEBUG(bq,
+ fprintf(stderr," %s TSO %d (%p) [PE %d] (block_info.closure=%p) (next=%p) ,",
+ (node_loc==tso_loc ? "Local" : "Global"),
+ tso->id, tso, CurrentProc, tso->block_info.closure, tso->link));
+ tso->block_info.closure = NULL;
+ IF_DEBUG(scheduler,belch("-- Waking up thread %ld (%p)",
+ tso->id, tso));
+ return next; // the caller's loop needs the next queue element
+}
+#elif defined(PAR)
+static StgBlockingQueueElement *
+unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
+{
+ StgBlockingQueueElement *next;
+
+ switch (get_itbl(bqe)->type) {
+ case TSO:
+ ASSERT(((StgTSO *)bqe)->why_blocked != NotBlocked);
+ /* if it's a TSO just push it onto the run_queue */
+ next = bqe->link;
+ ((StgTSO *)bqe)->link = END_TSO_QUEUE; // debugging?
+ APPEND_TO_RUN_QUEUE((StgTSO *)bqe);
+ THREAD_RUNNABLE();
+ unblockCount(bqe, node);
+ /* reset blocking status after dumping event */
+ ((StgTSO *)bqe)->why_blocked = NotBlocked;
+ break;
+
+ case BLOCKED_FETCH:
+ /* if it's a BLOCKED_FETCH put it on the PendingFetches list */
+ next = bqe->link;
+ bqe->link = (StgBlockingQueueElement *)PendingFetches;
+ PendingFetches = (StgBlockedFetch *)bqe;
+ break;
+
+# if defined(DEBUG)
+ /* can ignore this case in a non-debugging setup;
+ see comments on RBHSave closures above */
+ case CONSTR:
+ /* check that the closure is an RBHSave closure */
+ ASSERT(get_itbl((StgClosure *)bqe) == &stg_RBH_Save_0_info ||
+ get_itbl((StgClosure *)bqe) == &stg_RBH_Save_1_info ||
+ get_itbl((StgClosure *)bqe) == &stg_RBH_Save_2_info);
+ break;
+
+ default:
+ barf("{unblockOneLocked}Daq Qagh: Unexpected IP (%#lx; %s) in blocking queue at %#lx\n",
+ get_itbl((StgClosure *)bqe), info_type((StgClosure *)bqe),
+ (StgClosure *)bqe);
+# endif
+ }
+ IF_PAR_DEBUG(bq, fprintf(stderr, ", %p (%s)", bqe, info_type((StgClosure*)bqe)));
+ return next;
+}
+
+#else /* !GRAN && !PAR */
+static StgTSO *
+unblockOneLocked(StgTSO *tso)
+{
+ StgTSO *next;
+
+ ASSERT(get_itbl(tso)->type == TSO);
+ ASSERT(tso->why_blocked != NotBlocked);
+ tso->why_blocked = NotBlocked;
+ next = tso->link;
+ tso->link = END_TSO_QUEUE;
+ APPEND_TO_RUN_QUEUE(tso);
+ THREAD_RUNNABLE();
+ IF_DEBUG(scheduler,sched_belch("waking up thread %ld", tso->id));
+ return next;
+}
+#endif
+
+#if defined(GRAN) || defined(PAR)
+INLINE_ME StgBlockingQueueElement *
+unblockOne(StgBlockingQueueElement *bqe, StgClosure *node)
+{
+ ACQUIRE_LOCK(&sched_mutex);
+ bqe = unblockOneLocked(bqe, node);
+ RELEASE_LOCK(&sched_mutex);
+ return bqe;
+}
+#else
+INLINE_ME StgTSO *
+unblockOne(StgTSO *tso)
+{
+ ACQUIRE_LOCK(&sched_mutex);
+ tso = unblockOneLocked(tso);
+ RELEASE_LOCK(&sched_mutex);
+ return tso;
+}
+#endif
+
+#if defined(GRAN)
+void
+awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
+{
+ StgBlockingQueueElement *bqe;
+ PEs node_loc;
+ nat len = 0;
+
+ IF_GRAN_DEBUG(bq,
+ belch("##-_ AwBQ for node %p on PE %d @ %ld by TSO %d (%p): ", \
+ node, CurrentProc, CurrentTime[CurrentProc],
+ CurrentTSO->id, CurrentTSO));
+
+ node_loc = where_is(node);
+
+ ASSERT(q == END_BQ_QUEUE ||
+ get_itbl(q)->type == TSO || // q is either a TSO or an RBHSave
+ get_itbl(q)->type == CONSTR); // closure (type constructor)
+ ASSERT(is_unique(node));
+
+ /* FAKE FETCH: magically copy the node to the tso's proc;
+ no Fetch necessary because in reality the node should not have been
+ moved to the other PE in the first place
+ */
+ if (CurrentProc!=node_loc) {
+ IF_GRAN_DEBUG(bq,
+ belch("## node %p is on PE %d but CurrentProc is %d (TSO %d); assuming fake fetch and adjusting bitmask (old: %#x)",
+ node, node_loc, CurrentProc, CurrentTSO->id,
+ // CurrentTSO, where_is(CurrentTSO),
+ node->header.gran.procs));
+ node->header.gran.procs = (node->header.gran.procs) | PE_NUMBER(CurrentProc);
+ IF_GRAN_DEBUG(bq,
+ belch("## new bitmask of node %p is %#x",
+ node, node->header.gran.procs));
+ if (RtsFlags.GranFlags.GranSimStats.Global) {
+ globalGranStats.tot_fake_fetches++;
+ }
+ }
+
+ bqe = q;
+ // ToDo: check: ASSERT(CurrentProc==node_loc);
+ while (get_itbl(bqe)->type==TSO) { // q != END_TSO_QUEUE) {
+ //next = bqe->link;
+ /*
+ bqe points to the current element in the queue
+ next points to the next element in the queue
+ */
+ //tso = (StgTSO *)bqe; // wastes an assignment to get the type right
+ //tso_loc = where_is(tso);
+ len++;
+ bqe = unblockOneLocked(bqe, node);
+ }
+
+ /* if this is the BQ of an RBH, we have to put back the info ripped out of
+ the closure to make room for the anchor of the BQ */
+ if (bqe!=END_BQ_QUEUE) {
+ ASSERT(get_itbl(node)->type == RBH && get_itbl(bqe)->type == CONSTR);
+ /*
+ ASSERT((info_ptr==&RBH_Save_0_info) ||
+ (info_ptr==&RBH_Save_1_info) ||
+ (info_ptr==&RBH_Save_2_info));
+ */
+ /* cf. convertToRBH in RBH.c for writing the RBHSave closure */
+ ((StgRBH *)node)->blocking_queue = (StgBlockingQueueElement *)((StgRBHSave *)bqe)->payload[0];
+ ((StgRBH *)node)->mut_link = (StgMutClosure *)((StgRBHSave *)bqe)->payload[1];
+
+ IF_GRAN_DEBUG(bq,
+ belch("## Filled in RBH_Save for %p (%s) at end of AwBQ",
+ node, info_type(node)));
+ }
+
+ /* statistics gathering */
+ if (RtsFlags.GranFlags.GranSimStats.Global) {
+ // globalGranStats.tot_bq_processing_time += bq_processing_time;
+ globalGranStats.tot_bq_len += len; // total length of all bqs awakened
+ // globalGranStats.tot_bq_len_local += len_local; // same for local TSOs only
+ globalGranStats.tot_awbq++; // total no. of bqs awakened
+ }
+ IF_GRAN_DEBUG(bq,
+ fprintf(stderr,"## BQ Stats of %p: [%d entries] %s\n",
+ node, len, (bqe!=END_BQ_QUEUE) ? "RBH" : ""));
+}
+#elif defined(PAR)
+void
+awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
+{
+ StgBlockingQueueElement *bqe;
+
+ ACQUIRE_LOCK(&sched_mutex);
+
+ IF_PAR_DEBUG(verbose,
+ belch("##-_ AwBQ for node %p on [%x]: ",
+ node, mytid));
+#ifdef DIST
+ //RFP
+ if(get_itbl(q)->type == CONSTR || q==END_BQ_QUEUE) {
+ IF_PAR_DEBUG(verbose, belch("## ... nothing to unblock, so let's just return. RFP (BUG?)"));
+ RELEASE_LOCK(&sched_mutex); // don't return with sched_mutex still held
+ return;
+ }
+#endif
+
+ ASSERT(q == END_BQ_QUEUE ||
+ get_itbl(q)->type == TSO ||
+ get_itbl(q)->type == BLOCKED_FETCH ||
+ get_itbl(q)->type == CONSTR);
+
+ bqe = q;
+ while (get_itbl(bqe)->type==TSO ||
+ get_itbl(bqe)->type==BLOCKED_FETCH) {
+ bqe = unblockOneLocked(bqe, node);
+ }
+ RELEASE_LOCK(&sched_mutex);
+}
+
+#else /* !GRAN && !PAR */
+
+void
+awakenBlockedQueueNoLock(StgTSO *tso)
+{
+ while (tso != END_TSO_QUEUE) {
+ tso = unblockOneLocked(tso);
+ }
+}
+
+void
+awakenBlockedQueue(StgTSO *tso)
+{
+ ACQUIRE_LOCK(&sched_mutex);
+ while (tso != END_TSO_QUEUE) {
+ tso = unblockOneLocked(tso);
+ }
+ RELEASE_LOCK(&sched_mutex);
+}
+#endif
+
+/* ---------------------------------------------------------------------------
+ Interrupt execution
+ - usually called inside a signal handler so it mustn't do anything fancy.
+ ------------------------------------------------------------------------ */
+
+void
+interruptStgRts(void)
+{
+ interrupted = 1;
+ context_switch = 1;
+#ifdef RTS_SUPPORTS_THREADS
+ wakeBlockedWorkerThread();
+#endif
+}