+ SchedulerStatus rc = cap->running_task->stat;
+ switch (rc) {
+ case Success:
+ return;
+ case Killed:
+ errorBelch("%s: uncaught exception",site);
+ stg_exit(EXIT_FAILURE);
+ case Interrupted:
+ errorBelch("%s: interrupted", site);
+ stg_exit(EXIT_FAILURE);
+ default:
+ errorBelch("%s: Return code (%d) not ok",(site),(rc));
+ stg_exit(EXIT_FAILURE);
+ }
+}
+
+// rts_getSchedStatus: report the SchedulerStatus recorded for the Task
+// currently bound to this Capability (cap->running_task->stat), without
+// any of the error handling performed by the status-checking code above.
+SchedulerStatus
+rts_getSchedStatus (Capability *cap)
+{
+ return cap->running_task->stat;
+}
+
+// rts_lock: enter the RTS from foreign code. Binds a Task to the calling
+// OS thread (under sched_mutex) and then blocks until a Capability is
+// available. The returned Capability should later be given back with
+// rts_unlock().
+Capability *
+rts_lock (void)
+{
+ Capability *cap;
+ Task *task;
+
+ // ToDo: get rid of this lock in the common case. We could store
+ // a free Task in thread-local storage, for example. That would
+ // leave just one lock on the path into the RTS: cap->lock when
+ // acquiring the Capability.
+ ACQUIRE_LOCK(&sched_mutex);
+ task = newBoundTask();
+ RELEASE_LOCK(&sched_mutex);
+
+ // waitForReturnCapability fills in cap; it must start out NULL so a
+ // fresh Capability is chosen rather than a specific one being demanded.
+ // NOTE(review): presumably blocks until a Capability is free — confirm
+ // against waitForReturnCapability's contract.
+ cap = NULL;
+ waitForReturnCapability(&cap, task);
+ // NOTE(review): the (Capability *) cast is redundant — cap already has
+ // exactly this type.
+ return (Capability *)cap;
+}
+
+// Exiting the RTS: we hold a Capability that is not necessarily the
+// same one that was originally returned by rts_lock(), because
+// rts_evalIO() etc. may return a new one. Now that we have
+// investigated the return value, we can release the Capability,
+// and free the Task (in that order).
+
+// rts_unlock: leave the RTS, releasing the Capability acquired on entry
+// (see the comment above). The ordering below — clear task->tso, release
+// the Capability, then retire the Task — is deliberate; do not reorder.
+void
+rts_unlock (Capability *cap)
+{
+ Task *task;
+
+ // The caller's Task is expected to be the one running on this
+ // Capability; the assertion below checks the full invariants.
+ task = cap->running_task;
+ ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
+
+ // slightly delicate ordering of operations below, pay attention!
+
+ // We are no longer a bound task/thread. This is important,
+ // because the GC can run when we release the Capability below,
+ // and we don't want it to treat this as a live TSO pointer.
+ task->tso = NULL;
+
+ // Now release the Capability. With the capability released, GC
+ // may happen. NB. does not try to put the current Task on the
+ // worker queue.
+ releaseCapability(cap);
+
+ // Finally, we can release the Task to the free list.
+ boundTaskExiting(task);