[project @ 2006-01-18 10:06:36 by simonmar]
diff --git a/ghc/rts/Capability.c b/ghc/rts/Capability.c
index 7aacc24..5ca2f51 100644
--- a/ghc/rts/Capability.c
+++ b/ghc/rts/Capability.c
  * Only in an SMP build will there be multiple capabilities; for
  * the threaded RTS and other non-threaded builds, there is only
  * one global capability, namely MainCapability.
- * 
+ *
  * --------------------------------------------------------------------------*/
 
 #include "PosixSource.h"
 #include "Rts.h"
 #include "RtsUtils.h"
 #include "RtsFlags.h"
+#include "STM.h"
 #include "OSThreads.h"
 #include "Capability.h"
 #include "Schedule.h"
+#include "Sparks.h"
 
 #if !defined(SMP)
 Capability MainCapability;     // for non-SMP, we have one global capability
@@ -74,6 +76,8 @@ anyWorkForMe( Capability *cap, Task *task )
        } else {
            return (cap->run_queue_hd->bound == task);
        }
+    } else if (task->tso == NULL && !emptySparkPoolCap(cap)) {
+       return rtsTrue;
     }
     return globalWorkToDo();
 }
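
The new branch above lets a worker Task (task->tso == NULL, i.e. one with no bound Haskell thread) report that there is work to do whenever this Capability's own spark pool is non-empty, instead of falling straight through to the global check. As a rough illustration of what an emptiness test like emptySparkPoolCap() amounts to, here is a hypothetical per-capability pool layout; the real pool and its accessors live behind Sparks.h and may be laid out quite differently:

/* Hypothetical spark-pool layout, for illustration only; the real
 * emptySparkPoolCap() is defined against the RTS's own types. */
typedef struct {
    void **elements;       /* pending sparks (unevaluated closures) */
    unsigned int hd;       /* index of the next spark to take       */
    unsigned int tl;       /* index of the next free slot           */
} SparkPoolSketch;

static inline int
sparkPoolSketchIsEmpty (const SparkPoolSketch *pool)
{
    return pool->hd == pool->tl;   /* nothing between head and tail */
}
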
@@ -81,7 +85,7 @@ anyWorkForMe( Capability *cap, Task *task )
 
 /* -----------------------------------------------------------------------------
  * Manage the returning_tasks lists.
- * 
+ *
  * These functions require cap->lock
  * -------------------------------------------------------------------------- */
 
@@ -125,6 +129,8 @@ popReturningTask (Capability *cap)
 static void
 initCapability( Capability *cap, nat i )
 {
+    nat g;
+
     cap->no = i;
     cap->in_haskell        = rtsFalse;
 
@@ -143,9 +149,18 @@ initCapability( Capability *cap, nat i )
     cap->f.stgGCEnter1     = (F_)__stg_gc_enter_1;
     cap->f.stgGCFun        = (F_)__stg_gc_fun;
 
-    cap->mut_lists  = stgMallocBytes(sizeof(bdescr *) * 
+    cap->mut_lists  = stgMallocBytes(sizeof(bdescr *) *
                                     RtsFlags.GcFlags.generations,
                                     "initCapability");
+
+    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+       cap->mut_lists[g] = NULL;
+    }
+
+    cap->free_tvar_wait_queues = END_STM_WAIT_QUEUE;
+    cap->free_trec_chunks = END_STM_CHUNK_LIST;
+    cap->free_trec_headers = NO_TREC;
+    cap->transaction_tokens = 0;
 }
 
 /* ---------------------------------------------------------------------------
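
Two things are new in initCapability(): every per-generation mutable list is explicitly set to NULL, and the per-capability STM state (free TVar wait queues, free trec chunks, free trec headers, transaction token count) is given its empty or sentinel value. The NULL loop presumably guards against reading uninitialised list heads, since stgMallocBytes(), like plain malloc(), is not expected to zero the allocation. A minimal standalone sketch of that allocate-then-clear pattern (illustrative only; the block type and generation count are stand-ins):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a block descriptor chained into a per-generation list. */
typedef struct block { struct block *link; } block;

int main(void)
{
    unsigned int generations = 3;   /* stand-in for RtsFlags.GcFlags.generations */

    /* malloc() does not zero its result, so each list head must be
     * cleared explicitly, as the new loop in initCapability() does. */
    block **mut_lists = malloc(sizeof(block *) * generations);
    if (mut_lists == NULL) return 1;

    for (unsigned int g = 0; g < generations; g++) {
        mut_lists[g] = NULL;        /* generation g starts with an empty list */
    }

    for (unsigned int g = 0; g < generations; g++) {
        printf("gen %u: %s\n", g, mut_lists[g] == NULL ? "empty" : "non-empty");
    }
    free(mut_lists);
    return 0;
}
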
@@ -168,7 +183,7 @@ initCapabilities( void )
     for (i = 0; i < n; i++) {
        initCapability(&capabilities[i], i);
     }
-    
+
     IF_DEBUG(scheduler, sched_belch("allocated %d capabilities", n));
 #else
     n_capabilities = 1;
@@ -202,10 +217,9 @@ giveCapabilityToTask (Capability *cap, Task *task)
 {
     ASSERT_LOCK_HELD(&cap->lock);
     ASSERT(task->cap == cap);
-    // We are not modifying task->cap, so we do not need to take task->lock.
-    IF_DEBUG(scheduler, 
+    IF_DEBUG(scheduler,
             sched_belch("passing capability %d to %s %p",
-                        cap->no, task->tso ? "bound task" : "worker", 
+                        cap->no, task->tso ? "bound task" : "worker",
                         (void *)task->id));
     ACQUIRE_LOCK(&task->lock);
     task->wakeup = rtsTrue;
@@ -233,7 +247,7 @@ releaseCapability_ (Capability* cap)
 
     task = cap->running_task;
 
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);
 
     cap->running_task = NULL;
 
@@ -243,7 +257,7 @@ releaseCapability_ (Capability* cap)
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
-    } 
+    }
 
     // If the next thread on the run queue is a bound thread,
     // give this Capability to the appropriate Task.
@@ -253,23 +267,23 @@ releaseCapability_ (Capability* cap)
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
-    } 
+    }
 
     // If we have an unbound thread on the run queue, or if there's
     // anything else to do, give the Capability to a worker thread.
-    if (!emptyRunQueue(cap) || globalWorkToDo()) {
+    if (!emptyRunQueue(cap) || !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue;
            return;
-       } 
+       }
 
        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (!shutting_down_scheduler) {
-           IF_DEBUG(scheduler, 
+           IF_DEBUG(scheduler,
                     sched_belch("starting new worker on capability %d", cap->no));
            startWorkerTask(cap, workerStart);
            return;
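
Taken together with the fragments above, releaseCapability_() hands the Capability off in a fixed priority order, and the change in this hunk adds the local spark pool as one more reason to wake (or create) a worker. A condensed restatement of that decision order, collapsing the RTS state into booleans (illustrative only, not the actual RTS code):

/* Simplified restatement of the hand-off priority in releaseCapability_()
 * after this change; illustrative only. */
typedef enum {
    GIVE_TO_RETURNING_TASK,   /* a task is waiting to re-enter Haskell */
    GIVE_TO_BOUND_TASK,       /* the next runnable thread is bound     */
    GIVE_TO_SPARE_WORKER,     /* local/global work and a worker exists */
    START_NEW_WORKER,         /* local/global work but no worker yet   */
    KEEP_IDLE                 /* nothing to do; leave the cap free     */
} HandOff;

static HandOff
chooseHandOff (int have_returning_task, int run_queue_head_is_bound,
               int run_queue_empty, int spark_pool_empty, int global_work,
               int have_spare_worker, int shutting_down)
{
    if (have_returning_task)      return GIVE_TO_RETURNING_TASK;
    if (run_queue_head_is_bound)  return GIVE_TO_BOUND_TASK;
    if (!run_queue_empty || !spark_pool_empty || global_work) {
        if (have_spare_worker)    return GIVE_TO_SPARE_WORKER;
        if (!shutting_down)       return START_NEW_WORKER;
    }
    return KEEP_IDLE;
}
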
@@ -327,7 +341,7 @@ releaseCapabilityAndQueueWorker (Capability* cap UNUSED_IF_NOT_THREADS)
  *
  * ------------------------------------------------------------------------- */
 void
-waitForReturnCapability (Capability **pCap, 
+waitForReturnCapability (Capability **pCap,
                         Task *task UNUSED_IF_NOT_THREADS)
 {
 #if !defined(THREADED_RTS)
@@ -364,7 +378,7 @@ waitForReturnCapability (Capability **pCap,
 
     ACQUIRE_LOCK(&cap->lock);
 
-    IF_DEBUG(scheduler, 
+    IF_DEBUG(scheduler,
             sched_belch("returning; I want capability %d", cap->no));
 
     if (!cap->running_task) {
@@ -401,9 +415,9 @@ waitForReturnCapability (Capability **pCap,
 
     }
 
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
-    IF_DEBUG(scheduler, 
+    IF_DEBUG(scheduler,
             sched_belch("returning; got capability %d", cap->no));
 
     *pCap = cap;
@@ -420,15 +434,14 @@ yieldCapability (Capability** pCap, Task *task)
 {
     Capability *cap = *pCap;
 
-    // The fast path; no locking
-    if ( cap->returning_tasks_hd == NULL && anyWorkForMe(cap,task) )
-       return;
+       // The fast path: if we don't enter the while loop below, no locking is needed
 
     while ( cap->returning_tasks_hd != NULL || !anyWorkForMe(cap,task) ) {
        IF_DEBUG(scheduler, sched_belch("giving up capability %d", cap->no));
 
        // We must now release the capability and wait to be woken up
-       // again.  
+       // again.
+       task->wakeup = rtsFalse;
        releaseCapabilityAndQueueWorker(cap);
 
        for (;;) {
@@ -442,6 +455,7 @@ yieldCapability (Capability** pCap, Task *task)
            IF_DEBUG(scheduler, sched_belch("woken up on capability %d", cap->no));
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task != NULL) {
+               IF_DEBUG(scheduler, sched_belch("capability %d is owned by another task", cap->no));
                RELEASE_LOCK(&cap->lock);
                continue;
            }
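
The important functional change in yieldCapability() is that task->wakeup is reset to rtsFalse before the Task queues itself and releases the Capability; the flag is set again, under task->lock, by giveCapabilityToTask() when someone hands the Capability back. Clearing it any later could presumably let a stale wakeup from an earlier hand-off satisfy the next wait. A self-contained sketch of that flag-plus-condition-variable pattern in plain pthreads (the RTS uses its own Task and Condition wrappers, so names and details differ):

#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    bool            wakeup;
} Waiter;

/* Analogue of task->wakeup = rtsFalse: clear the flag *before* becoming
 * visible to wakers, so a stale wakeup cannot satisfy the next wait. */
void waiter_prepare (Waiter *w)
{
    pthread_mutex_lock(&w->lock);
    w->wakeup = false;
    pthread_mutex_unlock(&w->lock);
}

/* Analogue of the for(;;) wait loop: sleep until the flag is set,
 * tolerating spurious wakeups by re-checking the predicate. */
void waiter_sleep (Waiter *w)
{
    pthread_mutex_lock(&w->lock);
    while (!w->wakeup) {
        pthread_cond_wait(&w->cond, &w->lock);
    }
    pthread_mutex_unlock(&w->lock);
}

/* Analogue of giveCapabilityToTask(): set the flag and signal. */
void waiter_wake (Waiter *w)
{
    pthread_mutex_lock(&w->lock);
    w->wakeup = true;
    pthread_cond_signal(&w->cond);
    pthread_mutex_unlock(&w->lock);
}
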
@@ -469,7 +483,7 @@ yieldCapability (Capability** pCap, Task *task)
 
     *pCap = cap;
 
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
     return;
 }
@@ -488,7 +502,7 @@ prodCapabilities(rtsBool all)
     nat i;
     Capability *cap;
     Task *task;
-    
+
     for (i=0; i < n_capabilities; i++) {
        cap = &capabilities[i];
        ACQUIRE_LOCK(&cap->lock);
@@ -525,7 +539,7 @@ void
 prodOneCapability (void)
 {
     prodCapabilities(rtsFalse);
-}          
+}
 
 /* ----------------------------------------------------------------------------
  * shutdownCapability
@@ -539,7 +553,7 @@ prodOneCapability (void)
  * will exit the scheduler and call taskStop(), and any bound thread
  * that wakes up will return to its caller.  Runnable threads are
  * killed.
- * 
+ *
  * ------------------------------------------------------------------------- */
 
 void
@@ -580,7 +594,7 @@ shutdownCapability (Capability *cap, Task *task)
  * tryGrabCapability
  *
  * Attempt to gain control of a Capability if it is free.
- * 
+ *
  * ------------------------------------------------------------------------- */
 
 rtsBool