Improve error message when argument count varies
[ghc-hetmet.git] / ghc / rts / Capability.c
index ad3339c..f3bbefe 100644 (file)
@@ -1,6 +1,6 @@
 /* ---------------------------------------------------------------------------
  *
- * (c) The GHC Team, 2003-2005
+ * (c) The GHC Team, 2003-2006
  *
  * Capabilities
  *
 #include "Rts.h"
 #include "RtsUtils.h"
 #include "RtsFlags.h"
+#include "STM.h"
 #include "OSThreads.h"
 #include "Capability.h"
 #include "Schedule.h"
+#include "Sparks.h"
 
 #if !defined(SMP)
 Capability MainCapability;     // for non-SMP, we have one global capability
@@ -37,29 +39,15 @@ Capability *capabilities = NULL;
 // locking, so we don't do that.
 Capability *last_free_capability;
 
-#ifdef SMP
-#define UNUSED_IF_NOT_SMP
-#else
-#define UNUSED_IF_NOT_SMP STG_UNUSED
-#endif
-
-#ifdef RTS_USER_SIGNALS
-#define UNUSED_IF_NOT_THREADS
-#else
-#define UNUSED_IF_NOT_THREADS STG_UNUSED
-#endif
-
-
+#if defined(THREADED_RTS)
 STATIC_INLINE rtsBool
 globalWorkToDo (void)
 {
     return blackholes_need_checking
        || interrupted
-#if defined(RTS_USER_SIGNALS)
-       || signals_pending()
-#endif
        ;
 }
+#endif
 
 #if defined(THREADED_RTS)
 STATIC_INLINE rtsBool
@@ -74,6 +62,8 @@ anyWorkForMe( Capability *cap, Task *task )
        } else {
            return (cap->run_queue_hd->bound == task);
        }
+    } else if (task->tso == NULL && !emptySparkPoolCap(cap)) {
+       return rtsTrue;
     }
     return globalWorkToDo();
 }
@@ -125,7 +115,7 @@ popReturningTask (Capability *cap)
 static void
 initCapability( Capability *cap, nat i )
 {
-       nat g;
+    nat g;
 
     cap->no = i;
     cap->in_haskell        = rtsFalse;
@@ -148,9 +138,15 @@ initCapability( Capability *cap, nat i )
     cap->mut_lists  = stgMallocBytes(sizeof(bdescr *) *
                                     RtsFlags.GcFlags.generations,
                                     "initCapability");
-       for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
-             cap->mut_lists[g] = NULL;
+
+    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+       cap->mut_lists[g] = NULL;
     }
+
+    cap->free_tvar_wait_queues = END_STM_WAIT_QUEUE;
+    cap->free_trec_chunks = END_STM_CHUNK_LIST;
+    cap->free_trec_headers = NO_TREC;
+    cap->transaction_tokens = 0;
 }
 
 /* ---------------------------------------------------------------------------
@@ -203,11 +199,10 @@ initCapabilities( void )
 
 #if defined(THREADED_RTS)
 STATIC_INLINE void
-giveCapabilityToTask (Capability *cap, Task *task)
+giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
 {
     ASSERT_LOCK_HELD(&cap->lock);
     ASSERT(task->cap == cap);
-    // We are not modifying task->cap, so we do not need to take task->lock.
     IF_DEBUG(scheduler,
             sched_belch("passing capability %d to %s %p",
                         cap->no, task->tso ? "bound task" : "worker",
@@ -238,7 +233,7 @@ releaseCapability_ (Capability* cap)
 
     task = cap->running_task;
 
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);
 
     cap->running_task = NULL;
 
@@ -262,7 +257,7 @@ releaseCapability_ (Capability* cap)
 
     // If we have an unbound thread on the run queue, or if there's
     // anything else to do, give the Capability to a worker thread.
-    if (!emptyRunQueue(cap) || globalWorkToDo()) {
+    if (!emptyRunQueue(cap) || !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue;
@@ -286,7 +281,7 @@ releaseCapability_ (Capability* cap)
 }
 
 void
-releaseCapability (Capability* cap UNUSED_IF_NOT_THREADS)
+releaseCapability (Capability* cap USED_IF_THREADS)
 {
     ACQUIRE_LOCK(&cap->lock);
     releaseCapability_(cap);
@@ -294,7 +289,7 @@ releaseCapability (Capability* cap UNUSED_IF_NOT_THREADS)
 }
 
 static void
-releaseCapabilityAndQueueWorker (Capability* cap UNUSED_IF_NOT_THREADS)
+releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
 {
     Task *task;
 
@@ -332,8 +327,7 @@ releaseCapabilityAndQueueWorker (Capability* cap UNUSED_IF_NOT_THREADS)
  *
  * ------------------------------------------------------------------------- */
 void
-waitForReturnCapability (Capability **pCap,
-                        Task *task UNUSED_IF_NOT_THREADS)
+waitForReturnCapability (Capability **pCap, Task *task)
 {
 #if !defined(THREADED_RTS)
 
@@ -406,7 +400,7 @@ waitForReturnCapability (Capability **pCap,
 
     }
 
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
     IF_DEBUG(scheduler,
             sched_belch("returning; got capability %d", cap->no));
@@ -425,15 +419,14 @@ yieldCapability (Capability** pCap, Task *task)
 {
     Capability *cap = *pCap;
 
-    // The fast path; no locking
-    if ( cap->returning_tasks_hd == NULL && anyWorkForMe(cap,task) )
-       return;
+    // The fast path has no locking, if we don't enter this while loop
 
     while ( cap->returning_tasks_hd != NULL || !anyWorkForMe(cap,task) ) {
        IF_DEBUG(scheduler, sched_belch("giving up capability %d", cap->no));
 
        // We must now release the capability and wait to be woken up
        // again.
+       task->wakeup = rtsFalse;
        releaseCapabilityAndQueueWorker(cap);
 
        for (;;) {
@@ -447,6 +440,7 @@ yieldCapability (Capability** pCap, Task *task)
            IF_DEBUG(scheduler, sched_belch("woken up on capability %d", cap->no));
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task != NULL) {
+               IF_DEBUG(scheduler, sched_belch("capability %d is owned by another task", cap->no));
                RELEASE_LOCK(&cap->lock);
                continue;
            }
@@ -474,7 +468,7 @@ yieldCapability (Capability** pCap, Task *task)
 
     *pCap = cap;
 
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
     return;
 }