[project @ 2005-11-25 14:03:00 by simonmar]
author    simonmar <unknown>
Fri, 25 Nov 2005 14:03:00 +0000 (14:03 +0000)
committer simonmar <unknown>
Fri, 25 Nov 2005 14:03:00 +0000 (14:03 +0000)
Modify some assertions that were occasionally incorrect

ghc/rts/Capability.c
ghc/rts/Capability.h
ghc/rts/RtsAPI.c
ghc/rts/Schedule.c

diff --git a/ghc/rts/Capability.c b/ghc/rts/Capability.c
index 2379c32..5ca2f51 100644
@@ -217,7 +217,6 @@ giveCapabilityToTask (Capability *cap, Task *task)
 {
     ASSERT_LOCK_HELD(&cap->lock);
     ASSERT(task->cap == cap);
-    // We are not modifying task->cap, so we do not need to take task->lock.
     IF_DEBUG(scheduler,
             sched_belch("passing capability %d to %s %p",
                         cap->no, task->tso ? "bound task" : "worker",
@@ -248,7 +247,7 @@ releaseCapability_ (Capability* cap)
 
     task = cap->running_task;
 
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);
 
     cap->running_task = NULL;
 
@@ -416,7 +415,7 @@ waitForReturnCapability (Capability **pCap,
 
     }
 
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
     IF_DEBUG(scheduler,
             sched_belch("returning; got capability %d", cap->no));
@@ -435,15 +434,14 @@ yieldCapability (Capability** pCap, Task *task)
 {
     Capability *cap = *pCap;
 
-    // The fast path; no locking
-    if ( cap->returning_tasks_hd == NULL && anyWorkForMe(cap,task) )
-       return;
+    // Fast path: if we don't enter the while loop below, no locking is needed
 
     while ( cap->returning_tasks_hd != NULL || !anyWorkForMe(cap,task) ) {
        IF_DEBUG(scheduler, sched_belch("giving up capability %d", cap->no));
 
        // We must now release the capability and wait to be woken up
        // again.
+       task->wakeup = rtsFalse;
        releaseCapabilityAndQueueWorker(cap);
 
        for (;;) {
@@ -457,6 +455,7 @@ yieldCapability (Capability** pCap, Task *task)
            IF_DEBUG(scheduler, sched_belch("woken up on capability %d", cap->no));
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task != NULL) {
+               IF_DEBUG(scheduler, sched_belch("capability %d is owned by another task", cap->no));
                RELEASE_LOCK(&cap->lock);
                continue;
            }
@@ -484,7 +483,7 @@ yieldCapability (Capability** pCap, Task *task)
 
     *pCap = cap;
 
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
     return;
 }
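
For reference, this is roughly how yieldCapability() reads once the hunks above are applied: the explicit fast-path test is gone (the fast path is simply not entering the while loop), task->wakeup is reset before the Capability is released, and the "someone else got there first" retry case gains a debug message. This is a sketch stitched together from the hunks only; parts of the function that are not visible in the diff are elided, and the void return type is assumed from the bare return.

/* Sketch of yieldCapability() after this patch, reconstructed from the
 * hunks above; code not shown in the diff is elided, and the void
 * return type is an assumption. */
void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    // Fast path: if we don't enter the while loop below, no locking is needed

    while ( cap->returning_tasks_hd != NULL || !anyWorkForMe(cap,task) ) {
        IF_DEBUG(scheduler, sched_belch("giving up capability %d", cap->no));

        // We must now release the capability and wait to be woken up
        // again.
        task->wakeup = rtsFalse;   // added by this patch: start waiting with the flag clear
        releaseCapabilityAndQueueWorker(cap);

        for (;;) {
            /* ... wait until this Task is woken up and handed a
             * Capability (elided in the hunks above) ... */

            IF_DEBUG(scheduler, sched_belch("woken up on capability %d", cap->no));
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task != NULL) {
                // another task already owns this Capability: go back to waiting
                IF_DEBUG(scheduler, sched_belch("capability %d is owned by another task", cap->no));
                RELEASE_LOCK(&cap->lock);
                continue;
            }
            /* ... take ownership of cap (elided) ... */
        }
    }

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}
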
diff --git a/ghc/rts/Capability.h b/ghc/rts/Capability.h
index 2a04e41..dcab351 100644
@@ -97,12 +97,20 @@ struct Capability_ {
 #endif
 
 // These properties should be true when a Task is holding a Capability
-#define ASSERT_CAPABILITY_INVARIANTS(cap,task)                         \
+#define ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task)                    \
   ASSERT(cap->running_task != NULL && cap->running_task == task);      \
   ASSERT(task->cap == cap);                                            \
-  ASSERT(cap->run_queue_hd == END_TSO_QUEUE ?                          \
-           cap->run_queue_tl == END_TSO_QUEUE : 1);                    \
-  ASSERT(myTask() == task);                                            \
+  ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)
+
+// Sometimes a Task holds a Capability, but the Task is not associated
+// with that Capability (i.e. task->cap != cap).  This happens when
+// (a) a Task holds multiple Capabilities, or (b) the current Task is
+// bound, its thread has just blocked, and the thread may have been
+// moved to another Capability.
+#define ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task) \
+  ASSERT(cap->run_queue_hd == END_TSO_QUEUE ?          \
+           cap->run_queue_tl == END_TSO_QUEUE : 1);    \
+  ASSERT(myTask() == task);                            \
   ASSERT_TASK_ID(task);
 
 // Converts a *StgRegTable into a *Capability.
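
The comment above is the crux of this commit: a Task can hold a Capability it is no longer associated with, so each assertion site has to choose between the full and the partial set. As an illustration of the intended usage, releaseCapability_() now checks only the partial invariants, while paths that have just acquired a Capability for the current Task, such as waitForReturnCapability() and yieldCapability(), assert the full set including task->cap == cap. The sketch below is reconstructed from the releaseCapability_ hunk further up; the unseen body is elided and the void return type is assumed.

/* Sketch only: reconstructed from the releaseCapability_ hunk above.
 * Everything not visible in the diff is elided; "void" is assumed. */
void
releaseCapability_ (Capability* cap)
{
    Task *task;

    task = cap->running_task;

    /* task->cap may legitimately differ from cap here (see the comment
     * on ASSERT_PARTIAL_CAPABILITY_INVARIANTS above), so only the
     * partial invariants are asserted: */
    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    /* ... pass the Capability on to a returning or waiting Task (elided) ... */
}
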
diff --git a/ghc/rts/RtsAPI.c b/ghc/rts/RtsAPI.c
index dec0db1..b1b1d9c 100644
@@ -578,7 +578,7 @@ rts_unlock (Capability *cap)
     Task *task;
 
     task = cap->running_task;
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
     // slightly delicate ordering of operations below, pay attention!
 
diff --git a/ghc/rts/Schedule.c b/ghc/rts/Schedule.c
index 84df020..8fe711a 100644
@@ -387,7 +387,7 @@ schedule (Capability *initialCapability, Task *task)
          // thread for a bit, even if there are others banging at the
          // door.
          first = rtsFalse;
-         ASSERT_CAPABILITY_INVARIANTS(cap,task);
+         ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
       } else {
          // Yield the capability to higher-priority tasks if necessary.
          yieldCapability(&cap, task);
@@ -639,7 +639,7 @@ run_thread:
     }
 #endif
 
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
     // ----------------------------------------------------------------------
     
@@ -681,7 +681,7 @@ run_thread:
 
     case ThreadFinished:
        if (scheduleHandleThreadFinished(cap, task, t)) return cap;
-       ASSERT_CAPABILITY_INVARIANTS(cap,task);
+       ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
        break;
 
     default:
@@ -795,6 +795,7 @@ schedulePushWork(Capability *cap USED_WHEN_SMP,
                    prev->link = t;
                    prev = t;
                } else {
+                   IF_DEBUG(scheduler, sched_belch("pushing thread %d to capability %d", t->id, free_caps[i]->no));
                    appendToRunQueue(free_caps[i],t);
                    if (t->bound) { t->bound->cap = free_caps[i]; }
                    i++;
@@ -2599,7 +2600,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
     cap = schedule(cap,task);
 
     ASSERT(task->stat != NoStatus);
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
     IF_DEBUG(scheduler, sched_belch("bound thread (%d) finished", task->tso->id));
     return cap;