/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2003-2006
 *
 * Capabilities
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc. During
 * STG execution, a pointer to the capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities,
 * for non-threaded builds there is only one global capability, namely
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/
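
/* For illustration only (not part of this file): since BaseReg points
 * at cap->r rather than at the Capability itself, the Capability can be
 * recovered from the register table by subtracting the field offset,
 * roughly:
 *
 *     Capability *cap =
 *         (Capability *)((char *)BaseReg - offsetof(Capability, r));
 *
 * The real RTS wraps this up in a helper in Capability.h; the sketch
 * above just shows the relationship between BaseReg and cap.
 */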

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "STM.h"
#include "OSThreads.h"
#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
#include "Trace.h"

// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
Capability MainCapability;

nat n_capabilities;
Capability *capabilities = NULL;

// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability;

/* GC indicator, in scope for the scheduler, init'ed to false */
volatile StgWord waiting_for_gc = 0;

#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return blackholes_need_checking
        || sched_state >= SCHED_INTERRUPTING
        ;
}
#endif

#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
anyWorkForMe( Capability *cap, Task *task )
{
    if (task->tso != NULL) {
        // A bound task only runs if its thread is on the run queue of
        // the capability on which it was woken up.  Otherwise, we
        // can't be sure that we have the right capability: the thread
        // might be woken up on some other capability, and task->cap
        // could change under our feet.
        return !emptyRunQueue(cap) && cap->run_queue_hd->bound == task;
    } else {
        // A vanilla worker task runs if either there is a lightweight
        // thread at the head of the run queue, or the run queue is
        // empty and (there are sparks to execute, or there is some
        // other global condition to check, such as threads blocked on
        // blackholes).
        if (emptyRunQueue(cap)) {
            return !emptySparkPoolCap(cap)
                || !emptyWakeupQueue(cap)
                || globalWorkToDo();
        } else
            return cap->run_queue_hd->bound == NULL;
    }
}
#endif

/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->return_link == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->return_link == NULL);
        cap->returning_tasks_tl->return_link = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}

STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    Task *task;
    ASSERT_LOCK_HELD(&cap->lock);
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->return_link;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->return_link = NULL;
    return task;
}
#endif

/* ----------------------------------------------------------------------------
 * Initialisation
 *
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */

static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell        = rtsFalse;

    cap->run_queue_hd      = END_TSO_QUEUE;
    cap->run_queue_tl      = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task      = NULL; // indicates cap is free
    cap->spare_workers     = NULL;
    cap->suspended_ccalling_tasks = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->wakeup_queue_hd    = END_TSO_QUEUE;
    cap->wakeup_queue_tl    = END_TSO_QUEUE;
#endif

    cap->f.stgGCEnter1     = (F_)__stg_gc_enter_1;
    cap->f.stgGCFun        = (F_)__stg_gc_fun;

    cap->mut_lists  = stgMallocBytes(sizeof(bdescr *) *
                                     RtsFlags.GcFlags.generations,
                                     "initCapability");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }

    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
}

/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
void
initCapabilities( void )
{
#if defined(THREADED_RTS)
    nat i;

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = RtsFlags.ParFlags.nNodes;

    if (n_capabilities == 1) {
        capabilities = &MainCapability;
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
    } else {
        capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
                                      "initCapabilities");
    }

    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }

    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);

#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}

/* ----------------------------------------------------------------------------
 * setContextSwitches: cause all capabilities to context switch as
 * soon as possible.
 * ------------------------------------------------------------------------- */

void setContextSwitches(void)
{
  nat i;
  for (i=0; i < n_capabilities; i++) {
    capabilities[i].context_switch = 1;
  }
}

/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    trace(TRACE_sched | DEBUG_sched,
          "passing capability %d to %s %p",
          cap->no, task->tso ? "bound task" : "worker",
          (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
#endif
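
/* For illustration only: the sticky wakeup flag pairs with a wait loop
 * of the following shape on the receiving Task's side (cf. the loops in
 * waitForReturnCapability() and yieldCapability() below):
 *
 *     ACQUIRE_LOCK(&task->lock);
 *     if (!task->wakeup) waitCondition(&task->cond, &task->lock);
 *     task->wakeup = rtsFalse;
 *     RELEASE_LOCK(&task->lock);
 *
 * If the signal arrives before the Task reaches waitCondition(), the
 * flag is already set and the wait is skipped, so the wakeup is never
 * lost.
 */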

/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap)
{
    Task *task;

    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call..
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

    /* If waiting_for_gc was the reason to release the cap, the thread
       came here from yieldCapability() via releaseCapabilityAndQueueWorker().
       Unconditionally mark the cap free and return, skipping the other
       special cases guarded below.  The thread will wait on its
       condition variable and re-acquire the same cap after GC (the
       GC-triggering cap calls releaseCapability and enters the
       spare_workers case).
    */
    if (waiting_for_gc) {
      last_free_capability = cap; // needed?
      trace(TRACE_sched | DEBUG_sched,
            "GC pending, set capability %d free", cap->no);
      return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        ASSERT(task != cap->run_queue_hd->bound);
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
    }

    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap, workerStart);
            return;
        }
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (!emptyRunQueue(cap) || !emptyWakeupQueue(cap)
              || !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue;
            return;
        }
    }

    last_free_capability = cap;
    trace(TRACE_sched | DEBUG_sched, "freeing capability %d", cap->no);
}

void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap);
    RELEASE_LOCK(&cap->lock);
}

static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_worker queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    // Also, be careful to check that this task hasn't just exited
    // Haskell to do a foreign call (task->suspended_tso).
    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
        task->next = cap->spare_workers;
        cap->spare_workers = task;
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap);

    RELEASE_LOCK(&cap->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * waitForReturnCapability( Task *task )
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            for (i = 0; i < n_capabilities; i++) {
                cap = &capabilities[i];
                if (!cap->running_task) {
                    break;
                }
            }
            if (cap->running_task) {
                // Can't find a free one, use last_free_capability.
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }

    }

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}
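
/* For illustration only: a simplified sketch of the calling pattern
 * (cf. Schedule.c:resumeThread()) when an OS thread comes back from a
 * foreign call:
 *
 *     Capability *cap = task->cap;        // may be NULL or stale
 *     waitForReturnCapability(&cap, task);
 *     // cap is now owned by this task, and Haskell code may run
 */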

#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */

void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    // The fast path takes no locks: if we don't enter the while loop,
    // we return immediately with the Capability we already hold.

    while ( waiting_for_gc
            /* i.e. another capability triggered HeapOverflow, is busy
               getting capabilities (stopping their owning tasks) */
            || cap->returning_tasks_hd != NULL
                /* cap reserved for another task */
            || !anyWorkForMe(cap,task)
                /* cap/task have no work */
            ) {
        debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

        // We must now release the capability and wait to be woken up
        // again.
        task->wakeup = rtsFalse;
        releaseCapabilityAndQueueWorker(cap);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);

            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task != NULL) {
                debugTrace(DEBUG_sched,
                           "capability %d is owned by another task", cap->no);
                RELEASE_LOCK(&cap->lock);
                continue;
            }

            if (task->tso == NULL) {
                ASSERT(cap->spare_workers != NULL);
                // if we're not at the front of the queue, release it
                // again.  This is unlikely to happen.
                if (cap->spare_workers != task) {
                    giveCapabilityToTask(cap,cap->spare_workers);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->spare_workers = task->next;
                task->next = NULL;
            }
            cap->running_task = task;
            RELEASE_LOCK(&cap->lock);
            break;
        }

        trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);
        ASSERT(cap->running_task == task);
    }

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}
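
/* For illustration only: the scheduler's idle path uses this roughly as
 *
 *     yieldCapability(&cap, task);
 *     // cap may now point at a *different* Capability than before,
 *     // so callers must reload any cap-derived state afterwards.
 */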

/* ----------------------------------------------------------------------------
 * Wake up a thread on a Capability.
 *
 * This is used when the current Task is running on a Capability and
 * wishes to wake up a thread on a different Capability.
 * ------------------------------------------------------------------------- */

void
wakeupThreadOnCapability (Capability *my_cap,
                          Capability *other_cap,
                          StgTSO *tso)
{
    ACQUIRE_LOCK(&other_cap->lock);

    // We hold other_cap->lock here, so it is safe to migrate the TSO
    // (and its bound Task, if any) over to other_cap.
    if (tso->bound) {
        ASSERT(tso->bound->cap == tso->cap);
        tso->bound->cap = other_cap;
    }
    tso->cap = other_cap;

    ASSERT(tso->bound ? tso->bound->cap == other_cap : 1);

    if (other_cap->running_task == NULL) {
        // nobody is running this Capability, we can add our thread
        // directly onto the run queue and start up a Task to run it.

        other_cap->running_task = myTask();
            // precond for releaseCapability_() and appendToRunQueue()

        appendToRunQueue(other_cap,tso);

        trace(TRACE_sched, "resuming capability %d", other_cap->no);
        releaseCapability_(other_cap);
    } else {
        appendToWakeupQueue(my_cap,other_cap,tso);
        other_cap->context_switch = 1;
        // someone is running on this Capability, so it cannot be
        // freed without first checking the wakeup queue (see
        // releaseCapability_).
    }

    RELEASE_LOCK(&other_cap->lock);
}

/* ----------------------------------------------------------------------------
 * prodCapabilities
 *
 * Used to indicate that the interrupted flag is now set, or some
 * other global condition that might require waking up a Task on each
 * Capability.
 * ------------------------------------------------------------------------- */

static void
prodCapabilities(rtsBool all)
{
    nat i;
    Capability *cap;
    Task *task;

    for (i=0; i < n_capabilities; i++) {
        cap = &capabilities[i];
        ACQUIRE_LOCK(&cap->lock);
        if (!cap->running_task) {
            if (cap->spare_workers) {
                trace(TRACE_sched, "resuming capability %d", cap->no);
                task = cap->spare_workers;
                ASSERT(!task->stopped);
                giveCapabilityToTask(cap,task);
                if (!all) {
                    RELEASE_LOCK(&cap->lock);
                    return;
                }
            }
        }
        RELEASE_LOCK(&cap->lock);
    }
    return;
}

void
prodAllCapabilities (void)
{
    prodCapabilities(rtsTrue);
}

/* ----------------------------------------------------------------------------
 * prodOneCapability
 *
 * Like prodAllCapabilities, but we only require a single Task to wake
 * up in order to service some global event, such as checking for
 * deadlock after some idle time has passed.
 * ------------------------------------------------------------------------- */

void
prodOneCapability (void)
{
    prodCapabilities(rtsFalse);
}

/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller.  Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */

void
shutdownCapability (Capability *cap, Task *task, rtsBool safe)
{
    nat i;

    ASSERT(sched_state == SCHED_SHUTTING_DOWN);

    task->cap = cap;

    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left.  We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).

    for (i = 0; /* i < 50 */; i++) {
        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;

        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example.  This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = t->next) {
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched,
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    // unlink the dead worker; prev stays at the last
                    // live Task we kept on the list
                    if (!prev) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                } else {
                    prev = t;
                }
            }
        }

        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        // If "safe", then busy-wait for any threads currently doing
        // foreign calls.  If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalling_tasks && safe) {
            debugTrace(DEBUG_sched,
                       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
        freeCapability(cap);
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // We now have the Capability; its run queue and spare_workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 *
 * ------------------------------------------------------------------------- */

rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
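    // Cheap unlocked check first: if the Capability is obviously busy,
    // fail without touching the lock.  The check is repeated below
    // under cap->lock, since the answer may change in between
    // (a double-checked locking pattern).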
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
        RELEASE_LOCK(&cap->lock);
        return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}


#endif /* THREADED_RTS */

void
freeCapability (Capability *cap) {
    stgFree(cap->mut_lists);
#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
    freeSparkPool(&cap->r.rSparks);
#endif
}

/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities.  When
   using multiple GC threads, GC thread i (of n) marks every
   Capability c for which (c `mod` n == i).
   ------------------------------------------------------------------------ */
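
/* For illustration only: a hypothetical GC driver with n_gc_threads
 * threads would partition the work as
 *
 *     // on GC thread i:
 *     markSomeCapabilities(evac, user, i, n_gc_threads);
 *
 * so Capability c ends up marked by thread (c mod n_gc_threads).
 */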

void
markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta)
{
    nat i;
    Capability *cap;
    Task *task;

    // Each GC thread is responsible for following roots from the
    // Capability of the same number.  There will usually be no more
    // Capabilities than GC threads, but just in case there are, we
    // mark every Capability whose number is the GC thread's index
    // plus a multiple of the number of GC threads.
    for (i = i0; i < n_capabilities; i += delta) {
        cap = &capabilities[i];
        evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_tl);
#endif
        for (task = cap->suspended_ccalling_tasks; task != NULL;
             task=task->next) {
            debugTrace(DEBUG_sched,
                       "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
            evac(user, (StgClosure **)(void *)&task->suspended_tso);
        }

#if defined(THREADED_RTS)
        traverseSparkQueue (evac, user, cap);
#endif
    }

#if !defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
    evac(user, (StgClosure **)(void *)&sleeping_queue);
#endif
}

// This function is used by the compacting GC to thread all the
// pointers from spark queues.
void
traverseSparkQueues (evac_fn evac USED_IF_THREADS, void *user USED_IF_THREADS)
{
#if defined(THREADED_RTS)
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        traverseSparkQueue (evac, user, &capabilities[i]);
    }
#endif // THREADED_RTS
}

void
markCapabilities (evac_fn evac, void *user)
{
    markSomeCapabilities(evac, user, 0, 1);
}