rts/Capability.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2003-2006
 *
 * Capabilities
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc. During
 * STG execution, a pointer to the capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities;
 * in non-threaded builds there is only one global capability, namely
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/
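
/* A note on getting from BaseReg back to the Capability: BaseReg
 * points at cap->r, so the enclosing Capability can be recovered by
 * subtracting the offset of the 'r' field.  A sketch (Capability.h
 * defines a regTableToCapability() macro of roughly this shape for
 * THREADED_RTS builds):
 *
 *   Capability *cap =
 *       (Capability *)((char *)BaseReg - STG_FIELD_OFFSET(Capability, r));
 */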

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "STM.h"
#include "OSThreads.h"
#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
#include "Trace.h"
#include "GC.h"

// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
Capability MainCapability;

nat n_capabilities;
Capability *capabilities = NULL;

// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability;

/* GC indicator, in scope for the scheduler, init'ed to false */
volatile StgWord waiting_for_gc = 0;

/* Let foreign code get the current Capability -- assuming there is one!
 * This is useful for unsafe foreign calls because they are called with
 * the current Capability held, but they are not passed it. For example,
 * see the integer-gmp package, which calls allocateLocal() in its
 * stgAllocForGMP() function (which gets called by gmp functions).
 * */
Capability * rts_unsafeGetMyCapability (void)
{
#if defined(THREADED_RTS)
  return myTask()->cap;
#else
  return &MainCapability;
#endif
}
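
/* A sketch of the intended use, modelled on the stgAllocForGMP()
 * example mentioned above.  The hook name is a hypothetical
 * illustration, and the allocateLocal() signature may differ between
 * GHC versions:
 *
 *   static void *
 *   myAllocHook (size_t size_in_bytes)   // hypothetical GMP alloc hook
 *   {
 *       // safe only while an unsafe foreign call holds a Capability
 *       Capability *cap = rts_unsafeGetMyCapability();
 *       return allocateLocal(cap, ROUNDUP_BYTES_TO_WDS(size_in_bytes));
 *   }
 */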

#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return blackholes_need_checking
        || sched_state >= SCHED_INTERRUPTING
        ;
}
#endif

#if defined(THREADED_RTS)
StgClosure *
findSpark (Capability *cap)
{
  Capability *robbed;
  StgClosurePtr spark;
  rtsBool retry;
  nat i = 0;

  if (!emptyRunQueue(cap)) {
      // If there are other threads, don't try to run any new
      // sparks: sparks might be speculative, we don't want to take
      // resources away from the main computation.
      return 0;
  }

  // first try to get a spark from our own pool.
  // We should be using reclaimSpark(), because it works without
  // needing any atomic instructions:
  //   spark = reclaimSpark(cap->sparks);
  // However, measurements show that this makes at least one benchmark
  // slower (prsa) and doesn't affect the others.
  spark = tryStealSpark(cap);
  if (spark != NULL) {
      cap->sparks_converted++;

      // Post event for running a spark from capability's own pool.
      postEvent(cap, EVENT_RUN_SPARK, cap->r.rCurrentTSO->id, 0);

      return spark;
  }

  if (n_capabilities == 1) { return NULL; } // nobody else to steal from

  debugTrace(DEBUG_sched,
             "cap %d: Trying to steal work from other capabilities",
             cap->no);

  do {
      retry = rtsFalse;

      /* Visit Capabilities 0..n-1 in sequence until a theft succeeds.
         We could equally start at a random index instead of 0. */
      for ( i=0 ; i < n_capabilities ; i++ ) {
          robbed = &capabilities[i];
          if (cap == robbed)  // ourselves...
              continue;

          if (emptySparkPoolCap(robbed)) // nothing to steal here
              continue;

          spark = tryStealSpark(robbed);
          if (spark == NULL && !emptySparkPoolCap(robbed)) {
              // we conflicted with another thread while trying to steal;
              // try again later.
              retry = rtsTrue;
          }

          if (spark != NULL) {
              debugTrace(DEBUG_sched,
                 "cap %d: Stole a spark from capability %d",
                         cap->no, robbed->no);
              cap->sparks_converted++;

              postEvent(cap, EVENT_STEAL_SPARK,
                        cap->r.rCurrentTSO->id, robbed->no);

              return spark;
          }
          // otherwise: no success, try next one
      }
  } while (retry);

  debugTrace(DEBUG_sched, "No sparks stolen");
  return NULL;
}
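
/* A sketch of how a scheduler might consume findSpark(): when its run
 * queue is empty, it asks for a spark and turns it into a thread.  The
 * createSparkThread() call below is illustrative; the exact helper and
 * its signature live in the scheduler, not in this file:
 *
 *   StgClosure *spark = findSpark(cap);
 *   if (spark != NULL) {
 *       createSparkThread(cap, spark);   // hypothetical helper
 *   }
 */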

// Returns rtsTrue if any spark pool is currently non-empty.  The
// result is only valid for an instant, of course, so in a sense it
// is immediately invalid, and should not be relied upon for
// correctness.
rtsBool
anySparks (void)
{
    nat i;

    for (i=0; i < n_capabilities; i++) {
        if (!emptySparkPoolCap(&capabilities[i])) {
            return rtsTrue;
        }
    }
    return rtsFalse;
}
#endif

/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->return_link == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->return_link == NULL);
        cap->returning_tasks_tl->return_link = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}

STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    ASSERT_LOCK_HELD(&cap->lock);
    Task *task;
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->return_link;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->return_link = NULL;
    return task;
}
#endif
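
/* Note: returning_tasks is a FIFO queue: newReturningTask() appends at
 * returning_tasks_tl and popReturningTask() removes from
 * returning_tasks_hd, so Tasks returning from foreign calls re-enter
 * the RTS in arrival order.
 */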

/* ----------------------------------------------------------------------------
 * Initialisation
 *
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */

static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell        = rtsFalse;
    cap->in_gc             = rtsFalse;

    cap->run_queue_hd      = END_TSO_QUEUE;
    cap->run_queue_tl      = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task      = NULL; // indicates cap is free
    cap->spare_workers     = NULL;
    cap->suspended_ccalling_tasks = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->wakeup_queue_hd    = END_TSO_QUEUE;
    cap->wakeup_queue_tl    = END_TSO_QUEUE;
    cap->sparks_created     = 0;
    cap->sparks_converted   = 0;
    cap->sparks_pruned      = 0;
#endif

    cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
    cap->f.stgGCEnter1     = (F_)__stg_gc_enter_1;
    cap->f.stgGCFun        = (F_)__stg_gc_fun;

    cap->mut_lists  = stgMallocBytes(sizeof(bdescr *) *
                                     RtsFlags.GcFlags.generations,
                                     "initCapability");
    cap->saved_mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                          RtsFlags.GcFlags.generations,
                                          "initCapability");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }

    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
}

/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
void
initCapabilities( void )
{
#if defined(THREADED_RTS)
    nat i;

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = RtsFlags.ParFlags.nNodes;

    if (n_capabilities == 1) {
        capabilities = &MainCapability;
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
    } else {
        capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
                                      "initCapabilities");
    }

    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }

    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);

#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}
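
/* For illustration: with a threaded RTS started as "./prog +RTS -N4"
 * (a hypothetical program name), RtsFlags.ParFlags.nNodes is 4, so the
 * loop above initialises capabilities[0] .. capabilities[3] and
 * n_capabilities == 4.
 */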

/* ----------------------------------------------------------------------------
 * setContextSwitches: cause all capabilities to context switch as
 * soon as possible.
 * ------------------------------------------------------------------------- */

void setContextSwitches(void)
{
    nat i;
    for (i=0; i < n_capabilities; i++) {
        contextSwitchCapability(&capabilities[i]);
    }
}

/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    debugTrace(DEBUG_sched, "passing capability %d to %s %p",
               cap->no, task->tso ? "bound task" : "worker",
               (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap,
                    rtsBool always_wakeup)
{
    Task *task;

    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call.
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

    if (waiting_for_gc == PENDING_GC_SEQ) {
        last_free_capability = cap; // needed?
        debugTrace(DEBUG_sched, "GC pending, set capability %d free", cap->no);
        return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        ASSERT(task != cap->run_queue_hd->bound);
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
    }

    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap, workerStart);
            return;
        }
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (always_wakeup ||
        !emptyRunQueue(cap) || !emptyWakeupQueue(cap) ||
        !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue.
            return;
        }
    }

    last_free_capability = cap;
    debugTrace(DEBUG_sched, "freeing capability %d", cap->no);
}

void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsFalse);
    RELEASE_LOCK(&cap->lock);
}

void
releaseAndWakeupCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsTrue);
    RELEASE_LOCK(&cap->lock);
}
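
/* The two wrappers above differ only in the always_wakeup flag they
 * pass to releaseCapability_(): releaseCapability() wakes a spare
 * worker only if there is work for it, while
 * releaseAndWakeupCapability() always wakes one if any is queued.
 */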

static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_worker queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    // Also, be careful to check that this task hasn't just exited
    // Haskell to do a foreign call (task->suspended_tso).
    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
        task->next = cap->spare_workers;
        cap->spare_workers = task;
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap,rtsFalse);

    RELEASE_LOCK(&cap->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * waitForReturnCapability( Task *task )
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            cap = NULL;
            for (i = 0; i < n_capabilities; i++) {
                if (!capabilities[i].running_task) {
                    cap = &capabilities[i];
                    break;
                }
            }
            if (cap == NULL) {
                // Can't find a free one, use last_free_capability.
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }

    }

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}
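
/* A sketch of the in-call round trip this supports.  suspendThread()
 * and resumeThread() live in Schedule.c; their exact signatures are
 * paraphrased here:
 *
 *   // Haskell thread makes a safe foreign call:
 *   cap = suspendThread(...);      // releases the Capability
 *   r = foreign_function(...);     // runs without a Capability
 *   cap = resumeThread(...);       // calls waitForReturnCapability()
 *   // ... and the result r is handed back to the Haskell thread.
 */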

#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */

void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    if (waiting_for_gc == PENDING_GC_PAR) {
        debugTrace(DEBUG_sched, "capability %d: becoming a GC thread", cap->no);
        postEvent(cap, EVENT_GC_START, 0, 0);
        gcWorkerThread(cap);
        postEvent(cap, EVENT_GC_END, 0, 0);
        return;
    }

    debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

    // We must now release the capability and wait to be woken up
    // again.
    task->wakeup = rtsFalse;
    releaseCapabilityAndQueueWorker(cap);

    for (;;) {
        ACQUIRE_LOCK(&task->lock);
        // task->lock held, cap->lock not held
        if (!task->wakeup) waitCondition(&task->cond, &task->lock);
        cap = task->cap;
        task->wakeup = rtsFalse;
        RELEASE_LOCK(&task->lock);

        debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);

        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task != NULL) {
            debugTrace(DEBUG_sched,
                       "capability %d is owned by another task", cap->no);
            RELEASE_LOCK(&cap->lock);
            continue;
        }

        if (task->tso == NULL) {
            ASSERT(cap->spare_workers != NULL);
            // if we're not at the front of the queue, release it
            // again.  This is unlikely to happen.
            if (cap->spare_workers != task) {
                giveCapabilityToTask(cap,cap->spare_workers);
                RELEASE_LOCK(&cap->lock);
                continue;
            }
            cap->spare_workers = task->next;
            task->next = NULL;
        }
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
        break;
    }

    debugTrace(DEBUG_sched, "resuming capability %d", cap->no);
    ASSERT(cap->running_task == task);

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}

/* ----------------------------------------------------------------------------
 * Wake up a thread on a Capability.
 *
 * This is used when the current Task is running on a Capability and
 * wishes to wake up a thread on a different Capability.
 * ------------------------------------------------------------------------- */

void
wakeupThreadOnCapability (Capability *my_cap,
                          Capability *other_cap,
                          StgTSO *tso)
{
    ACQUIRE_LOCK(&other_cap->lock);

    // other_cap->lock is now held, so it is safe to migrate the TSO
    // (and its bound Task, if any) to other_cap.
    if (tso->bound) {
        ASSERT(tso->bound->cap == tso->cap);
        tso->bound->cap = other_cap;
    }
    tso->cap = other_cap;

    ASSERT(tso->bound ? tso->bound->cap == other_cap : 1);

    if (other_cap->running_task == NULL) {
        // nobody is running this Capability, we can add our thread
        // directly onto the run queue and start up a Task to run it.

        other_cap->running_task = myTask();
            // precond for releaseCapability_() and appendToRunQueue()

        appendToRunQueue(other_cap,tso);

        releaseCapability_(other_cap,rtsFalse);
    } else {
        appendToWakeupQueue(my_cap,other_cap,tso);
        other_cap->context_switch = 1;
        // someone is running on this Capability, so it cannot be
        // freed without first checking the wakeup queue (see
        // releaseCapability_).
    }

    RELEASE_LOCK(&other_cap->lock);
}
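
/* For illustration: a typical caller (e.g. the thread-unblocking
 * machinery in Schedule.c) holds its own Capability and wakes a
 * blocked TSO that lives elsewhere, so the call shape is simply
 *
 *   wakeupThreadOnCapability(cap, tso->cap, tso);
 *
 * (The surrounding unblocking logic is paraphrased, not shown.)
 */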

/* ----------------------------------------------------------------------------
 * prodCapability
 *
 * If a Capability is currently idle, wake up a Task on it.  Used to
 * get every Capability into the GC.
 * ------------------------------------------------------------------------- */

void
prodCapability (Capability *cap, Task *task)
{
    ACQUIRE_LOCK(&cap->lock);
    if (!cap->running_task) {
        cap->running_task = task;
        releaseCapability_(cap,rtsTrue);
    }
    RELEASE_LOCK(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller.  Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */

void
shutdownCapability (Capability *cap, Task *task, rtsBool safe)
{
    nat i;

    task->cap = cap;

    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left.  We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).

    for (i = 0; /* i < 50 */; i++) {
        ASSERT(sched_state == SCHED_SHUTTING_DOWN);

        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;

        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example.  This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = t->next) {
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched,
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    // unlink the dead worker; prev keeps pointing at
                    // the last live Task we retained.
                    if (!prev) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                } else {
                    prev = t;
                }
            }
        }

        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap,rtsFalse); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        // If "safe", then busy-wait for any threads currently doing
        // foreign calls.  If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalling_tasks && safe) {
            debugTrace(DEBUG_sched,
                       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        postEvent(cap, EVENT_SHUTDOWN, 0, 0);
        debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // We now have the Capability; its run queue and spare-workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);
}

/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 *
 * ------------------------------------------------------------------------- */

rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
        RELEASE_LOCK(&cap->lock);
        return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}
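
/* A sketch of the intended calling pattern: a Task that wants to own
 * every Capability (e.g. for a stop-the-world GC) can poll each one
 * and retry the ones that were busy.  This loop is illustrative, not
 * a copy of the scheduler's actual code:
 *
 *   nat i;
 *   rtsBool all_owned;
 *   do {
 *       all_owned = rtsTrue;
 *       for (i = 0; i < n_capabilities; i++) {
 *           if (capabilities[i].running_task != task
 *               && !tryGrabCapability(&capabilities[i], task)) {
 *               all_owned = rtsFalse;   // busy; try again next pass
 *           }
 *       }
 *   } while (!all_owned);
 */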

#endif /* THREADED_RTS */

static void
freeCapability (Capability *cap)
{
    stgFree(cap->mut_lists);
#if defined(THREADED_RTS)
    freeSparkPool(cap->sparks);
#endif
}

void
freeCapabilities (void)
{
#if defined(THREADED_RTS)
    nat i;
    for (i=0; i < n_capabilities; i++) {
        freeCapability(&capabilities[i]);
    }
#else
    freeCapability(&MainCapability);
#endif
}

/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities.  When
   using multiple GC threads, GC thread t (of n threads) marks every
   Capability c for which (c `mod` n == t).
   ------------------------------------------------------------------------ */

void
markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
                      rtsBool prune_sparks USED_IF_THREADS)
{
    nat i;
    Capability *cap;
    Task *task;

    // Each GC thread is responsible for following roots from the
    // Capability of the same number.  There will usually be the same
    // number of Capabilities as GC threads, or fewer, but just in
    // case there are more, we mark every Capability whose number is
    // the GC thread's index plus a multiple of the number of GC
    // threads.
    for (i = i0; i < n_capabilities; i += delta) {
        cap = &capabilities[i];
        evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_tl);
#endif
        for (task = cap->suspended_ccalling_tasks; task != NULL;
             task=task->next) {
            debugTrace(DEBUG_sched,
                       "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
            evac(user, (StgClosure **)(void *)&task->suspended_tso);
        }

#if defined(THREADED_RTS)
        if (prune_sparks) {
            pruneSparkQueue (evac, user, cap);
        } else {
            traverseSparkQueue (evac, user, cap);
        }
#endif
    }

#if !defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
    evac(user, (StgClosure **)(void *)&sleeping_queue);
#endif
}

void
markCapabilities (evac_fn evac, void *user)
{
    markSomeCapabilities(evac, user, 0, 1, rtsFalse);
}
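
/* For illustration: in a parallel GC with n_gc_threads threads, each
 * GC thread t would mark its stripe of the Capabilities with a call
 * of the shape
 *
 *   markSomeCapabilities(evac, user, t, n_gc_threads, rtsFalse);
 *
 * so with 2 GC threads, thread 0 marks Capabilities 0, 2, 4, ... and
 * thread 1 marks 1, 3, 5, ...  (n_gc_threads here stands for the
 * caller's thread count; it is not a name defined in this file.)
 */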