ghc/rts/Capability.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2003-2005
 *
 * Capabilities
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc.  During
 * STG execution, a pointer to the capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in an SMP build will there be multiple capabilities; for
 * the threaded RTS and other non-threaded builds, there is only
 * one global capability, namely MainCapability.
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "OSThreads.h"
#include "Capability.h"
#include "Schedule.h"

#if !defined(SMP)
Capability MainCapability;     // for non-SMP, we have one global capability
#endif

nat n_capabilities;
Capability *capabilities = NULL;

// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability;

#ifdef SMP
#define UNUSED_IF_NOT_SMP
#else
#define UNUSED_IF_NOT_SMP STG_UNUSED
#endif

#ifdef RTS_USER_SIGNALS
#define UNUSED_IF_NOT_THREADS
#else
#define UNUSED_IF_NOT_THREADS STG_UNUSED
#endif

STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return blackholes_need_checking
        || interrupted
#if defined(RTS_USER_SIGNALS)
        || signals_pending()
#endif
        ;
}

#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
anyWorkForMe( Capability *cap, Task *task )
{
    // If the run queue is not empty, we only wake up the Task that
    // can run the thread at its head, even if there is some other
    // reason for this Task to run (e.g. interrupted == rtsTrue).
    if (!emptyRunQueue(cap)) {
        if (cap->run_queue_hd->bound == NULL) {
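            // An unbound thread can be run by any worker; task->tso
            // is NULL precisely for worker Tasks (no bound TSO).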
            return (task->tso == NULL);
        } else {
            return (cap->run_queue_hd->bound == task);
        }
    }
    return globalWorkToDo();
}
#endif

/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */

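// The list is a FIFO queue: newReturningTask() appends at the tail
// (returning_tasks_tl) and popReturningTask() removes from the head
// (returning_tasks_hd), so returning in-calls are serviced in the
// order their Tasks arrived.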
#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->return_link == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->return_link == NULL);
        cap->returning_tasks_tl->return_link = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}

STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    Task *task;
    ASSERT_LOCK_HELD(&cap->lock);
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->return_link;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->return_link = NULL;
    return task;
}
#endif

/* ----------------------------------------------------------------------------
 * Initialisation
 *
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */

static void
initCapability( Capability *cap, nat i )
{
    cap->no = i;
    cap->in_haskell        = rtsFalse;

    cap->run_queue_hd      = END_TSO_QUEUE;
    cap->run_queue_tl      = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task      = NULL; // indicates cap is free
    cap->spare_workers     = NULL;
    cap->suspended_ccalling_tasks = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
#endif

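    // Entry points used by compiled code when a heap or stack check
    // fails; they live in the Capability so that generated code can
    // reach them at a fixed offset from BaseReg (which points to cap->r).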
    cap->f.stgGCEnter1     = (F_)__stg_gc_enter_1;
    cap->f.stgGCFun        = (F_)__stg_gc_fun;
}

/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the SMP build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
void
initCapabilities( void )
{
#if defined(SMP)
    nat i,n;

    n_capabilities = n = RtsFlags.ParFlags.nNodes;
    capabilities = stgMallocBytes(n * sizeof(Capability), "initCapabilities");

    for (i = 0; i < n; i++) {
        initCapability(&capabilities[i], i);
    }

    IF_DEBUG(scheduler, sched_belch("allocated %d capabilities", n));
#else
    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);
#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}
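
/* For example (a sketch): in an SMP build started with "+RTS -N4 -RTS",
 * RtsFlags.ParFlags.nNodes == 4, so initCapabilities() allocates
 * capabilities[0..3] and up to four OS threads can be executing
 * Haskell code at once.  In all other builds, n_capabilities is
 * always 1.
 */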

/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking-up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    // We are not modifying task->cap, so we do not need to take task->lock.
    IF_DEBUG(scheduler,
             sched_belch("passing capability %d to %s %p",
                         cap->no, task->tso ? "bound task" : "worker",
                         (void *)task->id));
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
#endif
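
// The matching wait loops, which block on task->cond and re-check the
// sticky task->wakeup flag, are in waitForReturnCapability() and
// yieldCapability() below.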

/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap)
{
    Task *task;

    task = cap->running_task;

    ASSERT_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call.
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        ASSERT(task != cap->run_queue_hd->bound);
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (!emptyRunQueue(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue.
            return;
        }

        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (!shutting_down_scheduler) {
            IF_DEBUG(scheduler,
                     sched_belch("starting new worker on capability %d", cap->no));
            startWorkerTask(cap, workerStart);
            return;
        }
    }

    last_free_capability = cap;
    IF_DEBUG(scheduler, sched_belch("freeing capability %d", cap->no));
}

void
releaseCapability (Capability* cap UNUSED_IF_NOT_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap);
    RELEASE_LOCK(&cap->lock);
}
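
/* Typical usage (an illustrative sketch only; the real call sites are
 * in Schedule.c, e.g. resumeThread() and the scheduler loop):
 *
 *     Capability *cap = NULL;
 *     waitForReturnCapability(&cap, task);   // acquire a Capability
 *     ... run Haskell code using cap ...
 *     releaseCapability(cap);                // hand it to the next Task
 */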

static void
releaseCapabilityAndQueueWorker (Capability* cap UNUSED_IF_NOT_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_workers queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    // Also, be careful to check that this task hasn't just exited
    // Haskell to do a foreign call (task->suspended_tso).
    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
        task->next = cap->spare_workers;
        cap->spare_workers = task;
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap);

    RELEASE_LOCK(&cap->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * waitForReturnCapability( Task *task )
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
void
waitForReturnCapability (Capability **pCap,
                         Task *task UNUSED_IF_NOT_THREADS)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            for (i = 0; i < n_capabilities; i++) {
                cap = &capabilities[i];
                if (!cap->running_task) {
                    break;
                }
            }
            // Can't find a free one: fall back on last_free_capability.
            if (cap->running_task) {
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    IF_DEBUG(scheduler,
             sched_belch("returning; I want capability %d", cap->no));

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
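            // Standard condition-variable wait: task->wakeup is a
            // sticky flag set under task->lock by giveCapabilityToTask(),
            // so a signal that arrives before we block here is not lost.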
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }

    }

    ASSERT_CAPABILITY_INVARIANTS(cap,task);

    IF_DEBUG(scheduler,
             sched_belch("returning; got capability %d", cap->no));

    *pCap = cap;
#endif
}

#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */

void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    // The fast path; no locking
    if ( cap->returning_tasks_hd == NULL && anyWorkForMe(cap,task) )
        return;

    while ( cap->returning_tasks_hd != NULL || !anyWorkForMe(cap,task) ) {
        IF_DEBUG(scheduler, sched_belch("giving up capability %d", cap->no));

        // We must now release the capability and wait to be woken up
        // again.
        releaseCapabilityAndQueueWorker(cap);

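        // As in waitForReturnCapability(): block on task->cond until the
        // sticky wakeup flag is set, then re-check ownership of the
        // Capability under cap->lock.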
        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            IF_DEBUG(scheduler, sched_belch("woken up on capability %d", cap->no));
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task != NULL) {
                RELEASE_LOCK(&cap->lock);
                continue;
            }

            if (task->tso == NULL) {
                ASSERT(cap->spare_workers != NULL);
                // if we're not at the front of the queue, release it
                // again.  This is unlikely to happen.
                if (cap->spare_workers != task) {
                    giveCapabilityToTask(cap,cap->spare_workers);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->spare_workers = task->next;
                task->next = NULL;
            }
            cap->running_task = task;
            RELEASE_LOCK(&cap->lock);
            break;
        }

        IF_DEBUG(scheduler, sched_belch("got capability %d", cap->no));
        ASSERT(cap->running_task == task);
    }

    *pCap = cap;

    ASSERT_CAPABILITY_INVARIANTS(cap,task);

    return;
}

/* ----------------------------------------------------------------------------
 * prodCapabilities
 *
 * Used to indicate that the interrupted flag is now set, or some
 * other global condition that might require waking up a Task on each
 * Capability.
 * ------------------------------------------------------------------------- */

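// all == rtsTrue: wake a spare worker on every free Capability;
// all == rtsFalse: stop after waking the first one.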
static void
prodCapabilities(rtsBool all)
{
    nat i;
    Capability *cap;
    Task *task;

    for (i=0; i < n_capabilities; i++) {
        cap = &capabilities[i];
        ACQUIRE_LOCK(&cap->lock);
        if (!cap->running_task) {
            if (cap->spare_workers) {
                task = cap->spare_workers;
                ASSERT(!task->stopped);
                giveCapabilityToTask(cap,task);
                if (!all) {
                    RELEASE_LOCK(&cap->lock);
                    return;
                }
            }
        }
        RELEASE_LOCK(&cap->lock);
    }
}

void
prodAllCapabilities (void)
{
    prodCapabilities(rtsTrue);
}

/* ----------------------------------------------------------------------------
 * prodOneCapability
 *
 * Like prodAllCapabilities, but we only require a single Task to wake
 * up in order to service some global event, such as checking for
 * deadlock after some idle time has passed.
 * ------------------------------------------------------------------------- */

void
prodOneCapability (void)
{
    prodCapabilities(rtsFalse);
}

/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, so that any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller.  Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */

void
shutdownCapability (Capability *cap, Task *task)
{
    nat i;

    ASSERT(interrupted && shutting_down_scheduler);

    task->cap = cap;

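    // Bounded retry: make up to 50 passes, yielding the OS thread
    // between attempts, to give workers a chance to drain the run
    // queue and exit before we give up waiting.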
    for (i = 0; i < 50; i++) {
        IF_DEBUG(scheduler, sched_belch("shutting down capability %d, attempt %d", cap->no, i));
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            IF_DEBUG(scheduler, sched_belch("not owner, yielding"));
            yieldThread();
            continue;
        }
        cap->running_task = task;
        if (!emptyRunQueue(cap) || cap->spare_workers) {
            IF_DEBUG(scheduler, sched_belch("runnable threads or workers still alive, yielding"));
            releaseCapability_(cap); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }
        IF_DEBUG(scheduler, sched_belch("capability %d is stopped.", cap->no));
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // We now have the Capability; its run queue and spare_workers
    // list are both empty.
}

#endif /* THREADED_RTS */