/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2003-2005
 *
 * Capabilities
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc. During
 * STG execution, a pointer to the Capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in an SMP build will there be multiple capabilities; in the
 * threaded RTS and other non-threaded builds, there is only one
 * global capability, namely MainCapability.
 *
 * --------------------------------------------------------------------------*/
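
/* An illustrative sketch (not a definitive recipe) of how a Task
 * typically uses this module in a THREADED_RTS build:
 *
 *     Capability *cap = NULL;
 *     waitForReturnCapability(&cap, task); // block until we own a Capability
 *     ... run STG code; BaseReg points into cap->r ...
 *     releaseCapability(cap);              // pass the Capability on
 */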

#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "OSThreads.h"
#include "Capability.h"
#include "Schedule.h"

#if !defined(SMP)
Capability MainCapability;     // for non-SMP, we have one global capability
#endif

nat n_capabilities;
Capability *capabilities = NULL;

// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability;

#ifdef SMP
#define UNUSED_IF_NOT_SMP
#else
#define UNUSED_IF_NOT_SMP STG_UNUSED
#endif

#ifdef THREADED_RTS
#define UNUSED_IF_NOT_THREADS
#else
#define UNUSED_IF_NOT_THREADS STG_UNUSED
#endif


STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return blackholes_need_checking
        || interrupted
#if defined(RTS_USER_SIGNALS)
        || signals_pending()
#endif
        ;
}

#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
anyWorkForMe( Capability *cap, Task *task )
{
    // If the run queue is not empty, then we only wake up the Task
    // that can run the thread at the head, even if there is some
    // other reason for this task to run (e.g. interrupted=rtsTrue).
    if (!emptyRunQueue(cap)) {
        if (cap->run_queue_hd->bound == NULL) {
            return (task->tso == NULL);
        } else {
            return (cap->run_queue_hd->bound == task);
        }
    }
    return globalWorkToDo();
}
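
// A worked example of anyWorkForMe(): if the thread at the head of
// cap's run queue is bound to Task A, then anyWorkForMe(cap,A) is true
// and anyWorkForMe(cap,B) is false for any other Task B, even when
// interrupted==rtsTrue.  If the head thread is unbound, only worker
// Tasks (task->tso == NULL) report work, so a bound Task never picks
// up someone else's thread.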
#endif

/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock.
 * -------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->return_link == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->return_link == NULL);
        cap->returning_tasks_tl->return_link = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}

STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    Task *task;
    ASSERT_LOCK_HELD(&cap->lock);
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->return_link;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->return_link = NULL;
    return task;
}
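
// The two functions above implement a simple FIFO: newReturningTask()
// appends at returning_tasks_tl, popReturningTask() removes from
// returning_tasks_hd.  For example, pushing A then B leaves hd == A
// and tl == B; one pop yields A, leaving hd == tl == B.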
#endif

/* ----------------------------------------------------------------------------
 * Initialisation
 *
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */

static void
initCapability( Capability *cap, nat i )
{
    cap->no = i;
    cap->in_haskell        = rtsFalse;

    cap->run_queue_hd      = END_TSO_QUEUE;
    cap->run_queue_tl      = END_TSO_QUEUE;

#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task      = NULL; // indicates cap is free
    cap->spare_workers     = NULL;
    cap->suspended_ccalling_tasks = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
#endif

    cap->f.stgGCEnter1     = (F_)__stg_gc_enter_1;
    cap->f.stgGCFun        = (F_)__stg_gc_fun;
}

/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the SMP build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
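
/* For example (a usage sketch): an SMP-built program started as
 *
 *     ./prog +RTS -N4 -RTS
 *
 * sets RtsFlags.ParFlags.nNodes to 4, so the code below allocates and
 * initialises an array of four Capabilities.
 */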
void
initCapabilities( void )
{
#if defined(SMP)
    nat i,n;

    n_capabilities = n = RtsFlags.ParFlags.nNodes;
    capabilities = stgMallocBytes(n * sizeof(Capability), "initCapabilities");

    for (i = 0; i < n; i++) {
        initCapability(&capabilities[i], i);
    }

    IF_DEBUG(scheduler, sched_belch("allocated %d capabilities", n));
#else
    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);
#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}

/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking-up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    // We are not modifying task->cap, so we do not need to take task->lock.
    IF_DEBUG(scheduler,
             sched_belch("passing capability %d to %s %p",
                         cap->no, task->tso ? "bound task" : "worker",
                         (void *)task->id));
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because a signalCondition() is lost if
    // the target Task is not already blocked on the condition variable,
    // but we want the wakeup to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
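
// The consuming side of this handshake is in waitForReturnCapability()
// and yieldCapability() below: the woken Task re-checks task->wakeup
// under task->lock before blocking, so a signal sent before the Task
// reaches waitCondition() is not lost.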
#endif

/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */

#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap)
{
    Task *task;

    ASSERT(cap->running_task != NULL && myTask() == cap->running_task);

    task = cap->running_task;
    cap->running_task = NULL;

    ASSERT(task->id == osThreadId());

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call.
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        ASSERT(task != cap->run_queue_hd->bound);
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (!emptyRunQueue(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue;
            return;
        }

        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (!shutting_down_scheduler) {
            IF_DEBUG(scheduler,
                     sched_belch("starting new worker on capability %d", cap->no));
            startWorkerTask(cap, workerStart);
            return;
        }
    }

    last_free_capability = cap;
    IF_DEBUG(scheduler, sched_belch("freeing capability %d", cap->no));
}

void
releaseCapability (Capability* cap UNUSED_IF_NOT_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap);
    RELEASE_LOCK(&cap->lock);
}

static void
releaseCapabilityAndQueueWorker (Capability* cap UNUSED_IF_NOT_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_worker queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    // Also, be careful to check that this task hasn't just exited
    // Haskell to do a foreign call (task->suspended_tso).
    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
        task->next = cap->spare_workers;
        cap->spare_workers = task;
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap);

    RELEASE_LOCK(&cap->lock);
}
#endif

/* ----------------------------------------------------------------------------
 * waitForReturnCapability( Task *task )
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
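
/* A sketch of the surrounding calling sequence for a safe foreign call
 * (simplified; the real call sites are in Schedule.c):
 *
 *     suspendThread(...);             // gives up the Capability
 *     r = some_foreign_function(...); // runs outside the RTS
 *     resumeThread(...);              // waitForReturnCapability() to re-enter
 */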
void
waitForReturnCapability (Capability **pCap,
                         Task *task UNUSED_IF_NOT_THREADS)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (cap->running_task) {
            nat i;
            // otherwise, search for a free capability
            for (i = 0; i < n_capabilities; i++) {
                cap = &capabilities[i];
                if (!cap->running_task) {
                    break;
                }
            }
            if (cap->running_task) {
                // Can't find a free one, fall back to last_free_capability.
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    IF_DEBUG(scheduler,
             sched_belch("returning; I want capability %d", cap->no));

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }

    }

    ASSERT(cap->running_task == task);

    IF_DEBUG(scheduler,
             sched_belch("returning; got capability %d", cap->no));

    *pCap = cap;
#endif
}

#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 *
 * Give up the Capability, and wait until it is passed back to us (or
 * another Capability is assigned to this Task by a migrator).  On
 * return, *pCap points to the Capability that this Task now owns.
 * ------------------------------------------------------------------------- */

void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    // The fast path; no locking
    if ( cap->returning_tasks_hd == NULL && anyWorkForMe(cap,task) )
        return;

    while ( cap->returning_tasks_hd != NULL || !anyWorkForMe(cap,task) ) {
        IF_DEBUG(scheduler, sched_belch("giving up capability %d", cap->no));

        // We must now release the capability and wait to be woken up
        // again.
        releaseCapabilityAndQueueWorker(cap);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            IF_DEBUG(scheduler, sched_belch("woken up on capability %d", cap->no));
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task != NULL) {
                RELEASE_LOCK(&cap->lock);
                continue;
            }

            if (task->tso == NULL) {
                ASSERT(cap->spare_workers != NULL);
                // if we're not at the front of the spare_workers queue,
                // pass the Capability to the worker that is, and wait
                // again.  This is unlikely to happen.
                if (cap->spare_workers != task) {
                    giveCapabilityToTask(cap,cap->spare_workers);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->spare_workers = task->next;
                task->next = NULL;
            }
            cap->running_task = task;
            RELEASE_LOCK(&cap->lock);
            break;
        }

        IF_DEBUG(scheduler, sched_belch("got capability %d", cap->no));
        ASSERT(cap->running_task == task);
    }

    *pCap = cap;
    return;
}
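
// Typical use, as a sketch (the real call site is the scheduler loop
// in Schedule.c): a Task with nothing it is allowed to run calls
//
//     yieldCapability(&cap, task);
//
// and on return it owns a Capability again, possibly a different one,
// which is why the Capability is passed by reference.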

/* ----------------------------------------------------------------------------
 * prodCapabilities
 *
 * Used to indicate that the interrupted flag is now set, or some
 * other global condition that might require waking up a Task on each
 * Capability.
 * ------------------------------------------------------------------------- */

static void
prodCapabilities(rtsBool all)
{
    nat i;
    Capability *cap;
    Task *task;

    for (i=0; i < n_capabilities; i++) {
        cap = &capabilities[i];
        ACQUIRE_LOCK(&cap->lock);
        if (!cap->running_task) {
            if (cap->spare_workers) {
                task = cap->spare_workers;
                ASSERT(!task->stopped);
                giveCapabilityToTask(cap,task);
                if (!all) {
                    RELEASE_LOCK(&cap->lock);
                    return;
                }
            }
        }
        RELEASE_LOCK(&cap->lock);
    }
}

void
prodAllCapabilities (void)
{
    prodCapabilities(rtsTrue);
}
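
// Intended use, as a sketch: a caller first sets a global condition,
// then prods, e.g.
//
//     interrupted = rtsTrue;
//     prodAllCapabilities();   // make sure some Task notices the flag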

/* ----------------------------------------------------------------------------
 * prodOneCapability
 *
 * Like prodAllCapabilities, but we only require a single Task to wake
 * up in order to service some global event, such as checking for
 * deadlock after some idle time has passed.
 * ------------------------------------------------------------------------- */

void
prodOneCapability (void)
{
    prodCapabilities(rtsFalse);
}

/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted == rtsTrue and
 * shutting_down_scheduler == rtsTrue, so that any worker that wakes
 * up will exit the scheduler and call taskStop(), and any bound
 * thread that wakes up will return to its caller.  Runnable threads
 * are killed.
 *
 * ------------------------------------------------------------------------- */
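
/* A sketch of the expected calling context (hypothetical; the real
 * shutdown path lives in Schedule.c):
 *
 *     interrupted = rtsTrue;
 *     shutting_down_scheduler = rtsTrue;
 *     for (i = 0; i < n_capabilities; i++) {
 *         shutdownCapability(&capabilities[i], task);
 *     }
 */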

void
shutdownCapability (Capability *cap, Task *task)
{
    nat i;

    ASSERT(interrupted && shutting_down_scheduler);

    task->cap = cap;

    for (i = 0; i < 50; i++) {
        IF_DEBUG(scheduler, sched_belch("shutting down capability %d, attempt %d", cap->no, i));
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            IF_DEBUG(scheduler, sched_belch("not owner, yielding"));
            yieldThread();
            continue;
        }
        cap->running_task = task;
        if (!emptyRunQueue(cap) || cap->spare_workers) {
            IF_DEBUG(scheduler, sched_belch("runnable threads or workers still alive, yielding"));
            releaseCapability_(cap); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }
        IF_DEBUG(scheduler, sched_belch("capability %d is stopped.", cap->no));
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // We now hold the Capability, and its run queue and spare workers
    // list are empty.  (If we fall out of the loop after 50 attempts,
    // we give up without having stopped the Capability.)
}

#endif /* THREADED_RTS */
