1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 2001-2006
7 * The notion of a capability is used when operating in multi-threaded
8 * environments (which the THREADED_RTS build of the RTS does), to
9 * hold all the state an OS thread/task needs to run Haskell code:
10 * its STG registers, a pointer to its TSO, a nursery etc. During
* STG execution, a pointer to the capability is kept in a
* Only in a THREADED_RTS build will there be multiple capabilities,
15 * in the non-threaded builds there is one global capability, namely
18 * This header file contains the functions for working with capabilities.
19 * (the main, and only, consumer of this interface is the scheduler).
21 * --------------------------------------------------------------------------*/
// State required by the STG virtual machine when running Haskell
// code.  During STG execution, the BaseReg register always points
// to the StgRegTable of the current Capability (&cap->r).
//
// NOTE(review): this excerpt of the struct appears to be missing its
// opening 'struct Capability_ {' line, several field declarations
// (the fields described by the orphaned comments below), and the
// '#endif's matching the preprocessor conditionals — confirm against
// the full file before editing.

    nat no;  // capability number.

    // The Task currently holding this Capability.  This task has
    // exclusive access to the contents of this Capability (apart from
    // returning_tasks_hd/returning_tasks_tl).
    // Locks required: cap->lock.

    // true if this Capability is running Haskell code, used for
    // catching unsafe call-ins.

    // true if this Capability is currently in the GC

    // The run queue.  The Task owning this Capability has exclusive
    // access to its run queue, so can wake up threads without
    // taking a lock, and the common path through the scheduler is
    // lock-free.

    // Tasks currently making safe foreign calls.  Doubly-linked.
    // When returning, a task first acquires the Capability before
    // removing itself from this list, so that the GC can find all
    // the suspended TSOs easily.  Hence, when migrating a Task from
    // the returning_tasks list, we must also migrate its entry from
    // this list.
    Task *suspended_ccalling_tasks;

    // One mutable list per generation, so we don't need to take any
    // locks when updating an old-generation thunk.  This also lets us
    // keep track of which closures this CPU has been mutating, so we
    // can traverse them using the right thread during GC and avoid
    // unnecessarily moving the data from one cache to another.
    bdescr **saved_mut_lists; // tmp use during GC

    // Context switch flag.  We used to have one global flag, now one
    // per capability.  Locks required: none (conflicts are harmless).

#if defined(THREADED_RTS)
    // Worker Tasks waiting in the wings.  Singly-linked.

    // This lock protects running_task, returning_tasks_{hd,tl}, wakeup_queue.

    // Tasks waiting to return from a foreign call, or waiting to make
    // a new call-in using this Capability (NULL if empty).
    // NB. this field needs to be modified by tasks other than the
    // running_task, so it requires cap->lock to modify.  A task can
    // check whether it is NULL without taking the lock, however.
    Task *returning_tasks_hd; // Singly-linked, with head/tail
    Task *returning_tasks_tl;

    // A list of threads to append to this Capability's run queue at
    // the earliest opportunity.  These are threads that have been
    // woken up by another Capability.
    StgTSO *wakeup_queue_hd;
    StgTSO *wakeup_queue_tl;

    // Stats on spark creation/conversion
    nat sparks_converted;

    // Per-capability STM-related data: free lists that let transaction
    // machinery recycle records without global locking.
    StgTVarWatchQueue *free_tvar_watch_queues;
    StgInvariantCheckQueue *free_invariant_check_queues;
    StgTRecChunk *free_trec_chunks;
    StgTRecHeader *free_trec_headers;
    nat transaction_tokens;
} // typedef Capability is defined in RtsAPI.h

// Capabilities are stored in an array, so make sure that adjacent
// Capabilities don't share any cache-lines:
#ifndef mingw32_HOST_OS
ATTRIBUTE_ALIGNED(64)
#if defined(THREADED_RTS)
// In the threaded RTS, check that 'task' really is running on the
// current OS thread.
#define ASSERT_TASK_ID(task) ASSERT(task->id == osThreadId())
// NOTE(review): the '#else' and '#endif' lines appear to be missing
// from this excerpt; the empty definition below is the non-threaded
// fallback — confirm against the full file.
#define ASSERT_TASK_ID(task) /*empty*/
129 // These properties should be true when a Task is holding a Capability
// These properties should be true when a Task is holding a Capability:
// the Capability's owner is exactly this task, the task points back at
// the Capability, and the partial invariants also hold.
//
// Wrapped in do { ... } while (0) so the multi-statement expansion
// behaves as a single statement (safe under an unbraced if/else).
#define ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task)                       \
    do {                                                                  \
        ASSERT(cap->running_task != NULL && cap->running_task == task);   \
        ASSERT(task->cap == cap);                                         \
        ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);                   \
    } while (0)
135 // Sometimes a Task holds a Capability, but the Task is not associated
136 // with that Capability (ie. task->cap != cap). This happens when
137 // (a) a Task holds multiple Capabilities, and (b) when the current
138 // Task is bound, its thread has just blocked, and it may have been
139 // moved to another Capability.
// Invariants that hold even when the holding Task is not associated
// with the Capability (see the comment above): an empty run queue has
// both head and tail empty, and the current OS thread's Task is 'task'.
//
// Wrapped in do { ... } while (0) so the multi-statement expansion
// behaves as a single statement (safe under an unbraced if/else), and
// so call sites no longer expand to a stray trailing ';;'.
#define ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)          \
    do {                                                        \
        ASSERT(cap->run_queue_hd == END_TSO_QUEUE ?             \
               cap->run_queue_tl == END_TSO_QUEUE : 1);         \
        ASSERT(myTask() == task);                               \
        ASSERT_TASK_ID(task);                                   \
    } while (0)
146 // Converts a *StgRegTable into a *Capability.
INLINE_HEADER Capability *
regTableToCapability (StgRegTable *reg)
    // The StgRegTable is the 'r' field embedded inside Capability, so
    // stepping back by its byte offset recovers the address of the
    // enclosing Capability.
    // NOTE(review): the function's braces appear to be missing from
    // this excerpt.
    return (Capability *)((void *)((unsigned char*)reg - STG_FIELD_OFFSET(Capability,r)));
154 // Initialise the available capabilities.
156 void initCapabilities (void);
158 // Release a capability. This is called by a Task that is exiting
159 // Haskell to make a foreign call, or in various other cases when we
160 // want to relinquish a Capability that we currently hold.
162 // ASSUMES: cap->running_task is the current Task.
164 #if defined(THREADED_RTS)
165 void releaseCapability (Capability* cap);
166 void releaseAndWakeupCapability (Capability* cap);
167 void releaseCapability_ (Capability* cap, rtsBool always_wakeup);
168 // assumes cap->lock is held
170 // releaseCapability() is empty in non-threaded RTS
171 INLINE_HEADER void releaseCapability (Capability* cap STG_UNUSED) {};
172 INLINE_HEADER void releaseAndWakeupCapability (Capability* cap STG_UNUSED) {};
173 INLINE_HEADER void releaseCapability_ (Capability* cap STG_UNUSED,
174 rtsBool always_wakeup STG_UNUSED) {};
178 // one global capability
179 extern Capability MainCapability;
182 // Array of all the capabilities
184 extern nat n_capabilities;
185 extern Capability *capabilities;
187 // The Capability that was last free. Used as a good guess for where
188 // to assign new threads.
190 extern Capability *last_free_capability;
192 // GC indicator, in scope for the scheduler
193 #define PENDING_GC_SEQ 1
194 #define PENDING_GC_PAR 2
195 extern volatile StgWord waiting_for_gc;
197 // Acquires a capability at a return point. If *cap is non-NULL, then
198 // this is taken as a preference for the Capability we wish to
201 // OS threads waiting in this function get priority over those waiting
202 // in waitForCapability().
204 // On return, *cap is non-NULL, and points to the Capability acquired.
206 void waitForReturnCapability (Capability **cap/*in/out*/, Task *task);
208 INLINE_HEADER void recordMutableCap (StgClosure *p, Capability *cap, nat gen);
210 #if defined(THREADED_RTS)
212 // Gives up the current capability IFF there is a higher-priority
213 // thread waiting for it. This happens in one of two ways:
215 // (a) we are passing the capability to another OS thread, so
216 // that it can run a bound Haskell thread, or
218 // (b) there is an OS thread waiting to return from a foreign call
220 // On return: *pCap is NULL if the capability was released. The
221 // current task should then re-acquire it using waitForCapability().
223 void yieldCapability (Capability** pCap, Task *task);
225 // Acquires a capability for doing some work.
227 // On return: pCap points to the capability.
229 void waitForCapability (Task *task, Mutex *mutex, Capability **pCap);
231 // Wakes up a thread on a Capability (probably a different Capability
232 // from the one held by the current Task).
234 void wakeupThreadOnCapability (Capability *my_cap, Capability *other_cap,
237 // Wakes up a worker thread on just one Capability, used when we
238 // need to service some global event.
240 void prodOneCapability (void);
241 void prodCapability (Capability *cap, Task *task);
243 // Similar to prodOneCapability(), but prods all of them.
245 void prodAllCapabilities (void);
247 // Waits for a capability to drain of runnable threads and workers,
248 // and then acquires it. Used at shutdown time.
250 void shutdownCapability (Capability *cap, Task *task, rtsBool wait_foreign);
252 // Attempt to gain control of a Capability if it is free.
254 rtsBool tryGrabCapability (Capability *cap, Task *task);
256 // Try to find a spark to run
258 StgClosure *findSpark (Capability *cap);
260 // True if any capabilities have sparks
262 rtsBool anySparks (void);
264 INLINE_HEADER rtsBool emptySparkPoolCap (Capability *cap);
265 INLINE_HEADER nat sparkPoolSizeCap (Capability *cap);
266 INLINE_HEADER void discardSparksCap (Capability *cap);
268 #else // !THREADED_RTS
270 // Grab a capability. (Only in the non-threaded RTS; in the threaded
271 // RTS one of the waitFor*Capability() functions must be used).
273 extern void grabCapability (Capability **pCap);
275 #endif /* !THREADED_RTS */
277 // cause all capabilities to context switch as soon as possible.
278 void setContextSwitches(void);
279 INLINE_HEADER void contextSwitchCapability(Capability *cap);
281 // Free all capabilities
282 void freeCapabilities (void);
285 void markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
286 rtsBool prune_sparks);
287 void markCapabilities (evac_fn evac, void *user);
288 void traverseSparkQueues (evac_fn evac, void *user);
290 /* -----------------------------------------------------------------------------
291 * INLINE functions... private below here
292 * -------------------------------------------------------------------------- */
// Record closure 'p' on this Capability's mutable list for generation
// 'gen', so the GC can later find old-generation objects mutated by
// this CPU without any locking.
// NOTE(review): this excerpt appears to be missing the function's
// return-type line, braces, the declarations of 'bd'/'new_bd', and
// part of the block-full path — confirm against the full file.
recordMutableCap (StgClosure *p, Capability *cap, nat gen)
    // We must own this Capability in order to modify its mutable list.
    ASSERT(cap->running_task == myTask());
    bd = cap->mut_lists[gen];
    // Current mutable-list block full?  Allocate a fresh one (the
    // chaining code is presumably in the lines missing here).
    if (bd->free >= bd->start + BLOCK_SIZE_W) {
    new_bd = allocBlock_lock();
    cap->mut_lists[gen] = bd;
    // Append the mutated closure to the current block.
    *bd->free++ = (StgWord)p;
312 #if defined(THREADED_RTS)
313 INLINE_HEADER rtsBool
314 emptySparkPoolCap (Capability *cap)
315 { return looksEmpty(cap->sparks); }
// Number of sparks currently in this Capability's spark pool.
// NOTE(review): the 'INLINE_HEADER nat' return-type line appears to be
// missing from this excerpt (cf. the prototype earlier in the file).
sparkPoolSizeCap (Capability *cap)
{ return sparkPoolSize(cap->sparks); }
// Discard all sparks in this Capability's spark pool.
// NOTE(review): the 'INLINE_HEADER void' return-type line appears to
// be missing from this excerpt (cf. the prototype earlier in the file).
discardSparksCap (Capability *cap)
{ return discardSparks(cap->sparks); }
// Ask the thread running on this Capability to yield back to the
// scheduler at its next opportunity.
// NOTE(review): the return-type line and the function braces appear to
// be missing from this excerpt.
contextSwitchCapability (Capability *cap)
    // setting HpLim to NULL ensures that the next heap check will
    // fail, and the thread will return to the scheduler.
    cap->r.rHpLim = NULL;
    // But just in case it didn't work (the target thread might be
    // modifying HpLim at the same time), we set the end-of-block
    // context-switch flag too:
    cap->context_switch = 1;
338 #endif /* CAPABILITY_H */