1 /* ---------------------------------------------------------------------------
3 * (c) The GHC Team, 2001-2006
7 * A Capability holds all the state an OS thread/task needs to run
8 * Haskell code: its STG registers, a pointer to its TSO, a nursery
 * etc. During STG execution, a pointer to the Capability is kept in
10 * a register (BaseReg).
12 * Only in a THREADED_RTS build will there be multiple capabilities,
13 * in the non-threaded RTS there is one global capability, called
16 * --------------------------------------------------------------------------*/
21 #include "sm/GC.h" // for evac_fn
// State required by the STG virtual machine when running Haskell
// code. During STG execution, the BaseReg register always points
// to the StgRegTable of the current Capability (&cap->r).
// NOTE(review): the `struct Capability_ {` opener and several fields
// this header refers to elsewhere (r, running_task, run_queue_hd/tl,
// mut_lists, context_switch, lock, inbox, sparks) are not visible in
// this extract -- confirm against the full header before editing.
    nat no;  // capability number.

    // The Task currently holding this Capability.  This task has
    // exclusive access to the contents of this Capability (apart from
    // returning_tasks_hd/returning_tasks_tl).
    // Locks required: cap->lock.
    // NOTE(review): the field itself is elided in this extract.

    // true if this Capability is running Haskell code, used for
    // catching unsafe call-ins.  (field elided in this extract)

    // The run queue.  The Task owning this Capability has exclusive
    // access to its run queue, so can wake up threads without
    // taking a lock, and the common path through the scheduler is
    // lock-free.  (fields elided in this extract)

    // Tasks currently making safe foreign calls.  Doubly-linked.
    // When returning, a task first acquires the Capability before
    // removing itself from this list, so that the GC can find all
    // the suspended TSOs easily.  Hence, when migrating a Task from
    // the returning_tasks list, we must also migrate its entry from
    // this list.
    InCall *suspended_ccalls;

    // One mutable list per generation, so we don't need to take any
    // locks when updating an old-generation thunk.  This also lets us
    // keep track of which closures this CPU has been mutating, so we
    // can traverse them using the right thread during GC and avoid
    // unnecessarily moving the data from one cache to another.
    bdescr **saved_mut_lists; // tmp use during GC

    // block for allocating pinned objects into
    bdescr *pinned_object_block;

    // Context switch flag.  We used to have one global flag, now one
    // per capability.  Locks required: none (conflicts are harmless)
    // (field elided in this extract)

#if defined(THREADED_RTS)
    // Worker Tasks waiting in the wings.  Singly-linked.
    // (field elided in this extract)

    // This lock protects running_task, returning_tasks_{hd,tl}, wakeup_queue.
    // (field elided in this extract)

    // Tasks waiting to return from a foreign call, or waiting to make
    // a new call-in using this Capability (NULL if empty).
    // NB. this field needs to be modified by tasks other than the
    // running_task, so it requires cap->lock to modify.  A task can
    // check whether it is NULL without taking the lock, however.
    Task *returning_tasks_hd; // Singly-linked, with head/tail
    Task *returning_tasks_tl;

    // Messages, or END_TSO_QUEUE.  (field elided in this extract)

    // Stats on spark creation/conversion  (fields elided in this extract)

    // Per-capability STM-related data: free lists recycled between
    // transactions to avoid repeated allocation.
    StgTVarWatchQueue *free_tvar_watch_queues;
    StgInvariantCheckQueue *free_invariant_check_queues;
    StgTRecChunk *free_trec_chunks;
    StgTRecHeader *free_trec_headers;
    nat transaction_tokens;
} // typedef Capability is defined in RtsAPI.h
  // Capabilities are stored in an array, so make sure that adjacent
  // Capabilities don't share any cache-lines:
#ifndef mingw32_HOST_OS
  ATTRIBUTE_ALIGNED(64)
#if defined(THREADED_RTS)
// Assert that `task` is running on the current OS thread.
#define ASSERT_TASK_ID(task) ASSERT(task->id == osThreadId())
// NOTE(review): the #else separating the threaded and non-threaded
// definitions is not visible in this extract.
#define ASSERT_TASK_ID(task) /*empty*/
// These properties should be true when a Task is holding a Capability.
// Wrapped in do { ... } while (0) so the macro behaves as a single
// statement: the previous expansion was three bare statements, which
// breaks when the macro is used in an unbraced if/else body
// (CERT C PRE10-C).
#define ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task)                     \
  do {                                                                  \
      ASSERT(cap->running_task != NULL && cap->running_task == task);   \
      ASSERT(task->cap == cap);                                         \
      ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);                   \
  } while (0)
129 // Sometimes a Task holds a Capability, but the Task is not associated
130 // with that Capability (ie. task->cap != cap). This happens when
131 // (a) a Task holds multiple Capabilities, and (b) when the current
132 // Task is bound, its thread has just blocked, and it may have been
133 // moved to another Capability.
// Invariants that hold even when a Task holds a Capability it is not
// associated with (task->cap != cap); see the comment above.
// Wrapped in do { ... } while (0): the previous expansion ended in a
// trailing semicolon, so `ASSERT_PARTIAL_CAPABILITY_INVARIANTS(c,t);`
// left a stray empty statement, which breaks unbraced if/else bodies
// (CERT C PRE10-C).
#define ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task)  \
  do {                                                  \
      ASSERT(cap->run_queue_hd == END_TSO_QUEUE ?       \
             cap->run_queue_tl == END_TSO_QUEUE : 1);   \
      ASSERT(myTask() == task);                         \
      ASSERT_TASK_ID(task);                             \
  } while (0)
// Converts a *StgRegTable into a *Capability.
//
// Works by subtracting the offset of the embedded `r` field from the
// register-table pointer, recovering the enclosing Capability.
// NOTE(review): the function's opening and closing braces are not
// visible in this extract.
INLINE_HEADER Capability *
regTableToCapability (StgRegTable *reg)
    return (Capability *)((void *)((unsigned char*)reg - STG_FIELD_OFFSET(Capability,r)));
// Initialise the available capabilities.
void initCapabilities (void);

// Release a capability.  This is called by a Task that is exiting
// Haskell to make a foreign call, or in various other cases when we
// want to relinquish a Capability that we currently hold.
//
// ASSUMES: cap->running_task is the current Task.
//
#if defined(THREADED_RTS)
void releaseCapability (Capability* cap);
void releaseAndWakeupCapability (Capability* cap);
void releaseCapability_ (Capability* cap, rtsBool always_wakeup);
// assumes cap->lock is held
// releaseCapability() is empty in non-threaded RTS
// NOTE(review): the #else that should separate the threaded
// declarations above from the non-threaded stubs below is not
// visible in this extract.
165 INLINE_HEADER void releaseCapability (Capability* cap STG_UNUSED) {};
166 INLINE_HEADER void releaseAndWakeupCapability (Capability* cap STG_UNUSED) {};
167 INLINE_HEADER void releaseCapability_ (Capability* cap STG_UNUSED,
168 rtsBool always_wakeup STG_UNUSED) {};
// declared in includes/rts/Threads.h:
// extern Capability MainCapability;

// declared in includes/rts/Threads.h:
// extern nat n_capabilities;

// Array of all the capabilities
extern Capability *capabilities;

// The Capability that was last free.  Used as a good guess for where
// to assign new threads.
extern Capability *last_free_capability;

// GC indicator, in scope for the scheduler.
#define PENDING_GC_SEQ 1
#define PENDING_GC_PAR 2
extern volatile StgWord waiting_for_gc;

// Acquires a capability at a return point.  If *cap is non-NULL, then
// this is taken as a preference for the Capability we wish to
// acquire.
//
// OS threads waiting in this function get priority over those waiting
// in waitForCapability().
//
// On return, *cap is non-NULL, and points to the Capability acquired.
//
void waitForReturnCapability (Capability **cap/*in/out*/, Task *task);

// Add closure p to cap's generation-`gen` mutable list
// (inline body appears later in this header).
EXTERN_INLINE void recordMutableCap (StgClosure *p, Capability *cap, nat gen);

// Record that closure p has been mutated, remembering it on the
// mutable list when it lives outside generation 0
// (inline body appears later in this header).
EXTERN_INLINE void recordClosureMutated (Capability *cap, StgClosure *p);
#if defined(THREADED_RTS)

// Gives up the current capability IFF there is a higher-priority
// thread waiting for it.  This happens in one of two ways:
//
//   (a) we are passing the capability to another OS thread, so
//       that it can run a bound Haskell thread, or
//
//   (b) there is an OS thread waiting to return from a foreign call
//
// On return: *pCap is NULL if the capability was released.  The
// current task should then re-acquire it using waitForCapability().
//
void yieldCapability (Capability** pCap, Task *task);

// Acquires a capability for doing some work.
//
// On return: pCap points to the capability.
void waitForCapability (Task *task, Mutex *mutex, Capability **pCap);

// Wakes up a worker thread on just one Capability, used when we
// need to service some global event.
void prodOneCapability (void);
// Prod a specific Capability, waking it so it notices pending work.
void prodCapability (Capability *cap, Task *task);

// Similar to prodOneCapability(), but prods all of them.
void prodAllCapabilities (void);

// Waits for a capability to drain of runnable threads and workers,
// and then acquires it.  Used at shutdown time.
void shutdownCapability (Capability *cap, Task *task, rtsBool wait_foreign);

// Attempt to gain control of a Capability if it is free.
rtsBool tryGrabCapability (Capability *cap, Task *task);

// Try to find a spark to run
StgClosure *findSpark (Capability *cap);

// True if any capabilities have sparks
rtsBool anySparks (void);

// Spark-pool helpers; inline bodies appear later in this header.
INLINE_HEADER rtsBool emptySparkPoolCap (Capability *cap);
INLINE_HEADER nat sparkPoolSizeCap (Capability *cap);
INLINE_HEADER void discardSparksCap (Capability *cap);

#else // !THREADED_RTS

// Grab a capability.  (Only in the non-threaded RTS; in the threaded
// RTS one of the waitFor*Capability() functions must be used).
extern void grabCapability (Capability **pCap);

#endif /* !THREADED_RTS */
// cause all capabilities to context switch as soon as possible.
void setContextSwitches(void);
// Request a context switch on a single capability
// (inline body appears later in this header).
INLINE_HEADER void contextSwitchCapability(Capability *cap);

// Free all capabilities
void freeCapabilities (void);

// For the GC: evacuate the roots held by a subset of capabilities
// (every delta-th starting at i0), optionally pruning sparks.
void markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
                           rtsBool prune_sparks);
void markCapabilities (evac_fn evac, void *user);
void traverseSparkQueues (evac_fn evac, void *user);
280 /* -----------------------------------------------------------------------------
282 -------------------------------------------------------------------------- */
286 INLINE_HEADER rtsBool emptyInbox(Capability *cap);;
288 #endif // THREADED_RTS
290 /* -----------------------------------------------------------------------------
291 * INLINE functions... private below here
292 * -------------------------------------------------------------------------- */
295 recordMutableCap (StgClosure *p, Capability *cap, nat gen)
299 // We must own this Capability in order to modify its mutable list.
300 // ASSERT(cap->running_task == myTask());
301 // NO: assertion is violated by performPendingThrowTos()
302 bd = cap->mut_lists[gen];
303 if (bd->free >= bd->start + BLOCK_SIZE_W) {
305 new_bd = allocBlock_lock();
308 cap->mut_lists[gen] = bd;
310 *bd->free++ = (StgWord)p;
314 recordClosureMutated (Capability *cap, StgClosure *p)
317 bd = Bdescr((StgPtr)p);
318 if (bd->gen_no != 0) recordMutableCap(p,cap,bd->gen_no);
322 #if defined(THREADED_RTS)
323 INLINE_HEADER rtsBool
324 emptySparkPoolCap (Capability *cap)
325 { return looksEmpty(cap->sparks); }
// Number of sparks currently in cap's pool.
// NOTE(review): the INLINE_HEADER return-type lines for these two
// functions are not visible in this extract.
sparkPoolSizeCap (Capability *cap)
{ return sparkPoolSize(cap->sparks); }

// Discard all sparks in cap's pool.
discardSparksCap (Capability *cap)
{ return discardSparks(cap->sparks); }
// Ask the thread running on cap to return to the scheduler soon.
// NOTE(review): the return-type line and braces are not visible in
// this extract (declared above as INLINE_HEADER void).
contextSwitchCapability (Capability *cap)
    // setting HpLim to NULL ensures that the next heap check will
    // fail, and the thread will return to the scheduler.
    cap->r.rHpLim = NULL;
    // But just in case it didn't work (the target thread might be
    // modifying HpLim at the same time), we set the end-of-block
    // context-switch flag too:
    cap->context_switch = 1;

// True iff there are no messages waiting in cap's inbox.
// NOTE(review): braces are not visible in this extract.
INLINE_HEADER rtsBool emptyInbox(Capability *cap)
    return (cap->inbox == (Message*)END_TSO_QUEUE);
359 #endif /* CAPABILITY_H */