/* ----------------------------------------------------------------------------
 * (c) The GHC Team, 1998-2004
 * -------------------------------------------------------------------------- */
/* The Layout of a closure header depends on which kind of system we're
 * compiling for: profiling, parallel, ticky, etc.
 */
17 /* -----------------------------------------------------------------------------
19 -------------------------------------------------------------------------- */
24 struct _RetainerSet *rs; /* Retainer Set */
25 StgWord ldvw; /* Lag/Drag/Void Word */
29 /* -----------------------------------------------------------------------------
31 -------------------------------------------------------------------------- */
34 StgWord procs; /* bitmask indicating on which PEs this closure resides */
/* -----------------------------------------------------------------------------
   The SMP header

   A thunk has a padding word to take the updated value.  This is so
   that the update doesn't overwrite the payload, so we can avoid
   needing to lock the thunk during entry and update.

   Note: this doesn't apply to THUNK_STATICs, which have no payload.

   Note: we leave this padding word in all ways, rather than just SMP,
   so that we don't have to recompile all our libraries for SMP.
   -------------------------------------------------------------------------- */
/* -----------------------------------------------------------------------------
   The full fixed-size closure header

   The size of the fixed header is the sum of the optional parts plus a single
   word for the entry code pointer.
   -------------------------------------------------------------------------- */
62 const struct _StgInfoTable* info;
72 const struct _StgInfoTable* info;
79 StgSMPThunkHeader smp;
82 #define THUNK_EXTRA_HEADER_W (sizeofW(StgThunkHeader)-sizeofW(StgHeader))
/* -----------------------------------------------------------------------------
   Closure Types

   For any given closure type (defined in InfoTables.h), there is a
   corresponding structure defined below.  The name of the structure
   is obtained by concatenating the closure type with '_closure'
   -------------------------------------------------------------------------- */
/* All closures follow the generic format */
96 struct StgClosure_ *payload[FLEXIBLE_ARRAY];
100 StgThunkHeader header;
101 struct StgClosure_ *payload[FLEXIBLE_ARRAY];
105 StgThunkHeader header;
106 StgClosure *selectee;
111 StgHalfWord arity; /* zero if it is an AP */
113 StgClosure *fun; /* really points to a fun */
114 StgClosure *payload[FLEXIBLE_ARRAY];
118 StgThunkHeader header;
119 StgHalfWord arity; /* zero if it is an AP */
121 StgClosure *fun; /* really points to a fun */
122 StgClosure *payload[FLEXIBLE_ARRAY];
126 StgThunkHeader header;
127 StgWord size; /* number of words in payload */
129 StgClosure *payload[FLEXIBLE_ARRAY]; /* contains a chunk of *stack* */
134 StgClosure *indirectee;
139 StgClosure *indirectee;
140 StgClosure *static_link;
141 struct _StgInfoTable *saved_info;
147 StgWord payload[FLEXIBLE_ARRAY];
153 StgClosure *payload[FLEXIBLE_ARRAY];
161 typedef struct _StgUpdateFrame {
168 StgInt exceptions_blocked;
179 } StgIntCharlikeClosure;
181 /* statically allocated */
186 typedef struct _StgStableName {
191 typedef struct _StgWeak { /* Weak v */
193 StgClosure *cfinalizer;
195 StgClosure *value; /* v */
196 StgClosure *finalizer;
197 struct _StgWeak *link;
200 typedef struct _StgDeadWeak { /* Weak v */
202 struct _StgWeak *link;
/* Byte code objects.  These are fixed size objects with pointers to
 * four arrays, designed so that a BCO can be easily "re-linked" to
 * other BCOs, to facilitate GHC's intelligent recompilation.  The
 * array of instructions is static and not re-generated when the BCO
 * is re-linked, but the other 3 arrays will be regenerated.
 *
 * A BCO represents either a function or a stack frame.  In each case,
 * it needs a bitmap to describe to the garbage collector the
 * pointerhood of its arguments/free variables respectively, and in
 * the case of a function it also needs an arity.  These are stored
 * directly in the BCO, rather than in the instrs array, for two
 * reasons:
 *    (a) speed: we need to get at the bitmap info quickly when
 *        the GC is examining APs and PAPs that point to this BCO
 *    (b) a subtle interaction with the compacting GC.  In compacting
 *        GC, the info that describes the size/layout of a closure
 *        cannot be in an object more than one level of indirection
 *        away from the current object, because of the order in
 *        which pointers are updated to point to their new locations.
 */
228 StgArrWords *instrs; /* a pointer to an ArrWords */
229 StgArrWords *literals; /* a pointer to an ArrWords */
230 StgMutArrPtrs *ptrs; /* a pointer to a MutArrPtrs */
231 StgHalfWord arity; /* arity of this BCO */
232 StgHalfWord size; /* size of this BCO (in words) */
233 StgWord bitmap[FLEXIBLE_ARRAY]; /* an StgLargeBitmap */
236 #define BCO_BITMAP(bco) ((StgLargeBitmap *)((StgBCO *)(bco))->bitmap)
237 #define BCO_BITMAP_SIZE(bco) (BCO_BITMAP(bco)->size)
238 #define BCO_BITMAP_BITS(bco) (BCO_BITMAP(bco)->bitmap)
239 #define BCO_BITMAP_SIZEW(bco) ((BCO_BITMAP_SIZE(bco) + BITS_IN(StgWord) - 1) \
/* -----------------------------------------------------------------------------
   Dynamic stack frames for generic heap checks.

   These generic heap checks are slow, but have the advantage of being
   usable in a variety of situations.

   The one restriction is that any relevant SRTs must already be pointed
   to from the stack.  The return address doesn't need to have an info
   table attached: hence it can be any old code pointer.

   The liveness mask contains a 1 at bit n, if register Rn contains a
   non-pointer.  The contents of all 8 vanilla registers are always saved
   on the stack; the liveness mask tells the GC which ones contain
   pointers.

   Good places to use a generic heap check:

      - case alternatives (the return address with an SRT is already
        on the stack).

      - primitives (no SRT required).

   The stack frame layout for a RET_DYN is like this:

          some pointers         |-- RET_DYN_PTRS(liveness) words
          some nonpointers      |-- RET_DYN_NONPTRS(liveness) words

          D1-2                  |-- RET_DYN_NONPTR_REGS_SIZE words

          R1-8                  |-- RET_DYN_BITMAP_SIZE words

          liveness mask         |-- StgRetDyn structure

   We assume that the size of a double is always 2 pointers (wasting a
   word when it is only one pointer, but avoiding lots of #ifdefs).

   See Liveness.h for the macros (RET_DYN_PTRS() etc.).

   NOTE: if you change the layout of RET_DYN stack frames, then you
   might also need to adjust the value of RESERVED_STACK_WORDS in
   Constants.h.
   -------------------------------------------------------------------------- */
290 const struct _StgInfoTable* info;
293 StgClosure * payload[FLEXIBLE_ARRAY];
/* A function return stack frame: used when saving the state for a
 * garbage collection at a function entry point.  The function
 * arguments are on the stack, and we also save the function (its
 * info table describes the pointerhood of the arguments).
 *
 * The stack frame size is also cached in the frame for convenience.
 */
304 const struct _StgInfoTable* info;
307 StgClosure * payload[FLEXIBLE_ARRAY];
310 /* Concurrent communication objects */
314 struct StgTSO_ *head;
315 struct StgTSO_ *tail;
/* STM data structures
 *
 * StgTVar defines the only type that can be updated through the STM
 * interface.
 *
 * Note that various optimisations may be possible in order to use less
 * space for these data structures at the cost of more complexity in the
 * implementation:
 *
 *  - In StgTVar, current_value and first_watch_queue_entry could be held in
 *    the same field: if any thread is waiting then its expected_value for
 *    the tvar is the current value.
 *
 *  - In StgTRecHeader, it might be worthwhile having separate chunks
 *    of read-only and read-write locations.  This would save a
 *    new_value field in the read-only locations.
 *
 *  - In StgAtomicallyFrame, we could combine the waiting bit into
 *    the header (maybe a different info tbl for a waiting transaction).
 *    This means we can specialise the code for the atomically frame
 *    (it immediately switches on frame->waiting anyway).
 */
343 typedef struct StgTRecHeader_ StgTRecHeader;
345 typedef struct StgTVarWatchQueue_ {
347 StgClosure *closure; // StgTSO or StgAtomicInvariant
348 struct StgTVarWatchQueue_ *next_queue_entry;
349 struct StgTVarWatchQueue_ *prev_queue_entry;
354 StgClosure *volatile current_value;
355 StgTVarWatchQueue *volatile first_watch_queue_entry;
356 #if defined(THREADED_RTS)
357 StgInt volatile num_updates;
364 StgTRecHeader *last_execution;
366 } StgAtomicInvariant;
368 /* new_value == expected_value for read-only accesses */
369 /* new_value is a StgTVarWatchQueue entry when trec in state TREC_WAITING */
372 StgClosure *expected_value;
373 StgClosure *new_value;
374 #if defined(THREADED_RTS)
379 #define TREC_CHUNK_NUM_ENTRIES 16
381 typedef struct StgTRecChunk_ {
383 struct StgTRecChunk_ *prev_chunk;
384 StgWord next_entry_idx;
385 TRecEntry entries[TREC_CHUNK_NUM_ENTRIES];
389 TREC_ACTIVE, /* Transaction in progress, outcome undecided */
390 TREC_CONDEMNED, /* Transaction in progress, inconsistent / out of date reads */
391 TREC_COMMITTED, /* Transaction has committed, now updating tvars */
392 TREC_ABORTED, /* Transaction has aborted, now reverting tvars */
393 TREC_WAITING, /* Transaction currently waiting */
396 typedef struct StgInvariantCheckQueue_ {
398 StgAtomicInvariant *invariant;
399 StgTRecHeader *my_execution;
400 struct StgInvariantCheckQueue_ *next_queue_entry;
401 } StgInvariantCheckQueue;
403 struct StgTRecHeader_ {
406 struct StgTRecHeader_ *enclosing_trec;
407 StgTRecChunk *current_chunk;
408 StgInvariantCheckQueue *invariants_to_check;
414 StgTVarWatchQueue *next_invariant_to_check;
415 } StgAtomicallyFrame;
425 StgBool running_alt_code;
426 StgClosure *first_code;
427 StgClosure *alt_code;
428 } StgCatchRetryFrame;
430 #if defined(PAR) || defined(GRAN)
432 StgBlockingQueueElement is a ``collective type'' representing the types
433 of closures that can be found on a blocking queue: StgTSO, StgRBHSave,
434 StgBlockedFetch. (StgRBHSave can only appear at the end of a blocking
435 queue). Logically, this is a union type, but defining another struct
436 with a common layout is easier to handle in the code.
437 Note that in the standard setup only StgTSOs can be on a blocking queue.
438 This is one of the main reasons for slightly different code in files
441 typedef struct StgBlockingQueueElement_ {
443 struct StgBlockingQueueElement_ *link; /* next elem in BQ */
444 struct StgClosure_ *payload[FLEXIBLE_ARRAY];/* contents of the closure */
445 } StgBlockingQueueElement;
447 /* only difference to std code is type of the elem in the BQ */
448 typedef struct StgBlockingQueue_ {
450 struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
453 /* this closure is hanging at the end of a blocking queue in (see RBH.c) */
454 typedef struct StgRBHSave_ {
456 StgClosure *payload[FLEXIBLE_ARRAY]; /* 2 words ripped out of the guts of the */
457 } StgRBHSave; /* closure holding the blocking queue */
459 typedef struct StgRBH_ {
461 struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
467 /* global indirections aka FETCH_ME closures */
468 typedef struct StgFetchMe_ {
470 globalAddr *ga; /* ptr to unique id for a closure */
473 /* same contents as an ordinary StgBlockingQueue */
474 typedef struct StgFetchMeBlockingQueue_ {
476 struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
477 } StgFetchMeBlockingQueue;
/* This is an entry in a blocking queue.  It indicates a fetch request from a
   TSO on another PE demanding the value of this closure.  Note that a
   StgBlockedFetch can only occur in a BQ.  Once the node is evaluated and
   updated with the result, the result will be sent back (the PE is encoded
   in the globalAddr) and the StgBlockedFetch closure will be nuked.
*/
485 typedef struct StgBlockedFetch_ {
487 struct StgBlockingQueueElement_ *link; /* next elem in the BQ */
488 StgClosure *node; /* node to fetch */
489 globalAddr ga; /* where to send the result to */
490 } StgBlockedFetch; /* NB: not just a ptr to a GA */
493 #endif /* CLOSURES_H */