1 /* ----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
7 * -------------------------------------------------------------------------- */
13 * The Layout of a closure header depends on which kind of system we're
14 * compiling for: profiling, parallel, ticky, etc.
17 /* -----------------------------------------------------------------------------
19 -------------------------------------------------------------------------- */
24 struct _RetainerSet *rs; /* Retainer Set */
25 StgWord ldvw; /* Lag/Drag/Void Word */
29 /* -----------------------------------------------------------------------------
31 -------------------------------------------------------------------------- */
34 StgWord procs; /* bitmask indicating on which PEs this closure resides */
37 /* -----------------------------------------------------------------------------
40 In SMP mode, we have an extra word of padding in a thunk's header.
41 (Note: thunks only; other closures do not have this padding word).
42 -------------------------------------------------------------------------- */
48 /* -----------------------------------------------------------------------------
49 The full fixed-size closure header
51 The size of the fixed header is the sum of the optional parts plus a single
52 word for the entry code pointer.
53 -------------------------------------------------------------------------- */
56 const struct _StgInfoTable* info;
66 * In SMP mode, a thunk has a padding word to take the updated value.
67 * This is so that the update doesn't overwrite the payload, so we can
68 * avoid needing to lock the thunk during entry and update.
70 * Note: this doesn't apply to THUNK_STATICs, which have no payload.
73 const struct _StgInfoTable* info;
81 StgSMPThunkHeader smp;
85 /* -----------------------------------------------------------------------------
88 For any given closure type (defined in InfoTables.h), there is a
89 corresponding structure defined below. The name of the structure
90 is obtained by concatenating the closure type with '_closure'
91 -------------------------------------------------------------------------- */
93 /* All closures follow the generic format */
97 struct StgClosure_ *payload[FLEXIBLE_ARRAY];
101 StgThunkHeader header;
102 struct StgClosure_ *payload[FLEXIBLE_ARRAY];
106 StgThunkHeader header;
107 StgClosure *selectee;
112 StgHalfWord arity; /* zero if it is an AP */
114 StgClosure *fun; /* really points to a fun */
115 StgClosure *payload[FLEXIBLE_ARRAY];
119 StgThunkHeader header;
120 StgHalfWord arity; /* zero if it is an AP */
122 StgClosure *fun; /* really points to a fun */
123 StgClosure *payload[FLEXIBLE_ARRAY];
127 StgThunkHeader header;
128 StgWord size; /* number of words in payload */
130 StgClosure *payload[FLEXIBLE_ARRAY]; /* contains a chunk of *stack* */
135 StgClosure *indirectee;
140 StgClosure *indirectee;
141 StgClosure *static_link;
142 struct _StgInfoTable *saved_info;
148 StgWord payload[FLEXIBLE_ARRAY];
154 StgClosure *payload[FLEXIBLE_ARRAY];
162 typedef struct _StgUpdateFrame {
169 StgInt exceptions_blocked;
185 } StgIntCharlikeClosure;
187 /* statically allocated */
192 typedef struct _StgForeignObj {
194 StgAddr data; /* pointer to data in non-haskell-land */
197 typedef struct _StgStableName {
202 typedef struct _StgWeak { /* Weak v */
205 StgClosure *value; /* v */
206 StgClosure *finalizer;
207 struct _StgWeak *link;
210 typedef struct _StgDeadWeak { /* Weak v */
212 struct _StgWeak *link;
215 /* Byte code objects. These are fixed size objects with pointers to
216 * four arrays, designed so that a BCO can be easily "re-linked" to
217 * other BCOs, to facilitate GHC's intelligent recompilation. The
218 * array of instructions is static and not re-generated when the BCO
219 * is re-linked, but the other 3 arrays will be regenerated.
221 * A BCO represents either a function or a stack frame. In each case,
222 * it needs a bitmap to describe to the garbage collector the
223 * pointerhood of its arguments/free variables respectively, and in
224 * the case of a function it also needs an arity. These are stored
225 * directly in the BCO, rather than in the instrs array, for two
227 * (a) speed: we need to get at the bitmap info quickly when
228 * the GC is examining APs and PAPs that point to this BCO
229 * (b) a subtle interaction with the compacting GC. In compacting
230 * GC, the info that describes the size/layout of a closure
231 * cannot be in an object more than one level of indirection
232 * away from the current object, because of the order in
233 * which pointers are updated to point to their new locations.
238 StgArrWords *instrs; /* a pointer to an ArrWords */
239 StgArrWords *literals; /* a pointer to an ArrWords */
240 StgMutArrPtrs *ptrs; /* a pointer to a MutArrPtrs */
241 StgArrWords *itbls; /* a pointer to an ArrWords */
242 StgHalfWord arity; /* arity of this BCO */
243 StgHalfWord size; /* size of this BCO (in words) */
244 StgWord bitmap[FLEXIBLE_ARRAY]; /* an StgLargeBitmap */
/* Accessors for a BCO's GC bitmap, which lives inline at the end of the
 * BCO (the bitmap[] flexible array member holds an StgLargeBitmap
 * describing the pointerhood of the BCO's arguments/free variables).
 */
#define BCO_BITMAP(bco) ((StgLargeBitmap *)((StgBCO *)(bco))->bitmap)
#define BCO_BITMAP_SIZE(bco) (BCO_BITMAP(bco)->size)
#define BCO_BITMAP_BITS(bco) (BCO_BITMAP(bco)->bitmap)
250 #define BCO_BITMAP_SIZEW(bco) ((BCO_BITMAP_SIZE(bco) + BITS_IN(StgWord) - 1) \
253 /* -----------------------------------------------------------------------------
254 Dynamic stack frames for generic heap checks.
256 These generic heap checks are slow, but have the advantage of being
257 usable in a variety of situations.
259 The one restriction is that any relevant SRTs must already be pointed
260 to from the stack. The return address doesn't need to have an info
261 table attached: hence it can be any old code pointer.
263 The liveness mask contains a 1 at bit n, if register Rn contains a
264 non-pointer. The contents of all 8 vanilla registers are always saved
265 on the stack; the liveness mask tells the GC which ones contain
268 Good places to use a generic heap check:
270 - case alternatives (the return address with an SRT is already
273 - primitives (no SRT required).
275 The stack frame layout for a RET_DYN is like this:
277 some pointers |-- RET_DYN_PTRS(liveness) words
278 some nonpointers |-- RET_DYN_NONPTRS(liveness) words
281 D1-2 |-- RET_DYN_NONPTR_REGS_SIZE words
284 R1-8 |-- RET_DYN_BITMAP_SIZE words
287 liveness mask |-- StgRetDyn structure
290 we assume that the size of a double is always 2 pointers (wasting a
291 word when it is only one pointer, but avoiding lots of #ifdefs).
293 See Liveness.h for the macros (RET_DYN_PTRS() etc.).
295 NOTE: if you change the layout of RET_DYN stack frames, then you
296 might also need to adjust the value of RESERVED_STACK_WORDS in
298 -------------------------------------------------------------------------- */
301 const struct _StgInfoTable* info;
304 StgClosure * payload[FLEXIBLE_ARRAY];
307 /* A function return stack frame: used when saving the state for a
308 * garbage collection at a function entry point. The function
309 * arguments are on the stack, and we also save the function (its
310 * info table describes the pointerhood of the arguments).
312 * The stack frame size is also cached in the frame for convenience.
315 const struct _StgInfoTable* info;
318 StgClosure * payload[FLEXIBLE_ARRAY];
321 /* Concurrent communication objects */
325 struct StgTSO_ *head;
326 struct StgTSO_ *tail;
331 /* STM data structures
333 * StgTVar defines the only type that can be updated through the STM
336 * Note that various optimisations may be possible in order to use less
337 * space for these data structures at the cost of more complexity in the
340 * - In StgTVar, current_value and first_wait_queue_entry could be held in
341 * the same field: if any thread is waiting then its expected_value for
342 * the tvar is the current value.
344 * - In StgTRecHeader, it might be worthwhile having separate chunks
345 * of read-only and read-write locations. This would save a
346 * new_value field in the read-only locations.
349 typedef struct StgTVarWaitQueue_ {
351 struct StgTSO_ *waiting_tso;
352 struct StgTVarWaitQueue_ *next_queue_entry;
353 struct StgTVarWaitQueue_ *prev_queue_entry;
358 StgClosure *volatile current_value;
359 StgTVarWaitQueue *volatile first_wait_queue_entry;
360 struct StgTRecHeader_ *volatile last_update_by;
363 /* new_value == expected_value for read-only accesses */
364 /* new_value is a StgTVarWaitQueue entry when trec in state TREC_WAITING */
367 StgClosure *expected_value;
368 StgClosure *new_value;
369 struct StgTRecHeader_ *saw_update_by;
/* Number of TRecEntry slots per StgTRecChunk; a transaction touching more
 * locations chains additional chunks together (via prev_chunk).
 */
#define TREC_CHUNK_NUM_ENTRIES 256
374 typedef struct StgTRecChunk_ {
376 struct StgTRecChunk_ *prev_chunk;
377 StgWord next_entry_idx;
378 TRecEntry entries[TREC_CHUNK_NUM_ENTRIES];
382 TREC_ACTIVE, /* Transaction in progress, outcome undecided */
383 TREC_CONDEMNED, /* Transaction in progress, inconsistent / out of date reads */
384 TREC_COMMITTED, /* Transaction has committed, now updating tvars */
385 TREC_ABORTED, /* Transaction has aborted, now reverting tvars */
386 TREC_WAITING, /* Transaction currently waiting */
389 typedef struct StgTRecHeader_ {
392 struct StgTRecHeader_ *enclosing_trec;
393 StgTRecChunk *current_chunk;
400 } StgAtomicallyFrame;
409 StgBool running_alt_code;
410 StgClosure *first_code;
411 StgClosure *alt_code;
412 StgTRecHeader *first_code_trec;
413 } StgCatchRetryFrame;
415 #if defined(PAR) || defined(GRAN)
417 StgBlockingQueueElement is a ``collective type'' representing the types
418 of closures that can be found on a blocking queue: StgTSO, StgRBHSave,
419 StgBlockedFetch. (StgRBHSave can only appear at the end of a blocking
420 queue). Logically, this is a union type, but defining another struct
421 with a common layout is easier to handle in the code.
422 Note that in the standard setup only StgTSOs can be on a blocking queue.
423 This is one of the main reasons for slightly different code in files
426 typedef struct StgBlockingQueueElement_ {
428 struct StgBlockingQueueElement_ *link; /* next elem in BQ */
429 struct StgClosure_ *payload[FLEXIBLE_ARRAY];/* contents of the closure */
430 } StgBlockingQueueElement;
432 /* only difference to std code is type of the elem in the BQ */
433 typedef struct StgBlockingQueue_ {
435 struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
438 /* this closure is hanging at the end of a blocking queue (see RBH.c) */
439 typedef struct StgRBHSave_ {
441 StgClosure *payload[FLEXIBLE_ARRAY]; /* 2 words ripped out of the guts of the */
442 } StgRBHSave; /* closure holding the blocking queue */
444 typedef struct StgRBH_ {
446 struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
452 /* global indirections aka FETCH_ME closures */
453 typedef struct StgFetchMe_ {
455 globalAddr *ga; /* ptr to unique id for a closure */
458 /* same contents as an ordinary StgBlockingQueue */
459 typedef struct StgFetchMeBlockingQueue_ {
461 struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
462 } StgFetchMeBlockingQueue;
464 /* This is an entry in a blocking queue. It indicates a fetch request from a
465 TSO on another PE demanding the value of this closure. Note that a
466 StgBlockedFetch can only occur in a BQ. Once the node is evaluated and
467 updated with the result, the result will be sent back (the PE is encoded
468 in the globalAddr) and the StgBlockedFetch closure will be nuked.
470 typedef struct StgBlockedFetch_ {
472 struct StgBlockingQueueElement_ *link; /* next elem in the BQ */
473 StgClosure *node; /* node to fetch */
474 globalAddr ga; /* where to send the result to */
475 } StgBlockedFetch; /* NB: not just a ptr to a GA */
478 #endif /* CLOSURES_H */