1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
 * External Storage Manager Interface
7 * ---------------------------------------------------------------------------*/
13 #include "OSThreads.h"
16 /* -----------------------------------------------------------------------------
19 * We support an arbitrary number of generations, with an arbitrary number
20 * of steps per generation. Notes (in no particular order):
22 * - all generations except the oldest should have two steps. This gives
23 * objects a decent chance to age before being promoted, and in
24 * particular will ensure that we don't end up with too many
25 * thunks being updated in older generations.
27 * - the oldest generation has one step. There's no point in aging
28 * objects in the oldest generation.
30 * - generation 0, step 0 (G0S0) is the allocation area. It is given
31 * a fixed set of blocks during initialisation, and these blocks
34 * - during garbage collection, each step which is an evacuation
35 * destination (i.e. all steps except G0S0) is allocated a to-space.
36 * evacuated objects are allocated into the step's to-space until
37 * GC is finished, when the original step's contents may be freed
38 * and replaced by the to-space.
40 * - the mutable-list is per-generation (not per-step). G0 doesn't
41 * have one (since every garbage collection collects at least G0).
43 * - block descriptors contain pointers to both the step and the
44 * generation that the block belongs to, for convenience.
46 * - static objects are stored in per-generation lists. See GC.c for
47 * details of how we collect CAFs in the generational scheme.
49 * - large objects are per-step, and are promoted in the same way
50 * as small objects, except that we may allocate large objects into
51 * generation 1 initially.
53 * ------------------------------------------------------------------------- */
// A "step" within a generation (see the overview comment above): objects
// age through the steps of a generation before being promoted to the
// next generation.
// NOTE(review): some members and the closing "} step;" of this struct
// appear to be elided in this view — do not assume this is the full layout.
55 typedef struct step_ {
56     unsigned int no;		// step number in this generation
57     unsigned int abs_no;	// absolute step number
58     int is_compacted;		// compact this step? (old gen only)
60     struct generation_ * gen;	// generation this step belongs to
61     unsigned int gen_no;         // generation number (cached)
63     bdescr * blocks;		// blocks in this step
64     unsigned int n_blocks;	// number of blocks
65     unsigned int n_words;	// number of words
67     struct step_ * to;		// destination step for live objects
69     bdescr * large_objects;	 // large objects (doubly linked)
70     unsigned int n_large_blocks; // no. of blocks used by large objs
72     StgTSO * threads;            // threads in this step
73                                  // linked via global_link
75     // ------------------------------------
76     // Fields below are used during GC only
78     // During GC, if we are collecting this step, blocks and n_blocks
79     // are copied into the following two fields.  After GC, these blocks
       // may be freed (old_blocks / n_old_blocks below hold the from-space).
82 #if defined(THREADED_RTS)
83     char pad[128];                       // make sure the following is
84                                          // on a separate cache line.
85     SpinLock  sync_todo;                 // lock for todos
86     SpinLock  sync_large_objects;        // lock for large_objects
87                                          // and scavenged_large_objects
       // NOTE(review): the matching #endif is elided in this view.
90     bdescr *     old_blocks;	        // bdescr of first from-space block
91     unsigned int n_old_blocks;		// number of blocks in from-space
93     bdescr *     todos;		// blocks waiting to be scavenged
95     unsigned int n_todos;		// count of above
97     bdescr *     part_blocks;           // partially-full scanned blocks
98     unsigned int n_part_blocks;         // count of above
100     bdescr *     scavenged_large_objects;  // live large objs after GC (d-link)
101     unsigned int n_scavenged_large_blocks; // size (not count) of above
103     bdescr *     bitmap;  		// bitmap for compacting collection
       // threads list as it was at the start of GC — TODO confirm
       // (parallels old_blocks; declaration context elided here)
105     StgTSO *     old_threads;
// A generation of the heap: an array of steps plus the per-generation
// mutable list (see the overview comment at the top of this file).
// NOTE(review): some members and the closing "} generation;" appear to
// be elided in this view.
110 typedef struct generation_ {
111     unsigned int   no;			// generation number
112     step *         steps;			// steps
113     unsigned int   n_steps;		// number of steps
114     unsigned int   max_blocks;		// max blocks in step 0
115     bdescr        *mut_list;      	// mut objects in this gen (not G0)
        // statistics — presumably counts of collections of this
        // generation (par_collections: parallel ones) — TODO confirm
118     unsigned int   collections;
119     unsigned int   par_collections;
120     unsigned int   failed_promotions;
122     // temporary use during GC:
123     bdescr        *saved_mut_list;
126 extern generation * RTS_VAR(generations);
128 extern generation * RTS_VAR(g0);
129 extern step * RTS_VAR(g0s0);
130 extern generation * RTS_VAR(oldest_gen);
131 extern step * RTS_VAR(all_steps);
132 extern nat RTS_VAR(total_steps);
/* -----------------------------------------------------------------------------
   Initialisation / De-initialisation
   -------------------------------------------------------------------------- */

extern void initStorage(void);
extern void exitStorage(void);
extern void freeStorage(void);
142 /* -----------------------------------------------------------------------------
145 StgPtr allocateInGen(generation *g, nat n)
146 Allocates a chunk of contiguous store
147 n words long in generation g,
148 returning a pointer to the first word.
   StgPtr allocate(nat n)       Equivalent to allocateInGen(g0,n)
153 StgPtr allocateLocal(Capability *cap, nat n)
154 Allocates memory from the nursery in
155 the current Capability. This can be
156 done without taking a global lock,
159 StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
160 n words long, which is at a fixed
161 address (won't be moved by GC).
162 Returns a pointer to the first word.
165 NOTE: the GC can't in general handle
166 pinned objects, so allocatePinned()
167 can only be used for ByteArrays at the
170 Don't forget to TICK_ALLOC_XXX(...)
171 after calling allocate or
172 allocatePinned, for the
173 benefit of the ticky-ticky profiler.
175 rtsBool doYouWantToGC(void) Returns True if the storage manager is
176 ready to perform a GC, False otherwise.
178 lnat allocatedBytes(void) Returns the number of bytes allocated
179 via allocate() since the last GC.
180 Used in the reporting of statistics.
182 -------------------------------------------------------------------------- */
184 extern StgPtr allocate ( nat n );
185 extern StgPtr allocateInGen ( generation *g, nat n );
186 extern StgPtr allocateLocal ( Capability *cap, nat n );
187 extern StgPtr allocatePinned ( nat n );
188 extern lnat allocatedBytes ( void );
190 extern bdescr * RTS_VAR(small_alloc_list);
191 extern bdescr * RTS_VAR(large_alloc_list);
192 extern bdescr * RTS_VAR(pinned_object_block);
194 extern nat RTS_VAR(alloc_blocks);
195 extern nat RTS_VAR(alloc_blocks_lim);
197 INLINE_HEADER rtsBool
198 doYouWantToGC( void )
200 return (alloc_blocks >= alloc_blocks_lim);
203 /* memory allocator for executable memory */
204 extern void *allocateExec (nat bytes);
205 extern void freeExec (void *p);
207 /* for splitting blocks groups in two */
208 extern bdescr * splitLargeBlock (bdescr *bd, nat blocks);
210 /* -----------------------------------------------------------------------------
211 Performing Garbage Collection
213 GarbageCollect(get_roots) Performs a garbage collection.
214 'get_roots' is called to find all the
215 roots that the system knows about.
218 -------------------------------------------------------------------------- */
220 extern void GarbageCollect(rtsBool force_major_gc);
222 /* -----------------------------------------------------------------------------
223 Generational garbage collection support
225 recordMutable(StgPtr p) Informs the garbage collector that a
226 previously immutable object has
227 become (permanently) mutable. Used
228 by thawArray and similar.
230 updateWithIndirection(p1,p2) Updates the object at p1 with an
231 indirection pointing to p2. This is
232 normally called for objects in an old
233 generation (>0) when they are updated.
235 updateWithPermIndirection(p1,p2) As above but uses a permanent indir.
237 -------------------------------------------------------------------------- */
240  * Storage manager mutex
242 #if defined(THREADED_RTS)
// Global locks protecting the storage manager's shared state in the
// threaded RTS.
243 extern Mutex sm_mutex;
244 extern Mutex atomic_modify_mutvar_mutex;
245 extern SpinLock recordMutableGen_sync;
248 #if defined(THREADED_RTS)
// Threaded RTS: the SM lock is a real mutex ...
249 #define ACQUIRE_SM_LOCK   ACQUIRE_LOCK(&sm_mutex);
250 #define RELEASE_SM_LOCK   RELEASE_LOCK(&sm_mutex);
251 #define ASSERT_SM_LOCK()  ASSERT_LOCK_HELD(&sm_mutex);
// ... and otherwise the lock operations are no-ops.
// NOTE(review): the #else and #endif lines appear to be elided in this view.
253 #define ACQUIRE_SM_LOCK
254 #define RELEASE_SM_LOCK
255 #define ASSERT_SM_LOCK()
// Append closure p to generation gen's mutable list, starting a fresh
// block when the current one is full.  Caller must hold the SM lock —
// TODO confirm (surrounding declarations are elided in this view).
259 recordMutableGen(StgClosure *p, generation *gen)
264     if (bd->free >= bd->start + BLOCK_SIZE_W) {
266         new_bd = allocBlock();
271     *bd->free++ = (StgWord)p;

// Locking wrapper around recordMutableGen() — presumably takes/releases
// the SM lock around the update (body partially elided in this view).
276 recordMutableGenLock(StgClosure *p, generation *gen)
279     recordMutableGen(p,gen);

283 extern bdescr *allocBlock_sync(void);

285 // Version of recordMutableGen() for use in parallel GC.  The same as
286 // recordMutableGen(), except that we surround it with a spinlock and
287 // call the spinlock version of allocBlock().
289 recordMutableGen_GC(StgClosure *p, generation *gen)
293     ACQUIRE_SPIN_LOCK(&recordMutableGen_sync);
296     if (bd->free >= bd->start + BLOCK_SIZE_W) {
298         new_bd = allocBlock_sync();
303     *bd->free++ = (StgWord)p;
305     RELEASE_SPIN_LOCK(&recordMutableGen_sync);
// Record p on its generation's mutable list, but only when it lives in
// an old generation (gen_no > 0); generation 0 has no mutable list.
309 recordMutable(StgClosure *p)
312     ASSERT(closure_MUTABLE(p));
314     if (bd->gen_no > 0) recordMutableGen(p, &RTS_DEREF(generations)[bd->gen_no]);

// As recordMutable(), but presumably wrapped in locking — the body of
// this function is elided in this view; TODO confirm.
318 recordMutableLock(StgClosure *p)
325 /* -----------------------------------------------------------------------------
326 The CAF table - used to let us revert CAFs in GHCi
327 -------------------------------------------------------------------------- */
329 /* set to disable CAF garbage collection in GHCi. */
330 /* (needed when dynamic libraries are used). */
331 extern rtsBool keepCAFs;
333 /* -----------------------------------------------------------------------------
334 This is the write barrier for MUT_VARs, a.k.a. IORefs. A
335 MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
336 is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
337 and is put on the mutable list.
338 -------------------------------------------------------------------------- */
340 void dirty_MUT_VAR(StgRegTable *reg, StgClosure *p);
/* -----------------------------------------------------------------------------
   DEBUGGING predicates for pointers

   LOOKS_LIKE_INFO_PTR(p)    returns False if p is definitely not an info ptr
   LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr

   These macros are complete but not sound.  That is, they might
   return false positives.  Do not rely on them to distinguish info
   pointers from closure pointers, for example.

   We don't use address-space predicates these days, for portability
   reasons, and the fact that code/data can be scattered about the
   address space in a dynamically-linked environment.  Our best option
   is to look at the alleged info table and see whether it seems to
   make sense.
   -------------------------------------------------------------------------- */

#define LOOKS_LIKE_INFO_PTR(p) \
   (p && LOOKS_LIKE_INFO_PTR_NOT_NULL(p))

#define LOOKS_LIKE_INFO_PTR_NOT_NULL(p) \
   (((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type != INVALID_OBJECT && \
    ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type < N_CLOSURE_TYPES)

#define LOOKS_LIKE_CLOSURE_PTR(p) \
   (LOOKS_LIKE_INFO_PTR((UNTAG_CLOSURE((StgClosure *)(p)))->header.info))
369 /* -----------------------------------------------------------------------------
370 Macros for calculating how big a closure will be (used during allocation)
371 -------------------------------------------------------------------------- */
373 INLINE_HEADER StgOffset PAP_sizeW ( nat n_args )
374 { return sizeofW(StgPAP) + n_args; }
376 INLINE_HEADER StgOffset AP_sizeW ( nat n_args )
377 { return sizeofW(StgAP) + n_args; }
379 INLINE_HEADER StgOffset AP_STACK_sizeW ( nat size )
380 { return sizeofW(StgAP_STACK) + size; }
382 INLINE_HEADER StgOffset CONSTR_sizeW( nat p, nat np )
383 { return sizeofW(StgHeader) + p + np; }
385 INLINE_HEADER StgOffset THUNK_SELECTOR_sizeW ( void )
386 { return sizeofW(StgSelector); }
388 INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void )
389 { return sizeofW(StgHeader)+MIN_PAYLOAD_SIZE; }
391 /* --------------------------------------------------------------------------
393 ------------------------------------------------------------------------*/
395 INLINE_HEADER StgOffset sizeW_fromITBL( const StgInfoTable* itbl )
396 { return sizeofW(StgClosure)
397 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
398 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
400 INLINE_HEADER StgOffset thunk_sizeW_fromITBL( const StgInfoTable* itbl )
401 { return sizeofW(StgThunk)
402 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
403 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
405 INLINE_HEADER StgOffset ap_stack_sizeW( StgAP_STACK* x )
406 { return AP_STACK_sizeW(x->size); }
408 INLINE_HEADER StgOffset ap_sizeW( StgAP* x )
409 { return AP_sizeW(x->n_args); }
411 INLINE_HEADER StgOffset pap_sizeW( StgPAP* x )
412 { return PAP_sizeW(x->n_args); }
414 INLINE_HEADER StgOffset arr_words_sizeW( StgArrWords* x )
415 { return sizeofW(StgArrWords) + x->words; }
417 INLINE_HEADER StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
418 { return sizeofW(StgMutArrPtrs) + x->ptrs; }
420 INLINE_HEADER StgWord tso_sizeW ( StgTSO *tso )
421 { return TSO_STRUCT_SIZEW + tso->stack_size; }
423 INLINE_HEADER StgWord bco_sizeW ( StgBCO *bco )
424 { return bco->size; }
// Size in words of closure p, whose info table is 'info': dispatch on
// the closure type to the appropriate size helper above.
// NOTE(review): many case labels of this switch (and the enclosing
// braces/default) are elided in this view — each bare 'return' below
// belongs to case labels not shown here.
427 closure_sizeW_ (StgClosure *p, StgInfoTable *info)
429     switch (info->type) {
432         return sizeofW(StgThunk) + 1;
437         return sizeofW(StgHeader) + 1;
441         return sizeofW(StgThunk) + 2;
448         return sizeofW(StgHeader) + 2;
450         return thunk_sizeW_fromITBL(info);
452         return THUNK_SELECTOR_sizeW();
454         return ap_stack_sizeW((StgAP_STACK *)p);
456         return ap_sizeW((StgAP *)p);
458         return pap_sizeW((StgPAP *)p);
462     case IND_OLDGEN_PERM:
463         return sizeofW(StgInd);
465         return arr_words_sizeW((StgArrWords *)p);
466     case MUT_ARR_PTRS_CLEAN:
467     case MUT_ARR_PTRS_DIRTY:
468     case MUT_ARR_PTRS_FROZEN:
469     case MUT_ARR_PTRS_FROZEN0:
470         return mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
472         return tso_sizeW((StgTSO *)p);
474         return bco_sizeW((StgBCO *)p);
475     case TVAR_WATCH_QUEUE:
476         return sizeofW(StgTVarWatchQueue);
478         return sizeofW(StgTVar);
480         return sizeofW(StgTRecChunk);
482         return sizeofW(StgTRecHeader);
483     case ATOMIC_INVARIANT:
484         return sizeofW(StgAtomicInvariant);
485     case INVARIANT_CHECK_QUEUE:
486         return sizeofW(StgInvariantCheckQueue);
488         return sizeW_fromITBL(info);

492 // The definitive way to find the size, in words, of a heap-allocated closure
494 closure_sizeW (StgClosure *p)
496     return closure_sizeW_(p, get_itbl(p));
499 /* -----------------------------------------------------------------------------
500    Sizes of stack frames
501    -------------------------------------------------------------------------- */

// Size in words of the stack frame 'frame', computed from its return
// info table.  NOTE(review): the case labels of this switch (and some
// braces) are elided in this view; the returns below handle the
// different frame layouts (RET_DYN, RET_FUN, large-bitmap, BCO, and
// small-bitmap frames respectively) — TODO confirm against full source.
503 INLINE_HEADER StgWord stack_frame_sizeW( StgClosure *frame )
505     StgRetInfoTable *info;
507     info = get_ret_itbl(frame);
508     switch (info->i.type) {
512 	StgRetDyn *dyn = (StgRetDyn *)frame;
513 	return  sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
514 	    RET_DYN_NONPTR_REGS_SIZE +
515 	    RET_DYN_PTRS(dyn->liveness) + RET_DYN_NONPTRS(dyn->liveness);
519 	return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size;
522 	return 1 + GET_LARGE_BITMAP(&info->i)->size;
525 	return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]);
528 	return 1 + BITMAP_SIZE(info->i.layout.bitmap);
532 /* -----------------------------------------------------------------------------
534 -------------------------------------------------------------------------- */
536 extern void allocNurseries ( void );
537 extern void resetNurseries ( void );
538 extern void resizeNurseries ( nat blocks );
539 extern void resizeNurseriesFixed ( nat blocks );
540 extern lnat countNurseryBlocks ( void );
543 /* -----------------------------------------------------------------------------
545 -------------------------------------------------------------------------- */
547 typedef void (*evac_fn)(void *user, StgClosure **root);
549 extern void threadPaused ( Capability *cap, StgTSO * );
550 extern StgClosure * isAlive ( StgClosure *p );
551 extern void markCAFs ( evac_fn evac, void *user );
552 extern void GetRoots ( evac_fn evac, void *user );
554 /* -----------------------------------------------------------------------------
555 Stats 'n' DEBUG stuff
556 -------------------------------------------------------------------------- */
558 extern ullong RTS_VAR(total_allocated);
560 extern lnat calcAllocated ( void );
561 extern lnat calcLiveBlocks ( void );
562 extern lnat calcLiveWords ( void );
563 extern lnat countOccupied ( bdescr *bd );
564 extern lnat calcNeeded ( void );
567 extern void memInventory(rtsBool show);
568 extern void checkSanity(void);
569 extern nat countBlocks(bdescr *);
570 extern void checkNurserySanity( step *stp );
574 void printMutOnceList(generation *gen);
575 void printMutableList(generation *gen);
578 /* ----------------------------------------------------------------------------
579 Storage manager internal APIs and globals
580 ------------------------------------------------------------------------- */
582 #define END_OF_STATIC_LIST stgCast(StgClosure*,1)
584 extern void newDynCAF(StgClosure *);
586 extern void move_TSO(StgTSO *src, StgTSO *dest);
587 extern StgTSO *relocate_stack(StgTSO *dest, ptrdiff_t diff);
589 extern StgWeak * RTS_VAR(old_weak_ptr_list);
590 extern StgWeak * RTS_VAR(weak_ptr_list);
591 extern StgClosure * RTS_VAR(caf_list);
592 extern StgClosure * RTS_VAR(revertible_caf_list);
593 extern StgTSO * RTS_VAR(resurrected_threads);
595 #endif /* STORAGE_H */