1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
 * External Storage Manager Interface
7 * ---------------------------------------------------------------------------*/
13 #include "OSThreads.h"
16 /* -----------------------------------------------------------------------------
19 * We support an arbitrary number of generations, with an arbitrary number
20 * of steps per generation. Notes (in no particular order):
22 * - all generations except the oldest should have two steps. This gives
23 * objects a decent chance to age before being promoted, and in
24 * particular will ensure that we don't end up with too many
25 * thunks being updated in older generations.
27 * - the oldest generation has one step. There's no point in aging
28 * objects in the oldest generation.
30 * - generation 0, step 0 (G0S0) is the allocation area. It is given
31 * a fixed set of blocks during initialisation, and these blocks
34 * - during garbage collection, each step which is an evacuation
35 * destination (i.e. all steps except G0S0) is allocated a to-space.
36 * evacuated objects are allocated into the step's to-space until
37 * GC is finished, when the original step's contents may be freed
38 * and replaced by the to-space.
40 * - the mutable-list is per-generation (not per-step). G0 doesn't
41 * have one (since every garbage collection collects at least G0).
43 * - block descriptors contain pointers to both the step and the
44 * generation that the block belongs to, for convenience.
46 * - static objects are stored in per-generation lists. See GC.c for
47 * details of how we collect CAFs in the generational scheme.
49 * - large objects are per-step, and are promoted in the same way
50 * as small objects, except that we may allocate large objects into
51 * generation 1 initially.
53 * ------------------------------------------------------------------------- */
// A "step" is one aging stage within a generation: live objects are
// evacuated from one step to the next as they survive collections
// (see the design notes at the top of this file).
typedef struct step_ {
    unsigned int no;                // step number in this generation
    unsigned int abs_no;            // absolute step number
    int is_compacted;               // compact this step? (old gen only)

    struct generation_ * gen;       // generation this step belongs to
    unsigned int gen_no;            // generation number (cached)

    bdescr * blocks;                // blocks in this step
    unsigned int n_blocks;          // number of blocks

    struct step_ * to;              // destination step for live objects

    bdescr * large_objects;         // large objects (doubly linked)
    unsigned int n_large_blocks;    // no. of blocks used by large objs

    // ------------------------------------
    // Fields below are used during GC only

    // During GC, if we are collecting this step, blocks and n_blocks
    // are copied into the following two fields.  After GC, these blocks
    // may be freed.  (NOTE(review): the tail of this comment was
    // truncated in this excerpt -- confirm against the full source.)

#if defined(THREADED_RTS)
    char pad[128];                  // make sure the following is
                                    // on a separate cache line.
    SpinLock sync_todo;             // lock for todos
    SpinLock sync_large_objects;    // lock for large_objects
                                    // and scavenged_large_objects

    bdescr * old_blocks;            // bdescr of first from-space block
    unsigned int n_old_blocks;      // number of blocks in from-space

    bdescr * todos;                 // blocks waiting to be scavenged

    unsigned int n_todos;           // count of above

    bdescr * scavenged_large_objects;      // live large objs after GC (d-link)
    unsigned int n_scavenged_large_blocks; // size (not count) of above

    bdescr * bitmap;                // bitmap for compacting collection
// A generation: an array of steps plus per-generation GC bookkeeping.
typedef struct generation_ {
    unsigned int no;                // generation number
    step * steps;                   // steps
    unsigned int n_steps;           // number of steps
    unsigned int max_blocks;        // max blocks in step 0
    bdescr *mut_list;               // mut objects in this gen (not G0)

    // statistics:
    unsigned int collections;       // times this generation has been collected
    unsigned int failed_promotions; // NOTE(review): presumably objects that
                                    // could not be promoted -- confirm in GC.c

    // temporary use during GC:
    bdescr *saved_mut_list;         // holds the pre-GC mut_list during collection
// Global storage-manager state (RTS_VAR handles linkage/visibility).
extern generation * RTS_VAR(generations);  // all generations, indexed by gen no.
extern generation * RTS_VAR(g0);           // generation 0
extern step * RTS_VAR(g0s0);               // G0S0: the allocation area
extern generation * RTS_VAR(oldest_gen);   // the oldest generation
extern step * RTS_VAR(all_steps);          // NOTE(review): presumably indexed by abs_no
extern nat RTS_VAR(total_steps);           // number of steps overall
126 /* -----------------------------------------------------------------------------
127 Initialisation / De-initialisation
128 -------------------------------------------------------------------------- */
130 extern void initStorage(void);
131 extern void exitStorage(void);
132 extern void freeStorage(void);
134 /* -----------------------------------------------------------------------------
137 StgPtr allocateInGen(generation *g, nat n)
138 Allocates a chunk of contiguous store
139 n words long in generation g,
140 returning a pointer to the first word.
   StgPtr allocate(nat n)       Equivalent to allocateInGen(g0)
145 StgPtr allocateLocal(Capability *cap, nat n)
146 Allocates memory from the nursery in
147 the current Capability. This can be
148 done without taking a global lock,
151 StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
152 n words long, which is at a fixed
153 address (won't be moved by GC).
154 Returns a pointer to the first word.
157 NOTE: the GC can't in general handle
158 pinned objects, so allocatePinned()
159 can only be used for ByteArrays at the
162 Don't forget to TICK_ALLOC_XXX(...)
163 after calling allocate or
164 allocatePinned, for the
165 benefit of the ticky-ticky profiler.
167 rtsBool doYouWantToGC(void) Returns True if the storage manager is
168 ready to perform a GC, False otherwise.
170 lnat allocatedBytes(void) Returns the number of bytes allocated
171 via allocate() since the last GC.
172 Used in the reporting of statistics.
174 -------------------------------------------------------------------------- */
// Allocation entry points -- see the documentation block above for the
// full contracts of each.
extern StgPtr  allocate        ( nat n );                  // n words in g0
extern StgPtr  allocateInGen   ( generation *g, nat n );   // n words in generation g
extern StgPtr  allocateLocal   ( Capability *cap, nat n ); // lock-free nursery alloc
extern StgPtr  allocatePinned  ( nat n );                  // n words, never moved by GC
extern lnat    allocatedBytes  ( void );                   // bytes allocated since last GC

extern bdescr * RTS_VAR(small_alloc_list);
extern bdescr * RTS_VAR(large_alloc_list);
extern bdescr * RTS_VAR(pinned_object_block);

extern nat RTS_VAR(alloc_blocks);       // blocks allocated since last GC
extern nat RTS_VAR(alloc_blocks_lim);   // GC trigger threshold (see doYouWantToGC)
189 INLINE_HEADER rtsBool
190 doYouWantToGC( void )
192 return (alloc_blocks >= alloc_blocks_lim);
195 /* memory allocator for executable memory */
196 extern void *allocateExec (nat bytes);
197 extern void freeExec (void *p);
199 /* for splitting blocks groups in two */
200 extern bdescr * splitLargeBlock (bdescr *bd, nat blocks);
202 /* -----------------------------------------------------------------------------
203 Performing Garbage Collection
205 GarbageCollect(get_roots) Performs a garbage collection.
206 'get_roots' is called to find all the
207 roots that the system knows about.
210 -------------------------------------------------------------------------- */
212 extern void GarbageCollect(rtsBool force_major_gc);
214 /* -----------------------------------------------------------------------------
215 Generational garbage collection support
217 recordMutable(StgPtr p) Informs the garbage collector that a
218 previously immutable object has
219 become (permanently) mutable. Used
220 by thawArray and similar.
222 updateWithIndirection(p1,p2) Updates the object at p1 with an
223 indirection pointing to p2. This is
224 normally called for objects in an old
225 generation (>0) when they are updated.
227 updateWithPermIndirection(p1,p2) As above but uses a permanent indir.
229 -------------------------------------------------------------------------- */
232 * Storage manager mutex
234 #if defined(THREADED_RTS)
235 extern Mutex sm_mutex;
236 extern Mutex atomic_modify_mutvar_mutex;
237 extern SpinLock recordMutableGen_sync;
240 #if defined(THREADED_RTS)
241 #define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex);
242 #define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex);
243 #define ASSERT_SM_LOCK() ASSERT_LOCK_HELD(&sm_mutex);
245 #define ACQUIRE_SM_LOCK
246 #define RELEASE_SM_LOCK
247 #define ASSERT_SM_LOCK()
// Record closure p on gen's mutable list, chaining on a freshly
// allocated block when the head block is full.
// NOTE(review): the return type line, braces and local declarations
// (bd, new_bd) are elided from this excerpt -- see the full source.
recordMutableGen(StgClosure *p, generation *gen)
    if (bd->free >= bd->start + BLOCK_SIZE_W) {
        // head block is full: allocate a new one for the list
        new_bd = allocBlock();
    *bd->free++ = (StgWord)p;
// Lock-taking wrapper around recordMutableGen().
// NOTE(review): the surrounding ACQUIRE/RELEASE lock calls and braces
// are elided from this excerpt.
recordMutableGenLock(StgClosure *p, generation *gen)
    recordMutableGen(p,gen);
275 extern bdescr *allocBlock_sync(void);
277 // Version of recordMutableGen() for use in parallel GC. The same as
278 // recordMutableGen(), except that we surround it with a spinlock and
279 // call the spinlock version of allocBlock().
// Parallel-GC variant of recordMutableGen(): same logic, but guarded by
// the recordMutableGen_sync spinlock and using allocBlock_sync()
// (see the comment above this function).
// NOTE(review): some lines (braces, locals) are elided from this excerpt.
recordMutableGen_GC(StgClosure *p, generation *gen)
    ACQUIRE_SPIN_LOCK(&recordMutableGen_sync);
    if (bd->free >= bd->start + BLOCK_SIZE_W) {
        new_bd = allocBlock_sync();
    *bd->free++ = (StgWord)p;
    RELEASE_SPIN_LOCK(&recordMutableGen_sync);
// Record a mutable closure on its generation's mutable list.  G0 has no
// mutable list (it is always collected), hence the gen_no > 0 test.
// NOTE(review): the declaration of bd (p's block descriptor) and the
// function braces are elided from this excerpt.
recordMutable(StgClosure *p)
    ASSERT(closure_MUTABLE(p));
    if (bd->gen_no > 0) recordMutableGen(p, &RTS_DEREF(generations)[bd->gen_no]);
// Lock-taking variant of recordMutable() (body elided from this excerpt).
recordMutableLock(StgClosure *p)
317 /* -----------------------------------------------------------------------------
318 The CAF table - used to let us revert CAFs in GHCi
319 -------------------------------------------------------------------------- */
321 /* set to disable CAF garbage collection in GHCi. */
322 /* (needed when dynamic libraries are used). */
323 extern rtsBool keepCAFs;
325 /* -----------------------------------------------------------------------------
326 This is the write barrier for MUT_VARs, a.k.a. IORefs. A
327 MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
328 is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
329 and is put on the mutable list.
330 -------------------------------------------------------------------------- */
332 void dirty_MUT_VAR(StgRegTable *reg, StgClosure *p);
334 /* -----------------------------------------------------------------------------
335 DEBUGGING predicates for pointers
337 LOOKS_LIKE_INFO_PTR(p) returns False if p is definitely not an info ptr
338 LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr
340 These macros are complete but not sound. That is, they might
341 return false positives. Do not rely on them to distinguish info
342 pointers from closure pointers, for example.
344 We don't use address-space predicates these days, for portability
345 reasons, and the fact that code/data can be scattered about the
346 address space in a dynamically-linked environment. Our best option
347 is to look at the alleged info table and see whether it seems to
349 -------------------------------------------------------------------------- */
// False if p is definitely not an info pointer; may give false
// positives (see the comment block above).
#define LOOKS_LIKE_INFO_PTR(p) \
   (p && LOOKS_LIKE_INFO_PTR_NOT_NULL(p))
// As LOOKS_LIKE_INFO_PTR, but assumes p is non-NULL: checks that the
// alleged info table's type field lies in the valid range.
#define LOOKS_LIKE_INFO_PTR_NOT_NULL(p) \
   (((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type != INVALID_OBJECT && \
    ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type < N_CLOSURE_TYPES)
// False if p is definitely not a closure pointer: untag it and test
// its info pointer.
#define LOOKS_LIKE_CLOSURE_PTR(p) \
   (LOOKS_LIKE_INFO_PTR((UNTAG_CLOSURE((StgClosure *)(p)))->header.info))
361 /* -----------------------------------------------------------------------------
362 Macros for calculating how big a closure will be (used during allocation)
363 -------------------------------------------------------------------------- */
365 INLINE_HEADER StgOffset PAP_sizeW ( nat n_args )
366 { return sizeofW(StgPAP) + n_args; }
368 INLINE_HEADER StgOffset AP_sizeW ( nat n_args )
369 { return sizeofW(StgAP) + n_args; }
371 INLINE_HEADER StgOffset AP_STACK_sizeW ( nat size )
372 { return sizeofW(StgAP_STACK) + size; }
374 INLINE_HEADER StgOffset CONSTR_sizeW( nat p, nat np )
375 { return sizeofW(StgHeader) + p + np; }
377 INLINE_HEADER StgOffset THUNK_SELECTOR_sizeW ( void )
378 { return sizeofW(StgSelector); }
380 INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void )
381 { return sizeofW(StgHeader)+MIN_PAYLOAD_SIZE; }
383 /* --------------------------------------------------------------------------
385 ------------------------------------------------------------------------*/
387 INLINE_HEADER StgOffset sizeW_fromITBL( const StgInfoTable* itbl )
388 { return sizeofW(StgClosure)
389 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
390 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
392 INLINE_HEADER StgOffset thunk_sizeW_fromITBL( const StgInfoTable* itbl )
393 { return sizeofW(StgThunk)
394 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
395 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
397 INLINE_HEADER StgOffset ap_stack_sizeW( StgAP_STACK* x )
398 { return AP_STACK_sizeW(x->size); }
400 INLINE_HEADER StgOffset ap_sizeW( StgAP* x )
401 { return AP_sizeW(x->n_args); }
403 INLINE_HEADER StgOffset pap_sizeW( StgPAP* x )
404 { return PAP_sizeW(x->n_args); }
406 INLINE_HEADER StgOffset arr_words_sizeW( StgArrWords* x )
407 { return sizeofW(StgArrWords) + x->words; }
409 INLINE_HEADER StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
410 { return sizeofW(StgMutArrPtrs) + x->ptrs; }
412 INLINE_HEADER StgWord tso_sizeW ( StgTSO *tso )
413 { return TSO_STRUCT_SIZEW + tso->stack_size; }
415 INLINE_HEADER StgWord bco_sizeW ( StgBCO *bco )
416 { return bco->size; }
// Size in words of closure p, whose info table is passed as 'info'.
// Dispatches on the closure type and delegates to the sizeW helpers
// above.
// NOTE(review): many 'case' labels (and the function's return type /
// braces) are elided from this excerpt of the source; the bare returns
// below each belong to case labels visible only in the full file.
closure_sizeW_ (StgClosure *p, StgInfoTable *info)
    switch (info->type) {
        return sizeofW(StgThunk) + 1;
        return sizeofW(StgHeader) + 1;
        return sizeofW(StgThunk) + 2;
        return sizeofW(StgHeader) + 2;
        return thunk_sizeW_fromITBL(info);
        return THUNK_SELECTOR_sizeW();
        return ap_stack_sizeW((StgAP_STACK *)p);
        return ap_sizeW((StgAP *)p);
        return pap_sizeW((StgPAP *)p);
    case IND_OLDGEN_PERM:
        return sizeofW(StgInd);
        return arr_words_sizeW((StgArrWords *)p);
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        return mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        return tso_sizeW((StgTSO *)p);
        return bco_sizeW((StgBCO *)p);
    case TVAR_WATCH_QUEUE:
        return sizeofW(StgTVarWatchQueue);
        return sizeofW(StgTVar);
        return sizeofW(StgTRecChunk);
        return sizeofW(StgTRecHeader);
    case ATOMIC_INVARIANT:
        return sizeofW(StgAtomicInvariant);
    case INVARIANT_CHECK_QUEUE:
        return sizeofW(StgInvariantCheckQueue);
        return sizeW_fromITBL(info);
// The definitive way to find the size, in words, of a heap-allocated
// closure: fetch p's info table and delegate to closure_sizeW_ above.
// NOTE(review): return type and braces are elided from this excerpt.
closure_sizeW (StgClosure *p)
    return closure_sizeW_(p, get_itbl(p));
491 /* -----------------------------------------------------------------------------
492 Sizes of stack frames
493 -------------------------------------------------------------------------- */
// Size in words of the stack frame 'frame', determined from its return
// info table.
// NOTE(review): the 'case' labels and some braces of this switch are
// elided from this excerpt of the source.
INLINE_HEADER StgWord stack_frame_sizeW( StgClosure *frame )
    StgRetInfoTable *info;

    info = get_ret_itbl(frame);
    switch (info->i.type) {
        // dynamic return frame: fixed part + dynamic ptr/non-ptr payload
        StgRetDyn *dyn = (StgRetDyn *)frame;
        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
            RET_DYN_NONPTR_REGS_SIZE +
            RET_DYN_PTRS(dyn->liveness) + RET_DYN_NONPTRS(dyn->liveness);
        // RET_FUN frame: fixed part + its recorded size
        return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size;
        // large-bitmap frame: header word + bitmap size
        return 1 + GET_LARGE_BITMAP(&info->i)->size;
        // BCO frame: two fixed words + the BCO's bitmap size
        return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]);
        // small-bitmap frame: header word + bitmap size
        return 1 + BITMAP_SIZE(info->i.layout.bitmap);
524 /* -----------------------------------------------------------------------------
526 -------------------------------------------------------------------------- */
// Nursery (generation 0 allocation area) management.
extern void  allocNurseries       ( void );
extern void  resetNurseries       ( void );
extern void  resizeNurseries      ( nat blocks );
extern void  resizeNurseriesFixed ( nat blocks );
extern lnat  countNurseryBlocks   ( void );
534 /* -----------------------------------------------------------------------------
536 -------------------------------------------------------------------------- */
// Callback type passed to the root-marking functions below (GetRoots,
// markCAFs): invoked with the address of each root pointer.
typedef void (*evac_fn)(StgClosure **);
540 extern void threadPaused ( Capability *cap, StgTSO * );
541 extern StgClosure * isAlive ( StgClosure *p );
542 extern void markCAFs ( evac_fn evac );
543 extern void GetRoots ( evac_fn evac );
545 /* -----------------------------------------------------------------------------
546 Stats 'n' DEBUG stuff
547 -------------------------------------------------------------------------- */
549 extern ullong RTS_VAR(total_allocated);
551 extern lnat calcAllocated ( void );
552 extern lnat calcLiveBlocks ( void );
553 extern lnat calcLiveWords ( void );
554 extern lnat countOccupied ( bdescr *bd );
555 extern lnat calcNeeded ( void );
558 extern void memInventory(rtsBool show);
559 extern void checkSanity(void);
560 extern nat countBlocks(bdescr *);
561 extern void checkNurserySanity( step *stp );
565 void printMutOnceList(generation *gen);
566 void printMutableList(generation *gen);
569 /* ----------------------------------------------------------------------------
570 Storage manager internal APIs and globals
571 ------------------------------------------------------------------------- */
// Sentinel terminating the per-generation static-object lists
// (see GC.c, per the note at the top of this file).
#define END_OF_STATIC_LIST stgCast(StgClosure*,1)
575 extern void newDynCAF(StgClosure *);
577 extern void move_TSO(StgTSO *src, StgTSO *dest);
578 extern StgTSO *relocate_stack(StgTSO *dest, ptrdiff_t diff);
580 extern StgWeak * RTS_VAR(old_weak_ptr_list);
581 extern StgWeak * RTS_VAR(weak_ptr_list);
582 extern StgClosure * RTS_VAR(caf_list);
583 extern StgClosure * RTS_VAR(revertible_caf_list);
584 extern StgTSO * RTS_VAR(resurrected_threads);
586 #endif /* STORAGE_H */