1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
 * External Storage Manager Interface
7 * ---------------------------------------------------------------------------*/
13 #include "OSThreads.h"
16 /* -----------------------------------------------------------------------------
19 * We support an arbitrary number of generations, with an arbitrary number
20 * of steps per generation. Notes (in no particular order):
22 * - all generations except the oldest should have two steps. This gives
23 * objects a decent chance to age before being promoted, and in
24 * particular will ensure that we don't end up with too many
25 * thunks being updated in older generations.
27 * - the oldest generation has one step. There's no point in aging
28 * objects in the oldest generation.
30 * - generation 0, step 0 (G0S0) is the allocation area. It is given
31 * a fixed set of blocks during initialisation, and these blocks
34 * - during garbage collection, each step which is an evacuation
35 * destination (i.e. all steps except G0S0) is allocated a to-space.
36 * evacuated objects are allocated into the step's to-space until
37 * GC is finished, when the original step's contents may be freed
38 * and replaced by the to-space.
40 * - the mutable-list is per-generation (not per-step). G0 doesn't
41 * have one (since every garbage collection collects at least G0).
43 * - block descriptors contain pointers to both the step and the
44 * generation that the block belongs to, for convenience.
46 * - static objects are stored in per-generation lists. See GC.c for
47 * details of how we collect CAFs in the generational scheme.
49 * - large objects are per-step, and are promoted in the same way
50 * as small objects, except that we may allocate large objects into
51 * generation 1 initially.
53 * ------------------------------------------------------------------------- */
55 typedef struct step_ {
56 unsigned int no; // step number in this generation
57 unsigned int abs_no; // absolute step number
58 int is_compacted; // compact this step? (old gen only)
60 struct generation_ * gen; // generation this step belongs to
61 unsigned int gen_no; // generation number (cached)
63 bdescr * blocks; // blocks in this step
64 unsigned int n_blocks; // number of blocks
66 struct step_ * to; // destination step for live objects
68 bdescr * large_objects; // large objects (doubly linked)
69 unsigned int n_large_blocks; // no. of blocks used by large objs
72 // ------------------------------------
73 // Fields below are used during GC only
75 // During GC, if we are collecting this step, blocks and n_blocks
76 // are copied into the following two fields. After GC, these blocks
79 #if defined(THREADED_RTS)
80 char pad[128]; // make sure the following is
81 // on a separate cache line.
82 SpinLock sync_todo; // lock for todos
83 SpinLock sync_large_objects; // lock for large_objects
84 // and scavenged_large_objects
87 bdescr * old_blocks; // bdescr of first from-space block
88 unsigned int n_old_blocks; // number of blocks in from-space
90 bdescr * todos; // blocks waiting to be scavenged
92 unsigned int n_todos; // count of above
94 bdescr * part_blocks; // partially-full scanned blocks
95 unsigned int n_part_blocks; // count of above
97 bdescr * scavenged_large_objects; // live large objs after GC (d-link)
98 unsigned int n_scavenged_large_blocks; // size (not count) of above
100 bdescr * bitmap; // bitmap for compacting collection
106 typedef struct generation_ {
107 unsigned int no; // generation number
108 step * steps; // steps
109 unsigned int n_steps; // number of steps
110 unsigned int max_blocks; // max blocks in step 0
111 bdescr *mut_list; // mut objects in this gen (not G0)
114 unsigned int collections;
115 unsigned int failed_promotions;
117 // temporary use during GC:
118 bdescr *saved_mut_list;
121 extern generation * RTS_VAR(generations);
123 extern generation * RTS_VAR(g0);
124 extern step * RTS_VAR(g0s0);
125 extern generation * RTS_VAR(oldest_gen);
126 extern step * RTS_VAR(all_steps);
127 extern nat RTS_VAR(total_steps);
129 /* -----------------------------------------------------------------------------
130 Initialisation / De-initialisation
131 -------------------------------------------------------------------------- */
133 extern void initStorage(void);
134 extern void exitStorage(void);
135 extern void freeStorage(void);
137 /* -----------------------------------------------------------------------------
140 StgPtr allocateInGen(generation *g, nat n)
141 Allocates a chunk of contiguous store
142 n words long in generation g,
143 returning a pointer to the first word.
StgPtr allocate(nat n)       Equivalent to allocateInGen(g0)
148 StgPtr allocateLocal(Capability *cap, nat n)
149 Allocates memory from the nursery in
150 the current Capability. This can be
151 done without taking a global lock,
154 StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
155 n words long, which is at a fixed
156 address (won't be moved by GC).
157 Returns a pointer to the first word.
160 NOTE: the GC can't in general handle
161 pinned objects, so allocatePinned()
162 can only be used for ByteArrays at the
165 Don't forget to TICK_ALLOC_XXX(...)
166 after calling allocate or
167 allocatePinned, for the
168 benefit of the ticky-ticky profiler.
170 rtsBool doYouWantToGC(void) Returns True if the storage manager is
171 ready to perform a GC, False otherwise.
173 lnat allocatedBytes(void) Returns the number of bytes allocated
174 via allocate() since the last GC.
175 Used in the reporting of statistics.
177 -------------------------------------------------------------------------- */
179 extern StgPtr allocate ( nat n );
180 extern StgPtr allocateInGen ( generation *g, nat n );
181 extern StgPtr allocateLocal ( Capability *cap, nat n );
182 extern StgPtr allocatePinned ( nat n );
183 extern lnat allocatedBytes ( void );
185 extern bdescr * RTS_VAR(small_alloc_list);
186 extern bdescr * RTS_VAR(large_alloc_list);
187 extern bdescr * RTS_VAR(pinned_object_block);
189 extern nat RTS_VAR(alloc_blocks);
190 extern nat RTS_VAR(alloc_blocks_lim);
192 INLINE_HEADER rtsBool
193 doYouWantToGC( void )
195 return (alloc_blocks >= alloc_blocks_lim);
198 /* memory allocator for executable memory */
199 extern void *allocateExec (nat bytes);
200 extern void freeExec (void *p);
202 /* for splitting blocks groups in two */
203 extern bdescr * splitLargeBlock (bdescr *bd, nat blocks);
205 /* -----------------------------------------------------------------------------
206 Performing Garbage Collection
GarbageCollect(force_major_gc)   Performs a garbage collection.
                                 force_major_gc requests a major
                                 collection (see the declaration of
                                 GarbageCollect below; the old
                                 'get_roots' parameter is gone).
213 -------------------------------------------------------------------------- */
215 extern void GarbageCollect(rtsBool force_major_gc);
217 /* -----------------------------------------------------------------------------
218 Generational garbage collection support
220 recordMutable(StgPtr p) Informs the garbage collector that a
221 previously immutable object has
222 become (permanently) mutable. Used
223 by thawArray and similar.
225 updateWithIndirection(p1,p2) Updates the object at p1 with an
226 indirection pointing to p2. This is
227 normally called for objects in an old
228 generation (>0) when they are updated.
230 updateWithPermIndirection(p1,p2) As above but uses a permanent indir.
232 -------------------------------------------------------------------------- */
235 * Storage manager mutex
237 #if defined(THREADED_RTS)
238 extern Mutex sm_mutex;
239 extern Mutex atomic_modify_mutvar_mutex;
240 extern SpinLock recordMutableGen_sync;
/* Storage-manager lock operations.  Only the threaded RTS has a real
 * mutex; in the non-threaded RTS these compile away to nothing.
 * (Restored the #else/#endif that were missing here; without them the
 * second set of definitions redefined the first.)
 */
#if defined(THREADED_RTS)
#define ACQUIRE_SM_LOCK   ACQUIRE_LOCK(&sm_mutex);
#define RELEASE_SM_LOCK   RELEASE_LOCK(&sm_mutex);
#define ASSERT_SM_LOCK()  ASSERT_LOCK_HELD(&sm_mutex);
#else
#define ACQUIRE_SM_LOCK
#define RELEASE_SM_LOCK
#define ASSERT_SM_LOCK()
#endif
254 recordMutableGen(StgClosure *p, generation *gen)
259 if (bd->free >= bd->start + BLOCK_SIZE_W) {
261 new_bd = allocBlock();
266 *bd->free++ = (StgWord)p;
271 recordMutableGenLock(StgClosure *p, generation *gen)
274 recordMutableGen(p,gen);
278 extern bdescr *allocBlock_sync(void);
280 // Version of recordMutableGen() for use in parallel GC. The same as
281 // recordMutableGen(), except that we surround it with a spinlock and
282 // call the spinlock version of allocBlock().
284 recordMutableGen_GC(StgClosure *p, generation *gen)
288 ACQUIRE_SPIN_LOCK(&recordMutableGen_sync);
291 if (bd->free >= bd->start + BLOCK_SIZE_W) {
293 new_bd = allocBlock_sync();
298 *bd->free++ = (StgWord)p;
300 RELEASE_SPIN_LOCK(&recordMutableGen_sync);
304 recordMutable(StgClosure *p)
307 ASSERT(closure_MUTABLE(p));
309 if (bd->gen_no > 0) recordMutableGen(p, &RTS_DEREF(generations)[bd->gen_no]);
313 recordMutableLock(StgClosure *p)
320 /* -----------------------------------------------------------------------------
321 The CAF table - used to let us revert CAFs in GHCi
322 -------------------------------------------------------------------------- */
324 /* set to disable CAF garbage collection in GHCi. */
325 /* (needed when dynamic libraries are used). */
326 extern rtsBool keepCAFs;
328 /* -----------------------------------------------------------------------------
329 This is the write barrier for MUT_VARs, a.k.a. IORefs. A
330 MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
331 is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
332 and is put on the mutable list.
333 -------------------------------------------------------------------------- */
335 void dirty_MUT_VAR(StgRegTable *reg, StgClosure *p);
337 /* -----------------------------------------------------------------------------
338 DEBUGGING predicates for pointers
340 LOOKS_LIKE_INFO_PTR(p) returns False if p is definitely not an info ptr
341 LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr
343 These macros are complete but not sound. That is, they might
344 return false positives. Do not rely on them to distinguish info
345 pointers from closure pointers, for example.
347 We don't use address-space predicates these days, for portability
348 reasons, and the fact that code/data can be scattered about the
349 address space in a dynamically-linked environment. Our best option
350 is to look at the alleged info table and see whether it seems to
352 -------------------------------------------------------------------------- */
/* False if p is definitely not an info pointer; may return false
 * positives (see the comment block above).
 * Fix: parenthesize the macro argument so expressions such as
 * LOOKS_LIKE_INFO_PTR(a = b) expand with the intended precedence. */
#define LOOKS_LIKE_INFO_PTR(p) \
   ((p) && LOOKS_LIKE_INFO_PTR_NOT_NULL(p))
/* Checks that the closure type stored in the alleged info table is in
 * the valid range (INVALID_OBJECT, N_CLOSURE_TYPES).
 * NB: evaluates its argument twice -- do not pass an expression with
 * side effects.  Arguments are now parenthesized at each use. */
#define LOOKS_LIKE_INFO_PTR_NOT_NULL(p) \
   (((StgInfoTable *)(INFO_PTR_TO_STRUCT((p))))->type != INVALID_OBJECT && \
    ((StgInfoTable *)(INFO_PTR_TO_STRUCT((p))))->type < N_CLOSURE_TYPES)
/* False if p is definitely not a closure pointer: untag p and check
 * that its info pointer looks plausible. */
#define LOOKS_LIKE_CLOSURE_PTR(p) \
  (LOOKS_LIKE_INFO_PTR((UNTAG_CLOSURE((StgClosure *)(p)))->header.info))
364 /* -----------------------------------------------------------------------------
365 Macros for calculating how big a closure will be (used during allocation)
366 -------------------------------------------------------------------------- */
368 INLINE_HEADER StgOffset PAP_sizeW ( nat n_args )
369 { return sizeofW(StgPAP) + n_args; }
371 INLINE_HEADER StgOffset AP_sizeW ( nat n_args )
372 { return sizeofW(StgAP) + n_args; }
374 INLINE_HEADER StgOffset AP_STACK_sizeW ( nat size )
375 { return sizeofW(StgAP_STACK) + size; }
377 INLINE_HEADER StgOffset CONSTR_sizeW( nat p, nat np )
378 { return sizeofW(StgHeader) + p + np; }
380 INLINE_HEADER StgOffset THUNK_SELECTOR_sizeW ( void )
381 { return sizeofW(StgSelector); }
383 INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void )
384 { return sizeofW(StgHeader)+MIN_PAYLOAD_SIZE; }
386 /* --------------------------------------------------------------------------
388 ------------------------------------------------------------------------*/
390 INLINE_HEADER StgOffset sizeW_fromITBL( const StgInfoTable* itbl )
391 { return sizeofW(StgClosure)
392 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
393 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
395 INLINE_HEADER StgOffset thunk_sizeW_fromITBL( const StgInfoTable* itbl )
396 { return sizeofW(StgThunk)
397 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
398 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
400 INLINE_HEADER StgOffset ap_stack_sizeW( StgAP_STACK* x )
401 { return AP_STACK_sizeW(x->size); }
403 INLINE_HEADER StgOffset ap_sizeW( StgAP* x )
404 { return AP_sizeW(x->n_args); }
406 INLINE_HEADER StgOffset pap_sizeW( StgPAP* x )
407 { return PAP_sizeW(x->n_args); }
409 INLINE_HEADER StgOffset arr_words_sizeW( StgArrWords* x )
410 { return sizeofW(StgArrWords) + x->words; }
412 INLINE_HEADER StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
413 { return sizeofW(StgMutArrPtrs) + x->ptrs; }
415 INLINE_HEADER StgWord tso_sizeW ( StgTSO *tso )
416 { return TSO_STRUCT_SIZEW + tso->stack_size; }
418 INLINE_HEADER StgWord bco_sizeW ( StgBCO *bco )
419 { return bco->size; }
// Size in words of an arbitrary heap closure p, whose info table is
// info; dispatches on the closure type.
// NOTE(review): several lines appear to have been lost from this view
// of the file (the signature's return-type line, many "case XXX:"
// labels, and the closing braces), so the switch below is incomplete
// as shown -- verify against the original file before building.
closure_sizeW_ (StgClosure *p, StgInfoTable *info)
switch (info->type) {
    return sizeofW(StgThunk) + 1;
    return sizeofW(StgHeader) + 1;
    return sizeofW(StgThunk) + 2;
    return sizeofW(StgHeader) + 2;
    return thunk_sizeW_fromITBL(info);
    return THUNK_SELECTOR_sizeW();
    return ap_stack_sizeW((StgAP_STACK *)p);
    return ap_sizeW((StgAP *)p);
    return pap_sizeW((StgPAP *)p);
case IND_OLDGEN_PERM:
    return sizeofW(StgInd);
    return arr_words_sizeW((StgArrWords *)p);
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
    return mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
    return tso_sizeW((StgTSO *)p);
    return bco_sizeW((StgBCO *)p);
case TVAR_WATCH_QUEUE:
    return sizeofW(StgTVarWatchQueue);
    return sizeofW(StgTVar);
    return sizeofW(StgTRecChunk);
    return sizeofW(StgTRecHeader);
case ATOMIC_INVARIANT:
    return sizeofW(StgAtomicInvariant);
case INVARIANT_CHECK_QUEUE:
    return sizeofW(StgInvariantCheckQueue);
    return sizeW_fromITBL(info);
// The definitive way to find the size, in words, of a heap-allocated closure
// (delegates to closure_sizeW_ with the closure's own info table).
// NOTE(review): the return-type line and the body braces are missing
// from this view of the file -- verify against the original file.
closure_sizeW (StgClosure *p)
return closure_sizeW_(p, get_itbl(p));
494 /* -----------------------------------------------------------------------------
495 Sizes of stack frames
496 -------------------------------------------------------------------------- */
// Size in words of the stack frame pointed to by 'frame', computed
// from its return-address info table.
// NOTE(review): the opening brace, several "case XXX:" labels, and the
// closing braces appear to have been lost from this view of the file,
// so the switch below is incomplete as shown -- verify against the
// original file before building.
INLINE_HEADER StgWord stack_frame_sizeW( StgClosure *frame )
StgRetInfoTable *info;
info = get_ret_itbl(frame);
switch (info->i.type) {
    StgRetDyn *dyn = (StgRetDyn *)frame;
    return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
        RET_DYN_NONPTR_REGS_SIZE +
        RET_DYN_PTRS(dyn->liveness) + RET_DYN_NONPTRS(dyn->liveness);
    return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size;
    return 1 + GET_LARGE_BITMAP(&info->i)->size;
    return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]);
    return 1 + BITMAP_SIZE(info->i.layout.bitmap);
527 /* -----------------------------------------------------------------------------
529 -------------------------------------------------------------------------- */
531 extern void allocNurseries ( void );
532 extern void resetNurseries ( void );
533 extern void resizeNurseries ( nat blocks );
534 extern void resizeNurseriesFixed ( nat blocks );
535 extern lnat countNurseryBlocks ( void );
537 /* -----------------------------------------------------------------------------
539 -------------------------------------------------------------------------- */
541 typedef void (*evac_fn)(StgClosure **);
543 extern void threadPaused ( Capability *cap, StgTSO * );
544 extern StgClosure * isAlive ( StgClosure *p );
545 extern void markCAFs ( evac_fn evac );
546 extern void GetRoots ( evac_fn evac );
548 /* -----------------------------------------------------------------------------
549 Stats 'n' DEBUG stuff
550 -------------------------------------------------------------------------- */
552 extern ullong RTS_VAR(total_allocated);
554 extern lnat calcAllocated ( void );
555 extern lnat calcLiveBlocks ( void );
556 extern lnat calcLiveWords ( void );
557 extern lnat countOccupied ( bdescr *bd );
558 extern lnat calcNeeded ( void );
561 extern void memInventory(rtsBool show);
562 extern void checkSanity(void);
563 extern nat countBlocks(bdescr *);
564 extern void checkNurserySanity( step *stp );
568 void printMutOnceList(generation *gen);
569 void printMutableList(generation *gen);
572 /* ----------------------------------------------------------------------------
573 Storage manager internal APIs and globals
574 ------------------------------------------------------------------------- */
576 #define END_OF_STATIC_LIST stgCast(StgClosure*,1)
578 extern void newDynCAF(StgClosure *);
580 extern void move_TSO(StgTSO *src, StgTSO *dest);
581 extern StgTSO *relocate_stack(StgTSO *dest, ptrdiff_t diff);
583 extern StgWeak * RTS_VAR(old_weak_ptr_list);
584 extern StgWeak * RTS_VAR(weak_ptr_list);
585 extern StgClosure * RTS_VAR(caf_list);
586 extern StgClosure * RTS_VAR(revertible_caf_list);
587 extern StgTSO * RTS_VAR(resurrected_threads);
589 #endif /* STORAGE_H */