1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
5 * External Storage Manager Interface
7 * ---------------------------------------------------------------------------*/
13 #include "OSThreads.h"
16 /* -----------------------------------------------------------------------------
19 * We support an arbitrary number of generations, with an arbitrary number
20 * of steps per generation. Notes (in no particular order):
22 * - all generations except the oldest should have two steps. This gives
23 * objects a decent chance to age before being promoted, and in
24 * particular will ensure that we don't end up with too many
25 * thunks being updated in older generations.
27 * - the oldest generation has one step. There's no point in aging
28 * objects in the oldest generation.
30 * - generation 0, step 0 (G0S0) is the allocation area. It is given
31 * a fixed set of blocks during initialisation, and these blocks
34 * - during garbage collection, each step which is an evacuation
35 * destination (i.e. all steps except G0S0) is allocated a to-space.
36 * Evacuated objects are allocated into the step's to-space until
37 * GC is finished, when the original step's contents may be freed
38 * and replaced by the to-space.
40 * - the mutable-list is per-generation (not per-step). G0 doesn't
41 * have one (since every garbage collection collects at least G0).
43 * - block descriptors contain pointers to both the step and the
44 * generation that the block belongs to, for convenience.
46 * - static objects are stored in per-generation lists. See GC.c for
47 * details of how we collect CAFs in the generational scheme.
49 * - large objects are per-step, and are promoted in the same way
50 * as small objects, except that we may allocate large objects into
51 * generation 1 initially.
53 * ------------------------------------------------------------------------- */
/*
 * A "step" is one ageing stage inside a generation: objects live in the
 * step's block list and are evacuated to step->to at the next GC.
 * NOTE(review): this chunk is missing lines — the matching #endif for
 * the THREADED_RTS section and the closing "} step;" are not visible.
 */
55 typedef struct step_ {
56 unsigned int no; // step number in this generation
57 unsigned int abs_no; // absolute step number
59 struct generation_ * gen; // generation this step belongs to
60 unsigned int gen_no; // generation number (cached)
62 bdescr * blocks; // blocks in this step
63 unsigned int n_blocks; // number of blocks
64 unsigned int n_words; // number of words
66 struct step_ * to; // destination step for live objects
68 bdescr * large_objects; // large objects (doubly linked)
69 unsigned int n_large_blocks; // no. of blocks used by large objs
71 StgTSO * threads; // threads in this step
72 // linked via global_link
74 // ------------------------------------
75 // Fields below are used during GC only
77 // During GC, if we are collecting this step, blocks and n_blocks
78 // are copied into the following two fields. After GC, these blocks
81 #if defined(THREADED_RTS)
82 char pad[128]; // make sure the following is
83 // on a separate cache line.
84 SpinLock sync_todo; // lock for todos
85 SpinLock sync_large_objects; // lock for large_objects
86 // and scavenged_large_objects
89 int mark; // mark (not copy)? (old gen only)
90 int compact; // compact (not sweep)? (old gen only)
92 bdescr * old_blocks; // bdescr of first from-space block
93 unsigned int n_old_blocks; // number of blocks in from-space
94 unsigned int live_estimate; // for sweeping: estimate of live data
96 bdescr * todos; // blocks waiting to be scavenged
98 unsigned int n_todos; // count of above
100 bdescr * part_blocks; // partially-full scanned blocks
101 unsigned int n_part_blocks; // count of above
103 bdescr * scavenged_large_objects; // live large objs after GC (d-link)
104 unsigned int n_scavenged_large_blocks; // size (not count) of above
106 bdescr * bitmap; // bitmap for compacting collection
108 StgTSO * old_threads; // presumably pre-GC snapshot of 'threads' — confirm
/*
 * A generation: an array of steps plus the per-generation mutable list
 * (see the header comment above: G0 has no mut_list).
 * NOTE(review): the closing "} generation;" is not visible in this chunk.
 */
113 typedef struct generation_ {
114 unsigned int no; // generation number
115 step * steps; // steps
116 unsigned int n_steps; // number of steps
117 unsigned int max_blocks; // max blocks in step 0
118 bdescr *mut_list; // mut objects in this gen (not G0)
121 unsigned int collections; // statistics counters below — names suggest
122 unsigned int par_collections; // GC counts/promotion failures; confirm in GC.c
123 unsigned int failed_promotions;
125 // temporary use during GC:
126 bdescr *saved_mut_list;
129 extern generation * RTS_VAR(generations);
131 extern generation * RTS_VAR(g0);
132 extern step * RTS_VAR(g0s0);
133 extern generation * RTS_VAR(oldest_gen);
134 extern step * RTS_VAR(all_steps);
135 extern nat RTS_VAR(total_steps);
137 /* -----------------------------------------------------------------------------
138 Initialisation / De-initialisation
139 -------------------------------------------------------------------------- */
141 extern void initStorage(void);
142 extern void exitStorage(void);
143 extern void freeStorage(void);
145 /* -----------------------------------------------------------------------------
148 StgPtr allocateInGen(generation *g, nat n)
149 Allocates a chunk of contiguous store
150 n words long in generation g,
151 returning a pointer to the first word.
154 StgPtr allocate(nat n) Equivalent to allocateInGen(g0,n)
156 StgPtr allocateLocal(Capability *cap, nat n)
157 Allocates memory from the nursery in
158 the current Capability. This can be
159 done without taking a global lock,
162 StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
163 n words long, which is at a fixed
164 address (won't be moved by GC).
165 Returns a pointer to the first word.
168 NOTE: the GC can't in general handle
169 pinned objects, so allocatePinned()
170 can only be used for ByteArrays at the
173 Don't forget to TICK_ALLOC_XXX(...)
174 after calling allocate or
175 allocatePinned, for the
176 benefit of the ticky-ticky profiler.
178 rtsBool doYouWantToGC(void) Returns True if the storage manager is
179 ready to perform a GC, False otherwise.
181 lnat allocatedBytes(void) Returns the number of bytes allocated
182 via allocate() since the last GC.
183 Used in the reporting of statistics.
185 -------------------------------------------------------------------------- */
187 extern StgPtr allocate ( lnat n );
188 extern StgPtr allocateInGen ( generation *g, lnat n );
189 extern StgPtr allocateLocal ( Capability *cap, lnat n );
190 extern StgPtr allocatePinned ( lnat n );
191 extern lnat allocatedBytes ( void );
193 extern bdescr * RTS_VAR(small_alloc_list);
194 extern bdescr * RTS_VAR(large_alloc_list);
195 extern bdescr * RTS_VAR(pinned_object_block);
197 extern nat RTS_VAR(alloc_blocks);
198 extern nat RTS_VAR(alloc_blocks_lim);
/*
 * True when the number of allocated blocks has reached the trigger
 * limit, i.e. the storage manager is ready for a GC.
 * NOTE(review): the function's braces are missing from this chunk.
 */
200 INLINE_HEADER rtsBool
201 doYouWantToGC( void )
203 return (alloc_blocks >= alloc_blocks_lim);
206 /* memory allocator for executable memory */
207 extern void* allocateExec(unsigned int len, void **exec_addr);
208 extern void freeExec (void *p);
210 /* for splitting blocks groups in two */
211 extern bdescr * splitLargeBlock (bdescr *bd, nat blocks);
213 /* -----------------------------------------------------------------------------
214 Performing Garbage Collection
216 GarbageCollect(get_roots) Performs a garbage collection.
217 'get_roots' is called to find all the
218 roots that the system knows about.
221 -------------------------------------------------------------------------- */
223 extern void GarbageCollect(rtsBool force_major_gc, nat gc_type, Capability *cap);
225 /* -----------------------------------------------------------------------------
226 Generational garbage collection support
228 recordMutable(StgPtr p) Informs the garbage collector that a
229 previously immutable object has
230 become (permanently) mutable. Used
231 by thawArray and similar.
233 updateWithIndirection(p1,p2) Updates the object at p1 with an
234 indirection pointing to p2. This is
235 normally called for objects in an old
236 generation (>0) when they are updated.
238 updateWithPermIndirection(p1,p2) As above but uses a permanent indir.
240 -------------------------------------------------------------------------- */
243 * Storage manager mutex
245 #if defined(THREADED_RTS)
246 extern Mutex sm_mutex;
247 extern Mutex atomic_modify_mutvar_mutex;
250 #if defined(THREADED_RTS)
251 #define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex);
252 #define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex);
253 #define ASSERT_SM_LOCK() ASSERT_LOCK_HELD(&sm_mutex);
255 #define ACQUIRE_SM_LOCK
256 #define RELEASE_SM_LOCK
257 #define ASSERT_SM_LOCK()
/*
 * recordMutableGen: append closure p to generation gen_no's mutable
 * list, starting a fresh block when the current one is full.
 * The *Lock variants presumably take sm_mutex around the unlocked
 * versions — confirm; this chunk is missing several lines (return
 * types, braces, and the bodies of the locking wrappers).
 */
263 recordMutableGen(StgClosure *p, nat gen_no)
267 bd = generations[gen_no].mut_list;
268 if (bd->free >= bd->start + BLOCK_SIZE_W) {
// current block is full: chain on a newly allocated block
270 new_bd = allocBlock();
273 generations[gen_no].mut_list = bd;
// write the closure pointer into the next free slot
275 *bd->free++ = (StgWord)p;
280 recordMutableGenLock(StgClosure *p, nat gen_no)
283 recordMutableGen(p,gen_no);
288 recordMutable(StgClosure *p)
291 ASSERT(closure_MUTABLE(p));
// G0 is always collected, so only older generations need recording
293 if (bd->gen_no > 0) recordMutableGen(p, bd->gen_no);
297 recordMutableLock(StgClosure *p)
304 #endif // !IN_STG_CODE
306 /* -----------------------------------------------------------------------------
307 The CAF table - used to let us revert CAFs in GHCi
308 -------------------------------------------------------------------------- */
310 /* set to disable CAF garbage collection in GHCi. */
311 /* (needed when dynamic libraries are used). */
312 extern rtsBool keepCAFs;
314 /* -----------------------------------------------------------------------------
315 This is the write barrier for MUT_VARs, a.k.a. IORefs. A
316 MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
317 is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
318 and is put on the mutable list.
319 -------------------------------------------------------------------------- */
321 void dirty_MUT_VAR(StgRegTable *reg, StgClosure *p);
323 /* -----------------------------------------------------------------------------
324 DEBUGGING predicates for pointers
326 LOOKS_LIKE_INFO_PTR(p) returns False if p is definitely not an info ptr
327 LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr
329 These macros are complete but not sound. That is, they might
330 return false positives. Do not rely on them to distinguish info
331 pointers from closure pointers, for example.
333 We don't use address-space predicates these days, for portability
334 reasons, and the fact that code/data can be scattered about the
335 address space in a dynamically-linked environment. Our best option
336 is to look at the alleged info table and see whether it seems to
338 -------------------------------------------------------------------------- */
340 INLINE_HEADER rtsBool LOOKS_LIKE_INFO_PTR (StgWord p);
341 INLINE_HEADER rtsBool LOOKS_LIKE_CLOSURE_PTR (void *p); // XXX StgClosure*
343 /* -----------------------------------------------------------------------------
344 Macros for calculating how big a closure will be (used during allocation)
345 -------------------------------------------------------------------------- */
347 INLINE_HEADER StgOffset PAP_sizeW ( nat n_args )
348 { return sizeofW(StgPAP) + n_args; }
350 INLINE_HEADER StgOffset AP_sizeW ( nat n_args )
351 { return sizeofW(StgAP) + n_args; }
353 INLINE_HEADER StgOffset AP_STACK_sizeW ( nat size )
354 { return sizeofW(StgAP_STACK) + size; }
356 INLINE_HEADER StgOffset CONSTR_sizeW( nat p, nat np )
357 { return sizeofW(StgHeader) + p + np; }
359 INLINE_HEADER StgOffset THUNK_SELECTOR_sizeW ( void )
360 { return sizeofW(StgSelector); }
362 INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void )
363 { return sizeofW(StgHeader)+MIN_PAYLOAD_SIZE; }
365 /* --------------------------------------------------------------------------
367 ------------------------------------------------------------------------*/
369 INLINE_HEADER StgOffset sizeW_fromITBL( const StgInfoTable* itbl )
370 { return sizeofW(StgClosure)
371 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
372 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
374 INLINE_HEADER StgOffset thunk_sizeW_fromITBL( const StgInfoTable* itbl )
375 { return sizeofW(StgThunk)
376 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
377 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
379 INLINE_HEADER StgOffset ap_stack_sizeW( StgAP_STACK* x )
380 { return AP_STACK_sizeW(x->size); }
382 INLINE_HEADER StgOffset ap_sizeW( StgAP* x )
383 { return AP_sizeW(x->n_args); }
385 INLINE_HEADER StgOffset pap_sizeW( StgPAP* x )
386 { return PAP_sizeW(x->n_args); }
388 INLINE_HEADER StgOffset arr_words_sizeW( StgArrWords* x )
389 { return sizeofW(StgArrWords) + x->words; }
391 INLINE_HEADER StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
392 { return sizeofW(StgMutArrPtrs) + x->ptrs; }
394 INLINE_HEADER StgWord tso_sizeW ( StgTSO *tso )
395 { return TSO_STRUCT_SIZEW + tso->stack_size; }
397 INLINE_HEADER StgWord bco_sizeW ( StgBCO *bco )
398 { return bco->size; }
/*
 * Size in words of the closure p, dispatching on the closure type from
 * the supplied info table and delegating to the sizeW helpers above.
 * NOTE(review): this chunk is missing many lines — the return type,
 * braces, and most of the 'case' labels are not visible here.
 */
401 closure_sizeW_ (StgClosure *p, StgInfoTable *info)
403 switch (info->type) {
406 return sizeofW(StgThunk) + 1;
411 return sizeofW(StgHeader) + 1;
415 return sizeofW(StgThunk) + 2;
422 return sizeofW(StgHeader) + 2;
424 return thunk_sizeW_fromITBL(info);
426 return THUNK_SELECTOR_sizeW();
428 return ap_stack_sizeW((StgAP_STACK *)p);
430 return ap_sizeW((StgAP *)p);
432 return pap_sizeW((StgPAP *)p);
436 case IND_OLDGEN_PERM:
437 return sizeofW(StgInd);
439 return arr_words_sizeW((StgArrWords *)p);
440 case MUT_ARR_PTRS_CLEAN:
441 case MUT_ARR_PTRS_DIRTY:
442 case MUT_ARR_PTRS_FROZEN:
443 case MUT_ARR_PTRS_FROZEN0:
444 return mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
446 return tso_sizeW((StgTSO *)p);
448 return bco_sizeW((StgBCO *)p);
449 case TVAR_WATCH_QUEUE:
450 return sizeofW(StgTVarWatchQueue);
452 return sizeofW(StgTVar);
454 return sizeofW(StgTRecChunk);
456 return sizeofW(StgTRecHeader);
457 case ATOMIC_INVARIANT:
458 return sizeofW(StgAtomicInvariant);
459 case INVARIANT_CHECK_QUEUE:
460 return sizeofW(StgInvariantCheckQueue);
// fallback: compute the size from the info table's layout
462 return sizeW_fromITBL(info);
466 // The definitive way to find the size, in words, of a heap-allocated closure
// Convenience wrapper: fetch the info table, then size via closure_sizeW_.
// NOTE(review): return type and braces are missing from this chunk.
468 closure_sizeW (StgClosure *p)
470 return closure_sizeW_(p, get_itbl(p));
473 /* -----------------------------------------------------------------------------
474 Sizes of stack frames
475 -------------------------------------------------------------------------- */
/*
 * Size in words of the stack frame 'frame', dispatching on its return
 * info table type (RET_DYN, RET_FUN, large-bitmap, BCO, small-bitmap).
 * NOTE(review): the 'case' labels and braces are missing from this
 * chunk; the visible returns are the per-frame-kind size formulas.
 */
477 INLINE_HEADER StgWord stack_frame_sizeW( StgClosure *frame )
479 StgRetInfoTable *info;
481 info = get_ret_itbl(frame);
482 switch (info->i.type) {
486 StgRetDyn *dyn = (StgRetDyn *)frame;
487 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
488 RET_DYN_NONPTR_REGS_SIZE +
489 RET_DYN_PTRS(dyn->liveness) + RET_DYN_NONPTRS(dyn->liveness);
493 return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size;
496 return 1 + GET_LARGE_BITMAP(&info->i)->size;
499 return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]);
502 return 1 + BITMAP_SIZE(info->i.layout.bitmap);
506 /* -----------------------------------------------------------------------------
508 -------------------------------------------------------------------------- */
510 extern void allocNurseries ( void );
511 extern void resetNurseries ( void );
512 extern void resizeNurseries ( nat blocks );
513 extern void resizeNurseriesFixed ( nat blocks );
514 extern lnat countNurseryBlocks ( void );
517 /* -----------------------------------------------------------------------------
519 -------------------------------------------------------------------------- */
521 typedef void (*evac_fn)(void *user, StgClosure **root);
523 extern void threadPaused ( Capability *cap, StgTSO * );
524 extern StgClosure * isAlive ( StgClosure *p );
525 extern void markCAFs ( evac_fn evac, void *user );
526 extern void GetRoots ( evac_fn evac, void *user );
528 /* -----------------------------------------------------------------------------
529 Stats 'n' DEBUG stuff
530 -------------------------------------------------------------------------- */
532 extern ullong RTS_VAR(total_allocated);
534 extern lnat calcAllocated ( void );
535 extern lnat calcLiveBlocks ( void );
536 extern lnat calcLiveWords ( void );
537 extern lnat countOccupied ( bdescr *bd );
538 extern lnat calcNeeded ( void );
541 extern void memInventory(rtsBool show);
542 extern void checkSanity(void);
543 extern nat countBlocks(bdescr *);
544 extern void checkNurserySanity( step *stp );
548 void printMutOnceList(generation *gen);
549 void printMutableList(generation *gen);
552 /* ----------------------------------------------------------------------------
553 Storage manager internal APIs and globals
554 ------------------------------------------------------------------------- */
556 #define END_OF_STATIC_LIST stgCast(StgClosure*,1)
558 extern void newDynCAF(StgClosure *);
560 extern void move_TSO(StgTSO *src, StgTSO *dest);
561 extern StgTSO *relocate_stack(StgTSO *dest, ptrdiff_t diff);
563 extern StgWeak * RTS_VAR(old_weak_ptr_list);
564 extern StgWeak * RTS_VAR(weak_ptr_list);
565 extern StgClosure * RTS_VAR(caf_list);
566 extern StgClosure * RTS_VAR(revertible_caf_list);
567 extern StgTSO * RTS_VAR(resurrected_threads);
/* Forwarding pointers are tagged by setting the low bit of the address;
 * MK_ sets the tag, UN_ strips it, IS_ tests for it. */
569 #define IS_FORWARDING_PTR(p) ((((StgWord)p) & 1) != 0)
570 #define MK_FORWARDING_PTR(p) (((StgWord)p) | 1)
571 #define UN_FORWARDING_PTR(p) (((StgWord)p) - 1)
/*
 * Heuristic sanity predicates (see comment block above: may return
 * false positives, never false negatives).
 * NOTE(review): the functions' braces are missing from this chunk.
 */
// p is plausibly an info pointer: its struct has a closure type in range.
573 INLINE_HEADER rtsBool LOOKS_LIKE_INFO_PTR_NOT_NULL (StgWord p)
575 StgInfoTable *info = INFO_PTR_TO_STRUCT(p);
576 return info->type != INVALID_OBJECT && info->type < N_CLOSURE_TYPES;
// Non-NULL and either a (low-bit tagged) forwarding pointer or a
// plausible info pointer.
579 INLINE_HEADER rtsBool LOOKS_LIKE_INFO_PTR (StgWord p)
581 return p && (IS_FORWARDING_PTR(p) || LOOKS_LIKE_INFO_PTR_NOT_NULL(p));
// p is plausibly a closure pointer: untag it and check its info pointer.
584 INLINE_HEADER rtsBool LOOKS_LIKE_CLOSURE_PTR (void *p)
586 return LOOKS_LIKE_INFO_PTR((StgWord)(UNTAG_CLOSURE((StgClosure *)(p)))->header.info);
589 #endif /* STORAGE_H */