1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
5 * External Storage Manager Interface
7 * ---------------------------------------------------------------------------*/
14 /* -----------------------------------------------------------------------------
17 * We support an arbitrary number of generations, with an arbitrary number
18 * of steps per generation. Notes (in no particular order):
20 * - all generations except the oldest should have two steps. This gives
21 * objects a decent chance to age before being promoted, and in
22 * particular will ensure that we don't end up with too many
23 * thunks being updated in older generations.
25 * - the oldest generation has one step. There's no point in aging
26 * objects in the oldest generation.
28 * - generation 0, step 0 (G0S0) is the allocation area. It is given
29 * a fixed set of blocks during initialisation, and these blocks
32 * - during garbage collection, each step which is an evacuation
33 * destination (i.e. all steps except G0S0) is allocated a to-space.
34 * evacuated objects are allocated into the step's to-space until
35 * GC is finished, when the original step's contents may be freed
36 * and replaced by the to-space.
38 * - the mutable-list is per-generation (not per-step). G0 doesn't
39 * have one (since every garbage collection collects at least G0).
41 * - block descriptors contain pointers to both the step and the
42 * generation that the block belongs to, for convenience.
44 * - static objects are stored in per-generation lists. See GC.c for
45 * details of how we collect CAFs in the generational scheme.
47 * - large objects are per-step, and are promoted in the same way
48 * as small objects, except that we may allocate large objects into
49 * generation 1 initially.
51 * ------------------------------------------------------------------------- */
53 typedef struct step_ {
54 unsigned int no; /* step number */
55 bdescr * blocks; /* blocks in this step */
56 unsigned int n_blocks; /* number of blocks */
57 struct step_ * to; /* destination step for live objects */
58 struct generation_ * gen; /* generation this step belongs to */
59 unsigned int gen_no; /* generation number (cached) */
60 bdescr * large_objects; /* large objects (doubly linked) */
61 unsigned int n_large_blocks; /* no. of blocks used by large objs */
62 int is_compacted; /* compact this step? (old gen only) */
64 /* temporary use during GC: */
65 StgPtr hp; /* next free locn in to-space */
66 StgPtr hpLim; /* end of current to-space block */
67 bdescr * hp_bd; /* bdescr of current to-space block */
68 bdescr * to_blocks; /* bdescr of first to-space block */
69 unsigned int n_to_blocks; /* number of blocks in to-space */
70 bdescr * scan_bd; /* block currently being scanned */
71 StgPtr scan; /* scan pointer in current block */
72 bdescr * new_large_objects; /* large objects collected so far */
73 bdescr * scavenged_large_objects; /* live large objs after GC (d-link) */
74 unsigned int n_scavenged_large_blocks;/* size of above */
75 bdescr * bitmap; /* bitmap for compacting collection */
78 typedef struct generation_ {
79 unsigned int no; /* generation number */
80 step * steps; /* steps */
81 unsigned int n_steps; /* number of steps */
82 unsigned int max_blocks; /* max blocks in step 0 */
83 bdescr *mut_list; /* mut objects in this gen (not G0)*/
85 /* temporary use during GC: */
86 bdescr *saved_mut_list;
88 /* stats information */
89 unsigned int collections;
90 unsigned int failed_promotions;
93 extern generation * RTS_VAR(generations);
95 extern generation * RTS_VAR(g0);
96 extern step * RTS_VAR(g0s0);
97 extern generation * RTS_VAR(oldest_gen);
99 /* -----------------------------------------------------------------------------
100 Initialisation / De-initialisation
101 -------------------------------------------------------------------------- */
103 extern void initStorage(void);
104 extern void exitStorage(void);
106 /* -----------------------------------------------------------------------------
109 StgPtr allocate(nat n) Allocates a chunk of contiguous store
110 n words long, returning a pointer to
111 the first word. Always succeeds.
113 StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
114 n words long, which is at a fixed
115 address (won't be moved by GC).
116 Returns a pointer to the first word.
119 NOTE: the GC can't in general handle
120 pinned objects, so allocatePinned()
121 can only be used for ByteArrays at the moment.
124 Don't forget to TICK_ALLOC_XXX(...)
125 after calling allocate or
126 allocatePinned, for the
127 benefit of the ticky-ticky profiler.
129 rtsBool doYouWantToGC(void) Returns True if the storage manager is
130 ready to perform a GC, False otherwise.
132 lnat allocated_bytes(void) Returns the number of bytes allocated
133 via allocate() since the last GC.
134 Used in the reporting of statistics.
136 SMP: allocate and doYouWantToGC can be used from STG code, they are
137 surrounded by a mutex.
138 -------------------------------------------------------------------------- */
140 extern StgPtr allocate ( nat n );
141 extern StgPtr allocatePinned ( nat n );
142 extern lnat allocated_bytes ( void );
144 extern bdescr * RTS_VAR(small_alloc_list);
145 extern bdescr * RTS_VAR(large_alloc_list);
146 extern bdescr * RTS_VAR(pinned_object_block);
148 extern StgPtr RTS_VAR(alloc_Hp);
149 extern StgPtr RTS_VAR(alloc_HpLim);
151 extern nat RTS_VAR(alloc_blocks);
152 extern nat RTS_VAR(alloc_blocks_lim);
154 INLINE_HEADER rtsBool
155 doYouWantToGC( void )
157 return (alloc_blocks >= alloc_blocks_lim);
160 /* -----------------------------------------------------------------------------
161 Performing Garbage Collection
163 GarbageCollect(get_roots) Performs a garbage collection.
164 'get_roots' is called to find all the
165 roots that the system knows about.
167 StgClosure Called by get_roots on each root.
168 MarkRoot(StgClosure *p) Returns the new location of the root.
169 -------------------------------------------------------------------------- */
171 extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
173 /* -----------------------------------------------------------------------------
174 Generational garbage collection support
176 recordMutable(StgPtr p) Informs the garbage collector that a
177 previously immutable object has
178 become (permanently) mutable. Used
179 by thawArray and similar.
181 updateWithIndirection(p1,p2) Updates the object at p1 with an
182 indirection pointing to p2. This is
183 normally called for objects in an old
184 generation (>0) when they are updated.
186 updateWithPermIndirection(p1,p2) As above but uses a permanent indir.
188 -------------------------------------------------------------------------- */
190 /* ToDo: shouldn't recordMutable acquire some
191 * kind of lock in the SMP case? Or do we need per-processor mutable lists? */
195 recordMutableGen(StgClosure *p, generation *gen)
200 if (bd->free >= bd->start + BLOCK_SIZE_W) {
202 new_bd = allocBlock();
207 *bd->free++ = (StgWord)p;
211 recordMutable(StgClosure *p)
214 ASSERT(closure_MUTABLE(p));
216 if (bd->gen_no > 0) recordMutableGen(p, &RTS_DEREF(generations)[bd->gen_no]);
219 /* -----------------------------------------------------------------------------
220 The CAF table - used to let us revert CAFs in GHCi
221 -------------------------------------------------------------------------- */
223 /* set to disable CAF garbage collection in GHCi. */
224 /* (needed when dynamic libraries are used). */
225 extern rtsBool keepCAFs;
227 /* -----------------------------------------------------------------------------
228 DEBUGGING predicates for pointers
230 LOOKS_LIKE_INFO_PTR(p) returns False if p is definitely not an info ptr
231 LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr
233 These macros are complete but not sound. That is, they might
234 return false positives. Do not rely on them to distinguish info
235 pointers from closure pointers, for example.
237 We don't use address-space predicates these days, for portability
238 reasons, and the fact that code/data can be scattered about the
239 address space in a dynamically-linked environment. Our best option
240 is to look at the alleged info table and see whether it seems to make sense.
242 -------------------------------------------------------------------------- */
244 #define LOOKS_LIKE_INFO_PTR(p) \
245 (p && ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type != INVALID_OBJECT && \
246 ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type < N_CLOSURE_TYPES)
248 #define LOOKS_LIKE_CLOSURE_PTR(p) \
249 (LOOKS_LIKE_INFO_PTR(((StgClosure *)(p))->header.info))
251 /* -----------------------------------------------------------------------------
252 Macros for calculating how big a closure will be (used during allocation)
253 -------------------------------------------------------------------------- */
255 INLINE_HEADER StgOffset PAP_sizeW ( nat n_args )
256 { return sizeofW(StgPAP) + n_args; }
258 INLINE_HEADER StgOffset AP_sizeW ( nat n_args )
259 { return sizeofW(StgAP) + n_args; }
261 INLINE_HEADER StgOffset AP_STACK_sizeW ( nat size )
262 { return sizeofW(StgAP_STACK) + size; }
264 INLINE_HEADER StgOffset CONSTR_sizeW( nat p, nat np )
265 { return sizeofW(StgHeader) + p + np; }
267 INLINE_HEADER StgOffset THUNK_SELECTOR_sizeW ( void )
268 { return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgSelector)); }
270 INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void )
271 { return sizeofW(StgHeader)+MIN_UPD_SIZE; }
273 /* --------------------------------------------------------------------------
275 ------------------------------------------------------------------------*/
277 INLINE_HEADER StgOffset sizeW_fromITBL( const StgInfoTable* itbl )
278 { return sizeofW(StgClosure)
279 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
280 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
282 INLINE_HEADER StgOffset thunk_sizeW_fromITBL( const StgInfoTable* itbl )
283 { return sizeofW(StgThunk)
284 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
285 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
287 INLINE_HEADER StgOffset ap_stack_sizeW( StgAP_STACK* x )
288 { return AP_STACK_sizeW(x->size); }
290 INLINE_HEADER StgOffset ap_sizeW( StgAP* x )
291 { return AP_sizeW(x->n_args); }
293 INLINE_HEADER StgOffset pap_sizeW( StgPAP* x )
294 { return PAP_sizeW(x->n_args); }
296 INLINE_HEADER StgOffset arr_words_sizeW( StgArrWords* x )
297 { return sizeofW(StgArrWords) + x->words; }
299 INLINE_HEADER StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
300 { return sizeofW(StgMutArrPtrs) + x->ptrs; }
302 INLINE_HEADER StgWord tso_sizeW ( StgTSO *tso )
303 { return TSO_STRUCT_SIZEW + tso->stack_size; }
305 INLINE_HEADER StgWord bco_sizeW ( StgBCO *bco )
306 { return bco->size; }
308 /* -----------------------------------------------------------------------------
309 Sizes of stack frames
310 -------------------------------------------------------------------------- */
312 INLINE_HEADER StgWord stack_frame_sizeW( StgClosure *frame )
314 StgRetInfoTable *info;
316 info = get_ret_itbl(frame);
317 switch (info->i.type) {
321 StgRetDyn *dyn = (StgRetDyn *)frame;
322 return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
323 RET_DYN_NONPTR_REGS_SIZE +
324 RET_DYN_PTRS(dyn->liveness) + RET_DYN_NONPTRS(dyn->liveness);
328 return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size;
332 return 1 + GET_LARGE_BITMAP(&info->i)->size;
335 return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]);
338 return 1 + BITMAP_SIZE(info->i.layout.bitmap);
342 /* -----------------------------------------------------------------------------
344 -------------------------------------------------------------------------- */
346 extern void allocNurseries ( void );
347 extern void resetNurseries ( void );
348 extern void resizeNurseries ( nat blocks );
349 extern void tidyAllocateLists ( void );
350 extern lnat countNurseryBlocks ( void );
352 /* -----------------------------------------------------------------------------
354 -------------------------------------------------------------------------- */
356 extern void threadPaused ( StgTSO * );
357 extern StgClosure * isAlive ( StgClosure *p );
358 extern void markCAFs ( evac_fn evac );
360 /* -----------------------------------------------------------------------------
361 Stats 'n' DEBUG stuff
362 -------------------------------------------------------------------------- */
364 extern ullong RTS_VAR(total_allocated);
366 extern lnat calcAllocated ( void );
367 extern lnat calcLive ( void );
368 extern lnat calcNeeded ( void );
371 extern void memInventory(void);
372 extern void checkSanity(void);
373 extern nat countBlocks(bdescr *);
377 void printMutOnceList(generation *gen);
378 void printMutableList(generation *gen);
381 /* ----------------------------------------------------------------------------
382 Storage manager internal APIs and globals
383 ------------------------------------------------------------------------- */
385 #define END_OF_STATIC_LIST stgCast(StgClosure*,1)
387 extern void newDynCAF(StgClosure *);
389 extern void move_TSO(StgTSO *src, StgTSO *dest);
390 extern StgTSO *relocate_stack(StgTSO *dest, ptrdiff_t diff);
392 extern StgClosure * RTS_VAR(static_objects);
393 extern StgClosure * RTS_VAR(scavenged_static_objects);
394 extern StgWeak * RTS_VAR(old_weak_ptr_list);
395 extern StgWeak * RTS_VAR(weak_ptr_list);
396 extern StgClosure * RTS_VAR(caf_list);
397 extern StgClosure * RTS_VAR(revertible_caf_list);
398 extern StgTSO * RTS_VAR(resurrected_threads);
400 #endif /* STORAGE_H */