1 /* -----------------------------------------------------------------------------
2 * $Id: Storage.h,v 1.49 2003/03/24 14:46:57 simonmar Exp $
4 * (c) The GHC Team, 1998-2002
* External Storage Manager Interface
8 * ---------------------------------------------------------------------------*/
15 #include "BlockAlloc.h"
16 #include "StoragePriv.h"
18 #include "LdvProfile.h"
21 /* -----------------------------------------------------------------------------
22 Initialisation / De-initialisation
23 -------------------------------------------------------------------------- */
25 extern void initStorage(void);
26 extern void exitStorage(void);
28 /* -----------------------------------------------------------------------------
31 StgPtr allocate(nat n) Allocates a chunk of contiguous store
32 n words long, returning a pointer to
33 the first word. Always succeeds.
35 StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
36 n words long, which is at a fixed
37 address (won't be moved by GC).
38 Returns a pointer to the first word.
41 NOTE: the GC can't in general handle
42 pinned objects, so allocatePinned()
can only be used for ByteArrays at the moment.
46 Don't forget to TICK_ALLOC_XXX(...)
47 after calling allocate or
48 allocatePinned, for the
49 benefit of the ticky-ticky profiler.
51 rtsBool doYouWantToGC(void) Returns True if the storage manager is
52 ready to perform a GC, False otherwise.
54 lnat allocated_bytes(void) Returns the number of bytes allocated
55 via allocate() since the last GC.
56 Used in the reporting of statistics.
58 SMP: allocate and doYouWantToGC can be used from STG code, they are
59 surrounded by a mutex.
60 -------------------------------------------------------------------------- */
/* Allocation entry points; the contracts (contiguous n-word chunks,
 * pinned variant not moved by GC, bytes-since-last-GC accounting) are
 * spelled out in the block comment above. */
extern StgPtr allocate ( nat n );
extern StgPtr allocatePinned ( nat n );
extern lnat allocated_bytes ( void );
/* NOTE(review): the stranded statement below looks like the body of the
 * inline doYouWantToGC() predicate described above; its function header
 * and braces appear to be missing from this copy of the file -- confirm
 * against the pristine Storage.h before building. */
return (alloc_blocks >= alloc_blocks_lim);
72 /* -----------------------------------------------------------------------------
73 ExtendNursery(hp,hplim) When hplim is reached, try to grab
74 some more allocation space. Returns
75 False if the allocation space is
76 exhausted, and the application should
77 call GarbageCollect().
78 -------------------------------------------------------------------------- */
/* Advance to the next nursery block when hplim is reached; yields
 * rtsFalse when there is no further block (caller should GC).
 * NOTE(review): this expansion looks truncated -- the final value of the
 * second alternative (conventionally rtsTrue, with closing parens) is
 * not visible here, and the trailing backslash on the last visible line
 * splices the following extern declaration into the macro.  Confirm
 * against the pristine Storage.h. */
#define ExtendNursery(hp,hplim) \
(CurrentNursery->free = (P_)(hp)+1, \
CurrentNursery->link == NULL ? rtsFalse : \
(CurrentNursery = CurrentNursery->link, \
OpenNursery(hp,hplim), \
extern void PleaseStopAllocating(void);
89 /* -----------------------------------------------------------------------------
90 Performing Garbage Collection
92 GarbageCollect(get_roots) Performs a garbage collection.
93 'get_roots' is called to find all the
94 roots that the system knows about.
StgClosure *MarkRoot(StgClosure *p)   Called by get_roots on each root;
                                      returns the new location of the root.
98 -------------------------------------------------------------------------- */
100 extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
102 /* -----------------------------------------------------------------------------
103 Generational garbage collection support
105 recordMutable(StgPtr p) Informs the garbage collector that a
106 previously immutable object has
107 become (permanently) mutable. Used
108 by thawArray and similar.
110 updateWithIndirection(p1,p2) Updates the object at p1 with an
111 indirection pointing to p2. This is
112 normally called for objects in an old
113 generation (>0) when they are updated.
115 updateWithPermIndirection(p1,p2) As above but uses a permanent indir.
117 -------------------------------------------------------------------------- */
/* NOTE(review): the comment delimiters around the stray line below, and
 * the SMP #if/#else/#endif that should select between the locking and
 * no-op definitions of ACQUIRE_SM_LOCK/RELEASE_SM_LOCK, appear to be
 * missing from this copy -- as written the two pairs of definitions
 * conflict.  Confirm against the pristine Storage.h. */
* Storage manager mutex
extern Mutex sm_mutex;
#define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex)
#define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex)
#define ACQUIRE_SM_LOCK
#define RELEASE_SM_LOCK
/* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some
 * kind of lock in the SMP case?
 * (NOTE(review): this comment was unterminated in this copy; closed.) */
/* Record that a previously-immutable closure has become mutable, by
 * linking it onto its generation's mut_list so the next GC scavenges
 * it.  Only old-generation (gen_no > 0) closures need recording.
 * NOTE(review): the function header (storage class/return type), the
 * braces, and the declaration/initialisation of 'bd' are not visible in
 * this copy; the two consecutive ASSERTs below are presumably the two
 * branches of a stripped SMP #ifdef -- confirm against the pristine
 * file. */
recordMutable(StgMutClosure *p)
ASSERT(p->header.info == &stg_WHITEHOLE_info || closure_MUTABLE(p));
ASSERT(closure_MUTABLE(p));
if (bd->gen_no > 0) {
p->mut_link = generations[bd->gen_no].mut_list;
generations[bd->gen_no].mut_list = p;
/* Record an old-generation closure holding pointers into a younger
 * generation: link it onto its generation's mut_once_list.
 * NOTE(review): the function header (storage class/return type), the
 * braces, and the declaration/initialisation of 'bd' are not visible in
 * this copy -- confirm against the pristine Storage.h. */
recordOldToNewPtrs(StgMutClosure *p)
if (bd->gen_no > 0) {
p->mut_link = generations[bd->gen_no].mut_once_list;
generations[bd->gen_no].mut_once_list = p;
// We zero out the slop when PROFILING is on.
#if !defined(DEBUG) && !defined(PROFILING)
/* Update closure p1 with an indirection to p2 (fast path, no slop
 * zeroing).  gen 0 closures get a plain IND; older closures get
 * IND_OLDGEN and -- unless p1 was a BLACKHOLE_BQ, presumably because
 * those are already on a mutable list (confirm) -- are linked onto
 * their generation's mut_once_list.
 * NOTE(review): the expansion below looks truncated -- the enclosing
 * block braces, the declaration/initialisation of 'bd', the 'else'
 * between the two branches, and the closing braces are not visible in
 * this copy.  Confirm against the pristine Storage.h. */
#define updateWithIndirection(info, p1, p2) \
bd = Bdescr((P_)p1); \
if (bd->gen_no == 0) { \
((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1,&stg_IND_info); \
TICK_UPD_NEW_IND(); \
((StgIndOldGen *)p1)->indirectee = p2; \
if (info != &stg_BLACKHOLE_BQ_info) { \
((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
SET_INFO(p1,&stg_IND_OLDGEN_info); \
TICK_UPD_OLD_IND(); \
#elif defined(PROFILING)
// We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in
// which p1 resides.  (NOTE(review): this sentence was truncated in this
// copy; completed from context -- confirm.)
// After all, we do *NOT* need to call LDV_recordCreate() for both IND and
// IND_OLDGEN closures because they are inherently used. But, it corrupts
// the invariants that every closure keeps its creation time in the profiling
// field. So, we call LDV_recordCreate().
/* NOTE(review): as in the non-profiling variant, the enclosing block
 * braces, the declaration/initialisation of 'bd', the 'else', and the
 * closing braces of this expansion are not visible in this copy --
 * confirm against the pristine Storage.h. */
#define updateWithIndirection(info, p1, p2) \
LDV_recordDead_FILL_SLOP_DYNAMIC((p1)); \
bd = Bdescr((P_)p1); \
if (bd->gen_no == 0) { \
((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1,&stg_IND_info); \
LDV_recordCreate((p1)); \
TICK_UPD_NEW_IND(); \
((StgIndOldGen *)p1)->indirectee = p2; \
if (info != &stg_BLACKHOLE_BQ_info) { \
((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
SET_INFO(p1,&stg_IND_OLDGEN_info); \
LDV_recordCreate((p1)); \
/* In the DEBUG case, we also zero out the slop of the old closure,
 * so that the sanity checker can tell where the next closure is.
 * Two important invariants: we should never try to update a closure
 * to point to itself, and the closure being updated should not
 * already have been updated (the mutable list will get messed up
 * otherwise).
 * (NOTE(review): this comment was unterminated in this copy; closed,
 * with the final word reconstructed -- confirm.)
 *
 * NOTE(review): the expansion below also looks truncated -- enclosing
 * block braces, the 'bd' declaration, the 'else' branches, and the
 * closing braces are not visible.  Confirm against the pristine file. */
#define updateWithIndirection(info, p1, p2) \
ASSERT( p1 != p2 && !closure_IND(p1) ); \
bd = Bdescr((P_)p1); \
if (bd->gen_no == 0) { \
((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1,&stg_IND_info); \
TICK_UPD_NEW_IND(); \
if (info != &stg_BLACKHOLE_BQ_info) { \
StgInfoTable *inf = get_itbl(p1); \
nat np = inf->layout.payload.ptrs, \
nw = inf->layout.payload.nptrs, i; \
if (inf->type != THUNK_SELECTOR) { \
for (i = 0; i < np + nw; i++) { \
((StgClosure *)p1)->payload[i] = 0; \
((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
((StgIndOldGen *)p1)->indirectee = p2; \
SET_INFO(p1,&stg_IND_OLDGEN_info); \
TICK_UPD_OLD_IND(); \
/* Static objects all live in the oldest generation
 * (NOTE(review): comment was unterminated in this copy; closed here).
 *
 * Updates a static closure with an IND_STATIC indirection and links it
 * onto the oldest generation's mut_once_list.
 * NOTE(review): the expansion below looks truncated -- the enclosing
 * block braces and final closing lines are not visible in this copy. */
#define updateWithStaticIndirection(info, p1, p2) \
ASSERT( p1 != p2 && !closure_IND(p1) ); \
ASSERT( ((StgMutClosure*)p1)->mut_link == NULL ); \
((StgMutClosure *)p1)->mut_link = oldest_gen->mut_once_list; \
oldest_gen->mut_once_list = (StgMutClosure *)p1; \
((StgInd *)p1)->indirectee = p2; \
SET_INFO((StgInd *)p1, &stg_IND_STATIC_info); \
TICK_UPD_STATIC_IND(); \
#if defined(TICKY_TICKY) || defined(PROFILING)
/* As updateWithIndirection, but installs a *permanent* indirection
 * (IND_PERM / IND_OLDGEN_PERM) -- see the section comment above.
 * NOTE(review): the function header (storage class/return type), the
 * opening brace, the declaration/initialisation of 'bd', the 'else',
 * and the closing braces plus #endif are not visible in this copy --
 * confirm against the pristine Storage.h. */
updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *p2)
/* Never update a closure to itself, and never update twice. */
ASSERT( p1 != p2 && !closure_IND(p1) );
// Destroy the old closure.
// Nb: LDV_* stuff cannot mix with ticky-ticky
LDV_recordDead_FILL_SLOP_DYNAMIC(p1);
if (bd->gen_no == 0) {
((StgInd *)p1)->indirectee = p2;
SET_INFO(p1,&stg_IND_PERM_info);
// We have just created a new closure.
LDV_recordCreate(p1);
TICK_UPD_NEW_PERM_IND(p1);
((StgIndOldGen *)p1)->indirectee = p2;
if (info != &stg_BLACKHOLE_BQ_info) {
((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
SET_INFO(p1,&stg_IND_OLDGEN_PERM_info);
// We have just created a new closure.
LDV_recordCreate(p1);
TICK_UPD_OLD_PERM_IND();
328 /* -----------------------------------------------------------------------------
329 The CAF table - used to let us revert CAFs in GHCi
330 -------------------------------------------------------------------------- */
332 void revertCAFs( void );
334 /* -----------------------------------------------------------------------------
335 DEBUGGING predicates for pointers
337 LOOKS_LIKE_INFO_PTR(p) returns False if p is definitely not an info ptr
338 LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr
340 These macros are complete but not sound. That is, they might
341 return false positives. Do not rely on them to distinguish info
342 pointers from closure pointers, for example.
344 We don't use address-space predicates these days, for portability
345 reasons, and the fact that code/data can be scattered about the
346 address space in a dynamically-linked environment. Our best option
is to look at the alleged info table and see whether it seems to
make sense.
349 -------------------------------------------------------------------------- */
/* False only if p is definitely not an info pointer: the alleged info
 * table must have a plausible closure type.  Complete but not sound --
 * false positives are possible (see the section comment above).
 *
 * The parameter is parenthesised at each use so that compound argument
 * expressions expand safely (macro-hygiene fix; previously the first
 * use was bare `p &&`).  Note p is still evaluated up to three times,
 * so avoid arguments with side effects. */
#define LOOKS_LIKE_INFO_PTR(p) \
  ((p) && ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type != INVALID_OBJECT && \
   ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type < N_CLOSURE_TYPES)
/* False only if p is definitely not a closure pointer: a closure's
 * header word must itself look like an info pointer.  Complete but not
 * sound -- false positives are possible (see section comment above). */
#define LOOKS_LIKE_CLOSURE_PTR(p) \
(LOOKS_LIKE_INFO_PTR(((StgClosure *)(p))->header.info))
358 /* -----------------------------------------------------------------------------
359 Macros for calculating how big a closure will be (used during allocation)
360 -------------------------------------------------------------------------- */
362 static __inline__ StgOffset PAP_sizeW ( nat n_args )
363 { return sizeofW(StgPAP) + n_args; }
365 static __inline__ StgOffset AP_STACK_sizeW ( nat size )
366 { return sizeofW(StgAP_STACK) + size; }
368 static __inline__ StgOffset CONSTR_sizeW( nat p, nat np )
369 { return sizeofW(StgHeader) + p + np; }
371 static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void )
372 { return sizeofW(StgHeader) + MIN_UPD_SIZE; }
374 static __inline__ StgOffset BLACKHOLE_sizeW ( void )
375 { return sizeofW(StgHeader) + MIN_UPD_SIZE; }
377 /* --------------------------------------------------------------------------
379 ------------------------------------------------------------------------*/
381 static __inline__ StgOffset sizeW_fromITBL( const StgInfoTable* itbl )
382 { return sizeofW(StgClosure)
383 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
384 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
386 static __inline__ StgOffset ap_stack_sizeW( StgAP_STACK* x )
387 { return AP_STACK_sizeW(x->size); }
389 static __inline__ StgOffset pap_sizeW( StgPAP* x )
390 { return PAP_sizeW(x->n_args); }
392 static __inline__ StgOffset arr_words_sizeW( StgArrWords* x )
393 { return sizeofW(StgArrWords) + x->words; }
395 static __inline__ StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
396 { return sizeofW(StgMutArrPtrs) + x->ptrs; }
398 static __inline__ StgWord tso_sizeW ( StgTSO *tso )
399 { return TSO_STRUCT_SIZEW + tso->stack_size; }
401 static __inline__ StgWord bco_sizeW ( StgBCO *bco )
402 { return bco->size; }
404 /* -----------------------------------------------------------------------------
405 Sizes of stack frames
406 -------------------------------------------------------------------------- */
/* Size in words of a stack frame, decoded from its return-address info
 * table.
 * NOTE(review): this copy looks truncated -- the function braces and
 * the 'case' labels of the switch (presumably RET_DYN, RET_FUN, the
 * large-bitmap frames, RET_BCO, and a default) are not visible.
 * Confirm against the pristine Storage.h. */
static inline StgWord stack_frame_sizeW( StgClosure *frame )
StgRetInfoTable *info;
info = get_ret_itbl(frame);
switch (info->i.type) {
/* RET_DYN-style frame: fixed part plus ptr/nonptr counts packed in
 * the liveness field */
StgRetDyn *dyn = (StgRetDyn *)frame;
return sizeofW(StgRetDyn) + RET_DYN_SIZE +
GET_PTRS(dyn->liveness) + GET_NONPTRS(dyn->liveness);
/* RET_FUN-style frame: fixed part plus recorded size */
return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size;
/* large-bitmap frame: return-address word plus bitmap size */
return 1 + info->i.layout.large_bitmap->size;
/* BCO frame: return address and BCO pointer, plus the BCO's bitmap */
return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]);
/* small-bitmap frame: return-address word plus bitmap size */
return 1 + BITMAP_SIZE(info->i.layout.bitmap);
437 /* -----------------------------------------------------------------------------
439 -------------------------------------------------------------------------- */
442 void printMutOnceList(generation *gen);
443 void printMutableList(generation *gen);