1 /* -----------------------------------------------------------------------------
2 * $Id: Storage.h,v 1.50 2003/03/26 17:40:57 sof Exp $
4 * (c) The GHC Team, 1998-2002
* External Storage Manager Interface
8 * ---------------------------------------------------------------------------*/
15 #include "BlockAlloc.h"
16 #include "StoragePriv.h"
18 #include "LdvProfile.h"
21 /* -----------------------------------------------------------------------------
22 Initialisation / De-initialisation
23 -------------------------------------------------------------------------- */
/* Initialise the storage manager; call once at RTS start-up. */
extern void initStorage(void);
/* Shut the storage manager down; call once at RTS exit. */
extern void exitStorage(void);
28 /* -----------------------------------------------------------------------------
31 StgPtr allocate(nat n) Allocates a chunk of contiguous store
32 n words long, returning a pointer to
33 the first word. Always succeeds.
35 StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
36 n words long, which is at a fixed
37 address (won't be moved by GC).
38 Returns a pointer to the first word.
41 NOTE: the GC can't in general handle
42 pinned objects, so allocatePinned()
43 can only be used for ByteArrays at the
46 Don't forget to TICK_ALLOC_XXX(...)
47 after calling allocate or
48 allocatePinned, for the
49 benefit of the ticky-ticky profiler.
51 rtsBool doYouWantToGC(void) Returns True if the storage manager is
52 ready to perform a GC, False otherwise.
54 lnat allocated_bytes(void) Returns the number of bytes allocated
55 via allocate() since the last GC.
56 Used in the reporting of statistics.
SMP: allocate and doYouWantToGC can be used from STG code; they are
surrounded by a mutex.
60 -------------------------------------------------------------------------- */
/* Allocate n contiguous words; always succeeds (see block comment above). */
extern StgPtr allocate ( nat n );
/* As allocate(), but the object will not be moved by the GC (pinned). */
extern StgPtr allocatePinned ( nat n );
/* Number of bytes allocated via allocate() since the last GC (statistics). */
extern lnat allocated_bytes ( void );
/* NOTE(review): the enclosing function header (doYouWantToGC, per the block
 * comment above) appears to be elided from this view; this is its body —
 * a GC is wanted once the allocated block count reaches the soft limit. */
return (alloc_blocks >= alloc_blocks_lim);
72 /* -----------------------------------------------------------------------------
73 ExtendNursery(hp,hplim) When hplim is reached, try to grab
74 some more allocation space. Returns
75 False if the allocation space is
76 exhausted, and the application should
77 call GarbageCollect().
78 -------------------------------------------------------------------------- */
/* ExtendNursery(hp,hplim): when hplim is reached, advance CurrentNursery to
 * its successor block and reopen it; yields rtsFalse when no nursery blocks
 * remain and the caller should garbage collect.
 * NOTE(review): the trailing lines of this macro (the rtsTrue result and the
 * closing parenthesis) appear elided from this view — confirm against the
 * full source. */
#define ExtendNursery(hp,hplim) \
CurrentNursery->link == NULL ? rtsFalse : \
(CurrentNursery = CurrentNursery->link, \
OpenNursery(hp,hplim), \
87 /* -----------------------------------------------------------------------------
88 Performing Garbage Collection
90 GarbageCollect(get_roots) Performs a garbage collection.
91 'get_roots' is called to find all the
92 roots that the system knows about.
StgClosure *MarkRoot(StgClosure *p)   Called by get_roots on each root.
                                      Returns the new location of the root.
96 -------------------------------------------------------------------------- */
/* Perform a garbage collection.  get_roots is called to enumerate all known
 * roots (via the supplied evac_fn); force_major_gc requests a full (major)
 * collection rather than a minor one. */
extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
100 /* -----------------------------------------------------------------------------
101 Generational garbage collection support
103 recordMutable(StgPtr p) Informs the garbage collector that a
104 previously immutable object has
105 become (permanently) mutable. Used
106 by thawArray and similar.
108 updateWithIndirection(p1,p2) Updates the object at p1 with an
109 indirection pointing to p2. This is
110 normally called for objects in an old
111 generation (>0) when they are updated.
113 updateWithPermIndirection(p1,p2) As above but uses a permanent indir.
115 -------------------------------------------------------------------------- */
118 * Storage manager mutex
121 extern Mutex sm_mutex;
122 #define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex)
123 #define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex)
125 #define ACQUIRE_SM_LOCK
126 #define RELEASE_SM_LOCK
129 /* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some
130 * kind of lock in the SMP case?
/* recordMutable: inform the GC that the previously-immutable closure p has
 * become (permanently) mutable, by linking it onto its generation's mut_list.
 * NOTE(review): the function header (static inline void ...), the
 * bd = Bdescr(...) line, the surrounding #ifdefs for the two ASSERT variants,
 * and the closing braces appear elided from this view. */
recordMutable(StgMutClosure *p)
/* Either the closure is locked as a WHITEHOLE (SMP) or it must already be
 * marked mutable — two build-flavour variants of the same sanity check. */
ASSERT(p->header.info == &stg_WHITEHOLE_info || closure_MUTABLE(p));
ASSERT(closure_MUTABLE(p));
/* Only old-generation (gen_no > 0) closures need recording; generation 0
 * is scavenged wholesale at every GC anyway. */
if (bd->gen_no > 0) {
p->mut_link = generations[bd->gen_no].mut_list;
generations[bd->gen_no].mut_list = p;
/* recordOldToNewPtrs: record closure p (living in an old generation) on its
 * generation's mut_once_list, so that old-to-new pointers inside it are
 * traced at the next GC.
 * NOTE(review): the function header, bd = Bdescr(...) line and closing braces
 * appear elided from this view. */
recordOldToNewPtrs(StgMutClosure *p)
if (bd->gen_no > 0) {
p->mut_link = generations[bd->gen_no].mut_once_list;
generations[bd->gen_no].mut_once_list = p;
// We zero out the slop when PROFILING is on.

/* updateWithIndirection(info, p1, p2): overwrite the closure at p1 with an
 * indirection to p2.  Fast variant (neither DEBUG nor PROFILING):
 *  - generation 0: plain IND;
 *  - older generations: IND_OLDGEN, and (unless the closure was a
 *    BLACKHOLE_BQ, which is already on a mutable list) p1 is pushed onto its
 *    generation's mut_once_list so the old-to-new pointer gets traced.
 * NOTE(review): some macro lines (opening brace/local decls, the else branch,
 * closing braces) appear elided from this view. */
#if !defined(DEBUG) && !defined(PROFILING)
#define updateWithIndirection(info, p1, p2) \
bd = Bdescr((P_)p1); \
if (bd->gen_no == 0) { \
((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1,&stg_IND_info); \
TICK_UPD_NEW_IND(); \
((StgIndOldGen *)p1)->indirectee = p2; \
if (info != &stg_BLACKHOLE_BQ_info) { \
((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
SET_INFO(p1,&stg_IND_OLDGEN_info); \
TICK_UPD_OLD_IND(); \
#elif defined(PROFILING)
// LDV profiling variant: LDV_recordDead_FILL_SLOP_DYNAMIC(p1) is called
// regardless of the generation p1 lives in (rest of this sentence appears
// elided in this view).  Strictly we would *NOT* need LDV_recordCreate() for
// IND and IND_OLDGEN closures, because they are inherently "used"; but
// omitting it would break the invariant that every closure keeps its
// creation time in its profiling field, so we call LDV_recordCreate() anyway.
#define updateWithIndirection(info, p1, p2) \
LDV_recordDead_FILL_SLOP_DYNAMIC((p1)); \
bd = Bdescr((P_)p1); \
if (bd->gen_no == 0) { \
((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1,&stg_IND_info); \
LDV_recordCreate((p1)); \
TICK_UPD_NEW_IND(); \
((StgIndOldGen *)p1)->indirectee = p2; \
if (info != &stg_BLACKHOLE_BQ_info) { \
((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
SET_INFO(p1,&stg_IND_OLDGEN_info); \
LDV_recordCreate((p1)); \
/* In the DEBUG case, we also zero out the slop of the old closure,
 * so that the sanity checker can tell where the next closure is.
 *
 * Two important invariants: we should never try to update a closure
 * to point to itself, and the closure being updated should not
 * already have been updated (the mutable list will get messed up
 * otherwise).
 * [NOTE(review): the terminator of this comment and several macro lines
 * (opening brace/locals, else branches, closing braces) appear elided from
 * this view; the comment is closed here so the fragment stays readable.] */
#define updateWithIndirection(info, p1, p2) \
ASSERT( p1 != p2 && !closure_IND(p1) ); \
bd = Bdescr((P_)p1); \
if (bd->gen_no == 0) { \
((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1,&stg_IND_info); \
TICK_UPD_NEW_IND(); \
if (info != &stg_BLACKHOLE_BQ_info) { \
StgInfoTable *inf = get_itbl(p1); \
nat np = inf->layout.payload.ptrs, \
nw = inf->layout.payload.nptrs, i; \
if (inf->type != THUNK_SELECTOR) { \
for (i = 0; i < np + nw; i++) { \
((StgClosure *)p1)->payload[i] = 0; \
((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
((StgIndOldGen *)p1)->indirectee = p2; \
SET_INFO(p1,&stg_IND_OLDGEN_info); \
TICK_UPD_OLD_IND(); \
/* Static objects all live in the oldest generation, so an updated static
 * closure is linked directly onto oldest_gen's mut_once_list and becomes an
 * IND_STATIC.
 * [NOTE(review): the terminator of the original comment and some macro lines
 * (opening brace, a conditional guarding the mut_link push, closing brace)
 * appear elided from this view.] */
#define updateWithStaticIndirection(info, p1, p2) \
ASSERT( p1 != p2 && !closure_IND(p1) ); \
ASSERT( ((StgMutClosure*)p1)->mut_link == NULL ); \
((StgMutClosure *)p1)->mut_link = oldest_gen->mut_once_list; \
oldest_gen->mut_once_list = (StgMutClosure *)p1; \
((StgInd *)p1)->indirectee = p2; \
SET_INFO((StgInd *)p1, &stg_IND_STATIC_info); \
TICK_UPD_STATIC_IND(); \
#if defined(TICKY_TICKY) || defined(PROFILING)
/* updateWithPermIndirection: as updateWithIndirection, but installs a
 * *permanent* indirection (IND_PERM / IND_OLDGEN_PERM), used by the profiler
 * and ticky-ticky builds.
 * NOTE(review): the function header line (static inline void ...), the
 * bd = Bdescr(...) line, and several braces appear elided from this view. */
updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *p2)
ASSERT( p1 != p2 && !closure_IND(p1) );
// Destroy the old closure.
// Nb: LDV_* stuff cannot mix with ticky-ticky
LDV_recordDead_FILL_SLOP_DYNAMIC(p1);
if (bd->gen_no == 0) {
((StgInd *)p1)->indirectee = p2;
SET_INFO(p1,&stg_IND_PERM_info);
// We have just created a new closure.
LDV_recordCreate(p1);
TICK_UPD_NEW_PERM_IND(p1);
((StgIndOldGen *)p1)->indirectee = p2;
// Old generation: skip the mut_once_list push for BLACKHOLE_BQ, which is
// already on a mutable list.
if (info != &stg_BLACKHOLE_BQ_info) {
((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
SET_INFO(p1,&stg_IND_OLDGEN_PERM_info);
// We have just created a new closure.
LDV_recordCreate(p1);
TICK_UPD_OLD_PERM_IND();
326 /* -----------------------------------------------------------------------------
327 The CAF table - used to let us revert CAFs in GHCi
328 -------------------------------------------------------------------------- */
/* Revert all entered CAFs back to their unevaluated state (see the CAF-table
 * comment above; used by GHCi when reloading code). */
void revertCAFs( void );
332 /* -----------------------------------------------------------------------------
333 DEBUGGING predicates for pointers
335 LOOKS_LIKE_INFO_PTR(p) returns False if p is definitely not an info ptr
336 LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr
338 These macros are complete but not sound. That is, they might
339 return false positives. Do not rely on them to distinguish info
340 pointers from closure pointers, for example.
342 We don't use address-space predicates these days, for portability
343 reasons, and the fact that code/data can be scattered about the
344 address space in a dynamically-linked environment. Our best option
345 is to look at the alleged info table and see whether it seems to
347 -------------------------------------------------------------------------- */
/* Conservative check: false only when p is definitely NOT an info pointer;
 * may return false positives, so never rely on it to *identify* info
 * pointers.  NOTE: a macro — evaluates p more than once. */
#define LOOKS_LIKE_INFO_PTR(p) \
(p && ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type != INVALID_OBJECT && \
((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type < N_CLOSURE_TYPES)

/* Conservative check: false only when p is definitely NOT a closure pointer
 * (its header must at least look like an info pointer). */
#define LOOKS_LIKE_CLOSURE_PTR(p) \
(LOOKS_LIKE_INFO_PTR(((StgClosure *)(p))->header.info))
356 /* -----------------------------------------------------------------------------
357 Macros for calculating how big a closure will be (used during allocation)
358 -------------------------------------------------------------------------- */
360 static __inline__ StgOffset PAP_sizeW ( nat n_args )
361 { return sizeofW(StgPAP) + n_args; }
363 static __inline__ StgOffset AP_STACK_sizeW ( nat size )
364 { return sizeofW(StgAP_STACK) + size; }
366 static __inline__ StgOffset CONSTR_sizeW( nat p, nat np )
367 { return sizeofW(StgHeader) + p + np; }
369 static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void )
370 { return sizeofW(StgHeader) + MIN_UPD_SIZE; }
372 static __inline__ StgOffset BLACKHOLE_sizeW ( void )
373 { return sizeofW(StgHeader) + MIN_UPD_SIZE; }
375 /* --------------------------------------------------------------------------
377 ------------------------------------------------------------------------*/
379 static __inline__ StgOffset sizeW_fromITBL( const StgInfoTable* itbl )
380 { return sizeofW(StgClosure)
381 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
382 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
384 static __inline__ StgOffset ap_stack_sizeW( StgAP_STACK* x )
385 { return AP_STACK_sizeW(x->size); }
387 static __inline__ StgOffset pap_sizeW( StgPAP* x )
388 { return PAP_sizeW(x->n_args); }
390 static __inline__ StgOffset arr_words_sizeW( StgArrWords* x )
391 { return sizeofW(StgArrWords) + x->words; }
393 static __inline__ StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
394 { return sizeofW(StgMutArrPtrs) + x->ptrs; }
396 static __inline__ StgWord tso_sizeW ( StgTSO *tso )
397 { return TSO_STRUCT_SIZEW + tso->stack_size; }
399 static __inline__ StgWord bco_sizeW ( StgBCO *bco )
400 { return bco->size; }
402 /* -----------------------------------------------------------------------------
403 Sizes of stack frames
404 -------------------------------------------------------------------------- */
/* stack_frame_sizeW: size in words of the stack frame `frame`, dispatching
 * on its return-info-table type.
 * NOTE(review): the opening brace and the switch's `case` labels (presumably
 * the RET_DYN / RET_FUN / large-bitmap / RET_BCO / default small-bitmap
 * cases, matching the returns below) appear elided from this view — confirm
 * against the full source. */
static inline StgWord stack_frame_sizeW( StgClosure *frame )
StgRetInfoTable *info;
info = get_ret_itbl(frame);
switch (info->i.type) {
/* dynamic frame: fixed part plus live pointer and non-pointer words */
StgRetDyn *dyn = (StgRetDyn *)frame;
return sizeofW(StgRetDyn) + RET_DYN_SIZE +
GET_PTRS(dyn->liveness) + GET_NONPTRS(dyn->liveness);
/* function-return frame: header plus its recorded size */
return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size;
/* large-bitmap frame: return-address word plus bitmap size */
return 1 + info->i.layout.large_bitmap->size;
/* BCO frame: return address + BCO pointer, plus the BCO's bitmap size */
return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]);
/* small-bitmap frame: return-address word plus bitmap size */
return 1 + BITMAP_SIZE(info->i.layout.bitmap);
435 /* -----------------------------------------------------------------------------
437 -------------------------------------------------------------------------- */
/* Debugging aids: dump a generation's mut_once_list / mut_list.
 * (Presumably compiled only in DEBUG builds; the surrounding #ifdef appears
 * elided from this view.) */
void printMutOnceList(generation *gen);
void printMutableList(generation *gen);