[project @ 2002-12-11 15:36:20 by simonmar]
[ghc-hetmet.git] / ghc / rts / Storage.h
1 /* -----------------------------------------------------------------------------
2  * $Id: Storage.h,v 1.46 2002/12/11 15:36:54 simonmar Exp $
3  *
4  * (c) The GHC Team, 1998-2002
5  *
 * External Storage Manager Interface
7  *
8  * ---------------------------------------------------------------------------*/
9
10 #ifndef STORAGE_H
11 #define STORAGE_H
12
13 #include "Block.h"
14 #include "MBlock.h"
15 #include "BlockAlloc.h"
16 #include "StoragePriv.h"
17 #ifdef PROFILING
18 #include "LdvProfile.h"
19 #endif
20
21 /* -----------------------------------------------------------------------------
22    Initialisation / De-initialisation
23    -------------------------------------------------------------------------- */
24
/* Initialise / shut down the storage manager. */
extern void initStorage(void);
extern void exitStorage(void);
27
28 /* -----------------------------------------------------------------------------
29    Generic allocation
30
31    StgPtr allocate(nat n)       Allocates a chunk of contiguous store
32                                 n words long, returning a pointer to
33                                 the first word.  Always succeeds.
34                                 
35    StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
36                                 n words long, which is at a fixed
37                                 address (won't be moved by GC).  
38                                 Returns a pointer to the first word.
39                                 Always succeeds.
40                                 
41                                 NOTE: the GC can't in general handle
42                                 pinned objects, so allocatePinned()
43                                 can only be used for ByteArrays at the
44                                 moment.
45
46                                 Don't forget to TICK_ALLOC_XXX(...)
47                                 after calling allocate or
48                                 allocatePinned, for the
49                                 benefit of the ticky-ticky profiler.
50
51    rtsBool doYouWantToGC(void)  Returns True if the storage manager is
52                                 ready to perform a GC, False otherwise.
53
54    lnat  allocated_bytes(void)  Returns the number of bytes allocated
55                                 via allocate() since the last GC.
                                Used in the reporting of statistics.
57
58    SMP: allocate and doYouWantToGC can be used from STG code, they are
59    surrounded by a mutex.
60    -------------------------------------------------------------------------- */
61
/* See the block comment above for the contracts of these three:
 * allocate/allocatePinned return a pointer to n contiguous words and
 * always succeed; allocated_bytes reports bytes allocated via
 * allocate() since the last GC. */
extern StgPtr  allocate        ( nat n );
extern StgPtr  allocatePinned  ( nat n );
extern lnat    allocated_bytes ( void );
65
66 static inline rtsBool
67 doYouWantToGC( void )
68 {
69   return (alloc_blocks >= alloc_blocks_lim);
70 }
71
72 /* -----------------------------------------------------------------------------
73    ExtendNursery(hp,hplim)      When hplim is reached, try to grab
74                                 some more allocation space.  Returns
75                                 False if the allocation space is
76                                 exhausted, and the application should
77                                 call GarbageCollect().
78   -------------------------------------------------------------------------- */
79
/* ExtendNursery(hp,hplim): record the current allocation pointer in the
 * current nursery block's descriptor, then try to move on to the next
 * nursery block.  Evaluates to rtsFalse if there is no further block
 * (the caller should then GC); otherwise advances CurrentNursery,
 * re-opens it via OpenNursery(hp,hplim), and evaluates to rtsTrue.
 * NOTE: 'hp' is expanded more than once — avoid side-effecting args. */
#define ExtendNursery(hp,hplim)                 \
  (CurrentNursery->free = (P_)(hp)+1,           \
   CurrentNursery->link == NULL ? rtsFalse :    \
   (CurrentNursery = CurrentNursery->link,      \
    OpenNursery(hp,hplim),                      \
    rtsTrue))
86
/* Request that mutator allocation stop soon (NOTE(review): semantics
 * inferred from the name — confirm against the definition in Storage.c). */
extern void PleaseStopAllocating(void);
88
89 /* -----------------------------------------------------------------------------
90    Performing Garbage Collection
91
   GarbageCollect(get_roots,    Performs a garbage collection.
      force_major_gc)           'get_roots' is called to find all the
                                roots that the system knows about;
                                passing rtsTrue for 'force_major_gc'
                                requests a collection of all generations.
95
96    StgClosure                   Called by get_roots on each root.       
97    MarkRoot(StgClosure *p)      Returns the new location of the root.
98    -------------------------------------------------------------------------- */
99
/* Perform a garbage collection.  'get_roots' is called to enumerate the
 * roots the system knows about; rtsTrue for 'force_major_gc' requests a
 * collection of all generations. */
extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
101
102 /* -----------------------------------------------------------------------------
103    Generational garbage collection support
104
105    recordMutable(StgPtr p)       Informs the garbage collector that a
106                                  previously immutable object has
107                                  become (permanently) mutable.  Used
108                                  by thawArray and similar.
109
110    updateWithIndirection(p1,p2)  Updates the object at p1 with an
111                                  indirection pointing to p2.  This is
112                                  normally called for objects in an old
113                                  generation (>0) when they are updated.
114
115    updateWithPermIndirection(p1,p2)  As above but uses a permanent indir.
116
117    -------------------------------------------------------------------------- */
118
/*
 * Storage manager mutex: in the SMP build it serialises access to the
 * storage manager's shared state (it is taken around the mut_once_list
 * updates below).  In the non-SMP build the lock operations compile
 * away to nothing.
 */
#if defined(SMP)
extern Mutex sm_mutex;
#define ACQUIRE_SM_LOCK   ACQUIRE_LOCK(&sm_mutex)
#define RELEASE_SM_LOCK   RELEASE_LOCK(&sm_mutex)
#else
#define ACQUIRE_SM_LOCK   /* nothing */
#define RELEASE_SM_LOCK   /* nothing */
#endif
130
131 /* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some
132  * kind of lock in the SMP case?
133  */
134 static inline void
135 recordMutable(StgMutClosure *p)
136 {
137   bdescr *bd;
138
139 #ifdef SMP
140   ASSERT(p->header.info == &stg_WHITEHOLE_info || closure_MUTABLE(p));
141 #else
142   ASSERT(closure_MUTABLE(p));
143 #endif
144
145   bd = Bdescr((P_)p);
146   if (bd->gen_no > 0) {
147     p->mut_link = generations[bd->gen_no].mut_list;
148     generations[bd->gen_no].mut_list = p;
149   }
150 }
151
152 static inline void
153 recordOldToNewPtrs(StgMutClosure *p)
154 {
155   bdescr *bd;
156   
157   bd = Bdescr((P_)p);
158   if (bd->gen_no > 0) {
159     p->mut_link = generations[bd->gen_no].mut_once_list;
160     generations[bd->gen_no].mut_once_list = p;
161   }
162 }
163
// @LDV profiling
// We zero out the slop when PROFILING is on.  Three variants of
// updateWithIndirection follow: vanilla, PROFILING-only, and DEBUG.
#if !defined(DEBUG) && !defined(PROFILING)
/* updateWithIndirection(info, p1, p2): overwrite the closure at p1 with
 * an indirection to p2.  'info' is p1's info pointer, read by the
 * caller before the update.
 *  - generation 0: p1 becomes an IND.
 *  - older generations: p1 becomes an IND_OLDGEN and is pushed onto its
 *    generation's mut_once_list under the SM lock — unless it was a
 *    BLACKHOLE_BQ (presumably already on a mutable list; confirm).
 * NOTE: 'p1' is expanded several times — avoid side-effecting args. */
#define updateWithIndirection(info, p1, p2)                             \
  {                                                                     \
    bdescr *bd;                                                         \
                                                                        \
    bd = Bdescr((P_)p1);                                                \
    if (bd->gen_no == 0) {                                              \
      ((StgInd *)p1)->indirectee = p2;                                  \
      SET_INFO(p1,&stg_IND_info);                                       \
      TICK_UPD_NEW_IND();                                               \
    } else {                                                            \
      ((StgIndOldGen *)p1)->indirectee = p2;                            \
      if (info != &stg_BLACKHOLE_BQ_info) {                             \
        ACQUIRE_SM_LOCK;                                                \
        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;    \
        RELEASE_SM_LOCK;                                                \
      }                                                                 \
      SET_INFO(p1,&stg_IND_OLDGEN_info);                                \
      TICK_UPD_OLD_IND();                                               \
    }                                                                   \
  }
#elif defined(PROFILING)
// @LDV profiling
// As the vanilla variant above, plus LDV bookkeeping: we call
// LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in
// which p1 resides, and LDV_recordCreate() on the resulting indirection.
//
// Note:
//   After all, we do *NOT* need to call LDV_recordCreate() for both IND and
//   IND_OLDGEN closures because they are inherently used. But, it corrupts
//   the invariant that every closure keeps its creation time in the profiling
//   field. So, we call LDV_recordCreate().

#define updateWithIndirection(info, p1, p2)                             \
  {                                                                     \
    bdescr *bd;                                                         \
                                                                        \
    LDV_recordDead_FILL_SLOP_DYNAMIC((p1));                             \
    bd = Bdescr((P_)p1);                                                \
    if (bd->gen_no == 0) {                                              \
      ((StgInd *)p1)->indirectee = p2;                                  \
      SET_INFO(p1,&stg_IND_info);                                       \
      LDV_recordCreate((p1));                                           \
      TICK_UPD_NEW_IND();                                               \
    } else {                                                            \
      ((StgIndOldGen *)p1)->indirectee = p2;                            \
      if (info != &stg_BLACKHOLE_BQ_info) {                             \
        ACQUIRE_SM_LOCK;                                                \
        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;    \
        RELEASE_SM_LOCK;                                                \
      }                                                                 \
      SET_INFO(p1,&stg_IND_OLDGEN_info);                                \
      LDV_recordCreate((p1));                                           \
    }                                                                   \
  }
223
#else

/* In the DEBUG case, we also zero out the slop of the old closure,
 * so that the sanity checker can tell where the next closure is.
 *
 * Two important invariants: we should never try to update a closure
 * to point to itself, and the closure being updated should not
 * already have been updated (the mutable list will get messed up
 * otherwise).
 *
 * NB: unlike the other variants, 'indirectee' is written only AFTER the
 * zeroing loop — presumably because the loop sweeps the whole payload,
 * which would clobber an earlier write (NOTE(review): confirm against
 * the StgIndOldGen layout).
 */
#define updateWithIndirection(info, p1, p2)                             \
  {                                                                     \
    bdescr *bd;                                                         \
                                                                        \
    ASSERT( p1 != p2 && !closure_IND(p1) );                             \
    bd = Bdescr((P_)p1);                                                \
    if (bd->gen_no == 0) {                                              \
      ((StgInd *)p1)->indirectee = p2;                                  \
      SET_INFO(p1,&stg_IND_info);                                       \
      TICK_UPD_NEW_IND();                                               \
    } else {                                                            \
      if (info != &stg_BLACKHOLE_BQ_info) {                             \
        {                                                               \
          StgInfoTable *inf = get_itbl(p1);                             \
          nat np = inf->layout.payload.ptrs,                            \
              nw = inf->layout.payload.nptrs, i;                        \
          if (inf->type != THUNK_SELECTOR) {                            \
             for (i = 0; i < np + nw; i++) {                            \
                ((StgClosure *)p1)->payload[i] = 0;                     \
             }                                                          \
          }                                                             \
        }                                                               \
        ACQUIRE_SM_LOCK;                                                \
        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;    \
        RELEASE_SM_LOCK;                                                \
      }                                                                 \
      ((StgIndOldGen *)p1)->indirectee = p2;                            \
      SET_INFO(p1,&stg_IND_OLDGEN_info);                                \
      TICK_UPD_OLD_IND();                                               \
    }                                                                   \
  }
#endif
267
/* updateWithStaticIndirection(info, p1, p2): update the static closure
 * p1 with an indirection to p2.  Static objects all live in the oldest
 * generation, so p1 is pushed onto oldest_gen->mut_once_list (under the
 * SM lock) and its info table becomes IND_STATIC.  The 'info' argument
 * is unused in this variant.  Asserts that p1 is not already updated
 * and not already on a mutable list. */
#define updateWithStaticIndirection(info, p1, p2)                       \
  {                                                                     \
    ASSERT( p1 != p2 && !closure_IND(p1) );                             \
    ASSERT( ((StgMutClosure*)p1)->mut_link == NULL );                   \
                                                                        \
    ACQUIRE_SM_LOCK;                                                    \
    ((StgMutClosure *)p1)->mut_link = oldest_gen->mut_once_list;        \
    oldest_gen->mut_once_list = (StgMutClosure *)p1;                    \
    RELEASE_SM_LOCK;                                                    \
                                                                        \
    ((StgInd *)p1)->indirectee = p2;                                    \
    SET_INFO((StgInd *)p1, &stg_IND_STATIC_info);                       \
    TICK_UPD_STATIC_IND();                                              \
  }
284
#if defined(TICKY_TICKY) || defined(PROFILING)
/* As updateWithIndirection, but uses a permanent indirection
 * (IND_PERM / IND_OLDGEN_PERM), so the indirection is never shorted out
 * by the GC.  Only compiled for ticky-ticky or profiling builds; under
 * PROFILING the death of the old closure and the creation of the new
 * one are recorded for LDV. */
static inline void
updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *p2) 
{
  bdescr *bd;

  /* Never update a closure to point at itself, or twice. */
  ASSERT( p1 != p2 && !closure_IND(p1) );

#ifdef PROFILING
  // @LDV profiling
  // Destroy the old closure.
  // Nb: LDV_* stuff cannot mix with ticky-ticky
  LDV_recordDead_FILL_SLOP_DYNAMIC(p1);
#endif
  bd = Bdescr((P_)p1);
  if (bd->gen_no == 0) {
    ((StgInd *)p1)->indirectee = p2;
    SET_INFO(p1,&stg_IND_PERM_info);
#ifdef PROFILING
    // @LDV profiling
    // We have just created a new closure.
    LDV_recordCreate(p1);
#endif
    TICK_UPD_NEW_PERM_IND(p1);
  } else {
    ((StgIndOldGen *)p1)->indirectee = p2;
    /* Push onto the generation's mut_once_list, except for BLACKHOLE_BQ
     * (presumably already on a mutable list — confirm). */
    if (info != &stg_BLACKHOLE_BQ_info) {
      ACQUIRE_SM_LOCK;
      ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
      generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
      RELEASE_SM_LOCK;
    }
    SET_INFO(p1,&stg_IND_OLDGEN_PERM_info);
#ifdef PROFILING
    // @LDV profiling
    // We have just created a new closure.
    LDV_recordCreate(p1);
#endif
    TICK_UPD_OLD_PERM_IND();
  }
}
#endif
327
328 /* -----------------------------------------------------------------------------
329    The CAF table - used to let us revert CAFs in GHCi
330    -------------------------------------------------------------------------- */
331
/* Revert CAFs — used to let us revert CAFs in GHCi (see comment above). */
void revertCAFs( void );
333
334 /* -----------------------------------------------------------------------------
335    DEBUGGING predicates for pointers
336
337    LOOKS_LIKE_INFO_PTR(p)    returns False if p is definitely not an info ptr
338    LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr
339
340    These macros are complete but not sound.  That is, they might
341    return false positives.  Do not rely on them to distinguish info
342    pointers from closure pointers, for example.
343
344    We don't use address-space predicates these days, for portability
345    reasons, and the fact that code/data can be scattered about the
346    address space in a dynamically-linked environment.  Our best option
347    is to look at the alleged info table and see whether it seems to
348    make sense...
349    -------------------------------------------------------------------------- */
350
/* Returns false if p is definitely not an info pointer: a non-NULL p is
 * accepted when the info table it allegedly points at has a closure
 * type in the valid range.  Complete but not sound — false positives
 * are possible (see the block comment above).
 * Fix: the macro parameter is now fully parenthesised (CERT PRE01-C);
 * the original's bare 'p &&' would parse wrongly for comma- or
 * assignment-expression arguments.
 * NOTE: 'p' is evaluated three times — avoid side-effecting args. */
#define LOOKS_LIKE_INFO_PTR(p) \
   ((p) && ((StgInfoTable *)(INFO_PTR_TO_STRUCT((p))))->type != INVALID_OBJECT && \
    ((StgInfoTable *)(INFO_PTR_TO_STRUCT((p))))->type < N_CLOSURE_TYPES)
354
/* Returns false if p is definitely not a closure pointer: checks that
 * the alleged closure's header info pointer looks like an info pointer. */
#define LOOKS_LIKE_CLOSURE_PTR(p) \
   (LOOKS_LIKE_INFO_PTR(((StgClosure *)(p))->header.info))
357
358 /* -----------------------------------------------------------------------------
359    Macros for calculating how big a closure will be (used during allocation)
360    -------------------------------------------------------------------------- */
361
/* Size in words of a PAP carrying n_args argument words. */
static __inline__ StgOffset PAP_sizeW   ( nat n_args )
{ return sizeofW(StgPAP) + n_args; }
364
/* Size in words of an AP_STACK with a payload of 'size' words. */
static __inline__ StgOffset AP_STACK_sizeW ( nat size )
{ return sizeofW(StgAP_STACK) + size; }
367
/* Size in words of a constructor with p pointer and np non-pointer words. */
static __inline__ StgOffset CONSTR_sizeW( nat p, nat np )
{ return sizeofW(StgHeader) + p + np; }
370
/* Size in words of a THUNK_SELECTOR (header plus minimum update frame slop). */
static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void )
{ return sizeofW(StgHeader) + MIN_UPD_SIZE; }
373
/* Size in words of a BLACKHOLE (header plus minimum update frame slop). */
static __inline__ StgOffset BLACKHOLE_sizeW ( void )
{ return sizeofW(StgHeader) + MIN_UPD_SIZE; }
376
377 /* --------------------------------------------------------------------------
378    Sizes of closures
379    ------------------------------------------------------------------------*/
380
/* Size in words of a closure described by the given info table:
 * header plus its pointer and non-pointer payload. */
static __inline__ StgOffset sizeW_fromITBL( const StgInfoTable* itbl ) 
{ return sizeofW(StgClosure) 
       + sizeofW(StgPtr)  * itbl->layout.payload.ptrs 
       + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
385
/* Size in words of the given AP_STACK closure. */
static __inline__ StgOffset ap_stack_sizeW( StgAP_STACK* x )
{ return AP_STACK_sizeW(x->size); }
388
/* Size in words of the given PAP closure. */
static __inline__ StgOffset pap_sizeW( StgPAP* x )
{ return PAP_sizeW(x->n_args); }
391
/* Size in words of the given ARR_WORDS (byte array) closure. */
static __inline__ StgOffset arr_words_sizeW( StgArrWords* x )
{ return sizeofW(StgArrWords) + x->words; }
394
/* Size in words of the given MUT_ARR_PTRS (mutable pointer array) closure. */
static __inline__ StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
{ return sizeofW(StgMutArrPtrs) + x->ptrs; }
397
/* Size in words of the given TSO, including its stack. */
static __inline__ StgWord tso_sizeW ( StgTSO *tso )
{ return TSO_STRUCT_SIZEW + tso->stack_size; }
400
401 /* -----------------------------------------------------------------------------
402    Sizes of stack frames
403    -------------------------------------------------------------------------- */
404
/* Return the size in words of the stack frame 'frame', determined from
 * its return-address info table. */
static inline StgWord stack_frame_sizeW( StgClosure *frame )
{
    StgRetInfoTable *info;

    info = get_ret_itbl(frame);
    switch (info->i.type) {

    case RET_DYN:
    {
        // payload size is encoded in the frame's liveness word
        StgRetDyn *dyn = (StgRetDyn *)frame;
        return  sizeofW(StgRetDyn) + RET_DYN_SIZE + 
            GET_PTRS(dyn->liveness) + GET_NONPTRS(dyn->liveness);
    }
            
    case RET_FUN:
        return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size;

    case RET_BIG:
        // 1 header word plus the payload described by the large bitmap
        return 1 + info->i.layout.large_bitmap->size;

    case RET_BCO:
        // 2 header words; the BCO itself sits in the frame's second word
        return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]);

    default:
        // small-bitmap frames: 1 header word plus the bitmap's size
        return 1 + BITMAP_SIZE(info->i.layout.bitmap);
    }
}
432
433 /* -----------------------------------------------------------------------------
434    Debugging bits
435    -------------------------------------------------------------------------- */
436
#if defined(DEBUG)
/* Debug printers for a generation's mutable lists. */
void printMutOnceList(generation *gen);
void printMutableList(generation *gen);
#endif
441
442 #endif // STORAGE_H