/* [project @ 2003-03-26 17:40:57 by sof]
   [ghc-hetmet.git] / ghc / rts / Storage.h */
1 /* -----------------------------------------------------------------------------
2  * $Id: Storage.h,v 1.50 2003/03/26 17:40:57 sof Exp $
3  *
4  * (c) The GHC Team, 1998-2002
5  *
 * External Storage Manager Interface
7  *
8  * ---------------------------------------------------------------------------*/
9
10 #ifndef STORAGE_H
11 #define STORAGE_H
12
13 #include "Block.h"
14 #include "MBlock.h"
15 #include "BlockAlloc.h"
16 #include "StoragePriv.h"
17 #ifdef PROFILING
18 #include "LdvProfile.h"
19 #endif
20
21 /* -----------------------------------------------------------------------------
22    Initialisation / De-initialisation
23    -------------------------------------------------------------------------- */
24
extern void initStorage(void);   /* initialise the storage manager */
extern void exitStorage(void);   /* tear the storage manager down again */
27
28 /* -----------------------------------------------------------------------------
29    Generic allocation
30
31    StgPtr allocate(nat n)       Allocates a chunk of contiguous store
32                                 n words long, returning a pointer to
33                                 the first word.  Always succeeds.
34                                 
35    StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
36                                 n words long, which is at a fixed
37                                 address (won't be moved by GC).  
38                                 Returns a pointer to the first word.
39                                 Always succeeds.
40                                 
41                                 NOTE: the GC can't in general handle
42                                 pinned objects, so allocatePinned()
43                                 can only be used for ByteArrays at the
44                                 moment.
45
46                                 Don't forget to TICK_ALLOC_XXX(...)
47                                 after calling allocate or
48                                 allocatePinned, for the
49                                 benefit of the ticky-ticky profiler.
50
51    rtsBool doYouWantToGC(void)  Returns True if the storage manager is
52                                 ready to perform a GC, False otherwise.
53
54    lnat  allocated_bytes(void)  Returns the number of bytes allocated
55                                 via allocate() since the last GC.
56                                 Used in the reporting of statistics.
57
58    SMP: allocate and doYouWantToGC can be used from STG code, they are
59    surrounded by a mutex.
60    -------------------------------------------------------------------------- */
61
/* See the large comment above for the full contracts of these three. */
extern StgPtr  allocate        ( nat n );   /* n contiguous words; may be moved by GC */
extern StgPtr  allocatePinned  ( nat n );   /* n contiguous words at a fixed address */
extern lnat    allocated_bytes ( void );    /* bytes allocated via allocate() since last GC */
65
66 static inline rtsBool
67 doYouWantToGC( void )
68 {
69   return (alloc_blocks >= alloc_blocks_lim);
70 }
71
72 /* -----------------------------------------------------------------------------
73    ExtendNursery(hp,hplim)      When hplim is reached, try to grab
74                                 some more allocation space.  Returns
75                                 False if the allocation space is
76                                 exhausted, and the application should
77                                 call GarbageCollect().
78   -------------------------------------------------------------------------- */
79
/* NB: expression-form macro (comma expressions), so it can appear in a
 * conditional.  Closes the current nursery block, and if another block
 * is chained on, advances CurrentNursery to it, reopens hp/hplim, and
 * yields rtsTrue; yields rtsFalse when the nursery is exhausted.
 */
#define ExtendNursery(hp,hplim)                 \
  (CloseNursery(hp),                            \
   CurrentNursery->link == NULL ? rtsFalse :    \
   (CurrentNursery = CurrentNursery->link,      \
    OpenNursery(hp,hplim),                      \
    rtsTrue))
86
/* -----------------------------------------------------------------------------
   Performing Garbage Collection

   GarbageCollect(get_roots,force_major_gc)
                                Performs a garbage collection.
                                'get_roots' is called to find all the
                                roots that the system knows about;
                                'force_major_gc' requests that all
                                generations be collected.

   StgClosure *MarkRoot(StgClosure *p)
                                Called by get_roots on each root.
                                Returns the new location of the root.
   -------------------------------------------------------------------------- */
97
98 extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
99
100 /* -----------------------------------------------------------------------------
101    Generational garbage collection support
102
103    recordMutable(StgPtr p)       Informs the garbage collector that a
104                                  previously immutable object has
105                                  become (permanently) mutable.  Used
106                                  by thawArray and similar.
107
108    updateWithIndirection(p1,p2)  Updates the object at p1 with an
109                                  indirection pointing to p2.  This is
110                                  normally called for objects in an old
111                                  generation (>0) when they are updated.
112
113    updateWithPermIndirection(p1,p2)  As above but uses a permanent indir.
114
115    -------------------------------------------------------------------------- */
116
/*
 * Storage manager mutex: serialises access to the storage manager's
 * shared structures in the SMP build; both lock macros compile away
 * to nothing in the non-SMP build.
 */
#if defined(SMP)
extern Mutex sm_mutex;
#define ACQUIRE_SM_LOCK   ACQUIRE_LOCK(&sm_mutex)
#define RELEASE_SM_LOCK   RELEASE_LOCK(&sm_mutex)
#else
#define ACQUIRE_SM_LOCK
#define RELEASE_SM_LOCK
#endif
128
129 /* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some
130  * kind of lock in the SMP case?
131  */
132 static inline void
133 recordMutable(StgMutClosure *p)
134 {
135   bdescr *bd;
136
137 #ifdef SMP
138   ASSERT(p->header.info == &stg_WHITEHOLE_info || closure_MUTABLE(p));
139 #else
140   ASSERT(closure_MUTABLE(p));
141 #endif
142
143   bd = Bdescr((P_)p);
144   if (bd->gen_no > 0) {
145     p->mut_link = generations[bd->gen_no].mut_list;
146     generations[bd->gen_no].mut_list = p;
147   }
148 }
149
150 static inline void
151 recordOldToNewPtrs(StgMutClosure *p)
152 {
153   bdescr *bd;
154   
155   bd = Bdescr((P_)p);
156   if (bd->gen_no > 0) {
157     p->mut_link = generations[bd->gen_no].mut_once_list;
158     generations[bd->gen_no].mut_once_list = p;
159   }
160 }
161
// @LDV profiling
// We zero out the slop when PROFILING is on.
//
// updateWithIndirection(info, p1, p2): overwrite the closure at p1
// with an indirection to p2.  'info' is p1's info pointer as last read
// by the caller.  Generation-0 closures become IND; older closures
// become IND_OLDGEN and are linked onto their generation's
// mut_once_list (under the SM lock) so the next GC traces them --
// except when p1 was a BLACKHOLE_BQ, which is skipped (presumably
// because it is already on a mutable list -- NOTE(review): confirm).
//
// NOTE(review): the macro arguments are expanded several times, so
// they must be side-effect free; the body is a bare { } block, not
// do { } while(0), so it cannot sit in an unbraced if/else branch.
#if !defined(DEBUG) && !defined(PROFILING)
#define updateWithIndirection(info, p1, p2)                             \
  {                                                                     \
    bdescr *bd;                                                         \
                                                                        \
    bd = Bdescr((P_)p1);                                                \
    if (bd->gen_no == 0) {                                              \
      ((StgInd *)p1)->indirectee = p2;                                  \
      SET_INFO(p1,&stg_IND_info);                                       \
      TICK_UPD_NEW_IND();                                               \
    } else {                                                            \
      ((StgIndOldGen *)p1)->indirectee = p2;                            \
      if (info != &stg_BLACKHOLE_BQ_info) {                             \
        ACQUIRE_SM_LOCK;                                                \
        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;    \
        RELEASE_SM_LOCK;                                                \
      }                                                                 \
      SET_INFO(p1,&stg_IND_OLDGEN_info);                                \
      TICK_UPD_OLD_IND();                                               \
    }                                                                   \
  }
#elif defined(PROFILING)
// @LDV profiling
// We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in
// which p1 resides.
//
// Note:
//   After all, we do *NOT* need to call LDV_recordCreate() for both IND and
//   IND_OLDGEN closures because they are inherently used. But, it corrupts
//   the invariants that every closure keeps its creation time in the profiling
//   field. So, we call LDV_recordCreate().

#define updateWithIndirection(info, p1, p2)                             \
  {                                                                     \
    bdescr *bd;                                                         \
                                                                        \
    LDV_recordDead_FILL_SLOP_DYNAMIC((p1));                             \
    bd = Bdescr((P_)p1);                                                \
    if (bd->gen_no == 0) {                                              \
      ((StgInd *)p1)->indirectee = p2;                                  \
      SET_INFO(p1,&stg_IND_info);                                       \
      LDV_recordCreate((p1));                                           \
      TICK_UPD_NEW_IND();                                               \
    } else {                                                            \
      ((StgIndOldGen *)p1)->indirectee = p2;                            \
      if (info != &stg_BLACKHOLE_BQ_info) {                             \
        ACQUIRE_SM_LOCK;                                                \
        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;    \
        RELEASE_SM_LOCK;                                                \
      }                                                                 \
      SET_INFO(p1,&stg_IND_OLDGEN_info);                                \
      LDV_recordCreate((p1));                                           \
    }                                                                   \
  }

#else

/* In the DEBUG case, we also zero out the slop of the old closure,
 * so that the sanity checker can tell where the next closure is.
 *
 * Two important invariants: we should never try to update a closure
 * to point to itself, and the closure being updated should not
 * already have been updated (the mutable list will get messed up
 * otherwise).
 */
#define updateWithIndirection(info, p1, p2)                             \
  {                                                                     \
    bdescr *bd;                                                         \
                                                                        \
    ASSERT( p1 != p2 && !closure_IND(p1) );                             \
    bd = Bdescr((P_)p1);                                                \
    if (bd->gen_no == 0) {                                              \
      ((StgInd *)p1)->indirectee = p2;                                  \
      SET_INFO(p1,&stg_IND_info);                                       \
      TICK_UPD_NEW_IND();                                               \
    } else {                                                            \
      if (info != &stg_BLACKHOLE_BQ_info) {                             \
        {                                                               \
          StgInfoTable *inf = get_itbl(p1);                             \
          nat np = inf->layout.payload.ptrs,                            \
              nw = inf->layout.payload.nptrs, i;                        \
          if (inf->type != THUNK_SELECTOR) {                            \
             for (i = 0; i < np + nw; i++) {                            \
                ((StgClosure *)p1)->payload[i] = 0;                     \
             }                                                          \
          }                                                             \
        }                                                               \
        ACQUIRE_SM_LOCK;                                                \
        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;    \
        RELEASE_SM_LOCK;                                                \
      }                                                                 \
      ((StgIndOldGen *)p1)->indirectee = p2;                            \
      SET_INFO(p1,&stg_IND_OLDGEN_info);                                \
      TICK_UPD_OLD_IND();                                               \
    }                                                                   \
  }
#endif
265
/* Static objects all live in the oldest generation, so an updated
 * static closure always goes onto the oldest generation's
 * mut_once_list.  NOTE(review): arguments are expanded more than once
 * and the body is a bare { } block (see updateWithIndirection).
 */
#define updateWithStaticIndirection(info, p1, p2)                       \
  {                                                                     \
    ASSERT( p1 != p2 && !closure_IND(p1) );                             \
    ASSERT( ((StgMutClosure*)p1)->mut_link == NULL );                   \
                                                                        \
    ACQUIRE_SM_LOCK;                                                    \
    ((StgMutClosure *)p1)->mut_link = oldest_gen->mut_once_list;        \
    oldest_gen->mut_once_list = (StgMutClosure *)p1;                    \
    RELEASE_SM_LOCK;                                                    \
                                                                        \
    ((StgInd *)p1)->indirectee = p2;                                    \
    SET_INFO((StgInd *)p1, &stg_IND_STATIC_info);                       \
    TICK_UPD_STATIC_IND();                                              \
  }
282
#if defined(TICKY_TICKY) || defined(PROFILING)
/* updateWithPermIndirection: as updateWithIndirection, but installs a
 * *permanent* indirection (IND_PERM / IND_OLDGEN_PERM) so that the
 * profilers can still see the update.  Only compiled for ticky-ticky
 * or profiling builds.
 */
static inline void
updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *p2)
{
  bdescr *bd;

  /* Never update a closure to itself, nor one already updated. */
  ASSERT( p1 != p2 && !closure_IND(p1) );

#ifdef PROFILING
  // @LDV profiling
  // Destroy the old closure.
  // Nb: LDV_* stuff cannot mix with ticky-ticky
  LDV_recordDead_FILL_SLOP_DYNAMIC(p1);
#endif
  bd = Bdescr((P_)p1);
  if (bd->gen_no == 0) {
    ((StgInd *)p1)->indirectee = p2;
    SET_INFO(p1,&stg_IND_PERM_info);
#ifdef PROFILING
    // @LDV profiling
    // We have just created a new closure.
    LDV_recordCreate(p1);
#endif
    TICK_UPD_NEW_PERM_IND(p1);
  } else {
    ((StgIndOldGen *)p1)->indirectee = p2;
    /* BLACKHOLE_BQs are skipped here as in updateWithIndirection. */
    if (info != &stg_BLACKHOLE_BQ_info) {
      ACQUIRE_SM_LOCK;
      ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
      generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
      RELEASE_SM_LOCK;
    }
    SET_INFO(p1,&stg_IND_OLDGEN_PERM_info);
#ifdef PROFILING
    // @LDV profiling
    // We have just created a new closure.
    LDV_recordCreate(p1);
#endif
    TICK_UPD_OLD_PERM_IND();
  }
}
#endif
325
326 /* -----------------------------------------------------------------------------
327    The CAF table - used to let us revert CAFs in GHCi
328    -------------------------------------------------------------------------- */
329
330 void revertCAFs( void );
331
332 /* -----------------------------------------------------------------------------
333    DEBUGGING predicates for pointers
334
335    LOOKS_LIKE_INFO_PTR(p)    returns False if p is definitely not an info ptr
336    LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr
337
338    These macros are complete but not sound.  That is, they might
339    return false positives.  Do not rely on them to distinguish info
340    pointers from closure pointers, for example.
341
342    We don't use address-space predicates these days, for portability
343    reasons, and the fact that code/data can be scattered about the
344    address space in a dynamically-linked environment.  Our best option
345    is to look at the alleged info table and see whether it seems to
346    make sense...
347    -------------------------------------------------------------------------- */
348
/* Heuristic sanity checks: non-NULL and the alleged info table carries
 * a plausible closure type.  NOTE(review): 'p' is expanded more than
 * once, so the argument must be side-effect free.
 */
#define LOOKS_LIKE_INFO_PTR(p) \
   (p && ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type != INVALID_OBJECT && \
    ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type < N_CLOSURE_TYPES)

/* A closure pointer looks valid if its header's info pointer does. */
#define LOOKS_LIKE_CLOSURE_PTR(p) \
   (LOOKS_LIKE_INFO_PTR(((StgClosure *)(p))->header.info))
355
356 /* -----------------------------------------------------------------------------
357    Macros for calculating how big a closure will be (used during allocation)
358    -------------------------------------------------------------------------- */
359
/* Size in words of a PAP holding n_args words of arguments. */
static __inline__ StgOffset PAP_sizeW   ( nat n_args )
{ return sizeofW(StgPAP) + n_args; }
362
/* Size in words of an AP_STACK with 'size' words of stack payload. */
static __inline__ StgOffset AP_STACK_sizeW ( nat size )
{ return sizeofW(StgAP_STACK) + size; }
365
/* Size in words of a constructor with p pointer and np non-pointer words. */
static __inline__ StgOffset CONSTR_sizeW( nat p, nat np )
{ return sizeofW(StgHeader) + p + np; }
368
/* Size in words of a THUNK_SELECTOR: header plus room for an update. */
static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void )
{ return sizeofW(StgHeader) + MIN_UPD_SIZE; }
371
/* Size in words of a BLACKHOLE: header plus room for an update. */
static __inline__ StgOffset BLACKHOLE_sizeW ( void )
{ return sizeofW(StgHeader) + MIN_UPD_SIZE; }
374
375 /* --------------------------------------------------------------------------
376    Sizes of closures
377    ------------------------------------------------------------------------*/
378
/* Closure size in words computed from its info table's payload layout. */
static __inline__ StgOffset sizeW_fromITBL( const StgInfoTable* itbl )
{ return sizeofW(StgClosure)
       + sizeofW(StgPtr)  * itbl->layout.payload.ptrs
       + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
383
/* Size in words of an existing AP_STACK closure. */
static __inline__ StgOffset ap_stack_sizeW( StgAP_STACK* x )
{ return AP_STACK_sizeW(x->size); }
386
/* Size in words of an existing PAP closure. */
static __inline__ StgOffset pap_sizeW( StgPAP* x )
{ return PAP_sizeW(x->n_args); }
389
/* Size in words of an ARR_WORDS (byte array) closure. */
static __inline__ StgOffset arr_words_sizeW( StgArrWords* x )
{ return sizeofW(StgArrWords) + x->words; }
392
/* Size in words of a MUT_ARR_PTRS (mutable pointer array) closure. */
static __inline__ StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
{ return sizeofW(StgMutArrPtrs) + x->ptrs; }
395
/* Size in words of a TSO including its stack. */
static __inline__ StgWord tso_sizeW ( StgTSO *tso )
{ return TSO_STRUCT_SIZEW + tso->stack_size; }
398
/* Size in words of a BCO; the BCO records its own total size. */
static __inline__ StgWord bco_sizeW ( StgBCO *bco )
{ return bco->size; }
401
402 /* -----------------------------------------------------------------------------
403    Sizes of stack frames
404    -------------------------------------------------------------------------- */
405
/* stack_frame_sizeW: size in words of the stack frame 'frame',
 * including its header, determined from its return info table.
 */
static inline StgWord stack_frame_sizeW( StgClosure *frame )
{
    StgRetInfoTable *info;

    info = get_ret_itbl(frame);
    switch (info->i.type) {

    case RET_DYN:
    {
        /* RET_DYN carries its own liveness mask giving ptr/non-ptr counts. */
        StgRetDyn *dyn = (StgRetDyn *)frame;
        return  sizeofW(StgRetDyn) + RET_DYN_SIZE +
            GET_PTRS(dyn->liveness) + GET_NONPTRS(dyn->liveness);
    }

    case RET_FUN:
        return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size;

    case RET_BIG:
    case RET_VEC_BIG:
        /* 1 header word plus the size stored in the large bitmap. */
        return 1 + info->i.layout.large_bitmap->size;

    case RET_BCO:
        /* 2 words (header + BCO ptr) plus the BCO's own bitmap size;
         * the BCO pointer sits in the frame's second word. */
        return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]);

    default:
        /* Small-bitmap frames: 1 header word plus the bitmap's size. */
        return 1 + BITMAP_SIZE(info->i.layout.bitmap);
    }
}
434
435 /* -----------------------------------------------------------------------------
436    Debugging bits
437    -------------------------------------------------------------------------- */
438
#if defined(DEBUG)
/* Debug-only dumps of a generation's mutable lists. */
void printMutOnceList(generation *gen);
void printMutableList(generation *gen);
#endif
443
444 #endif // STORAGE_H