1 /* -----------------------------------------------------------------------------
2 * $Id: Storage.h,v 1.27 2001/02/09 10:33:22 sewardj Exp $
4 * (c) The GHC Team, 1998-1999
6 * External Storage Manager Interface
8 * ---------------------------------------------------------------------------*/
14 #include "BlockAlloc.h"
15 #include "StoragePriv.h"
17 /* -----------------------------------------------------------------------------
18 Initialisation / De-initialisation
19 -------------------------------------------------------------------------- */
21 extern void initStorage(void);
22 extern void exitStorage(void);
24 /* -----------------------------------------------------------------------------
27 StgPtr allocate(int n) Allocates a chunk of contiguous store
28 n words long, returning a pointer to
29 the first word. Always succeeds.
31 Don't forget to TICK_ALLOC_XXX(...)
32 after calling allocate, for the
33 benefit of the ticky-ticky profiler.
35 rtsBool doYouWantToGC(void) Returns True if the storage manager is
36 ready to perform a GC, False otherwise.
38 lnat allocated_bytes(void) Returns the number of bytes allocated
39 via allocate() since the last GC.
40 Used in the reporting of statistics.
42 SMP: allocate and doYouWantToGC can be used from STG code, they are
43 surrounded by a mutex.
44 -------------------------------------------------------------------------- */
/* allocate(n): allocate n contiguous words in the nursery and return a
 * pointer to the first word; per the commentary above, it always succeeds.
 * Callers should TICK_ALLOC_XXX(...) afterwards for ticky-ticky profiling. */
46 extern StgPtr allocate(nat n);
/* doYouWantToGC: rtsTrue once the allocation-block budget is spent, i.e.
 * the storage manager is ready to perform a GC.
 * NOTE(review): the braces of this inline body appear to have been lost in
 * extraction -- verify against the original header. */
47 static inline rtsBool doYouWantToGC(void)
49 return (alloc_blocks >= alloc_blocks_lim);
/* allocated_bytes: bytes allocated via allocate() since the last GC (stats). */
51 extern lnat allocated_bytes(void);
53 /* -----------------------------------------------------------------------------
54 ExtendNursery(hp,hplim) When hplim is reached, try to grab
55 some more allocation space. Returns
56 False if the allocation space is
57 exhausted, and the application should
58 call GarbageCollect().
59 -------------------------------------------------------------------------- */
/* ExtendNursery(hp,hplim): called when hplim is reached; closes the current
 * nursery block (free = hp+1) and, if another block is linked, advances
 * CurrentNursery and re-opens it via OpenNursery.  Evaluates to rtsFalse
 * when no further block exists, in which case the caller should invoke
 * GarbageCollect().
 * NOTE(review): the closing lines of this backslash-continued macro appear
 * to have been lost in extraction -- confirm against the original header.
 * No comments are inserted below: they would sit inside the continuation. */
61 #define ExtendNursery(hp,hplim) \
62 (CurrentNursery->free = (P_)(hp)+1, \
63 CurrentNursery->link == NULL ? rtsFalse : \
64 (CurrentNursery = CurrentNursery->link, \
65 OpenNursery(hp,hplim), \
68 extern void PleaseStopAllocating(void);
70 /* -----------------------------------------------------------------------------
71 Performing Garbage Collection
73 GarbageCollect(get_roots) Performs a garbage collection.
74 'get_roots' is called to find all the
75 roots that the system knows about.
77 StgClosure Called by get_roots on each root.
78 MarkRoot(StgClosure *p) Returns the new location of the root.
79 -------------------------------------------------------------------------- */
/* GarbageCollect: perform a garbage collection.  get_roots is called back
 * to enumerate every root the system knows about; force_major_gc requests a
 * major collection rather than leaving the choice to the collector. */
81 extern void GarbageCollect(void (*get_roots)(void),rtsBool force_major_gc);
/* MarkRoot: called (by get_roots) on each root; returns the root's new
 * location after the collection. */
82 extern StgClosure *MarkRoot(StgClosure *p);
84 /* Temporary measure to ensure we retain all the dynamically-loaded CAFs */
86 extern void markCafs( void );
89 /* -----------------------------------------------------------------------------
90 Generational garbage collection support
92 recordMutable(StgPtr p) Informs the garbage collector that a
93 previously immutable object has
94 become (permanently) mutable. Used
95 by thawArray and similar.
97 updateWithIndirection(p1,p2) Updates the object at p1 with an
98 indirection pointing to p2. This is
99 normally called for objects in an old
100 generation (>0) when they are updated.
102 updateWithPermIndirection(p1,p2) As above but uses a permanent indir.
104 -------------------------------------------------------------------------- */
107 * Storage manager mutex
/* Storage manager mutex: guards the generations' mutable lists (and other
 * shared SM state) in the SMP build. */
110 extern pthread_mutex_t sm_mutex;
113 /* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some
114 * kind of lock in the SMP case?
recordMutable: tell the GC that a previously immutable closure p has
become (permanently) mutable (e.g. via thawArray); if p lives in an old
generation (> 0) it is threaded onto that generation's mut_list.
NOTE(review): the function header keyword/braces and the Bdescr lookup
that sets `bd` appear to have been lost in extraction -- verify. */
117 recordMutable(StgMutClosure *p)
122 ASSERT(p->header.info == &stg_WHITEHOLE_info || closure_MUTABLE(p));
124 ASSERT(closure_MUTABLE(p));
128 if (bd->gen->no > 0) {
129 p->mut_link = bd->gen->mut_list;
130 bd->gen->mut_list = p;
/* recordOldToNewPtrs: record an old-generation closure p that now contains
 * pointers into a newer generation, on the generation's mut_once_list. */
135 recordOldToNewPtrs(StgMutClosure *p)
140 if (bd->gen->no > 0) {
141 p->mut_link = bd->gen->mut_once_list;
142 bd->gen->mut_once_list = p;
/* updateWithIndirection(info, p1, p2) [non-DEBUG]: overwrite closure p1
 * with an indirection to p2.  Generation-0 objects get a plain stg_IND;
 * older-generation objects get stg_IND_OLDGEN and (unless p1 was a
 * BLACKHOLE_BQ, which is already on a mutable list) are linked onto the
 * generation's mut_once_list under sm_mutex.
 * NOTE(review): several backslash-continued lines of this macro (the
 * do/while wrapper, the else branch, closing braces) appear to be missing
 * from this extraction -- verify against the original header.  No comments
 * are inserted below: they would sit inside the continuation. */
147 #define updateWithIndirection(info, p1, p2) \
151 bd = Bdescr((P_)p1); \
152 if (bd->gen->no == 0) { \
153 ((StgInd *)p1)->indirectee = p2; \
154 SET_INFO(p1,&stg_IND_info); \
155 TICK_UPD_NEW_IND(); \
157 ((StgIndOldGen *)p1)->indirectee = p2; \
158 if (info != &stg_BLACKHOLE_BQ_info) { \
159 ACQUIRE_LOCK(&sm_mutex); \
160 ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list; \
161 bd->gen->mut_once_list = (StgMutClosure *)p1; \
162 RELEASE_LOCK(&sm_mutex); \
164 SET_INFO(p1,&stg_IND_OLDGEN_info); \
165 TICK_UPD_OLD_IND(); \
170 /* In the DEBUG case, we also zero out the slop of the old closure,
171 * so that the sanity checker can tell where the next closure is.
/* DEBUG variant of updateWithIndirection: same contract as above, except
 * that for old-generation non-THUNK_SELECTOR closures the slop words of the
 * old closure (payload slots after the pointers) are zeroed, so the sanity
 * checker can tell where the next closure begins.
 * NOTE(review): lines are missing from this extraction (do/while wrapper,
 * else branch, closing braces) -- verify against the original header.  No
 * comments are inserted below: they would sit inside the continuation. */
173 #define updateWithIndirection(info, p1, p2) \
177 bd = Bdescr((P_)p1); \
178 if (bd->gen->no == 0) { \
179 ((StgInd *)p1)->indirectee = p2; \
180 SET_INFO(p1,&stg_IND_info); \
181 TICK_UPD_NEW_IND(); \
183 if (info != &stg_BLACKHOLE_BQ_info) { \
185 StgInfoTable *inf = get_itbl(p1); \
186 nat np = inf->layout.payload.ptrs, \
187 nw = inf->layout.payload.nptrs, i; \
188 if (inf->type != THUNK_SELECTOR) { \
189 for (i = np; i < np + nw; i++) { \
190 ((StgClosure *)p1)->payload[i] = 0; \
194 ACQUIRE_LOCK(&sm_mutex); \
195 ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list; \
196 bd->gen->mut_once_list = (StgMutClosure *)p1; \
197 RELEASE_LOCK(&sm_mutex); \
199 ((StgIndOldGen *)p1)->indirectee = p2; \
200 SET_INFO(p1,&stg_IND_OLDGEN_info); \
201 TICK_UPD_OLD_IND(); \
206 /* Static objects all live in the oldest generation
/* updateWithStaticIndirection(info, p1, p2): update static closure p1 with
 * an indirection to p2.  Static objects all live in the oldest generation,
 * so p1 is pushed onto oldest_gen->mut_once_list (under sm_mutex) and
 * retagged stg_IND_STATIC.  The ASSERT checks p1 was not already linked.
 * NOTE(review): the do/while wrapper and closing lines of this macro appear
 * to be missing from this extraction.  No comments are inserted below:
 * they would sit inside the continuation. */
208 #define updateWithStaticIndirection(info, p1, p2) \
210 ASSERT( ((StgMutClosure*)p1)->mut_link == NULL ); \
212 ACQUIRE_LOCK(&sm_mutex); \
213 ((StgMutClosure *)p1)->mut_link = oldest_gen->mut_once_list; \
214 oldest_gen->mut_once_list = (StgMutClosure *)p1; \
215 RELEASE_LOCK(&sm_mutex); \
217 ((StgInd *)p1)->indirectee = p2; \
218 SET_INFO((StgInd *)p1, &stg_IND_STATIC_info); \
219 TICK_UPD_STATIC_IND(); \
222 #if defined(TICKY_TICKY) || defined(PROFILING)
/* updateWithPermIndirection: as updateWithIndirection but installs a
 * *permanent* indirection (stg_IND_PERM / stg_IND_OLDGEN_PERM) so that
 * ticky-ticky and profiling builds keep accurate entry counts.
 * NOTE(review): the function header keyword/braces and the Bdescr lookup
 * that sets `bd` appear to be missing from this extraction -- verify. */
224 updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *p2)
/* Generation 0: a plain permanent indirection suffices. */
229 if (bd->gen->no == 0) {
230 ((StgInd *)p1)->indirectee = p2;
231 SET_INFO(p1,&stg_IND_PERM_info);
232 TICK_UPD_NEW_PERM_IND(p1);
/* Older generations: additionally record p1 on the generation's
 * mut_once_list, under sm_mutex; BLACKHOLE_BQ closures are skipped since
 * they are already on a mutable list. */
234 ((StgIndOldGen *)p1)->indirectee = p2;
235 if (info != &stg_BLACKHOLE_BQ_info) {
236 ACQUIRE_LOCK(&sm_mutex);
237 ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list;
238 bd->gen->mut_once_list = (StgMutClosure *)p1;
239 RELEASE_LOCK(&sm_mutex);
241 SET_INFO(p1,&stg_IND_OLDGEN_PERM_info);
242 TICK_UPD_OLD_PERM_IND();
247 /* -----------------------------------------------------------------------------
248 The CAF table - used to let us revert CAFs
249 -------------------------------------------------------------------------- */
251 #if defined(INTERPRETER)
/* Table of CAFs seen by the interpreter, kept so they can be reverted.
 * NOTE(review): the struct's first field and its closing brace / typedef
 * name appear to be missing from this extraction. */
252 typedef struct StgCAFTabEntry_ {
254 StgInfoTable* origItbl;
/* Register a CAF (with its original info table) / empty the table. */
257 extern void addToECafTable ( StgClosure* closure, StgInfoTable* origItbl );
258 extern void clearECafTable ( void );
260 extern StgCAF* ecafList;
261 extern StgCAFTabEntry* ecafTable;
262 extern StgInt usedECafTable;
263 extern StgInt sizeECafTable;
/* Debug helpers: dump a generation's mut_once_list / mut_list. */
267 void printMutOnceList(generation *gen);
268 void printMutableList(generation *gen);
271 /* --------------------------------------------------------------------------
272 Address space layout macros
273 --------------------------------------------------------------------------
275 Here are the assumptions GHC makes about address space layout.
276 Broadly, it thinks there are three sections:
278 CODE Read-only. Contains code and read-only data (such as
282 DATA Read-write data. Contains static closures (and on some
283 architectures, info tables too)
285 HEAP Dynamically-allocated closures
287 USER None of the above. The only way USER things arise right
288 now is when GHCi allocates a constructor info table, which
289 it does by mallocing them.
291 Three macros identify these three areas:
292 IS_CODE(p), IS_DATA(p), HEAP_ALLOCED(p)
294 HEAP_ALLOCED is called FOR EVERY SINGLE CLOSURE during GC.
297 Implementation of HEAP_ALLOCED
298 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
299 Concerning HEAP, most of the time (certainly under [Static] and [GHCi])
300 we ensure that the heap is allocated above some fixed address HEAP_BASE
301 (defined in MBlock.h). In this case we set TEXT_BEFORE_HEAP, and we
302 get a nice fast test.
304 Sometimes we can't be quite sure. For example in Windows, we can't
305 fix where our heap address space comes from. In this case we un-set
306 TEXT_BEFORE_HEAP. That makes it more expensive to test whether a pointer
307 comes from the HEAP section, because we need to look at the allocator's
308 address maps (see HEAP_ALLOCED macro)
310 Implementation of CODE and DATA
311 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
312 Concerning CODE and DATA, there are three main regimes:
314 [Static] Totally The segments are contiguous, and laid out
315 statically linked exactly as above
317 [GHCi] Static, GHCi may load new modules, but it knows the
318 except for GHCi address map, so for any given address it can
319 still tell which section it belongs to
321 [DLL] OS-supported Chunks of CODE and DATA may be mixed in
322 dynamic loading the address space, and we can't tell how
325 For the [Static] case, we assume memory is laid out like this
326 (in order of increasing addresses)
330 TEXT_SECTION_END_MARKER (usually _etext)
332 DATA_SECTION_END_MARKER (usually _end)
337 For the [GHCi] case, we have to consult GHCi's dynamic linker's
338 address maps, which is done by macros
339 is_dynamically_loaded_code_or_rodata_ptr
340 is_dynamically_loaded_code_or_rwdata_ptr
342 For the [DLL] case, IS_CODE and IS_DATA are really not usable at all.
/* TEXT_BEFORE_HEAP: set on every target except mingw32, where the heap's
 * placement relative to the text segment cannot be relied upon (see the
 * commentary above). */
346 #undef TEXT_BEFORE_HEAP
347 #ifndef mingw32_TARGET_OS
348 #define TEXT_BEFORE_HEAP 1
/* Linker-provided end-of-section symbols (usually _etext / _end). */
351 extern void* TEXT_SECTION_END_MARKER_DECL;
352 extern void* DATA_SECTION_END_MARKER_DECL;
354 #if defined(INTERPRETER) || defined(GHCI)
355 /* Take into account code sections in dynamically loaded object files. */
356 #define IS_CODE_PTR(p) ( ((P_)(p) < (P_)&TEXT_SECTION_END_MARKER) \
357 || is_dynamically_loaded_code_or_rodata_ptr((char *)p) )
358 #define IS_DATA_PTR(p) ( ((P_)(p) >= (P_)&TEXT_SECTION_END_MARKER && \
359 (P_)(p) < (P_)&DATA_SECTION_END_MARKER) \
360 || is_dynamically_loaded_rwdata_ptr((char *)p) )
361 #define IS_USER_PTR(p) ( ((P_)(p) >= (P_)&DATA_SECTION_END_MARKER) \
362 && is_not_dynamically_loaded_ptr((char *)p) )
/* Static-link case: a simple three-way address-space partition.
 * NOTE(review): the #else separating this from the GHCi definitions above,
 * and the closing #endif lines, appear to be missing from this extraction. */
364 #define IS_CODE_PTR(p) ((P_)(p) < (P_)&TEXT_SECTION_END_MARKER)
365 #define IS_DATA_PTR(p) ((P_)(p) >= (P_)&TEXT_SECTION_END_MARKER && (P_)(p) < (P_)&DATA_SECTION_END_MARKER)
366 #define IS_USER_PTR(p) ((P_)(p) >= (P_)&DATA_SECTION_END_MARKER)
369 /* The HEAP_ALLOCED test below is called FOR EVERY SINGLE CLOSURE
370 * during GC. It needs to be FAST.
372 * BEWARE: when we're dynamically loading code (for GHCi), make sure
373 * that we don't load any code above HEAP_BASE, or this test won't work.
375 #ifdef TEXT_BEFORE_HEAP
/* Fast path: everything at or above HEAP_BASE (MBlock.h) is heap. */
376 # define HEAP_ALLOCED(x) ((StgPtr)(x) >= (StgPtr)(HEAP_BASE))
/* Slow path (e.g. Windows): consult the allocator's address maps.
 * NOTE(review): the #else / #endif around these two definitions appear to
 * be missing from this extraction. */
378 extern int is_heap_alloced(const void* x);
379 # define HEAP_ALLOCED(x) (is_heap_alloced(x))
383 /* --------------------------------------------------------------------------
384 Macros for distinguishing data pointers from code pointers
385 --------------------------------------------------------------------------
389 The garbage collector needs to make some critical distinctions between pointers.
390 In particular we need
392 LOOKS_LIKE_GHC_INFO(p) p points to an info table
394 For both of these macros, p is
395 *either* a pointer to a closure (static or heap allocated)
396 *or* a return address on the (Haskell) stack
398 (Return addresses are in fact info-pointers, so that the Haskell stack
399 looks very like a chunk of heap.)
401 The garbage collector uses LOOKS_LIKE_GHC_INFO when walking the stack, as it
402 walks over the "pending arguments" on its way to the next return address.
403 It is called moderately often, but not as often as HEAP_ALLOCED
405 ToDo: LOOKS_LIKE_GHC_INFO(p) does not return True when p points to a
406 constructor info table allocated by GHCi. We should really rename
407 LOOKS_LIKE_GHC_INFO to LOOKS_LIKE_GHC_RETURN_INFO.
411 LOOKS_LIKE_GHC_INFO is more complicated because of the need to distinguish
412 between static closures and info tables. It's a known portability problem.
413 We have three approaches:
415 Plan A: Address-space partitioning.
416 Keep info tables in the (single, contiguous) text segment: IS_CODE_PTR(p)
417 and static closures in the (single, contiguous) data segment: IS_DATA_PTR(p)
419 Plan A can fail for two reasons:
420 * In many environments (eg. dynamic loading),
421 text and data aren't in a single contiguous range.
422 * When we compile through vanilla C (no mangling) we sometimes
423 can't guarantee to put info tables in the text section. This
424 happens eg. on MacOS where the C compiler refuses to put const
425 data in the text section if it has any code pointers in it
426 (which info tables do *only* when we're compiling without
427 TABLES_NEXT_TO_CODE).
429 Hence, Plan B: (compile-via-C-with-mangling, or native code generation)
430 Put a zero word before each static closure.
431 When compiling to native code, or via C-with-mangling, info tables
432 are laid out "backwards" from the address specified in the info pointer
433 (the entry code goes forward from the info pointer). Hence, the word
434 before the one referenced by the info pointer is part of the info table,
435 and is guaranteed non-zero.
437 For reasons nobody seems to fully understand, the statically-allocated tables
438 of INTLIKE and CHARLIKE closures can't have this zero word, so we
439 have to test separately for them.
441 Plan B fails altogether for the compile-through-vanilla-C route, because
442 info tables aren't laid out backwards.
445 Hence, Plan C: (unregisterised, compile-through-vanilla-C route only)
446 If we didn't manage to get info tables into the text section, then
447 we can distinguish between a static closure pointer and an info
448 pointer as follows: the first word of an info table is a code pointer,
449 and therefore in text space, whereas the first word of a closure pointer
450 is an info pointer, and therefore not. Shazam!
454 /* When working with Win32 DLLs, static closures are identified by
455 being prefixed with a zero word. This is needed so that we can
456 distinguish between pointers to static closures and (reversed!)
459 This 'scheme' breaks down for closure tables such as CHARLIKE,
460 so we catch these separately.
462 LOOKS_LIKE_STATIC_CLOSURE()
463 - discriminates between static closures and info tbls
464 (needed by LOOKS_LIKE_GHC_INFO() below - [Win32 DLLs only.])
466 - distinguishes between static and heap allocated data.
468 #if defined(ENABLE_WIN32_DLL_SUPPORT) && !defined(INTERPRETER)
469 /* definitely do not enable for mingw DietHEP */
/* Win32 DLL case: anything not heap-allocated counts as static. */
470 #define LOOKS_LIKE_STATIC(r) (!(HEAP_ALLOCED(r)))
472 /* Tiresome predicates needed to check for pointers into the closure tables */
473 #define IS_CHARLIKE_CLOSURE(p) \
474 ( (P_)(p) >= (P_)stg_CHARLIKE_closure && \
475 (char*)(p) <= ((char*)stg_CHARLIKE_closure + \
476 (MAX_CHARLIKE-MIN_CHARLIKE) * sizeof(StgIntCharlikeClosure)) )
477 #define IS_INTLIKE_CLOSURE(p) \
478 ( (P_)(p) >= (P_)stg_INTLIKE_closure && \
479 (char*)(p) <= ((char*)stg_INTLIKE_closure + \
480 (MAX_INTLIKE-MIN_INTLIKE) * sizeof(StgIntCharlikeClosure)) )
/* A static closure is prefixed by a zero word (see commentary above);
 * CHARLIKE/INTLIKE table entries lack that prefix and are tested directly. */
482 #define LOOKS_LIKE_STATIC_CLOSURE(r) (((*(((unsigned long *)(r))-1)) == 0) || IS_CHARLIKE_CLOSURE(r) || IS_INTLIKE_CLOSURE(r))
/* Non-DLL case: plain address-space partitioning suffices.
 * NOTE(review): the #else / #endif around this branch appear to be missing
 * from this extraction. */
484 #define LOOKS_LIKE_STATIC(r) IS_DATA_PTR(r)
485 #define LOOKS_LIKE_STATIC_CLOSURE(r) IS_DATA_PTR(r)
489 /* -----------------------------------------------------------------------------
490 Macros for distinguishing infotables from closures.
492 You'd think it'd be easy to tell an info pointer from a closure pointer:
493 closures live on the heap and infotables are in read only memory. Right?
494 Wrong! Static closures live in read only memory and Hugs allocates
495 infotables for constructors on the (writable) C heap.
496 -------------------------------------------------------------------------- */
499 # ifdef USE_MINIINTERPRETER
500 /* yoiks: one of the dreaded pointer equality tests */
501 # define IS_HUGS_CONSTR_INFO(info) \
502 (((StgInfoTable *)(info))->entry == (StgFunPtr)&Hugs_CONSTR_entry)
/* Fallback definitions for the other build regimes.
 * NOTE(review): the #else / #elif / #endif lines separating these variants
 * appear to be missing from this extraction -- as written, the later
 * redefinitions would clash with the one above. */
504 # define IS_HUGS_CONSTR_INFO(info) 0 /* ToDo: more than mildly bogus */
507 /* not accurate by any means, but stops the assertions failing... */
508 # define IS_HUGS_CONSTR_INFO(info) IS_USER_PTR(info)
510 # define IS_HUGS_CONSTR_INFO(info) 0 /* ToDo: more than mildly bogus */
513 /* LOOKS_LIKE_GHC_INFO is called moderately often during GC, but
514 * Certainly not as often as HEAP_ALLOCED.
/* LOOKS_LIKE_GHC_INFO: heuristic test for an info-table pointer, used while
 * the GC walks pending arguments on the stack (see commentary above). */
516 #ifdef TEXT_BEFORE_HEAP /* needed for mingw DietHEP */
517 # define LOOKS_LIKE_GHC_INFO(info) IS_CODE_PTR(info)
/* Otherwise: an info pointer is whatever is neither heap-allocated nor a
 * static closure.  NOTE(review): the #else / #endif around this pair appear
 * to be missing from this extraction. */
519 # define LOOKS_LIKE_GHC_INFO(info) (!HEAP_ALLOCED(info) \
520 && !LOOKS_LIKE_STATIC_CLOSURE(info))
524 /* -----------------------------------------------------------------------------
525 Macros for calculating how big a closure will be (used during allocation)
526 -------------------------------------------------------------------------- */
528 /* ToDo: replace unsigned int by nat. The only fly in the ointment is that
529 * nat comes from Rts.h which many folk dont include. Sigh!
531 static __inline__ StgOffset AP_sizeW ( unsigned int n_args )
532 { return sizeofW(StgAP_UPD) + n_args; }
534 static __inline__ StgOffset PAP_sizeW ( unsigned int n_args )
535 { return sizeofW(StgPAP) + n_args; }
537 static __inline__ StgOffset CONSTR_sizeW( unsigned int p, unsigned int np )
538 { return sizeofW(StgHeader) + p + np; }
540 static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void )
541 { return sizeofW(StgHeader) + MIN_UPD_SIZE; }
543 static __inline__ StgOffset BLACKHOLE_sizeW ( void )
544 { return sizeofW(StgHeader) + MIN_UPD_SIZE; }
546 /* --------------------------------------------------------------------------
548 * ------------------------------------------------------------------------*/
550 static __inline__ StgOffset sizeW_fromITBL( const StgInfoTable* itbl )
551 { return sizeofW(StgClosure)
552 + sizeofW(StgPtr) * itbl->layout.payload.ptrs
553 + sizeofW(StgWord) * itbl->layout.payload.nptrs; }
555 static __inline__ StgOffset pap_sizeW( StgPAP* x )
556 { return PAP_sizeW(x->n_args); }
558 static __inline__ StgOffset arr_words_sizeW( StgArrWords* x )
559 { return sizeofW(StgArrWords) + x->words; }
561 static __inline__ StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
562 { return sizeofW(StgMutArrPtrs) + x->ptrs; }
564 static __inline__ StgWord tso_sizeW ( StgTSO *tso )
565 { return TSO_STRUCT_SIZEW + tso->stack_size; }