[project @ 2001-12-12 14:31:42 by simonmar]
[ghc-hetmet.git] / ghc / rts / Storage.h
index 7d9815a..f746d1f 100644
@@ -1,5 +1,5 @@
 /* -----------------------------------------------------------------------------
- * $Id: Storage.h,v 1.28 2001/02/09 13:09:16 simonmar Exp $
+ * $Id: Storage.h,v 1.37 2001/11/22 14:25:12 simonmar Exp $
  *
  * (c) The GHC Team, 1998-1999
  *
@@ -13,6 +13,9 @@
 #include "Block.h"
 #include "BlockAlloc.h"
 #include "StoragePriv.h"
+#ifdef PROFILING
+#include "LdvProfile.h"
+#endif
 
 /* -----------------------------------------------------------------------------
    Initialisation / De-initialisation
@@ -24,12 +27,24 @@ extern void exitStorage(void);
 /* -----------------------------------------------------------------------------
    Generic allocation
 
-   StgPtr allocate(int n)       Allocates a chunk of contiguous store
+   StgPtr allocate(nat n)       Allocates a chunk of contiguous store
                                n words long, returning a pointer to
                                the first word.  Always succeeds.
                                
+   StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
+                               n words long, which is at a fixed
+                               address (won't be moved by GC).  
+                               Returns a pointer to the first word.
+                               Always succeeds.
+                               
+                               NOTE: the GC can't in general handle
+                               pinned objects, so allocatePinned()
+                               can only be used for ByteArrays at the
+                               moment.
+
                                Don't forget to TICK_ALLOC_XXX(...)
-                               after calling allocate, for the
+                               after calling allocate or
+                               allocatePinned, for the
                                benefit of the ticky-ticky profiler.
 
    rtsBool doYouWantToGC(void)  Returns True if the storage manager is
@@ -43,12 +58,15 @@ extern void exitStorage(void);
    surrounded by a mutex.
    -------------------------------------------------------------------------- */
 
-extern StgPtr  allocate(nat n);
-static inline rtsBool doYouWantToGC(void)
+extern StgPtr  allocate        ( nat n );
+extern StgPtr  allocatePinned  ( nat n );
+extern lnat    allocated_bytes ( void );
+
+static inline rtsBool
+doYouWantToGC( void )
 {
   return (alloc_blocks >= alloc_blocks_lim);
 }
-extern lnat allocated_bytes(void);
 
 /* -----------------------------------------------------------------------------
    ExtendNursery(hp,hplim)      When hplim is reached, try to grab
@@ -78,13 +96,7 @@ extern void PleaseStopAllocating(void);
    MarkRoot(StgClosure *p)     Returns the new location of the root.
    -------------------------------------------------------------------------- */
 
-extern void   GarbageCollect(void (*get_roots)(void),rtsBool force_major_gc);
-extern StgClosure *MarkRoot(StgClosure *p);
-
-/* Temporary measure to ensure we retain all the dynamically-loaded CAFs */
-#ifdef GHCI
-extern void markCafs( void );
-#endif
+extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
 
 /* -----------------------------------------------------------------------------
    Generational garbage collection support
@@ -125,9 +137,9 @@ recordMutable(StgMutClosure *p)
 #endif
 
   bd = Bdescr((P_)p);
-  if (bd->gen->no > 0) {
-    p->mut_link = bd->gen->mut_list;
-    bd->gen->mut_list = p;
+  if (bd->gen_no > 0) {
+    p->mut_link = generations[bd->gen_no].mut_list;
+    generations[bd->gen_no].mut_list = p;
   }
 }
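
To make the write-barrier contract concrete, here is a minimal editorial sketch (not part of this commit; the helper name is hypothetical) of how C code that has just mutated a possibly old-generation object would use recordMutable():

static void
note_mutation( StgMutClosure *p )
{
    /* The destructive write to p has already happened elsewhere.
     * Link p onto its generation's mut_list so that the next minor GC
     * scans it for pointers into younger generations; for objects
     * still in generation 0 this is a no-op. */
    recordMutable(p);
}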
 
@@ -137,19 +149,22 @@ recordOldToNewPtrs(StgMutClosure *p)
   bdescr *bd;
   
   bd = Bdescr((P_)p);
-  if (bd->gen->no > 0) {
-    p->mut_link = bd->gen->mut_once_list;
-    bd->gen->mut_once_list = p;
+  if (bd->gen_no > 0) {
+    p->mut_link = generations[bd->gen_no].mut_once_list;
+    generations[bd->gen_no].mut_once_list = p;
   }
 }
 
-#ifndef DEBUG
+// @LDV profiling
+// We zero out the slop when PROFILING is on, so this case (formerly just
+// "#ifndef DEBUG") now excludes PROFILING as well as DEBUG.
+#if !defined(DEBUG) && !defined(PROFILING)
 #define updateWithIndirection(info, p1, p2)                            \
   {                                                                    \
     bdescr *bd;                                                                \
                                                                        \
     bd = Bdescr((P_)p1);                                               \
-    if (bd->gen->no == 0) {                                            \
+    if (bd->gen_no == 0) {                                             \
       ((StgInd *)p1)->indirectee = p2;                                 \
       SET_INFO(p1,&stg_IND_info);                                      \
       TICK_UPD_NEW_IND();                                              \
@@ -157,43 +172,84 @@ recordOldToNewPtrs(StgMutClosure *p)
       ((StgIndOldGen *)p1)->indirectee = p2;                           \
       if (info != &stg_BLACKHOLE_BQ_info) {                            \
         ACQUIRE_LOCK(&sm_mutex);                                       \
-        ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list;       \
-        bd->gen->mut_once_list = (StgMutClosure *)p1;                  \
+        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;        \
+        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;                   \
         RELEASE_LOCK(&sm_mutex);                                       \
       }                                                                        \
       SET_INFO(p1,&stg_IND_OLDGEN_info);                               \
       TICK_UPD_OLD_IND();                                              \
     }                                                                  \
   }
+#elif defined(PROFILING)
+// @LDV profiling
+// We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in 
+// which p1 resides.
+//
+// Note:
+//   Strictly speaking, we do *NOT* need to call LDV_recordCreate() for either
+//   IND or IND_OLDGEN closures, because they are inherently "used".  Omitting
+//   the call, however, would break the invariant that every closure keeps its
+//   creation time in its profiling field, so we call LDV_recordCreate() anyway.
+
+#define updateWithIndirection(info, p1, p2)                            \
+  {                                                                    \
+    bdescr *bd;                                                                \
+                                                                       \
+    LDV_recordDead_FILL_SLOP_DYNAMIC((p1));                             \
+    bd = Bdescr((P_)p1);                                               \
+    if (bd->gen_no == 0) {                                             \
+      ((StgInd *)p1)->indirectee = p2;                                 \
+      SET_INFO(p1,&stg_IND_info);                                      \
+      LDV_recordCreate((p1));                                           \
+      TICK_UPD_NEW_IND();                                              \
+    } else {                                                           \
+      ((StgIndOldGen *)p1)->indirectee = p2;                           \
+      if (info != &stg_BLACKHOLE_BQ_info) {                            \
+        ACQUIRE_LOCK(&sm_mutex);                                       \
+        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;        \
+        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;    \
+        RELEASE_LOCK(&sm_mutex);                                       \
+      }                                                                        \
+      SET_INFO(p1,&stg_IND_OLDGEN_info);                               \
+      LDV_recordCreate((p1));                                           \
+    }                                                                  \
+  }
+
 #else
 
 /* In the DEBUG case, we also zero out the slop of the old closure,
  * so that the sanity checker can tell where the next closure is.
+ *
+ * Two important invariants: we should never try to update a closure
+ * to point to itself, and the closure being updated should not
+ * already have been updated (the mutable list will get messed up
+ * otherwise).
  */
 #define updateWithIndirection(info, p1, p2)                            \
   {                                                                    \
     bdescr *bd;                                                                \
                                                                        \
+    ASSERT( p1 != p2 && !closure_IND(p1) );                            \
     bd = Bdescr((P_)p1);                                               \
-    if (bd->gen->no == 0) {                                            \
+    if (bd->gen_no == 0) {                                             \
       ((StgInd *)p1)->indirectee = p2;                                 \
       SET_INFO(p1,&stg_IND_info);                                      \
       TICK_UPD_NEW_IND();                                              \
     } else {                                                           \
       if (info != &stg_BLACKHOLE_BQ_info) {                            \
-       {                                                               \
+       {                                                               \
           StgInfoTable *inf = get_itbl(p1);                            \
          nat np = inf->layout.payload.ptrs,                            \
              nw = inf->layout.payload.nptrs, i;                        \
-          if (inf->type != THUNK_SELECTOR) {                            \
+          if (inf->type != THUNK_SELECTOR) {                           \
              for (i = np; i < np + nw; i++) {                          \
                ((StgClosure *)p1)->payload[i] = 0;                     \
-             }                                                          \
+             }                                                         \
           }                                                            \
         }                                                              \
         ACQUIRE_LOCK(&sm_mutex);                                       \
-        ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list;       \
-        bd->gen->mut_once_list = (StgMutClosure *)p1;                  \
+        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;        \
+        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;                   \
         RELEASE_LOCK(&sm_mutex);                                       \
       }                                                                        \
       ((StgIndOldGen *)p1)->indirectee = p2;                           \
@@ -207,6 +263,7 @@ recordOldToNewPtrs(StgMutClosure *p)
  */
 #define updateWithStaticIndirection(info, p1, p2)                      \
   {                                                                    \
+    ASSERT( p1 != p2 && !closure_IND(p1) );                            \
     ASSERT( ((StgMutClosure*)p1)->mut_link == NULL );                  \
                                                                        \
     ACQUIRE_LOCK(&sm_mutex);                                           \
@@ -225,20 +282,31 @@ updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *
 {
   bdescr *bd;
 
+  ASSERT( p1 != p2 && !closure_IND(p1) );
+
+  // @LDV profiling
+  // Destroy the old closure.
+  LDV_recordDead_FILL_SLOP_DYNAMIC(p1);
   bd = Bdescr((P_)p1);
-  if (bd->gen->no == 0) {
+  if (bd->gen_no == 0) {
     ((StgInd *)p1)->indirectee = p2;
     SET_INFO(p1,&stg_IND_PERM_info);
+    // @LDV profiling
+    // We have just created a new closure.
+    LDV_recordCreate(p1);
     TICK_UPD_NEW_PERM_IND(p1);
   } else {
     ((StgIndOldGen *)p1)->indirectee = p2;
     if (info != &stg_BLACKHOLE_BQ_info) {
       ACQUIRE_LOCK(&sm_mutex);
-      ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list;
-      bd->gen->mut_once_list = (StgMutClosure *)p1;
+      ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
+      generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
       RELEASE_LOCK(&sm_mutex);
     }
     SET_INFO(p1,&stg_IND_OLDGEN_PERM_info);
+    // @LDV profiling
+    // We have just created a new closure.
+    LDV_recordCreate(p1);
     TICK_UPD_OLD_PERM_IND();
   }
 }
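
For orientation, a minimal editorial sketch (hypothetical helper, not from this commit) of how a thunk update reaches these macros. Real callers go through the update macros in Updates.h, and obtaining the info argument with get_itbl() here is a simplification; the macros only use it to recognise BLACKHOLE_BQ closures:

static void
update_thunk( StgClosure *thunk, StgClosure *value )
{
    /* Invariants (asserted in DEBUG builds above): thunk != value, and
     * thunk must not already have been updated with an indirection. */
    updateWithIndirection(get_itbl(thunk), thunk, value);
}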
@@ -248,10 +316,12 @@ updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *
    The CAF table - used to let us revert CAFs
    -------------------------------------------------------------------------- */
 
+void revertCAFs( void );
+
 #if defined(DEBUG)
 void printMutOnceList(generation *gen);
 void printMutableList(generation *gen);
-#endif DEBUG
+#endif /* DEBUG */
 
 /* --------------------------------------------------------------------------
                       Address space layout macros
@@ -336,7 +406,6 @@ void printMutableList(generation *gen);
 extern void* TEXT_SECTION_END_MARKER_DECL;
 extern void* DATA_SECTION_END_MARKER_DECL;
 
-#if defined(GHCI)
 /* Take into account code sections in dynamically loaded object files. */
 #define IS_CODE_PTR(p) (  ((P_)(p) < (P_)&TEXT_SECTION_END_MARKER) \
                        || is_dynamically_loaded_code_or_rodata_ptr((char *)p) )
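
With the GHCI-only conditional gone, the dynamic-loading-aware predicates are used unconditionally. A small editorial sketch (hypothetical debugging helper, not part of this commit) of how IS_CODE_PTR, IS_DATA_PTR and IS_USER_PTR partition an address:

static const char *
describe_address( void *p )
{
    if (IS_CODE_PTR(p))      return "code";
    if (IS_DATA_PTR(p))      return "static data";
    if (IS_USER_PTR(p))      return "dynamic data (e.g. the heap)";
    return "unclassified";
}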
@@ -345,11 +414,6 @@ extern void* DATA_SECTION_END_MARKER_DECL;
                        || is_dynamically_loaded_rwdata_ptr((char *)p) )
 #define IS_USER_PTR(p) ( ((P_)(p) >= (P_)&DATA_SECTION_END_MARKER) \
                        && is_not_dynamically_loaded_ptr((char *)p) )
-#else
-#define IS_CODE_PTR(p) ((P_)(p) < (P_)&TEXT_SECTION_END_MARKER)
-#define IS_DATA_PTR(p) ((P_)(p) >= (P_)&TEXT_SECTION_END_MARKER && (P_)(p) < (P_)&DATA_SECTION_END_MARKER)
-#define IS_USER_PTR(p) ((P_)(p) >= (P_)&DATA_SECTION_END_MARKER)
-#endif
 
 /* The HEAP_ALLOCED test below is called FOR EVERY SINGLE CLOSURE
  * during GC.  It needs to be FAST.
@@ -480,12 +544,9 @@ extern int is_heap_alloced(const void* x);
    infotables for constructors on the (writable) C heap.
    -------------------------------------------------------------------------- */
 
-#ifdef GHCI
-   /* not accurate by any means, but stops the assertions failing... */
-#  define IS_HUGS_CONSTR_INFO(info)  IS_USER_PTR(info)
-#else
-#  define IS_HUGS_CONSTR_INFO(info) 0 /* ToDo: more than mildly bogus */
-#endif
+/* not accurate by any means, but stops the assertions failing... */
+/* ToDo: replace this with a proper test */
+#define IS_HUGS_CONSTR_INFO(info)  IS_USER_PTR(info)
 
 /* LOOKS_LIKE_GHC_INFO is called moderately often during GC, but
 * certainly not as often as HEAP_ALLOCED.
@@ -502,16 +563,13 @@ extern int is_heap_alloced(const void* x);
    Macros for calculating how big a closure will be (used during allocation)
    -------------------------------------------------------------------------- */
 
-/* ToDo: replace unsigned int by nat.  The only fly in the ointment is that
- * nat comes from Rts.h which many folk dont include.  Sigh!
- */
-static __inline__ StgOffset AP_sizeW    ( unsigned int n_args )              
+static __inline__ StgOffset AP_sizeW    ( nat n_args )              
 { return sizeofW(StgAP_UPD) + n_args; }
 
-static __inline__ StgOffset PAP_sizeW   ( unsigned int n_args )              
+static __inline__ StgOffset PAP_sizeW   ( nat n_args )              
 { return sizeofW(StgPAP)    + n_args; }
 
-static __inline__ StgOffset CONSTR_sizeW( unsigned int p, unsigned int np )  
+static __inline__ StgOffset CONSTR_sizeW( nat p, nat np )  
 { return sizeofW(StgHeader) + p + np; }
 
 static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void )                    
@@ -541,5 +599,5 @@ static __inline__ StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
 static __inline__ StgWord tso_sizeW ( StgTSO *tso )
 { return TSO_STRUCT_SIZEW + tso->stack_size; }
 
-#endif STORAGE_H
+#endif /* STORAGE_H */
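
Finally, an editorial sketch (hypothetical helper, not part of this commit) tying the sizing macros to allocate(), in the style of the rts_mk* constructors in RtsAPI.c. CCS_SYSTEM only matters in profiled builds, and the appropriate TICK_ALLOC_XXX macro depends on the closure being built:

static StgClosure *
alloc_constr( const StgInfoTable *con_info, nat ptrs, nat nptrs )
{
    StgClosure *c;

    c = (StgClosure *)allocate( CONSTR_sizeW(ptrs, nptrs) );
    /* a TICK_ALLOC_XXX(...) call belongs here, for ticky-ticky */
    SET_HDR(c, con_info, CCS_SYSTEM);
    return c;   /* the caller fills in c->payload[0 .. ptrs+nptrs-1] */
}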