Allow "INLINEABLE" as a synonym
[ghc-hetmet.git] rts/sm/Sanity.c
index 442fee1..dfa9865 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -26,6 +26,7 @@
 #include "Apply.h"
 #include "Printer.h"
 #include "Arena.h"
+#include "RetainerProfile.h"
 
 /* -----------------------------------------------------------------------------
    Forward decls.
@@ -303,11 +304,9 @@ checkClosure( StgClosure* p )
     case CONSTR_0_2:
     case CONSTR_2_0:
     case IND_PERM:
-    case IND_OLDGEN:
-    case IND_OLDGEN_PERM:
     case BLACKHOLE:
-    case CAF_BLACKHOLE:
-    case STABLE_NAME:
+    case PRIM:
+    case MUT_PRIM:
     case MUT_VAR_CLEAN:
     case MUT_VAR_DIRTY:
     case CONSTR_STATIC:
@@ -322,6 +321,24 @@ checkClosure( StgClosure* p )
            return sizeW_fromITBL(info);
        }
 
+    case BLOCKING_QUEUE:
+    {
+        StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+        // NO: the BH might have been updated now
+        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
+        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));
+
+        ASSERT(get_itbl(bq->owner)->type == TSO);
+        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE 
+               || get_itbl(bq->queue)->type == TSO);
+        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE || 
+               get_itbl(bq->link)->type == IND ||
+               get_itbl(bq->link)->type == BLOCKING_QUEUE);
+
+        return sizeofW(StgBlockingQueue);
+    }
+
     case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
@@ -416,39 +433,6 @@ checkClosure( StgClosure* p )
         checkTSO((StgTSO *)p);
         return tso_sizeW((StgTSO *)p);
 
-    case TVAR_WATCH_QUEUE:
-      {
-        StgTVarWatchQueue *wq = (StgTVarWatchQueue *)p;
-        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->next_queue_entry));
-        ASSERT(LOOKS_LIKE_CLOSURE_PTR(wq->prev_queue_entry));
-        return sizeofW(StgTVarWatchQueue);
-      }
-
-    case INVARIANT_CHECK_QUEUE:
-      {
-        StgInvariantCheckQueue *q = (StgInvariantCheckQueue *)p;
-        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->invariant));
-        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->my_execution));
-        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q->next_queue_entry));
-        return sizeofW(StgInvariantCheckQueue);
-      }
-
-    case ATOMIC_INVARIANT:
-      {
-        StgAtomicInvariant *invariant = (StgAtomicInvariant *)p;
-        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->code));
-        ASSERT(LOOKS_LIKE_CLOSURE_PTR(invariant->last_execution));
-        return sizeofW(StgAtomicInvariant);
-      }
-
-    case TVAR:
-      {
-        StgTVar *tv = (StgTVar *)p;
-        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->current_value));
-        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tv->first_watch_queue_entry));
-        return sizeofW(StgTVar);
-      }
-
     case TREC_CHUNK:
       {
         nat i;
@@ -461,14 +445,6 @@ checkClosure( StgClosure* p )
         }
         return sizeofW(StgTRecChunk);
       }
-
-    case TREC_HEADER:
-      {
-        StgTRecHeader *trec = (StgTRecHeader *)p;
-        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> enclosing_trec));
-        ASSERT(LOOKS_LIKE_CLOSURE_PTR(trec -> current_chunk));
-        return sizeofW(StgTRecHeader);
-      }
       
     default:
            barf("checkClosure (closure type %d)", info->type);
@@ -497,16 +473,18 @@ checkHeap(bdescr *bd)
 #endif
 
     for (; bd != NULL; bd = bd->link) {
-       p = bd->start;
-       while (p < bd->free) {
-           nat size = checkClosure((StgClosure *)p);
-           /* This is the smallest size of closure that can live in the heap */
-           ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
-           p += size;
+        if(!(bd->flags & BF_SWEPT)) {
+            p = bd->start;
+            while (p < bd->free) {
+                nat size = checkClosure((StgClosure *)p);
+                /* This is the smallest size of closure that can live in the heap */
+                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
+                p += size;
            
-           /* skip over slop */
-           while (p < bd->free &&
-                  (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; } 
+                /* skip over slop */
+                while (p < bd->free &&
+                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
+            }
        }
     }
 }
@@ -556,6 +534,13 @@ checkTSO(StgTSO *tso)
       return;
     }
 
+    ASSERT(tso->_link == END_TSO_QUEUE || 
+           tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
+           tso->_link->header.info == &stg_TSO_info);
+    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
+    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
+    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
+
     ASSERT(stack <= sp && sp < stack_end);
 
     checkStackChunk(sp, stack_end);
@@ -579,9 +564,7 @@ checkGlobalTSOList (rtsBool checkTSOs)
           if (checkTSOs)
               checkTSO(tso);
 
-          while (tso->what_next == ThreadRelocated) {
-              tso = tso->_link;
-          }
+          tso = deRefTSO(tso);
 
           // If this TSO is dirty and in an old generation, it better
           // be on the mutable list.
@@ -766,6 +749,18 @@ findMemoryLeak (void)
   reportUnmarkedBlocks();
 }
 
+void
+checkRunQueue(Capability *cap)
+{
+    StgTSO *prev, *tso;
+    prev = END_TSO_QUEUE;
+    for (tso = cap->run_queue_hd; tso != END_TSO_QUEUE; 
+         prev = tso, tso = tso->_link) {
+        ASSERT(prev == END_TSO_QUEUE || prev->_link == tso);
+        ASSERT(tso->block_info.prev == prev);
+    }
+    ASSERT(cap->run_queue_tl == prev);
+}
 
 /* -----------------------------------------------------------------------------
    Memory leak detection