X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSparks.c;h=0ff4ee4cceaef9227b227e437775ee542d4351d0;hb=e4417dcd4679da9c6b18c02ff667199c572bed89;hp=615d832e33e8de18e45e095030da158dc419d843;hpb=0065d5ab628975892cea1ec7303f968c3338cbe1;p=ghc-hetmet.git

diff --git a/rts/Sparks.c b/rts/Sparks.c
index 615d832..0ff4ee4 100644
--- a/rts/Sparks.c
+++ b/rts/Sparks.c
@@ -8,9 +8,9 @@
 
 #include "PosixSource.h"
 #include "Rts.h"
+#include "Storage.h"
 #include "Schedule.h"
 #include "SchedAPI.h"
-#include "Storage.h"
 #include "RtsFlags.h"
 #include "RtsUtils.h"
 #include "ParTicky.h"
@@ -21,6 +21,7 @@
 # include "GranSimRts.h"
 # endif
 #include "Sparks.h"
+#include "Trace.h"
 
 #if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
 
@@ -62,6 +63,11 @@ initSparkPools( void )
 #endif
 }
 
+void
+freeSparkPool(StgSparkPool *pool) {
+    stgFree(pool->base);
+}
+
 /* -----------------------------------------------------------------------------
  * 
  * findSpark: find a spark on the current Capability that we can fork
@@ -128,13 +134,15 @@ markSparkQueue (evac_fn evac)
         sparkp = pool->hd;
         to_sparkp = pool->hd;
         while (sparkp != pool->tl) {
-            ASSERT(to_sparkp<=sparkp);
             ASSERT(*sparkp!=NULL);
             ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgClosure *)*sparkp)));
             // ToDo?: statistics gathering here (also for GUM!)
             if (closure_SHOULD_SPARK(*sparkp)) {
                 evac(sparkp);
                 *to_sparkp++ = *sparkp;
+                if (to_sparkp == pool->lim) {
+                    to_sparkp = pool->base;
+                }
                 n++;
             } else {
                 pruned_sparks++;
@@ -149,19 +157,18 @@ markSparkQueue (evac_fn evac)
         PAR_TICKY_MARK_SPARK_QUEUE_END(n);
 
 #if defined(PARALLEL_HASKELL)
-        IF_DEBUG(scheduler,
-                 debugBelch("markSparkQueue: marked %d sparks and pruned %d sparks on [%x]",
-                            n, pruned_sparks, mytid));
+        debugTrace(DEBUG_sched,
+                   "marked %d sparks and pruned %d sparks on [%x]",
+                   n, pruned_sparks, mytid);
 #else
-        IF_DEBUG(scheduler,
-                 debugBelch("markSparkQueue: marked %d sparks and pruned %d sparks\n",
-                            n, pruned_sparks));
+        debugTrace(DEBUG_sched,
+                   "marked %d sparks and pruned %d sparks",
+                   n, pruned_sparks);
 #endif
 
-        IF_DEBUG(scheduler,
-                 debugBelch("markSparkQueue: new spark queue len=%d; (hd=%p; tl=%p)\n",
-                            sparkPoolSize(pool), pool->hd, pool->tl));
-
+        debugTrace(DEBUG_sched,
+                   "new spark queue len=%d; (hd=%p; tl=%p)\n",
+                   sparkPoolSize(pool), pool->hd, pool->tl);
     }
 }
 
@@ -193,6 +200,12 @@ newSpark (StgRegTable *reg, StgClosure *p)
 {
     StgSparkPool *pool = &(reg->rSparks);
 
+    /* I am not sure whether this is the right thing to do.
+     * Maybe it is better to exploit the tag information
+     * instead of throwing it away?
+     */
+    p = UNTAG_CLOSURE(p);
+
     ASSERT_SPARK_POOL_INVARIANTS(pool);
 
     if (closure_SHOULD_SPARK(p)) {
@@ -825,8 +838,9 @@ markSparkQueue(void)
     // ToDo?: statistics gathering here (also for GUM!)
     sp->node = (StgClosure *)MarkRoot(sp->node);
   }
+
   IF_DEBUG(gc,
-           debugBelch("@@ markSparkQueue: spark statistics at start of GC:");
+           debugBelch("markSparkQueue: spark statistics at start of GC:");
            print_sparkq_stats());
 }
 
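
For illustration only, not part of the patch: a minimal, standalone sketch of the ring-buffer compaction that the markSparkQueue hunk above introduces. The spark pool is a circular buffer, so when the write cursor (to_sparkp) reaches pool->lim it has to wrap back to pool->base; dropping the old ASSERT(to_sparkp<=sparkp) is consistent with that, since after a wrap the write cursor can legitimately sit below the read cursor. The Pool struct, the keep() predicate and the int element type below are simplified stand-ins for StgSparkPool, closure_SHOULD_SPARK and StgClosure *, not the real RTS definitions.

/* sketch only -- simplified stand-ins for the RTS types and predicates */
#include <stdio.h>

#define POOL_SIZE 8

typedef struct {
    int  elems[POOL_SIZE];
    int *base, *lim;        /* start / one-past-end of the buffer */
    int *hd, *tl;           /* read (hd) and write (tl) cursors   */
} Pool;

/* stand-in for closure_SHOULD_SPARK: keep only even entries */
static int keep(int x) { return x % 2 == 0; }

/* Walk from hd to tl, copying the entries we keep back over the buffer.
 * Both cursors wrap at lim, just like sparkp/to_sparkp in the patch. */
static void compact(Pool *p)
{
    int *sparkp    = p->hd;
    int *to_sparkp = p->hd;
    while (sparkp != p->tl) {
        if (keep(*sparkp)) {
            *to_sparkp++ = *sparkp;
            if (to_sparkp == p->lim) to_sparkp = p->base;  /* the added wrap */
        }
        sparkp++;
        if (sparkp == p->lim) sparkp = p->base;
    }
    p->tl = to_sparkp;    /* hd stays put; tl now marks the compacted end */
}

int main(void)
{
    Pool p;
    p.base = p.elems;
    p.lim  = p.elems + POOL_SIZE;

    /* start near the end of the buffer so the compacting copy must wrap */
    p.hd = p.tl = p.elems + POOL_SIZE - 2;
    for (int v = 1; v <= 5; v++) {
        *p.tl++ = v;
        if (p.tl == p.lim) p.tl = p.base;
    }

    compact(&p);

    for (int *q = p.hd; q != p.tl; ) {
        printf("%d ", *q);
        if (++q == p.lim) q = p.base;
    }
    printf("\n");    /* prints: 2 4 */
    return 0;
}

Running the sketch prints "2 4": the odd entries are pruned and the survivors are compacted in queue order across the wrap point, which is the behaviour the patched loop relies on.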