"evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
evac(user, (StgClosure **)(void *)&task->suspended_tso);
}
-
-#if defined(THREADED_RTS)
- markSparkQueue (evac, user, cap);
-#endif
}
#if !defined(THREADED_RTS)
#endif
}
+// Sparks are not roots for GC, so we don't mark them in
+// markSomeCapabilities(). Instead, we traverse the spark queues
+// after GC and throw away any that are unreachable.
+//
+// Called once per GC, after liveness is known.  Compiles to a no-op
+// in the non-threaded RTS, which has no spark pools.
+void
+updateCapabilitiesPostGC (void)
+{
+#if defined(THREADED_RTS)
+    nat i;
+    // Prune each capability's spark pool: updateSparkQueue() keeps
+    // only sparks whose closures survived GC and are still worth
+    // evaluating.
+    for (i = 0; i < n_capabilities; i++) {
+        updateSparkQueue (&capabilities[i]);
+    }
+#endif // THREADED_RTS
+}
+
+// This function is used by the compacting GC to thread all the
+// pointers from spark queues.
+//
+// "evac" is applied to every spark pointer in every capability's
+// spark pool; "user" is passed through to it unchanged.  In the
+// non-threaded RTS this is a no-op and both parameters are unused
+// (hence USED_IF_THREADS).
+void
+traverseSparkQueues (evac_fn evac USED_IF_THREADS, void *user USED_IF_THREADS)
+{
+#if defined(THREADED_RTS)
+    nat i;
+    for (i = 0; i < n_capabilities; i++) {
+        traverseSparkQueue (evac, user, &capabilities[i]);
+    }
+#endif // THREADED_RTS
+
+}
+
void
markCapabilities (evac_fn evac, void *user)
{
// For the GC:
void markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta);
void markCapabilities (evac_fn evac, void *user);
+void updateCapabilitiesPostGC (void);
+void traverseSparkQueues (evac_fn evac, void *user);
/* -----------------------------------------------------------------------------
* INLINE functions... private below here
* -------------------------------------------------------------------------- */
+// Post-GC pass over a Capability's spark pool (renamed from
+// markSparkQueue): instead of evacuating sparks as GC roots, keep
+// only sparks whose closures survived GC and are still worth
+// evaluating; discard the rest by compacting the circular queue
+// in place via to_sparkp.
+//
+// NOTE(review): this hunk appears to elide some context lines --
+// e.g. "pool" is used below without any visible initialization, and
+// the loop body jumps straight to the trailing statistics code.
+// Confirm against the full Sparks.c before relying on this view.
void
-markSparkQueue (evac_fn evac, void *user, Capability *cap)
+updateSparkQueue (Capability *cap)
{
-    StgClosure **sparkp, **to_sparkp;
+    StgClosure *spark, **sparkp, **to_sparkp;
    nat n, pruned_sparks; // stats only
    StgSparkPool *pool;
    ASSERT_SPARK_POOL_INVARIANTS(pool);
-#if defined(PARALLEL_HASKELL)
-    // stats only
-    n = 0;
-    pruned_sparks = 0;
-#endif
-
    sparkp = pool->hd;
    to_sparkp = pool->hd;
    while (sparkp != pool->tl) {
        ASSERT(*sparkp!=NULL);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgClosure *)*sparkp)));
        // ToDo?: statistics gathering here (also for GUM!)
-       if (closure_SHOULD_SPARK(*sparkp)) {
-           evac(user, sparkp);
-           *to_sparkp++ = *sparkp;
+       // isAlive() yields NULL if the spark's closure did not
+       // survive GC (checked immediately below).
+       spark = isAlive(*sparkp);
+       if (spark != NULL && closure_SHOULD_SPARK(spark)) {
+           // Retain the surviving spark; wrap at the end of the
+           // circular buffer.
+           *to_sparkp++ = spark;
            if (to_sparkp == pool->lim) {
                to_sparkp = pool->base;
            }
    PAR_TICKY_MARK_SPARK_QUEUE_END(n);
-#if defined(PARALLEL_HASKELL)
    debugTrace(DEBUG_sched,
-               "marked %d sparks and pruned %d sparks on [%x]",
-               n, pruned_sparks, mytid);
-#else
-    debugTrace(DEBUG_sched,
-              "marked %d sparks and pruned %d sparks",
+              "updated %d sparks and pruned %d sparks",
               n, pruned_sparks);
-#endif
    debugTrace(DEBUG_sched,
               "new spark queue len=%d; (hd=%p; tl=%p)\n",
               sparkPoolSize(pool), pool->hd, pool->tl);
}
+// Apply "evac" to every spark pointer in this Capability's spark
+// pool, walking the circular buffer from hd to tl.  Used by the
+// compacting GC (via traverseSparkQueues) to thread spark pointers;
+// the queue structure itself is not modified.
+void
+traverseSparkQueue (evac_fn evac, void *user, Capability *cap)
+{
+    StgClosure **sparkp;
+    StgSparkPool *pool;
+
+    pool = &(cap->r.rSparks);
+    sparkp = pool->hd;
+    while (sparkp != pool->tl) {
+        // BUG FIX: evac_fn takes the user data first and the root
+        // pointer second -- cf. evac(user, &task->suspended_tso)
+        // elsewhere in this patch.  The original call had the
+        // arguments swapped: evac(sparkp, user).
+        evac(user, sparkp);
+        sparkp++;
+        // Wrap around at the end of the circular buffer.
+        if (sparkp == pool->lim) {
+            sparkp = pool->base;
+        }
+    }
+}
+
#else
StgInt
#ifndef SPARKS_H
#define SPARKS_H
-#if defined(PARALLEL_HASKELL) || defined(THREADED_RTS)
+#if defined(THREADED_RTS)
StgClosure * findSpark (Capability *cap);
void initSparkPools (void);
void freeSparkPool (StgSparkPool *pool);
void createSparkThread (Capability *cap, StgClosure *p);
-void markSparkQueue (evac_fn evac, void *user, Capability *cap);
+void updateSparkQueue (Capability *cap);
+void traverseSparkQueue(evac_fn evac, void *user, Capability *cap);
INLINE_HEADER void discardSparks (StgSparkPool *pool);
INLINE_HEADER nat sparkPoolSize (StgSparkPool *pool);
INLINE_HEADER rtsBool emptySparkPoolCap (Capability *cap);
#endif
-#if defined(PARALLEL_HASKELL)
-StgTSO *activateSpark (rtsSpark spark) ;
-rtsBool add_to_spark_queue( StgClosure *closure, StgSparkPool *pool );
-void markSparkQueue( void );
-nat spark_queue_len( StgSparkPool *pool );
-void disposeSpark( StgClosure *spark );
-#endif
-
-#if defined(GRAN)
-void findLocalSpark (rtsEvent *event, rtsBool *found_res, rtsSparkQ *spark_res);
-rtsBool activateSpark (rtsEvent *event, rtsSparkQ spark);
-rtsSpark *newSpark(StgClosure *node, nat name, nat gran_info,
- nat size_info, nat par_info, nat local);
-void add_to_spark_queue(rtsSpark *spark);
-rtsSpark *delete_from_sparkq (rtsSpark *spark, PEs p, rtsBool dispose_too);
-void disposeSpark(rtsSpark *spark);
-void disposeSparkQ(rtsSparkQ spark);
-void print_spark(rtsSpark *spark);
-void print_sparkq(PEs proc);
-void print_sparkq_stats(void);
-nat spark_queue_len(PEs proc);
-void markSparkQueue(void);
-#endif
-
/* -----------------------------------------------------------------------------
* PRIVATE below here
* -------------------------------------------------------------------------- */
// 1. thread the roots
markCapabilities((evac_fn)thread_root, NULL);
+ // spark queues
+ traverseSparkQueues((evac_fn)thread_root, NULL);
+
// the weak pointer lists...
if (weak_ptr_list != NULL) {
thread((void *)&weak_ptr_list);
#include "Trace.h"
#include "RetainerProfile.h"
#include "RaiseAsync.h"
-#include "Sparks.h"
#include "Papi.h"
#include "GC.h"
// Update pointers from the Task list
update_task_list();
+ // Update pointers from capabilities (probably just the spark queues)
+ updateCapabilitiesPostGC();
+
// Now see which stable names are still alive.
gcStablePtrTable();