/* ---------------------------------------------------------------------------
- * $Id: Sparks.c,v 1.2 2000/03/31 03:09:36 hwloidl Exp $
+ * $Id: Sparks.c,v 1.5 2002/01/24 02:06:49 sof Exp $
*
* (c) The GHC Team, 2000
*
//* GUM code::
//* GranSim code::
//@end menu
+//*/
//@node Includes, GUM code, Spark Management Routines, Spark Management Routines
//@subsection Includes
+#include "PosixSource.h"
#include "Rts.h"
#include "Schedule.h"
#include "SchedAPI.h"
#include "Storage.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
+#include "ParTicky.h"
# if defined(PAR)
# include "ParallelRts.h"
+# include "GranSimRts.h" // for GR_...
# elif defined(GRAN)
# include "GranSimRts.h"
# endif
#include "Sparks.h"
-#if defined(SMP) || defined(PAR)
+#if /*defined(SMP) ||*/ defined(PAR)
//@node GUM code, GranSim code, Includes, Spark Management Routines
//@subsection GUM code
static void slide_spark_pool( StgSparkPool *pool );
-void
+rtsBool
initSparkPools( void )
{
Capability *cap;
pool->hd = pool->base;
pool->tl = pool->base;
}
+ return rtsTrue; /* Qapla' */
}
+/*
+ We traverse the spark pool until we find the 2nd usable (i.e. non-NF)
+ spark. Rationale: we don't want to give away the only work a PE has.
+ ToDo: introduce low- and high-water-marks for load balancing.
+*/
StgClosure *
-findSpark( void )
+findSpark( rtsBool for_export )
{
Capability *cap;
StgSparkPool *pool;
- StgClosure *spark;
+ StgClosure *spark, *first=NULL;
+ rtsBool isIdlePE = EMPTY_RUN_QUEUE();
#ifdef SMP
/* walk over the capabilities, allocating a spark pool for each one */
pool = &(cap->rSparks);
while (pool->hd < pool->tl) {
spark = *pool->hd++;
- if (closure_SHOULD_SPARK(spark))
- return spark;
+ if (closure_SHOULD_SPARK(spark)) {
+ if (for_export && isIdlePE) {
+ if (first==NULL) {
+ first = spark; // keep the first usable spark if PE is idle
+ } else {
+ pool->hd--; // found a second spark; keep it in the pool
+ ASSERT(*pool->hd==spark);
+ if (RtsFlags.ParFlags.ParStats.Sparks)
+ DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
+ GR_STEALING, ((StgTSO *)NULL), first,
+ 0, 0 /* spark_queue_len(ADVISORY_POOL) */);
+ return first; // and return the *first* spark found
+ }
+ } else {
+ if (RtsFlags.ParFlags.ParStats.Sparks && for_export)
+ DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
+ GR_STEALING, ((StgTSO *)NULL), spark,
+ 0, 0 /* spark_queue_len(ADVISORY_POOL) */);
+ return spark; // return first spark found
+ }
+ }
}
slide_spark_pool(pool);
}
return NULL;
}
+/*
+ activateSpark is defined in Schedule.c
+*/
rtsBool
add_to_spark_queue( StgClosure *closure, StgSparkPool *pool )
{
if (closure_SHOULD_SPARK(closure) &&
pool->tl < pool->lim) {
*(pool->tl++) = closure;
+
+#if defined(PAR)
+ // collect parallel global statistics (currently done together with GC stats)
+ if (RtsFlags.ParFlags.ParStats.Global &&
+ RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
+ // fprintf(stderr, "Creating spark for %x @ %11.2f\n", closure, usertime());
+ globalParStats.tot_sparks_created++;
+ }
+#endif
return rtsTrue;
} else {
+#if defined(PAR)
+ // collect parallel global statistics (currently done together with GC stats)
+ if (RtsFlags.ParFlags.ParStats.Global &&
+ RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
+ //fprintf(stderr, "Ignoring spark for %x @ %11.2f\n", closure, usertime());
+ globalParStats.tot_sparks_ignored++;
+ }
+#endif
return rtsFalse;
}
}
markSparkQueue( void )
{
StgClosure **sparkp, **to_sparkp;
-#ifdef DEBUG
- nat n, pruned_sparks;
-#endif
+ nat n, pruned_sparks; // stats only
StgSparkPool *pool;
Capability *cap;
+ PAR_TICKY_MARK_SPARK_QUEUE_START();
+
#ifdef SMP
/* walk over the capabilities, allocating a spark pool for each one */
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
{
#endif
pool = &(cap->rSparks);
-
-#ifdef DEBUG
+
+#if defined(PAR)
+ // stats only
n = 0;
pruned_sparks = 0;
#endif
if (closure_SHOULD_SPARK(*sparkp)) {
*to_sparkp = MarkRoot(*sparkp);
to_sparkp++;
-#ifdef DEBUG
+#ifdef PAR
n++;
#endif
} else {
-#ifdef DEBUG
+#ifdef PAR
pruned_sparks++;
#endif
}
pool->hd = pool->base;
pool->tl = to_sparkp;
+ PAR_TICKY_MARK_SPARK_QUEUE_END(n);
+
#if defined(SMP)
IF_DEBUG(scheduler,
belch("markSparkQueue: marked %d sparks and pruned %d sparks on [%x]",
IF_GRAN_DEBUG(pri,
belch("++ No high priority spark available; low priority (%u) spark chosen: node=%p; name=%u\n",
spark->gran_info,
- spark->node, spark->name);)
+ spark->node, spark->name));
}
CurrentTime[proc] += RtsFlags.GranFlags.Costs.threadcreatetime;
FindWork,
(StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
barf("//// activateSpark: out of heap ; ToDo: call GarbageCollect()");
- GarbageCollect(GetRoots);
+ GarbageCollect(GetRoots, rtsFalse);
// HWL old: ReallyPerformThreadGC(TSO_HS+TSO_CTS_SIZE,rtsFalse);
// HWL old: SAVE_Hp -= TSO_HS+TSO_CTS_SIZE;
spark = NULL;