+/* -------------------------------------------------------------------------
+ This is the main point where handling granularity information comes into
+ play.
+ ------------------------------------------------------------------------- */
+
+#define MAX_RAND_PRI 100
+
+/*
+ Granularity info transformers.
+ Applied to the GRAN_INFO field of a spark.
+*/
+/* Granularity-info transformers, selected via RtsFlags.GranFlags in
+   newSpark().  Note: a function definition must not be followed by a
+   semicolon at file scope (empty declaration, invalid in ISO C), so the
+   stray `;' after each body has been removed. */
+STATIC_INLINE nat ID(nat x) { return(x); }         /* identity: use gran_info as-is */
+STATIC_INLINE nat INV(nat x) { return(-x); }       /* inverse priority (unsigned negation wraps) */
+STATIC_INLINE nat IGNORE(nat x) { return (0); }    /* discard gran_info entirely */
+STATIC_INLINE nat RAND(nat x) { return ((random() % MAX_RAND_PRI) + 1); } /* random priority in [1,MAX_RAND_PRI] */
+
+/* NB: size_info and par_info are currently unused (what a shame!) -- HWL */
+/*
+  Allocate and initialise a new spark record for `node'.
+
+  The raw gran_info is mapped to a priority according to the selected
+  priority scheme (random / inverse / ignore / identity).  Returns NULL
+  if the resulting priority falls below the SparkPriority threshold
+  (a threshold of 0 disables the check); otherwise returns a freshly
+  stgMallocBytes'd spark, unlinked (prev/next NULL), owned by the caller.
+*/
+rtsSpark *
+newSpark(node,name,gran_info,size_info,par_info,local)
+StgClosure *node;
+nat name, gran_info, size_info, par_info, local;
+{
+  nat pri;
+  rtsSpark *newspark;
+
+  /* Pick the priority transformer; the flags are checked in fixed order,
+     so RandomPriorities wins over InversePriorities etc. */
+  pri = RtsFlags.GranFlags.RandomPriorities ? RAND(gran_info) :
+        RtsFlags.GranFlags.InversePriorities ? INV(gran_info) :
+        RtsFlags.GranFlags.IgnorePriorities ? IGNORE(gran_info) :
+        ID(gran_info);
+
+  /* Drop sparks below the priority threshold (0 means "keep everything"). */
+  if ( RtsFlags.GranFlags.SparkPriority!=0 &&
+       pri<RtsFlags.GranFlags.SparkPriority ) {
+    /* %p for node: it is a pointer; the old %#x conversion was undefined
+       behaviour and truncated the address on 64-bit platforms. */
+    IF_GRAN_DEBUG(pri,
+		  debugBelch(",, NewSpark: Ignoring spark of priority %u (SparkPriority=%u); node=%p; name=%u\n",
+			  pri, RtsFlags.GranFlags.SparkPriority, node, name));
+    return ((rtsSpark*)NULL);
+  }
+
+  newspark = (rtsSpark*) stgMallocBytes(sizeof(rtsSpark), "NewSpark");
+  newspark->prev = newspark->next = (rtsSpark*)NULL;
+  newspark->node = node;
+  /* name==1 is a sentinel meaning "inherit the current TSO's spark name". */
+  newspark->name = (name==1) ? CurrentTSO->gran.sparkname : name;
+  newspark->gran_info = pri;
+  newspark->global = !local; /* Check that with parAt, parAtAbs !!*/
+
+  /* Bookkeeping for global spark statistics, if enabled. */
+  if (RtsFlags.GranFlags.GranSimStats.Global) {
+    globalGranStats.tot_sparks_created++;
+    globalGranStats.sparks_created_on_PE[CurrentProc]++;
+  }
+
+  return(newspark);
+}
+
+/*
+  Release a single spark record previously obtained from newSpark().
+  The caller must not pass NULL (asserted in debug builds).
+*/
+void
+disposeSpark(spark)
+rtsSpark *spark;
+{
+  ASSERT(spark != NULL);
+
+  stgFree(spark);
+}
+
+/*
+  Release an entire spark queue, following the `next' links.
+
+  Implemented iteratively: the previous version recursed on spark->next,
+  so a very long queue could overflow the C stack.  NULL is accepted and
+  is a no-op.  (Frees now proceed head-to-tail instead of tail-to-head;
+  the order is not observable.)
+*/
+void
+disposeSparkQ(spark)
+rtsSparkQ spark;
+{
+  rtsSpark *next;
+
+  while (spark != NULL) {
+    next = spark->next;
+
+# ifdef GRAN_CHECK
+    if (SparksAvail < 0) {
+      /* Print the spark being freed, not the address of the local
+         parameter (`&spark'), which was meaningless for diagnostics. */
+      debugBelch("disposeSparkQ: SparksAvail<0 after disposing sparkq @ %p\n", spark);
+      print_spark(spark);
+    }
+# endif
+
+    stgFree(spark);
+    spark = next;
+  }
+}
+
+/*
+ With PrioritySparking add_to_spark_queue performs an insert sort to keep
+ the spark queue sorted. Otherwise the spark is just added to the end of
+ the queue.
+*/
+
+void
+add_to_spark_queue(spark)
+rtsSpark *spark;
+{
+ rtsSpark *prev = NULL, *next = NULL;
+ nat count = 0;
+ rtsBool found = rtsFalse;
+
+ if ( spark == (rtsSpark *)NULL ) {
+ return;
+ }
+
+ if (RtsFlags.GranFlags.DoPrioritySparking && (spark->gran_info != 0) ) {
+ /* Priority sparking is enabled i.e. spark queues must be sorted */
+
+ for (prev = NULL, next = pending_sparks_hd, count=0;
+ (next != NULL) &&
+ !(found = (spark->gran_info >= next->gran_info));
+ prev = next, next = next->next, count++)
+ {}
+
+ } else { /* 'utQo' */
+ /* Priority sparking is disabled */
+
+ found = rtsFalse; /* to add it at the end */
+
+ }
+
+ if (found) {
+ /* next points to the first spark with a gran_info smaller than that
+ of spark; therefore, add spark before next into the spark queue */
+ spark->next = next;
+ if ( next == NULL ) {
+ pending_sparks_tl = spark;
+ } else {
+ next->prev = spark;
+ }
+ spark->prev = prev;
+ if ( prev == NULL ) {
+ pending_sparks_hd = spark;
+ } else {
+ prev->next = spark;
+ }
+ } else { /* (RtsFlags.GranFlags.DoPrioritySparking && !found) || !DoPrioritySparking */
+ /* add the spark at the end of the spark queue */
+ spark->next = NULL;
+ spark->prev = pending_sparks_tl;
+ if (pending_sparks_hd == NULL)
+ pending_sparks_hd = spark;
+ else
+ pending_sparks_tl->next = spark;
+ pending_sparks_tl = spark;
+ }
+ ++SparksAvail;
+
+ /* add costs for search in priority sparking */
+ if (RtsFlags.GranFlags.DoPrioritySparking) {
+ CurrentTime[CurrentProc] += count * RtsFlags.GranFlags.Costs.pri_spark_overhead;
+ }
+
+ IF_GRAN_DEBUG(checkSparkQ,
+ debugBelch("++ Spark stats after adding spark %p (node %p) to queue on PE %d",
+ spark, spark->node, CurrentProc);
+ print_sparkq_stats());
+
+# if defined(GRAN_CHECK)
+ if (RtsFlags.GranFlags.Debug.checkSparkQ) {
+ for (prev = NULL, next = pending_sparks_hd;
+ (next != NULL);
+ prev = next, next = next->next)
+ {}
+ if ( (prev!=NULL) && (prev!=pending_sparks_tl) )
+ debugBelch("SparkQ inconsistency after adding spark %p: (PE %u) pending_sparks_tl (%p) not end of queue (%p)\n",
+ spark,CurrentProc,
+ pending_sparks_tl, prev);
+ }
+# endif
+
+# if defined(GRAN_CHECK)
+ /* Check if the sparkq is still sorted. Just for testing, really! */
+ if ( RtsFlags.GranFlags.Debug.checkSparkQ &&
+ RtsFlags.GranFlags.Debug.pri ) {
+ rtsBool sorted = rtsTrue;
+ rtsSpark *prev, *next;
+
+ if (pending_sparks_hd == NULL ||
+ pending_sparks_hd->next == NULL ) {
+ /* just 1 elem => ok */
+ } else {
+ for (prev = pending_sparks_hd,
+ next = pending_sparks_hd->next;
+ (next != NULL) ;
+ prev = next, next = next->next) {
+ sorted = sorted &&
+ (prev->gran_info >= next->gran_info);