@AvailableStack@ is used to determine whether an existing stack can be
reused without new allocation, thereby reducing garbage collection and
stack setup time. At present, it is only used for the first stack
-chunk of a thread, the one that's got @StkOChunkSize@ words.
+chunk of a thread, the one that's got
+@RTSflags.ConcFlags.stkChunkSize@ words.
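For illustration only, here is a minimal sketch (not compiled; it simply
mirrors the chunk-allocation logic in @NewThread@ further down) of the
reuse-or-allocate decision for that first chunk:
\begin{verbatim}
if (AvailableStack != Nil_closure) {
    /* reuse a previously released chunk: no allocation, no GC pressure */
    stko = AvailableStack;
    AvailableStack = STKO_LINK(AvailableStack);
} else if (SAVE_Hp + STKO_HS + RTSflags.ConcFlags.stkChunkSize > SAVE_HpLim) {
    /* not enough heap for a fresh chunk: fail, so the caller can GC */
    return(NULL);
} else {
    /* allocate a fresh chunk of stkChunkSize words from the heap */
    ALLOC_STK(STKO_HS, RTSflags.ConcFlags.stkChunkSize, 0);
    stko = SAVE_Hp + 1;
    SAVE_Hp += STKO_HS + RTSflags.ConcFlags.stkChunkSize;
}
\end{verbatim}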
\begin{code}
P_ AvailableStack = Nil_closure;
/* mattson thinks this is obsolete */
# if 0 && defined(GRAN)
-extern FILE *main_statsfile; /* Might be of general interest HWL */
typedef unsigned long TIME;
typedef unsigned char PROC;
BQ_lens = 0;
# endif
-I_ do_gr_binary = 0;
-I_ do_gr_profile = 0; /* Full .gr profile or only END events? */
I_ no_gr_profile = 0; /* Don't create any .gr file at all? */
I_ do_sp_profile = 0;
I_ do_gr_migration = 0;
if(EventHd == NULL)
{
fprintf(stderr,"No next event\n");
- exit(EXIT_FAILURE); /* ToDo: abort()? EXIT??? */
+ exit(EXIT_FAILURE); /* ToDo: abort()? EXIT? */
}
if(entry != NULL)
P_ tso, node;
sparkq spark;
{
- extern P_ xmalloc();
- eventq newentry = (eventq) xmalloc(sizeof(struct event));
+ eventq newentry = (eventq) stgMallocBytes(sizeof(struct event), "newevent");
EVENT_PROC(newentry) = proc;
EVENT_CREATOR(newentry) = creator;
static jmp_buf scheduler_loop;
-I_ MaxThreads = DEFAULT_MAX_THREADS;
I_ required_thread_count = 0;
I_ advisory_thread_count = 0;
I_ context_switch = 0;
-I_ contextSwitchTime = CS_MIN_MILLISECS; /* In milliseconds */
-
#if !defined(GRAN)
I_ threadId = 0;
+I_ sparksIgnored = 0;
-I_ MaxLocalSparks = DEFAULT_MAX_LOCAL_SPARKS;
I_ SparkLimit[SPARK_POOLS];
-extern I_ doSanityChks;
-extern void checkAStack(STG_NO_ARGS);
-
rtsBool
-initThreadPools(size)
-I_ size;
+initThreadPools(STG_NO_ARGS)
{
+ I_ size = RTSflags.ConcFlags.maxLocalSparks;
+
SparkLimit[ADVISORY_POOL] = SparkLimit[REQUIRED_POOL] = size;
+
if ((PendingSparksBase[ADVISORY_POOL] = (PP_) malloc(size * sizeof(P_))) == NULL)
return rtsFalse;
+
if ((PendingSparksBase[REQUIRED_POOL] = (PP_) malloc(size * sizeof(P_))) == NULL)
return rtsFalse;
+
PendingSparksLim[ADVISORY_POOL] = PendingSparksBase[ADVISORY_POOL] + size;
PendingSparksLim[REQUIRED_POOL] = PendingSparksBase[REQUIRED_POOL] + size;
return rtsTrue;
ScheduleThreads(topClosure)
P_ topClosure;
{
+#ifdef GRAN
I_ i;
+#endif
P_ tso;
-#if defined(USE_COST_CENTRES) || defined(GUM)
- if (time_profiling || contextSwitchTime > 0) {
- if (initialize_virtual_timer(tick_millisecs)) {
+#if defined(PROFILING) || defined(PAR)
+ if (time_profiling || RTSflags.ConcFlags.ctxtSwitchTime > 0) {
+ if (initialize_virtual_timer(RTSflags.CcFlags.msecsPerTick)) {
#else
- if (contextSwitchTime > 0) {
- if (initialize_virtual_timer(contextSwitchTime)) {
+ if (RTSflags.ConcFlags.ctxtSwitchTime > 0) {
+ if (initialize_virtual_timer(RTSflags.ConcFlags.ctxtSwitchTime)) {
#endif
fflush(stdout);
fprintf(stderr, "Can't initialize virtual timer.\n");
init_qp_profiling();
/*
- * We perform GC so that a signal handler can install a new TopClosure and start
- * a new main thread.
+ * We perform GC so that a signal handler can install a new
+ * TopClosure and start a new main thread.
*/
#ifdef PAR
if (IAmMainThread) {
#endif
#ifdef PAR
- if (do_gr_profile) {
+ if (RTSflags.ParFlags.granSimStats) {
DumpGranEvent(GR_START, tso);
sameThread = rtsTrue;
}
fprintf(stderr, "No runnable threads!\n");
EXIT(EXIT_FAILURE);
}
- AwaitEvent(0);
+ AwaitEvent(RTSflags.ConcFlags.ctxtSwitchTime);
}
#else
if (RunnableThreadsHd == Nil_closure) {
- if (advisory_thread_count < MaxThreads &&
+ if (advisory_thread_count < RTSflags.ConcFlags.maxThreads &&
(PendingSparksHd[REQUIRED_POOL] < PendingSparksTl[REQUIRED_POOL] ||
PendingSparksHd[ADVISORY_POOL] < PendingSparksTl[ADVISORY_POOL])) {
/*
- * If we're here (no runnable threads) and we have pending sparks,
- * we must have a space problem. Get enough space to turn one of
- * those pending sparks into a thread...ReallyPerformGC doesn't
- * return until the space is available, so it may force global GC.
- * ToDo: Is this unnecessary here? Duplicated in ReSchedule()? --JSM
+ * If we're here (no runnable threads) and we have pending
+ * sparks, we must have a space problem. Get enough space
+ * to turn one of those pending sparks into a
+ * thread... ReallyPerformGC doesn't return until the
+ * space is available, so it may force global GC. ToDo:
+ * Is this unnecessary here? Duplicated in ReSchedule()?
+ * --JSM
*/
ReallyPerformThreadGC(THREAD_SPACE_REQUIRED, rtsTrue);
SAVE_Hp -= THREAD_SPACE_REQUIRED;
} else {
/*
- * We really have absolutely no work. Send out a fish (there may be
- * some out there already), and wait for something to arrive. We
- * clearly can't run any threads until a SCHEDULE or RESUME arrives,
- * and so that's what we're hoping to see. (Of course, we still have
- * to respond to other types of messages.)
+ * We really have absolutely no work. Send out a fish
+ * (there may be some out there already), and wait for
+ * something to arrive. We clearly can't run any threads
+ * until a SCHEDULE or RESUME arrives, and so that's what
+ * we're hoping to see. (Of course, we still have to
+ * respond to other types of messages.)
*/
if (!fishing)
sendFish(choosePE(), mytid, NEW_FISH_AGE, NEW_FISH_HISTORY,
NEW_FISH_HUNGER);
+
processMessages();
}
ReSchedule(0);
}
#ifdef PAR
- if (do_gr_profile && !sameThread)
+ if (RTSflags.ParFlags.granSimStats && !sameThread)
DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
#endif
#endif
/* If we're not running a timer, just leave the flag on */
- if (contextSwitchTime > 0)
+ if (RTSflags.ConcFlags.ctxtSwitchTime > 0)
context_switch = 0;
#if defined(GRAN_CHECK) && defined(GRAN) /* Just for testing */
}
#endif
-# if defined(__STG_TAILJUMPS__)
miniInterpret((StgFunPtr)resumeThread);
-# else
- if (doSanityChks)
- miniInterpret_debug((StgFunPtr)resumeThread, checkAStack);
- else
- miniInterpret((StgFunPtr)resumeThread);
-# endif /* __STG_TAILJUMPS__ */
}
\end{code}
/* This code does round-robin, if preferred. */
if(DoFairSchedule && TSO_LINK(CurrentTSO) != Nil_closure)
{
- if(do_gr_profile)
+ if(RTSflags.ParFlags.granSimStats)
DumpGranEvent(GR_DESCHEDULE,ThreadQueueHd);
ThreadQueueHd = TSO_LINK(CurrentTSO);
TSO_LINK(ThreadQueueTl) = CurrentTSO;
ThreadQueueTl = CurrentTSO;
TSO_LINK(CurrentTSO) = Nil_closure;
- if (do_gr_profile)
+ if (RTSflags.ParFlags.granSimStats)
DumpGranEvent(GR_SCHEDULE,ThreadQueueHd);
CurrentTime[CurrentProc] += gran_threadcontextswitchtime;
}
}
#endif
- if(do_gr_profile)
+ if(RTSflags.ParFlags.granSimStats)
DumpGranEvent(GR_SCHEDULE,ThreadQueueHd);
CurrentTSO = ThreadQueueHd;
++TSO_FETCHCOUNT(EVENT_TSO(event));
TSO_FETCHTIME(EVENT_TSO(event)) += gran_fetchtime;
- if (do_gr_profile)
+ if (RTSflags.ParFlags.granSimStats)
DumpGranEventAndNode(GR_REPLY,EVENT_TSO(event),
EVENT_NODE(event),EVENT_CREATOR(event));
CONTINUETHREAD,Nil_closure,Nil_closure,NULL);
TSO_BLOCKTIME(EVENT_TSO(event)) += CurrentTime[CurrentProc] -
TSO_BLOCKEDAT(EVENT_TSO(event));
- if(do_gr_profile)
+ if(RTSflags.ParFlags.granSimStats)
DumpGranEvent(GR_RESUME,EVENT_TSO(event));
continue;
} else {
if(do_sp_profile)
DumpSparkGranEvent(SP_PRUNED,spark);
- assert(spark != NULL);
+ ASSERT(spark != NULL);
SparkQueueHd = SPARK_NEXT(spark);
if(SparkQueueHd == NULL)
newevent(CurrentProc,CurrentProc,CurrentTime[CurrentProc],
STARTTHREAD,tso,Nil_closure,NULL);
- assert(spark != NULL);
+ ASSERT(spark != NULL);
SparkQueueHd = SPARK_NEXT(spark);
if(SparkQueueHd == NULL)
#ifdef PAR
/*
* In the parallel world, we do unfair scheduling for the moment.
- * Ultimately, this should all be merged with the more sophicticated
- * GrAnSim scheduling options. (Of course, some provision should be
- * made for *required* threads to make sure that they don't starve,
- * but for now we assume that no one is running concurrent Haskell on
- * a multi-processor platform.)
+ * Ultimately, this should all be merged with the more
+ * sophisticated GrAnSim scheduling options. (Of course, some
+ * provision should be made for *required* threads to make sure
+ * that they don't starve, but for now we assume that no one is
+ * running concurrent Haskell on a multi-processor platform.)
*/
sameThread = again;
if (RunnableThreadsHd == Nil_closure) {
RunnableThreadsHd = tso;
#ifdef PAR
- if (do_gr_profile) {
+ if (RTSflags.ParFlags.granSimStats) {
DumpGranEvent(GR_START, tso);
sameThread = rtsTrue;
}
} else {
TSO_LINK(RunnableThreadsTl) = tso;
#ifdef PAR
- if (do_gr_profile)
+ if (RTSflags.ParFlags.granSimStats)
DumpGranEvent(GR_STARTQ, tso);
#endif
}
(RunnableThreadsHd != Nil_closure ||
(required_thread_count == 0 && IAmMainThread)) ||
#endif
- advisory_thread_count == MaxThreads ||
+ advisory_thread_count == RTSflags.ConcFlags.maxThreads ||
(tso = NewThread(spark, T_ADVISORY)) == NULL)
break;
advisory_thread_count++;
if (RunnableThreadsHd == Nil_closure) {
RunnableThreadsHd = tso;
#ifdef PAR
- if (do_gr_profile) {
+ if (RTSflags.ParFlags.granSimStats) {
DumpGranEvent(GR_START, tso);
sameThread = rtsTrue;
}
} else {
TSO_LINK(RunnableThreadsTl) = tso;
#ifdef PAR
- if (do_gr_profile)
+ if (RTSflags.ParFlags.granSimStats)
DumpGranEvent(GR_STARTQ, tso);
#endif
}
CurrentTSO = ThreadQueueHd = ThreadQueueTl = EVENT_TSO(event);
newevent(CurrentProc,CurrentProc,CurrentTime[CurrentProc]+gran_threadqueuetime,
CONTINUETHREAD,Nil_closure,Nil_closure,NULL);
- if(do_gr_profile)
+ if(RTSflags.ParFlags.granSimStats)
DumpGranEvent(event_type,EVENT_TSO(event));
}
else
if(DoThreadMigration)
++SurplusThreads;
- if(do_gr_profile)
+ if(RTSflags.ParFlags.granSimStats)
DumpGranEvent(event_type+1,EVENT_TSO(event));
}
MAKE_BUSY(proc);
--SurplusThreads;
- if(do_gr_profile)
+ if(RTSflags.ParFlags.granSimStats)
DumpRawGranEvent(p,GR_STEALING,TSO_ID(thread));
CurrentTime[p] += 5l * gran_mtidytime;
#if defined(GRAN)
-/* Slow but relatively reliable method uses xmalloc */
+/* Slow but relatively reliable method uses stgMallocBytes */
/* Eventually change that to heap allocated sparks. */
sparkq
P_ node;
I_ name, local;
{
- extern P_ xmalloc();
- sparkq newspark = (sparkq) xmalloc(sizeof(struct spark));
+ sparkq newspark = (sparkq) stgMallocBytes(sizeof(struct spark), "NewSpark");
+
SPARK_PREV(newspark) = SPARK_NEXT(newspark) = NULL;
SPARK_NODE(newspark) = node;
SPARK_NAME(newspark) = name;
#endif
-I_ StkOChunkSize = DEFAULT_STKO_CHUNK_SIZE;
-
/* Create a new TSO, with the specified closure to enter and thread type */
P_
}
TSO_LINK(tso) = Nil_closure;
+#ifdef PAR
TSO_CCC(tso) = (CostCentre)STATIC_CC_REF(CC_MAIN);
+#endif
TSO_NAME(tso) = (P_) INFO_PTR(topClosure); /* A string would be nicer -- JSM */
TSO_ID(tso) = threadId++;
TSO_TYPE(tso) = type;
TSO_ARG1(tso) = TSO_EVENT(tso) = 0;
TSO_SWITCH(tso) = NULL;
-#ifdef DO_REDN_COUNTING
+#ifdef TICKY_TICKY
TSO_AHWM(tso) = 0;
TSO_BHWM(tso) = 0;
#endif
SET_PROCS(stko,ThisPE);
#endif
AvailableStack = STKO_LINK(AvailableStack);
- } else if (SAVE_Hp + STKO_HS + StkOChunkSize > SAVE_HpLim) {
+ } else if (SAVE_Hp + STKO_HS + RTSflags.ConcFlags.stkChunkSize > SAVE_HpLim) {
return(NULL);
} else {
- ALLOC_STK(STKO_HS,StkOChunkSize,0);
+ ALLOC_STK(STKO_HS,RTSflags.ConcFlags.stkChunkSize,0);
stko = SAVE_Hp + 1;
- SAVE_Hp += STKO_HS + StkOChunkSize;
+ SAVE_Hp += STKO_HS + RTSflags.ConcFlags.stkChunkSize;
SET_STKO_HDR(stko, StkO_info, CCC);
}
- STKO_SIZE(stko) = StkOChunkSize + STKO_VHS;
+ STKO_SIZE(stko) = RTSflags.ConcFlags.stkChunkSize + STKO_VHS;
STKO_SpB(stko) = STKO_SuB(stko) = STKO_BSTK_BOT(stko) + BREL(1);
STKO_SpA(stko) = STKO_SuA(stko) = STKO_ASTK_BOT(stko) + AREL(1);
STKO_LINK(stko) = Nil_closure;
}
# endif
-#ifdef DO_REDN_COUNTING
+#ifdef TICKY_TICKY
STKO_ADEP(stko) = STKO_BDEP(stko) = 0;
#endif
SAVE_Ret = (StgRetAddr) UNVEC(stopThreadDirectReturn,vtbl_stopStgWorld);
SAVE_StkO = stko;
+ ASSERT(sanityChk_StkO(stko));
+
if (DO_QP_PROF) {
QP_Event1(do_qp_prof > 1 ? "*A" : "*G", tso);
}
#ifdef PAR
TIME now = CURRENT_TIME;
#endif
-#ifdef DO_REDN_COUNTING
- extern FILE *tickyfile;
-
- if (tickyfile != NULL) {
- fprintf(tickyfile, "Thread %d (%lx)\n\tA stack max. depth: %ld words\n",
- TSO_ID(CurrentTSO), TSO_NAME(CurrentTSO), TSO_AHWM(CurrentTSO));
- fprintf(tickyfile, "\tB stack max. depth: %ld words\n",
- TSO_BHWM(CurrentTSO));
+#ifdef TICKY_TICKY
+ if (RTSflags.TickyFlags.showTickyStats) {
+ fprintf(RTSflags.TickyFlags.tickyFile,
+ "Thread %d (%lx)\n\tA stack max. depth: %ld words\n",
+ TSO_ID(CurrentTSO), TSO_NAME(CurrentTSO), TSO_AHWM(CurrentTSO));
+ fprintf(RTSflags.TickyFlags.tickyFile,
+ "\tB stack max. depth: %ld words\n",
+ TSO_BHWM(CurrentTSO));
}
#endif
}
#if defined(GRAN)
- assert(CurrentTSO == ThreadQueueHd);
+ ASSERT(CurrentTSO == ThreadQueueHd);
ThreadQueueHd = TSO_LINK(CurrentTSO);
if(ThreadQueueHd == Nil_closure)
/* make the job of bookkeeping the running, runnable, */
/* blocked threads easier for scripts like gr2ps -- HWL */
- if (do_gr_profile && !is_first)
+ if (RTSflags.ParFlags.granSimStats && !is_first)
DumpRawGranEvent(i,GR_SCHEDULE,
TSO_ID(RunnableThreadsHd[i]));
if (!no_gr_profile)
/* Note ThreadQueueHd is Nil when the main thread terminates */
if(ThreadQueueHd != Nil_closure)
{
- if (do_gr_profile && !no_gr_profile)
+ if (RTSflags.ParFlags.granSimStats && !no_gr_profile)
DumpGranEvent(GR_SCHEDULE,ThreadQueueHd);
CurrentTime[CurrentProc] += gran_threadscheduletime;
}
- else if (do_gr_binary && TSO_TYPE(CurrentTSO)==T_MAIN &&
+ else if (RTSflags.ParFlags.granSimStats_Binary && TSO_TYPE(CurrentTSO)==T_MAIN &&
!no_gr_profile)
grterminate(CurrentTime[CurrentProc]);
}
#endif /* GRAN */
#ifdef PAR
- if (do_gr_profile) {
+ if (RTSflags.ParFlags.granSimStats) {
TSO_EXECTIME(CurrentTSO) += now - TSO_BLOCKEDAT(CurrentTSO);
DumpGranInfo(thisPE, CurrentTSO, TSO_TYPE(CurrentTSO) != T_ADVISORY);
}
case T_MAIN:
required_thread_count--;
#ifdef PAR
- if (do_gr_binary)
+ if (RTSflags.ParFlags.granSimStats_Binary)
grterminate(now);
#endif
QP_Event2(do_qp_prof > 1 ? "RA" : "RG", bqe, CurrentTSO);
}
# ifdef PAR
- if (do_gr_profile) {
+ if (RTSflags.ParFlags.granSimStats) {
DumpGranEvent(GR_RESUMEQ, bqe);
switch (TSO_QUEUE(bqe)) {
case Q_BLOCKED:
while(tso != Nil_closure) {
W_ proc;
- assert(TSO_INTERNAL_PTR(tso)->rR[0].p == node);
+ ASSERT(TSO_INTERNAL_PTR(tso)->rR[0].p == node);
# if defined(COUNT)
++BQ_lens;
TSO_LINK(ThreadQueueTl) = tso;
while(TSO_LINK(tso) != Nil_closure) {
- assert(TSO_INTERNAL_PTR(tso)->rR[0].p == node);
+ ASSERT(TSO_INTERNAL_PTR(tso)->rR[0].p == node);
if (DO_QP_PROF) {
QP_Event2(do_qp_prof > 1 ? "RA" : "RG", tso, CurrentTSO);
}
tso = TSO_LINK(tso);
}
- assert(TSO_INTERNAL_PTR(tso)->rR[0].p == node);
+ ASSERT(TSO_INTERNAL_PTR(tso)->rR[0].p == node);
if (DO_QP_PROF) {
QP_Event2(do_qp_prof > 1 ? "RA" : "RG", tso, CurrentTSO);
}
QP_Event1("GR", CurrentTSO);
}
#ifdef PAR
- if (do_gr_profile) {
+ if (RTSflags.ParFlags.granSimStats) {
/* Note that CURRENT_TIME may perform an unsafe call */
TSO_EXECTIME(CurrentTSO) += CURRENT_TIME - TSO_BLOCKEDAT(CurrentTSO);
}
P_ node;
PROC from, to;
{
- assert(to==CurrentProc);
+ ASSERT(to==CurrentProc);
+
if (!IS_LOCAL_TO(PROCS(node),from) &&
!IS_LOCAL_TO(PROCS(node),to) )
return 1;
{ /* start tso */
newevent(p,CurrentProc,
CurrentTime[CurrentProc] /* +gran_latency */,
- FETCHREPLY,tso,node,NULL); /* node needed ?? */
+ FETCHREPLY,tso,node,NULL); /* node needed ? */
CurrentTime[CurrentProc] += gran_mtidytime;
}
else if (IS_LOCAL_TO(PROCS(node),CurrentProc) ) /* Is node still here? */
newevent(p,CurrentProc,
CurrentTime[CurrentProc]+gran_latency,
- FETCHREPLY,tso,node,NULL); /* node needed ?? */
+ FETCHREPLY,tso,node,NULL); /* node needed ? */
CurrentTime[CurrentProc] += gran_mtidytime;
}
if (NoForward) {
newevent(p,p_new,
max(CurrentTime[p_new],CurrentTime[CurrentProc])+gran_latency,
- FETCHREPLY,tso,node,NULL); /* node needed ?? */
+ FETCHREPLY,tso,node,NULL); /* node needed ? */
CurrentTime[CurrentProc] += gran_mtidytime;
return;
}
if(do_gr_sim)
{
- char *extension = do_gr_binary? "gb": "gr";
+ char *extension = RTSflags.ParFlags.granSimStats_Binary? "gb": "gr";
sprintf(gr_filename, GR_FILENAME_FMT, prog_argv[0],extension);
if ((gr_file = fopen(gr_filename,"w")) == NULL )
fputs("\n\n++++++++++++++++++++\n\n",gr_file);
}
- if(do_gr_binary)
+ if(RTSflags.ParFlags.granSimStats_Binary)
grputw(sizeof(TIME));
Idlers = max_proc;
fputc(' ', qp_file);
fputs(prog_argv[i], qp_file);
}
- fprintf(qp_file, " +RTS -C%d -t%d\n", contextSwitchTime, MaxThreads);
+ fprintf(qp_file, " +RTS -C%ld -t%ld\n"
+ , RTSflags.ConcFlags.ctxtSwitchTime
+ , RTSflags.ConcFlags.maxThreads);
+
fputs(time_str(), qp_file);
fputc('\n', qp_file);
}
if(ThreadQueueHd==Nil_closure) {
MAKE_IDLE(CurrentProc);
ThreadQueueTl = Nil_closure;
- } else if (do_gr_profile) {
+ } else if (RTSflags.ParFlags.granSimStats) {
CurrentTime[CurrentProc] += gran_threadcontextswitchtime;
DumpGranEvent(GR_SCHEDULE,ThreadQueueHd);
}
-- assumes head of queue == CurrentTSO */
if(!DoFairSchedule)
{
- if(do_gr_profile)
+ if(RTSflags.ParFlags.granSimStats)
DumpGranEventAndNode(GR_FETCH,CurrentTSO,node,p);
ActivateNextThread();
else /* !DoReScheduleOnFetch */
{
/* Note: CurrentProc is still busy as it's blocked on fetch */
- if(do_gr_profile)
+ if(RTSflags.ParFlags.granSimStats)
DumpGranEventAndNode(GR_FETCH,CurrentTSO,node,p);
#if defined(GRAN_CHECK) && defined(GRAN) /* Just for testing */
void
GranSimBlock()
{
- if(do_gr_profile)
+ if(RTSflags.ParFlags.granSimStats)
DumpGranEvent(GR_BLOCK,CurrentTSO);
++TSO_BLOCKCOUNT(CurrentTSO);
{
#if defined(GRAN_CHECK) && defined(GRAN)
if ( debug & 0x40 )
- fprintf(main_statsfile,"Saving Spark Root %d(proc: %d; pool: %d) -- 0x%lx\n",
+ fprintf(RTSflags.GcFlags.statsFile,"Saving Spark Root %d(proc: %d; pool: %d) -- 0x%lx\n",
num_ptr_roots,proc,i,SPARK_NODE(spark));
#endif
StorageMgrInfo.roots[num_ptr_roots++] = SPARK_NODE(spark);
}
} /* forall spark ... */
if (prunedSparks>0) {
- fprintf(main_statsfile,"Pruning and disposing %lu excess sparks (> %lu) on proc %d for GC purposes\n",
+ fprintf(RTSflags.GcFlags.statsFile,"Pruning and disposing %lu excess sparks (> %lu) on proc %d for GC purposes\n",
prunedSparks,MAX_SPARKS,proc);
if (disposeQ == PendingSparksHd[proc][i])
PendingSparksHd[proc][i] = NULL;
SPARK_NODE(spark) = StorageMgrInfo.roots[--num_ptr_roots];
#if defined(GRAN_CHECK) && defined(GRAN)
if ( debug & 0x40 )
- fprintf(main_statsfile,"Restoring Spark Root %d -- new: 0x%lx \n",
+ fprintf(RTSflags.GcFlags.statsFile,"Restoring Spark Root %d -- new: 0x%lx \n",
num_ptr_roots,SPARK_NODE(spark));
#endif
}
else
#if defined(GRAN_CHECK) && defined(GRAN)
if ( debug & 0x40 )
- fprintf(main_statsfile,"Error in RestoreSpkRoots (%d; @ spark 0x%x): More than MAX_SPARKS (%d) sparks\n",
+ fprintf(RTSflags.GcFlags.statsFile,"Error in RestoreSpkRoots (%d; @ spark 0x%x): More than MAX_SPARKS (%d) sparks\n",
num_ptr_roots,SPARK_NODE(spark),MAX_SPARKS);
#endif
if(name > GR_EVENT_MAX)
name = GR_EVENT_MAX;
- if(do_gr_binary)
+ if(RTSflags.ParFlags.granSimStats_Binary)
{
grputw(name);
grputw(pe);
if(name > GR_EVENT_MAX)
name = GR_EVENT_MAX;
- if(do_gr_binary)
+ if(RTSflags.ParFlags.granSimStats_Binary)
{
grputw(name);
grputw(pe);
P_ tso;
I_ mandatory_thread;
{
- if(do_gr_binary)
+ if(RTSflags.ParFlags.granSimStats_Binary)
{
grputw(GR_END);
grputw(pe);
fprintf(stderr," [GA: 0x%lx]",GA(node));
#endif
-#if defined(USE_COST_CENTRES)
+#if defined(PROFILING)
fprintf(stderr," [CC: 0x%lx]",CC_HDR(node));
#endif
fprintf(stderr,"Enter Flush Entry: 0x%lx;\tExit Flush Entry: 0x%lx\n",INFO_FLUSHENT(info_ptr),INFO_FLUSH(info_ptr));
#endif
-#if defined(USE_COST_CENTRES)
+#if defined(PROFILING)
fprintf(stderr,"Cost Centre (???): 0x%lx\n",INFO_CAT(info_ptr));
#endif
fputc(' ', qp_file);
fputs(prog_argv[i], qp_file);
}
- fprintf(qp_file, "+RTS -C%ld -t%ld\n", contextSwitchTime, MaxThreads);
+ fprintf(qp_file, "+RTS -C%ld -t%ld\n"
+ , RTSflags.ConcFlags.ctxtSwitchTime
+ , RTSflags.ConcFlags.maxThreads);
+
fputs(time_str(), qp_file);
fputc('\n', qp_file);
}
W_ IdleProcs = ~0l, Idlers = 32;
void
-GranSimAllocate(n,node,liveness)
-I_ n;
-P_ node;
-W_ liveness;
+GranSimAllocate(I_ n, P_ node, W_ liveness)
{ }
void
-GranSimUnallocate(n,node,liveness)
-W_ n;
-P_ node;
-W_ liveness;
+GranSimUnallocate(W_ n, P_ node, W_ liveness)
{ }
-
void
-GranSimExec(ariths,branches,loads,stores,floats)
-W_ ariths,branches,loads,stores,floats;
+GranSimExec(W_ ariths, W_ branches, W_ loads, W_ stores, W_ floats)
{ }
-I_
-GranSimFetch(node /* , liveness_mask */ )
-P_ node;
+int
+GranSimFetch(P_ node /* , liveness_mask */ )
/* I_ liveness_mask; */
-{ }
+{ return(9999999); }
void
-GranSimSpark(local,node)
-W_ local;
-P_ node;
+GranSimSpark(W_ local, P_ node)
{ }
#if 0
#endif
void
-GranSimBlock()
+GranSimBlock(STG_NO_ARGS)
{ }
#endif