/*
- Time-stamp: <Sat Dec 11 1999 17:25:27 Stardate: [-30]4033.42 software>
- $Id: GranSim.c,v 1.2 2000/01/13 14:34:06 hwloidl Exp $
+ Time-stamp: <Tue Mar 06 2001 00:17:42 Stardate: [-30]6285.06 hwloidl>
Variables and functions specific to GranSim the parallelism simulator
for GPH.
#include "StgTypes.h"
#include "Schedule.h"
#include "SchedAPI.h" // for pushClosure
-#include "GC.h"
#include "GranSimRts.h"
#include "GranSim.h"
#include "ParallelRts.h"
#include "ParallelDebug.h"
+#include "Sparks.h"
#include "Storage.h" // for recordMutable
PEs where_is(StgClosure *node);
static rtsBool stealSomething(PEs proc, rtsBool steal_spark, rtsBool steal_thread);
-static inline rtsBool stealSpark(PEs proc);
-static inline rtsBool stealThread(PEs proc);
+static rtsBool stealSpark(PEs proc);
+static rtsBool stealThread(PEs proc);
static rtsBool stealSparkMagic(PEs proc);
static rtsBool stealThreadMagic(PEs proc);
/* subsumed by stealSomething
{
PEs i;
for (i = 0; i < RtsFlags.GranFlags.proc && !IS_LOCAL_TO(ga, i); i++);
- ASSERT(0<=i && i<RtsFlags.GranFlags.proc);
+ ASSERT(i<RtsFlags.GranFlags.proc);
return (i);
}
IF_DEBUG(gran,
fprintf(stderr, "GRAN: new_event: \n");
- print_event(newentry))
+ print_event(newentry));
}
//@cindex prepend_event
// ToDo: traverse_eventw_for_gc if GUM-Fetching!!! HWL
belch("ghuH: packets in BulkFetching not marked as roots; mayb be fatal");
else
- event->node = (StgTSO *)MarkRoot((StgClosure *)event->node);
+ event->node = (StgClosure *)MarkRoot((StgClosure *)event->node);
break;
case GlobalBlock:
event->tso = (StgTSO *)MarkRoot((StgClosure *)event->tso);
Prune all ContinueThread events related to tso or node in the eventq.
Currently used if a thread leaves STG land with ThreadBlocked status,
i.e. it blocked on a closure and has been put on its blocking queue. It
- will be reawakended via a call to awaken_blocked_queue. Until then no
+ will be reawakened via a call to awakenBlockedQueue. Until then no
event effecting this tso should appear in the eventq. A bit of a hack,
because ideally we shouldn't generate such spurious ContinueThread events
in the first place.
endThread(StgTSO *tso, PEs proc)
{
ASSERT(procStatus[proc]==Busy); // coming straight out of STG land
- ASSERT(tso->whatNext==ThreadComplete);
+ ASSERT(tso->what_next==ThreadComplete);
// ToDo: prune ContinueThreads for this TSO from event queue
DumpEndEvent(proc, tso, rtsFalse /* not mandatory */);
fprintf(RtsFlags.GcFlags.statsFile,"***** veQ boSwI' PackNearbyGraph(node %p, tso %p (%d))\n",
node, tso, tso->id);
# endif
+ barf("//// do_the_fetchnode: out of heap after handleFetchRequest; ToDo: call GarbageCollect()");
prepend_event(event);
- GarbageCollect(GetRoots);
+ GarbageCollect(GetRoots, rtsFalse);
// HWL: ToDo: check whether a ContinueThread has to be issued
// HWL old: ReallyPerformThreadGC(PACK_HEAP_REQUIRED, rtsFalse);
-# if defined(GRAN_CHECK) && defined(GRAN)
+# if 0 && defined(GRAN_CHECK) && defined(GRAN)
if (RtsFlags.GcFlags.giveStats) {
fprintf(RtsFlags.GcFlags.statsFile,"***** SAVE_Hp=%p, SAVE_HpLim=%p, PACK_HEAP_REQUIRED=%d\n",
Hp, HpLim, 0) ; // PACK_HEAP_REQUIRED); ???
within GranSimBlock;
since tso is both in the EVQ and the BQ for node, we have to take it out
of the BQ first before we can handle the FetchReply;
- ToDo: special cases in awaken_blocked_queue, since the BQ magically moved.
+ ToDo: special cases in awakenBlockedQueue, since the BQ magically moved.
*/
- if (tso->blocked_on!=(StgClosure*)NULL) {
+ if (tso->block_info.closure!=(StgClosure*)NULL) {
IF_GRAN_DEBUG(bq,
belch("## ghuH: TSO %d (%p) in FetchReply is blocked on node %p (shouldn't happen AFAIK)",
tso->id, tso, node));
tso->gran.sparkname, spark_queue_len(proc));
*/
+ ASSERT(OutstandingFetches[proc] > 0);
--OutstandingFetches[proc];
- ASSERT(OutstandingFetches[proc] >= 0);
new_event(proc, proc, CurrentTime[proc],
ResumeThread,
event->tso, (RtsFlags.GranFlags.DoBulkFetching ?
creator = event->creator; /* proc that requested work */
rtsSparkQ spark = event->spark;
/* ToDo: check that this size is safe -- HWL */
+#if 0
+ ToDo: check available heap
+
nat req_heap = sizeofW(StgTSO) + MIN_STACK_WORDS;
// add this? -- HWL:RtsFlags.ConcFlags.stkChunkSize;
+#endif
IF_DEBUG(gran, fprintf(stderr, "GRAN: doing the Findwork\n"));
thread. This is a conservative estimate of the required heap.
This eliminates special checks for GC around NewThread within
ActivateSpark. */
-
+
+#if 0
+ ToDo: check available heap
+
if (Hp + req_heap > HpLim ) {
IF_DEBUG(gc,
belch("GC: Doing GC from within Findwork handling (that's bloody dangerous if you ask me)");)
procStatus[CurrentProc]=Idle;
return;
}
+#endif
if ( RtsFlags.GranFlags.DoAlwaysCreateThreads ||
RtsFlags.GranFlags.Fishing ||
graph = PackOneNode(node, tso, &size);
new_event(from, to, CurrentTime[to],
FetchReply,
- tso, graph, (rtsSpark*)NULL);
+ tso, (StgClosure *)graph, (rtsSpark*)NULL);
} else {
new_event(from, to, CurrentTime[to],
FetchReply,
/* The tso requesting the node is blocked and cannot be on a run queue */
ASSERT(!is_on_queue(tso, from));
-
- if ((graph = PackNearbyGraph(node, tso, &size)) == NULL)
+
+ // ToDo: check whether graph is ever used as an rtsPackBuffer!!
+ if ((graph = (StgClosure *)PackNearbyGraph(node, tso, &size, 0)) == NULL)
return (OutOfHeap); /* out of heap */
/* Actual moving/copying of node is done on arrival; see FETCHREPLY */
/* Put ourselves on the blocking queue for this black hole */
// tso->link=END_TSO_QUEUE; not necessary; see assertion above
((StgBlockingQueue *)bh)->blocking_queue = (StgBlockingQueueElement *)tso;
- tso->blocked_on = bh;
+ tso->block_info.closure = bh;
recordMutable((StgMutClosure *)bh);
break;
{
G_PRINT_NODE(bh);
barf("Qagh: thought %p was a black hole (IP %p (%s))",
- bh, info, info_type(get_itbl(bh)));
+ bh, info, info_type(bh));
}
}
return (Ok);
}
IF_GRAN_DEBUG(randomSteal,
belch("^^ RANDOM_STEAL (fishing): stealing from PE %d (current proc is %d)",
- p, proc);)
+ p, proc));
return (PEs)p;
}
Steal a spark (piece of work) from any processor and bring it to proc.
*/
//@cindex stealSpark
-static inline rtsBool
+static rtsBool
+// Thin wrapper over stealSomething: steal a spark only (no thread migration).
+// Must propagate stealSomething's success flag to the caller.
stealSpark(PEs proc) { return stealSomething(proc, rtsTrue, rtsFalse); }
/*
Steal a thread from any processor and bring it to proc i.e. thread migration
*/
//@cindex stealThread
-static inline rtsBool
+static rtsBool
+// Thin wrapper over stealSomething: migrate a thread only (no spark stealing).
+// Must propagate stealSomething's success flag to the caller.
stealThread(PEs proc) { return stealSomething(proc, rtsFalse, rtsTrue); }
/*
stealSparkMagic(proc)
PEs proc;
{
- PEs p, i, j, n, first, upb;
- rtsSpark *spark, *next;
+ PEs p=0, i=0, j=0, n=0, first, upb;
+ rtsSpark *spark=NULL, *next;
PEs pes_by_time[MAX_PROC];
rtsBool stolen = rtsFalse;
rtsTime stealtime;
stealThreadMagic(proc)
PEs proc;
{
- PEs p, i, j, n, first, upb;
- StgTSO *tso;
+ PEs p=0, i=0, j=0, n=0, first, upb;
+ StgTSO *tso=END_TSO_QUEUE;
PEs pes_by_time[MAX_PROC];
rtsBool stolen = rtsFalse;
rtsTime stealtime;
double fishdelay, sparkdelay, latencydelay;
fishdelay = (double)RtsFlags.GranFlags.proc/2;
sparkdelay = fishdelay -
- ((fishdelay-1)/(double)(RtsFlags.GranFlags.proc-1))*(double)idlers();
+ ((fishdelay-1.0)/(double)(RtsFlags.GranFlags.proc-1))*((double)idlers());
latencydelay = sparkdelay*((double)RtsFlags.GranFlags.Costs.latency);
return((rtsTime)latencydelay);
PEs proc;
StgClosure *node;
{
- PEs node_proc = where_is(node), tso_proc = where_is(tso);
+ PEs node_proc = where_is(node),
+ tso_proc = where_is((StgClosure *)tso);
ASSERT(tso_proc==CurrentProc);
// ASSERT(node_proc==CurrentProc);
IF_DEBUG(gran,
belch("GRAN: TSO %d (%p) [PE %d] blocks on closure %p @ %lx",
- tso->id, tso, proc, node, CurrentTime[proc]);)
+ tso->id, tso, proc, node, CurrentTime[proc]));
/* THIS SHOULD NEVER HAPPEN!