/* ----------------------------------------------------------------------------
- Time-stamp: <Wed Jan 12 2000 13:39:33 Stardate: [-30]4193.88 hwloidl>
- $Id: FetchMe.hc,v 1.2 2000/01/13 14:34:06 hwloidl Exp $
+ Time-stamp: <Tue Mar 06 2001 17:01:46 Stardate: [-30]6288.54 hwloidl>
+ $Id: FetchMe.hc,v 1.7 2001/03/23 16:36:21 simonmar Exp $
Entry code for a FETCH_ME closure
//@node Includes, Info tables
//@subsection Includes
+#include "Stg.h"
#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
another PE. We issue a fetch message, and wait for the data to be
retrieved.
- About the difference between std and PAR in returning to the RTS:
- in PAR we call RTS functions from within the entry code (see also
- BLACKHOLE_entry and friends in StgMiscClosures.hc); therefore, we
- have to save the thread state before calling these functions ---
- this is done via SAVE_THREAD_STATE; we then just load the return
- code into R1 before jumping into the RTS --- this is done via
- THREAD_RETURN; so, in short we have something like
- SAVE_THREAD_STATE + THREAD_RETURN = BLOCK_NP
-
+ A word on the ptr/nonptr fields in the macros: they are unused at the
+ moment; all closures defined here have constant size (ie. no payload
+ that varies from closure to closure). Therefore, all routines that
+ need to know the size of these closures have to do a sizeofW(StgFetchMe)
+ etc to get the closure size. See get_closure_info(), evacuate() and
+ checkClosure() (using the same functions for determining the size of the
+ closures would be a good idea; at least it would be a nice step towards
+ making this code bug-free).
------------------------------------------------------------------------ */
//@node Info tables, Index, Includes
//@subsection Info tables
//@cindex FETCH_ME_info
-INFO_TABLE(FETCH_ME_info, FETCH_ME_entry, 0,2, FETCH_ME, const, EF_,0,0);
+INFO_TABLE(stg_FETCH_ME_info, stg_FETCH_ME_entry, 0,2, FETCH_ME,, EF_,"FETCH_ME","FETCH_ME");
//@cindex FETCH_ME_entry
-STGFUN(FETCH_ME_entry)
+STGFUN(stg_FETCH_ME_entry)
{
- extern globalAddr *rga_GLOBAL;
- extern globalAddr *lga_GLOBAL;
- extern globalAddr fmbqga_GLOBAL;
- extern StgClosure *p_GLOBAL;
- /*
- globalAddr *rga;
- globalAddr *lga;
- globalAddr fmbqga;
- StgClosure *p;
- */
-
- rga_GLOBAL = ((StgFetchMe *)R1.p)->ga;
- ASSERT(rga->payload.gc.gtid != mytid);
-
- /* Turn the FETCH_ME into a FETCH_ME_BQ, and place the current thread
- * on the blocking queue.
- */
- // R1.cl->header.info = FETCH_ME_BQ_info;
- SET_INFO((StgClosure *)R1.cl, &FETCH_ME_BQ_info);
-
- CurrentTSO->link = END_BQ_QUEUE;
- ((StgFetchMeBlockingQueue *)R1.cl)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
-
- /* record onto which closure the current thread is blcoking */
- CurrentTSO->block_info.closure = R1.cl;
- //recordMutable((StgMutClosure *)R1.cl);
- p_GLOBAL = R1.cl;
-
- /* Save the Thread State here, before calling RTS routines below! */
- //BLOCK_NP_NO_JUMP(1);
- SAVE_THREAD_STATE(1);
-
- /* unknown junk... needed? --SDM yes, want to see what's happening -- HWL */
- if (RtsFlags.ParFlags.ParStats.Full) {
- /* Note that CURRENT_TIME may perform an unsafe call */
- //rtsTime now = CURRENT_TIME; /* Now */
- CurrentTSO->par.exectime += CURRENT_TIME - CurrentTSO->par.blockedat;
- CurrentTSO->par.fetchcount++;
- /* TSO_QUEUE(CurrentTSO) = Q_FETCHING; */
- CurrentTSO->par.blockedat = CURRENT_TIME;
- /* we are about to send off a FETCH message, so dump a FETCH event */
- DumpRawGranEvent(CURRENT_PROC, taskIDtoPE(rga_GLOBAL->payload.gc.gtid),
- GR_FETCH, CurrentTSO, (StgClosure *)R1.p, 0);
- }
-
- /* Phil T. claims that this was a workaround for a hard-to-find
- * bug, hence I'm leaving it out for now --SDM
- */
- /* Assign a brand-new global address to the newly created FMBQ */
- lga_GLOBAL = makeGlobal(p_GLOBAL, rtsFalse);
- splitWeight(&fmbqga_GLOBAL, lga_GLOBAL);
- ASSERT(fmbqga_GLOBAL.weight == 1L << (BITS_IN(unsigned) - 1));
-
- /* I *hope* it's ok to call this from STG land. --SDM */
- STGCALL3(sendFetch, rga_GLOBAL, &fmbqga_GLOBAL, 0/*load*/);
-
- // sendFetch now called from processTheRealFetch, to make SDM happy
- //theGlobalFromGA.payload.gc.gtid = rga->payload.gc.gtid;
- //theGlobalFromGA.payload.gc.slot = rga->payload.gc.slot;
- //theGlobalFromGA.weight = rga->weight;
- //theGlobalToGA.payload.gc.gtid = fmbqga.payload.gc.gtid;
- //theGlobalToGA.payload.gc.slot = fmbqga.payload.gc.slot;
- //theGlobalToGA.weight = fmbqga.weight;
-
- // STGCALL6(fprintf,stderr,"%% Fetching %p from remote PE ((%x,%d,%x))\n",R1.p,rga->payload.gc.gtid, rga->payload.gc.slot, rga->weight);
-
- THREAD_RETURN(1); /* back to the scheduler */
- // was: BLOCK_NP(1);
+ FB_
+ TICK_ENT_BH();
+
+ ASSERT(((StgFetchMe *)R1.p)->ga->payload.gc.gtid != mytid);
+
+ /* Turn the FETCH_ME into a FETCH_ME_BQ, and place the current thread
+ * on the blocking queue.
+ */
+ // ((StgFetchMeBlockingQueue *)R1.cl)->header.info = &FETCH_ME_BQ_info; // does the same as SET_INFO
+ SET_INFO((StgClosure *)R1.cl, &stg_FETCH_ME_BQ_info);
+
+ /* Remember GA as a global var (used in blockThread); NB: not thread safe! */
+ ASSERT(theGlobalFromGA.payload.gc.gtid == (GlobalTaskId)0);
+ theGlobalFromGA = *((StgFetchMe *)R1.p)->ga;
+
+ /* Put ourselves on the blocking queue for this black hole */
+ ASSERT(looks_like_ga(((StgFetchMe *)R1.p)->ga));
+ CurrentTSO->link = END_BQ_QUEUE;
+ ((StgFetchMeBlockingQueue *)R1.cl)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
+
+ /* jot down why and on what closure we are blocked */
+ CurrentTSO->why_blocked = BlockedOnGA;
+ CurrentTSO->block_info.closure = R1.cl;
+ /* closure is mutable since something has just been added to its BQ */
+ //recordMutable((StgMutClosure *)R1.cl);
+
+ /* sendFetch etc is now done in blockThread, which is called from the
+ scheduler -- HWL */
+
+ BLOCK_NP(1);
FE_
}
When the data arrives from the remote PE, all waiting threads are
woken up and the FETCH_ME_BQ is overwritten with the fetched data.
- FETCH_ME_BQ_entry is a copy of BLACKHOLE_BQ_entry -- HWL
+ FETCH_ME_BQ_entry is almost identical to BLACKHOLE_BQ_entry -- HWL
------------------------------------------------------------------------ */
-INFO_TABLE(FETCH_ME_BQ_info, FETCH_ME_BQ_entry,0,2,FETCH_ME_BQ,const,EF_,0,0);
+INFO_TABLE(stg_FETCH_ME_BQ_info, stg_FETCH_ME_BQ_entry,0,2,FETCH_ME_BQ,,EF_,"FETCH_ME_BQ","FETCH_ME_BQ");
//@cindex FETCH_ME_BQ_info
-STGFUN(FETCH_ME_BQ_entry)
+STGFUN(stg_FETCH_ME_BQ_entry)
{
FB_
TICK_ENT_BH();
- /* Put ourselves on the blocking queue for this black hole */
+ /* Put ourselves on the blocking queue for this node */
+ CurrentTSO->link = (StgTSO*)((StgBlockingQueue *)R1.p)->blocking_queue;
+ ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
+
+ /* jot down why and on what closure we are blocked */
+ CurrentTSO->why_blocked = BlockedOnGA_NoSend;
CurrentTSO->block_info.closure = R1.cl;
- CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
- ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
-
-#if defined(PAR)
- /* Save the Thread State here, before calling RTS routines below! */
- SAVE_THREAD_STATE(1);
-
- if (RtsFlags.ParFlags.ParStats.Full) {
- /* Note that CURRENT_TIME may perform an unsafe call */
- //rtsTime now = CURRENT_TIME; /* Now */
- CurrentTSO->par.exectime += CURRENT_TIME - CurrentTSO->par.blockedat;
- CurrentTSO->par.blockcount++;
- CurrentTSO->par.blockedat = CURRENT_TIME;
- DumpRawGranEvent(CURRENT_PROC, thisPE,
- GR_BLOCK, CurrentTSO, (StgClosure *)R1.p, 0);
- }
-
- THREAD_RETURN(1); /* back to the scheduler */
-#else
+
/* stg_gen_block is too heavyweight, use a specialised one */
BLOCK_NP(1);
-#endif
FE_
}
globally visible closure i.e. one with a GA. A BLOCKED_FETCH closure
indicates that a TSO on another PE is waiting for the result of this
computation. Thus, when updating the closure, the result has to be sent
- to that PE. The relevant routines handling that are awaken_blocked_queue
+ to that PE. The relevant routines handling that are awakenBlockedQueue
and blockFetch (for putting BLOCKED_FETCH closure into a BQ).
-*/
+ ------------------------------------------------------------------------ */
//@cindex BLOCKED_FETCH_info
-INFO_TABLE(BLOCKED_FETCH_info, BLOCKED_FETCH_entry,0,2,BLOCKED_FETCH,const,EF_,0,0);
+INFO_TABLE(stg_BLOCKED_FETCH_info, stg_BLOCKED_FETCH_entry,0,2,BLOCKED_FETCH,,EF_,"BLOCKED_FETCH","BLOCKED_FETCH");
//@cindex BLOCKED_FETCH_entry
-STGFUN(BLOCKED_FETCH_entry)
+STGFUN(stg_BLOCKED_FETCH_entry)
+{
+ FB_
+ /* see NON_ENTERABLE_ENTRY_CODE in StgMiscClosures.hc */
+ STGCALL2(fprintf,stderr,"BLOCKED_FETCH object entered!\n");
+ STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE);
+ FE_
+}
+
+
+/* ---------------------------------------------------------------------------
+ REMOTE_REF
+
+ A REMOTE_REF closure is generated whenever we wish to refer to a sticky
+ object on another PE.
+ ------------------------------------------------------------------------ */
+
+//@cindex REMOTE_REF_info
+INFO_TABLE(stg_REMOTE_REF_info, stg_REMOTE_REF_entry,0,2,REMOTE_REF,,EF_,"REMOTE_REF","REMOTE_REF");
+//@cindex REMOTE_REF_entry
+STGFUN(stg_REMOTE_REF_entry)
{
FB_
/* see NON_ENTERABLE_ENTRY_CODE in StgMiscClosures.hc */
- fprintf(stderr,"Qagh: BLOCKED_FETCH entered!\n");
- STGCALL1(raiseError, errorHandler);
- stg_exit(EXIT_FAILURE); /* not executed */
+ STGCALL2(fprintf,stderr,"REMOTE REF object entered!\n");
+ STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE);
FE_
}