2 % (c) The AQUA Project, Glasgow University, 1994
4 %************************************************************************
6 \section[StgThreads.lhc]{Threaded Threads Support}
8 %************************************************************************
10 Some of the threads support is done in threaded code. How's that for ambiguous
17 #define MAIN_REG_MAP /* STG world */
22 #include "Statistics.h"
28 %************************************************************************
30 \subsection[thread-objects]{Special objects for thread support}
32 %************************************************************************
34 TSO's are Thread State Objects, where the thread context is stored when the
35 thread is sleeping, and where we have slots for STG registers that don't
36 live in real machine registers.
/* TSO entry code (fragment): a TSO is not an enterable closure,
   so reaching this entry point is a fatal error. */
46 fprintf(stderr, "TSO Entry: panic");
53 Stack objects are chunks of stack words allocated out of the heap and
54 linked together in a chain.
/* StkO entry code (fragment): stack objects are likewise never
   entered as closures; panic if we get here. */
64 fprintf(stderr, "StkO Entry: panic");
/* Entry code for static stack objects: also a can't-happen case. */
74 STGFUN(StkO_static_entry)
78 fprintf(stderr, "StkO_static Entry: panic");
88 Blocking queues are essentially black holes with threads attached. These
89 are the threads to be awakened when the closure is updated.
93 EXTFUN(EnterNodeCode);
/* Blocking-queue (BQ) entry code (fragment): a thread that enters a
   BQ closure links itself onto the closure's queue of waiters and
   blocks; it will be awakened when the closure is updated. */
100 /* Before overwriting TSO_LINK */
101 STGCALL3(void,(),GranSimBlock,CurrentTSO,CurrentProc,Node);
/* Cons CurrentTSO onto the front of the closure's blocking queue. */
104 TSO_LINK(CurrentTSO) = (P_) BQ_ENTRIES(Node);
105 BQ_ENTRIES(Node) = (W_) CurrentTSO;
/* On wakeup only R1 (Node) is live; resume by re-entering Node. */
107 LivenessReg = LIVENESS_R1;
109 TSO_PC1(CurrentTSO) = EnterNodeCode;
112 QP_Event1("GR", CurrentTSO);
/* GranSim statistics: charge the elapsed mutator time to this TSO
   and mark it blocked as of `now'. */
115 if(RTSflags.ParFlags.granSimStats) {
116 /* Note that CURRENT_TIME may perform an unsafe call */
117 TIME now = CURRENT_TIME;
118 TSO_EXECTIME(CurrentTSO) += now - TSO_BLOCKEDAT(CurrentTSO);
119 TSO_BLOCKCOUNT(CurrentTSO)++;
120 TSO_QUEUE(CurrentTSO) = Q_BLOCKED;
121 TSO_BLOCKEDAT(CurrentTSO) = now;
122 DumpGranEvent(GR_BLOCK, CurrentTSO);
126 ReSchedule(SAME_THREAD); /* NB: GranSimBlock activated next thread */
137 Revertible black holes are needed in the parallel world, to handle
138 negative acknowledgements of messages containing updatable closures.
139 The idea is that when the original message is transmitted, the closure
140 is turned into a revertible black hole...an object which acts like a
141 black hole when local threads try to enter it, but which can be
142 reverted back to the original closure if necessary.
144 It's actually a lot like a blocking queue (BQ) entry, because
145 revertible black holes are initially set up with an empty blocking queue.
148 The combination of GrAnSim with revertible black holes has not been tested.
153 #if defined(PAR) || defined(GRAN)
/* Revertible-black-hole (RBH) entry code (fragment): like BQ entry,
   the thread chains itself onto the RBH's blocking queue and blocks;
   the RBH may later be reverted to the original closure. */
160 /* Before overwriting TSO_LINK */
161 STGCALL3(void,(),GranSimBlock,CurrentTSO,CurrentProc,Node);
164 /* In GranSim and GUM on 2.04 the InfoPtr seems to be invalid when entering
165 this routine (exact reason is unknown). This change does the safe
166 thing instead. -- HWL */
/* Dispatch on the closure's own info table rather than the
   (possibly stale) InfoPtr register; see the note above. */
168 switch (INFO_TYPE(INFO_PTR(Node))) { /* HWL orig: INFO_TYPE(InfoPtr) */
169 case INFO_SPEC_RBH_TYPE:
170 TSO_LINK(CurrentTSO) = (P_) SPEC_RBH_BQ(Node);
171 SPEC_RBH_BQ(Node) = (W_) CurrentTSO;
173 case INFO_GEN_RBH_TYPE:
174 TSO_LINK(CurrentTSO) = (P_) GEN_RBH_BQ(Node);
175 GEN_RBH_BQ(Node) = (W_) CurrentTSO;
/* NOTE(review): this error path still prints InfoPtr, which the
   comment above says may be invalid on entry -- confirm. */
179 fprintf(stderr, "Panic: non-{SPEC,GEN} RBH %#lx (IP %#lx)\n", Node, InfoPtr);
/* Only R1 (Node) is live; resume by re-entering Node. */
183 LivenessReg = LIVENESS_R1;
185 TSO_PC1(CurrentTSO) = EnterNodeCode;
188 QP_Event1("GR", CurrentTSO);
/* GranSim statistics, exactly as in the BQ entry code. */
192 if(RTSflags.ParFlags.granSimStats) {
193 /* Note that CURRENT_TIME may perform an unsafe call */
194 TIME now = CURRENT_TIME;
195 TSO_EXECTIME(CurrentTSO) += now - TSO_BLOCKEDAT(CurrentTSO);
196 TSO_BLOCKCOUNT(CurrentTSO)++;
197 TSO_QUEUE(CurrentTSO) = Q_BLOCKED;
198 TSO_BLOCKEDAT(CurrentTSO) = now;
199 DumpGranEvent(GR_BLOCK, CurrentTSO);
203 ReSchedule(SAME_THREAD); /* NB: GranSimBlock activated next thread */
215 %************************************************************************
217 \subsection[thread-entrypoints]{Scheduler-Thread Interfaces}
219 %************************************************************************
221 The normal way of entering a thread is through \tr{resumeThread},
222 which short-circuits any indirections to the TSO and StkO, sets up STG
223 registers, and jumps to the saved PC.
/* resumeThread (fragment): short-circuit any indirections to the TSO
   and its saved StkO, restore the STG register table, and jump to
   the thread's saved PC. */
230 while(IS_INDIRECTION(INFO_PTR(CurrentTSO))) {
231 CurrentTSO = (P_) IND_CLOSURE_PTR(CurrentTSO);
/* Mark the thread running again for GranSim accounting. */
235 if (RTSflags.ParFlags.granSimStats) {
236 TSO_QUEUE(CurrentTSO) = Q_RUNNING;
237 /* Note that CURRENT_TIME may perform an unsafe call */
238 TSO_BLOCKEDAT(CurrentTSO) = CURRENT_TIME;
242 CurrentRegTable = TSO_INTERNAL_PTR(CurrentTSO);
/* The saved StkO may have been updated; chase indirections too. */
244 while(IS_INDIRECTION(INFO_PTR(SAVE_StkO))) {
245 SAVE_StkO = (P_) IND_CLOSURE_PTR(SAVE_StkO);
249 SET_TASK_ACTIVITY(ST_REDUCING);
250 RESTORE_CCC(TSO_CCC(CurrentTSO));
/* Resume at the thread's saved program counter. */
251 JMP_(TSO_PC1(CurrentTSO));
256 Since we normally context switch during a heap check, it is possible
257 that we will return to a previously suspended thread without
258 sufficient heap for the thread to continue. However, we have cleverly
259 stashed away the heap requirements in @TSO_ARG1@ so that we can decide
260 whether or not to perform a garbage collection before resuming the
261 thread. The actual thread resumption address (either @EnterNodeCode@
262 or elsewhere) is stashed in @TSO_PC2@.
/* CheckHeapCode: resumption point for a thread suspended during a
   heap check.  TSO_ARG1 holds the heap words still required; if the
   allocation does not fit below HpLim, garbage-collect first, then
   resume at the address stashed in TSO_PC2. */
265 STGFUN(CheckHeapCode)
269 ALLOC_HEAP(TSO_ARG1(CurrentTSO)); /* ticky profiling */
270 if ((Hp += TSO_ARG1(CurrentTSO)) > HpLim) {
271 ReallyPerformThreadGC(TSO_ARG1(CurrentTSO), rtsFalse);
274 SET_TASK_ACTIVITY(ST_REDUCING);
275 RESUME_(TSO_PC2(CurrentTSO));
280 Often, a thread starts (or rather, resumes) by entering the closure
281 that Node points to. Here's a tiny code fragment to do just that.
282 The saved PC in the TSO can be set to @EnterNodeCode@ whenever we
283 want this to happen upon resumption of the thread.
/* EnterNodeCode: resume a thread by entering the closure that Node
   points to (saved as TSO_PC1 when the thread blocks). */
286 STGFUN(EnterNodeCode)
290 InfoPtr=(D_)(INFO_PTR(Node));
291 JMP_(ENTRY_CODE(InfoPtr));
296 Then, there are the occasions when we just want to pick up where we
297 left off. We use \tr{RESUME_} here instead of \tr{JMP_}, because when
298 we return to a call site, the Alpha is going to try to load \tr{%gp}
299 from \tr{%ra} rather than \tr{%pv}, and \tr{JMP_} only sets \tr{%pv}.
300 Resuming to the start of a function is currently okay, but an
301 extremely bad practice. As we add support for more architectures, we
302 can expect the difference between \tr{RESUME_} and \tr{JMP_} to become
/* (fragment) Plain continuation: pick up where the thread left off,
   at TSO_PC2.  RESUME_ rather than JMP_ is required for the Alpha
   gp-register convention described in the surrounding text. */
310 SET_TASK_ACTIVITY(ST_REDUCING);
311 RESUME_(TSO_PC2(CurrentTSO));
316 %************************************************************************
318 \subsection[stack-chunk-underflow-code]{Underflow code for stack chunks}
320 %************************************************************************
326 On a uniprocessor, stack underflow causes us no great headaches. The
327 old value of RetReg is squirreled away at the base of the top stack
328 object (the one that's about to get blown away). We just yank it
329 outta there and perform the same kind of return that got us here in
332 This simplicity is due to the fact that we never have to fetch a stack
/* Uniprocessor stack-underflow returns.  The template pops to the
   previous StkO in the chain, recovers the RetReg that was squirreled
   away at the base of the dying chunk, restores the stack registers,
   and performs the given kind of return.  Instantiated once per
   return convention below: direct return, vectored returns 0-7, and
   the enter-Node case. */
337 #define DO_RETURN_TEMPLATE(label, cont) \
342 temp = STKO_LINK(StkOReg); \
343 RetReg = STKO_RETURN(StkOReg); \
345 RestoreStackStgRegs(); \
350 DO_RETURN_TEMPLATE(UnderflowDirectReturn, DIRECT(((P_)RetReg)))
351 DO_RETURN_TEMPLATE(UnderflowVect0, ((P_)RetReg)[RVREL(0)])
352 DO_RETURN_TEMPLATE(UnderflowVect1, ((P_)RetReg)[RVREL(1)])
353 DO_RETURN_TEMPLATE(UnderflowVect2, ((P_)RetReg)[RVREL(2)])
354 DO_RETURN_TEMPLATE(UnderflowVect3, ((P_)RetReg)[RVREL(3)])
355 DO_RETURN_TEMPLATE(UnderflowVect4, ((P_)RetReg)[RVREL(4)])
357 DO_RETURN_TEMPLATE(UnderflowVect5, ((P_)RetReg)[RVREL(5)])
358 DO_RETURN_TEMPLATE(UnderflowVect6, ((P_)RetReg)[RVREL(6)])
359 DO_RETURN_TEMPLATE(UnderflowVect7, ((P_)RetReg)[RVREL(7)])
361 DO_RETURN_TEMPLATE(StackUnderflowEnterNode, EnterNodeCode)
367 In the parallel world, we may have to fetch the StkO from a remote
368 location before we can load up the stack registers and perform the
369 return. Our convention is that we load RetReg up with the exact
370 continuation address (after a vector table lookup, if necessary),
371 and tail-call the code to fetch the stack object. (Of course, if
372 the stack object is already local, we then just jump to the
373 continuation address.)
/* CommonUnderflow (parallel world): entered with RetReg already
   holding the exact continuation address (any vector-table lookup
   happened in the caller).  Unlinks the dead stack chunk and
   neutralises it so the mutables list / GC treat it as boring. */
377 STGFUN(CommonUnderflow)
382 temp = STKO_LINK(StkOReg);
384 /* fprintf(stderr,"Stk Underflow from: %lx to: %lx size abandoned: %d\n",StkOReg,temp,STKO_CLOSURE_CTS_SIZE(StkOReg)); */
386 /* change the guy we are abandoning into something
387 that will not be "interesting" on the mutables
388 list. (As long as it is there, it will be
389 scavenged in GC, and we cannot guarantee that
390 it is still a "sane" StkO object). (And, besides,
391 why continue to keep it [and all it pts to] alive?)
/* Overwrite the abandoned chunk's header with an immutable-array
   header of zero payload so GC will not scavenge it as a StkO. */
394 FREEZE_MUT_HDR(StkOReg, ImMutArrayOfPtrs_info);
395 MUTUPLE_CLOSURE_SIZE(StkOReg) = MUTUPLE_VHS;
398 /* ToDo: Fetch the remote stack object here! */
399 RestoreStackStgRegs();
/* Parallel underflow entry points.  Each computes the exact
   continuation address before tail-calling CommonUnderflow: RetReg
   is first loaded from the StkO header and then overwritten with
   `cont', which itself reads RetReg for the vectored cases -- so the
   order of the two assignments is deliberate and load-bearing. */
404 #define DO_RETURN_TEMPLATE(label, cont) \
408 RetReg = STKO_RETURN(StkOReg); \
409 RetReg = (StgRetAddr)(cont); \
410 LivenessReg = INFO_LIVENESS(InfoPtr); \
411 JMP_(CommonUnderflow); \
415 DO_RETURN_TEMPLATE(UnderflowDirectReturn, DIRECT(((P_)RetReg)))
416 DO_RETURN_TEMPLATE(UnderflowVect0, ((P_)RetReg)[RVREL(0)])
417 DO_RETURN_TEMPLATE(UnderflowVect1, ((P_)RetReg)[RVREL(1)])
418 DO_RETURN_TEMPLATE(UnderflowVect2, ((P_)RetReg)[RVREL(2)])
419 DO_RETURN_TEMPLATE(UnderflowVect3, ((P_)RetReg)[RVREL(3)])
420 DO_RETURN_TEMPLATE(UnderflowVect4, ((P_)RetReg)[RVREL(4)])
421 DO_RETURN_TEMPLATE(UnderflowVect5, ((P_)RetReg)[RVREL(5)])
422 DO_RETURN_TEMPLATE(UnderflowVect6, ((P_)RetReg)[RVREL(6)])
423 DO_RETURN_TEMPLATE(UnderflowVect7, ((P_)RetReg)[RVREL(7)])
/* PrimUnderflow: the direct-return underflow for primitive returns,
   with no registers live across the underflow (NO_LIVENESS). */
425 STGFUN(PrimUnderflow)
428 RetReg = STKO_RETURN(StkOReg);
429 RetReg = (StgRetAddr)DIRECT(((P_)RetReg));
430 LivenessReg = NO_LIVENESS;
431 JMP_(CommonUnderflow);
436 * This one is similar, but isn't part of the return vector. It's only used
437 * when we fall off of a stack chunk and want to enter Node rather than
438 * returning through RetReg. (This occurs during UpdatePAP, when the updatee
439 * isn't on the current stack chunk.) It can't be done with the template,
440 * because R2 is dead, and R1 points to a PAP. Only R1 is live.
445 /* old version of the code */
/* NOTE(review): superseded by the inlined StackUnderflowEnterNode
   later in this file; presumably compiled out by a surrounding
   conditional -- confirm against the full source. */
446 STGFUN(StackUnderflowEnterNode)
449 RetReg = (StgRetAddr)(EnterNodeCode);
450 LivenessReg = LIVENESS_R1;
451 JMP_(CommonUnderflow);
458 We've inlined CommonUnderFlow because setting RetReg would zap
459 the return vector that the node needs.
460 We pick up the RetReg from the StkO header instead.
/* StackUnderflowEnterNode: used when we fall off a stack chunk and
   must enter Node rather than return through RetReg (the UpdatePAP
   case).  CommonUnderflow is inlined here because overwriting RetReg
   would zap the return vector the node needs; instead RetReg is
   recovered from the StkO header.  Only R1 (Node) is live. */
464 STGFUN(StackUnderflowEnterNode)
468 RetReg = STKO_RETURN(StkOReg); /* pick up return code from the StkO hdr
469 needed because we come from UpdatePAP */
470 LivenessReg = LIVENESS_R1;
472 temp = STKO_LINK(StkOReg);
474 /*? fprintf(stderr,"Stk Underflow from: %lx to: %lx size abandoned: %d\n",StkOReg,temp,STKO_CLOSURE_CTS_SIZE(StkOReg)); ?*/
476 /* change the guy we are abandoning into something
477 that will not be "interesting" on the mutables
478 list. (As long as it is there, it will be
479 scavenged in GC, and we cannot guarantee that
480 it is still a "sane" StkO object). (And, besides,
481 why continue to keep it [and all it pts to] alive?)
/* Neutralise the dead chunk for GC, as in CommonUnderflow. */
484 FREEZE_MUT_HDR(StkOReg, ImMutArrayOfPtrs_info);
485 MUTUPLE_CLOSURE_SIZE(StkOReg) = MUTUPLE_VHS;
488 /* ToDo: Fetch the remote stack object here! */
489 RestoreStackStgRegs();
490 JMP_(EnterNodeCode); /* this will enter a PAP containing the old stkos
501 /* "MAX_VECTORED_RTN" elements (see GhcConstants.lh) */
512 #endif /* CONCURRENT */