2 % (c) The AQUA Project, Glasgow University, 1994
4 %************************************************************************
6 \section[StgThreads.lhc]{Threaded Threads Support}
8 %************************************************************************
10 Some of the threads support is done in threaded code. How's that for ambiguous nomenclature?
17 #define MAIN_REG_MAP /* STG world */
22 #include "Statistics.h"
28 %************************************************************************
30 \subsection[thread-objects]{Special objects for thread support}
32 %************************************************************************
34 TSO's are Thread State Objects, where the thread context is stored when the
35 thread is sleeping, and where we have slots for STG registers that don't
36 live in real machine registers.
/* Entry code for a TSO closure.  A TSO is a run-time-system object and
   should never be entered as an ordinary closure; reaching this code
   means the evaluator was handed a TSO by mistake, so we report it.
   (The surrounding STGFUN wrapper is elided from this view.) */
46 fprintf(stderr, "TSO Entry: panic");
53 Stack objects are chunks of stack words allocated out of the heap and
54 linked together in a chain.
/* Entry code for a (dynamic) stack object.  Like TSOs, StkOs are
   run-time-system objects and must never be entered as closures;
   this is a can't-happen trap. */
64 fprintf(stderr, "StkO Entry: panic");
/* Entry code for the static variant of a stack object: the same
   can't-happen trap as for dynamic StkOs. */
74 STGFUN(StkO_static_entry)
78 fprintf(stderr, "StkO_static Entry: panic");
88 Blocking queues are essentially black holes with threads attached. These
89 are the threads to be awakened when the closure is updated.
/* Entering a blocking queue (BQ): the current thread blocks on the
   black-holed closure in Node.  The TSO is pushed onto the front of
   the closure's queue of waiting threads, and its saved PC is set to
   EnterNodeCode so that, once awakened after the update, it restarts
   by re-entering Node. */
93 EXTFUN(EnterNodeCode);
100 /* Before overwriting TSO_LINK */
101 STGCALL3(void,(),GranSimBlock,CurrentTSO,CurrentProc,Node);
/* Cons the current TSO onto the head of the closure's blocking queue. */
104 TSO_LINK(CurrentTSO) = (P_) BQ_ENTRIES(Node);
105 BQ_ENTRIES(Node) = (W_) CurrentTSO;
/* Only R1 (Node) is recorded as live across the reschedule. */
107 LivenessReg = LIVENESS_R1;
109 TSO_PC1(CurrentTSO) = EnterNodeCode;
112 QP_Event1("GR", CurrentTSO);
/* GranSim statistics: charge the elapsed run time to the thread and
   mark it as blocked as of `now'. */
115 if(RTSflags.ParFlags.granSimStats) {
116 /* Note that CURRENT_TIME may perform an unsafe call */
117 TIME now = CURRENT_TIME;
118 TSO_EXECTIME(CurrentTSO) += now - TSO_BLOCKEDAT(CurrentTSO);
119 TSO_BLOCKCOUNT(CurrentTSO)++;
120 TSO_QUEUE(CurrentTSO) = Q_BLOCKED;
121 TSO_BLOCKEDAT(CurrentTSO) = now;
122 DumpGranEvent(GR_BLOCK, CurrentTSO);
126 ReSchedule(SAME_THREAD); /* NB: GranSimBlock activated next thread */
137 Revertible black holes are needed in the parallel world, to handle
138 negative acknowledgements of messages containing updatable closures.
139 The idea is that when the original message is transmitted, the closure
140 is turned into a revertible black hole...an object which acts like a
141 black hole when local threads try to enter it, but which can be
142 reverted back to the original closure if necessary.
144 It's actually a lot like a blocking queue (BQ) entry, because
145 revertible black holes are initially set up with an empty blocking queue.
148 The combination of GrAnSim with revertible black holes has not been tested.
153 #if defined(PAR) || defined(GRAN)
/* Entering a revertible black hole (RBH): behaves like a blocking
   queue -- the current thread is queued on the RBH until the closure
   is updated (or reverted).  The queue field sits at different
   offsets in SPEC and GEN closures, hence the dispatch on info type.
   NOTE(review): the `break's between the switch cases are not visible
   in this view -- confirm against the full source. */
160 /* Before overwriting TSO_LINK */
161 STGCALL3(void,(),GranSimBlock,CurrentTSO,CurrentProc,Node);
164 switch (INFO_TYPE(InfoPtr)) {
165 case INFO_SPEC_RBH_TYPE:
166 TSO_LINK(CurrentTSO) = (P_) SPEC_RBH_BQ(Node);
167 SPEC_RBH_BQ(Node) = (W_) CurrentTSO;
169 case INFO_GEN_RBH_TYPE:
170 TSO_LINK(CurrentTSO) = (P_) GEN_RBH_BQ(Node);
171 GEN_RBH_BQ(Node) = (W_) CurrentTSO;
/* Any other info type here is an RTS bug. */
175 fprintf(stderr, "Panic: non-{SPEC,GEN} RBH %#lx (IP %#lx)\n", Node, InfoPtr);
/* Only R1 (Node) is live; on wake-up, resume by re-entering Node. */
179 LivenessReg = LIVENESS_R1;
181 TSO_PC1(CurrentTSO) = EnterNodeCode;
184 QP_Event1("GR", CurrentTSO);
/* GranSim statistics: same blocked-thread accounting as for BQs. */
188 if(RTSflags.ParFlags.granSimStats) {
189 /* Note that CURRENT_TIME may perform an unsafe call */
190 TIME now = CURRENT_TIME;
191 TSO_EXECTIME(CurrentTSO) += now - TSO_BLOCKEDAT(CurrentTSO);
192 TSO_BLOCKCOUNT(CurrentTSO)++;
193 TSO_QUEUE(CurrentTSO) = Q_BLOCKED;
194 TSO_BLOCKEDAT(CurrentTSO) = now;
195 DumpGranEvent(GR_BLOCK, CurrentTSO);
199 ReSchedule(SAME_THREAD); /* NB: GranSimBlock activated next thread */
211 %************************************************************************
213 \subsection[thread-entrypoints]{Scheduler-Thread Interfaces}
215 %************************************************************************
217 The normal way of entering a thread is through \tr{resumeThread},
218 which short-circuits any indirections to the TSO and StkO, sets up STG
219 registers, and jumps to the saved PC.
/* resumeThread: the normal way to (re)start a thread.  Shorts out any
   indirections to the TSO and to its current stack object, restores
   the STG register state saved in the TSO, and jumps to the saved PC. */
226 while(IS_INDIRECTION(INFO_PTR(CurrentTSO))) {
227 CurrentTSO = (P_) IND_CLOSURE_PTR(CurrentTSO);
/* GranSim statistics: mark the thread running and timestamp it. */
231 if (RTSflags.ParFlags.granSimStats) {
232 TSO_QUEUE(CurrentTSO) = Q_RUNNING;
233 /* Note that CURRENT_TIME may perform an unsafe call */
234 TSO_BLOCKEDAT(CurrentTSO) = CURRENT_TIME;
/* The register table for this thread lives inside the TSO itself. */
238 CurrentRegTable = TSO_INTERNAL_PTR(CurrentTSO);
/* The stack object may have been overwritten with an indirection too. */
240 while(IS_INDIRECTION(INFO_PTR(SAVE_StkO))) {
241 SAVE_StkO = (P_) IND_CLOSURE_PTR(SAVE_StkO);
245 SET_TASK_ACTIVITY(ST_REDUCING);
246 RESTORE_CCC(TSO_CCC(CurrentTSO));
247 JMP_(TSO_PC1(CurrentTSO));
252 Since we normally context switch during a heap check, it is possible
253 that we will return to a previously suspended thread without
254 sufficient heap for the thread to continue. However, we have cleverly
255 stashed away the heap requirements in @TSO_ARG1@ so that we can decide
256 whether or not to perform a garbage collection before resuming the
257 thread. The actual thread resumption address (either @EnterNodeCode@
258 or elsewhere) is stashed in @TSO_PC2@.
/* CheckHeapCode: resumption point for a thread that was suspended at
   a heap check.  The number of words required was stashed in
   TSO_ARG1; if bumping Hp by that amount overflows HpLim we trigger a
   garbage collection first, then resume at the address saved in
   TSO_PC2. */
261 STGFUN(CheckHeapCode)
265 ALLOC_HEAP(TSO_ARG1(CurrentTSO)); /* ticky profiling */
266 if ((Hp += TSO_ARG1(CurrentTSO)) > HpLim) {
267 ReallyPerformThreadGC(TSO_ARG1(CurrentTSO), rtsFalse);
270 SET_TASK_ACTIVITY(ST_REDUCING);
271 RESUME_(TSO_PC2(CurrentTSO));
276 Often, a thread starts (or rather, resumes) by entering the closure
277 that Node points to. Here's a tiny code fragment to do just that.
278 The saved PC in the TSO can be set to @EnterNodeCode@ whenever we
279 want this to happen upon resumption of the thread.
/* EnterNodeCode: tiny resumption fragment that simply enters the
   closure pointed to by Node.  A thread's saved PC is set to this
   whenever the thread should restart by (re-)entering Node. */
282 STGFUN(EnterNodeCode)
286 InfoPtr=(D_)(INFO_PTR(Node));
287 JMP_(ENTRY_CODE(InfoPtr));
292 Then, there are the occasions when we just want to pick up where we
293 left off. We use \tr{RESUME_} here instead of \tr{JMP_}, because when
294 we return to a call site, the Alpha is going to try to load \tr{%gp}
295 from \tr{%ra} rather than \tr{%pv}, and \tr{JMP_} only sets \tr{%pv}.
296 Resuming to the start of a function is currently okay, but an
297 extremely bad practice. As we add support for more architectures, we
298 can expect the difference between \tr{RESUME_} and \tr{JMP_} to become
/* Pick up exactly where we left off: resume at the PC saved in
   TSO_PC2.  RESUME_ (rather than JMP_) is required so the Alpha
   reloads %gp correctly -- see the commentary above. */
306 SET_TASK_ACTIVITY(ST_REDUCING);
307 RESUME_(TSO_PC2(CurrentTSO));
312 %************************************************************************
314 \subsection[stack-chunk-underflow-code]{Underflow code for stack chunks}
316 %************************************************************************
322 On a uniprocessor, stack underflow causes us no great headaches. The
323 old value of RetReg is squirreled away at the base of the top stack
324 object (the one that's about to get blown away). We just yank it
325 outta there and perform the same kind of return that got us here in
328 This simplicity is due to the fact that we never have to fetch a stack
/* Uniprocessor stack underflow.  DO_RETURN_TEMPLATE generates one
   return point per return convention: it unlinks the exhausted stack
   chunk, recovers the caller's RetReg squirreled away at the base of
   the dying chunk, restores the stack STG registers from the chunk
   below, and then performs `cont' -- the same kind of return that got
   us here.  (Parts of the macro body are elided from this view.) */
333 #define DO_RETURN_TEMPLATE(label, cont) \
338 temp = STKO_LINK(StkOReg); \
339 RetReg = STKO_RETURN(StkOReg); \
341 RestoreStackStgRegs(); \
346 DO_RETURN_TEMPLATE(UnderflowDirectReturn, DIRECT(((P_)RetReg)))
347 DO_RETURN_TEMPLATE(UnderflowVect0, ((P_)RetReg)[RVREL(0)])
348 DO_RETURN_TEMPLATE(UnderflowVect1, ((P_)RetReg)[RVREL(1)])
349 DO_RETURN_TEMPLATE(UnderflowVect2, ((P_)RetReg)[RVREL(2)])
350 DO_RETURN_TEMPLATE(UnderflowVect3, ((P_)RetReg)[RVREL(3)])
351 DO_RETURN_TEMPLATE(UnderflowVect4, ((P_)RetReg)[RVREL(4)])
353 DO_RETURN_TEMPLATE(UnderflowVect5, ((P_)RetReg)[RVREL(5)])
354 DO_RETURN_TEMPLATE(UnderflowVect6, ((P_)RetReg)[RVREL(6)])
355 DO_RETURN_TEMPLATE(UnderflowVect7, ((P_)RetReg)[RVREL(7)])
/* Variant used when we fall off a chunk and must enter Node instead
   of returning through RetReg. */
357 DO_RETURN_TEMPLATE(StackUnderflowEnterNode, EnterNodeCode)
363 In the parallel world, we may have to fetch the StkO from a remote
364 location before we can load up the stack registers and perform the
365 return. Our convention is that we load RetReg up with the exact
366 continuation address (after a vector table lookup, if necessary),
367 and tail-call the code to fetch the stack object. (Of course, if
368 the stack object is already local, we then just jump to the
369 continuation address.)
/* CommonUnderflow (parallel world): shared tail for all underflow
   return points.  RetReg has already been set to the exact
   continuation address by the caller.  Here we unlink the exhausted
   stack chunk, neutralise it so the garbage collector no longer
   treats it as an interesting mutable StkO, restore the stack STG
   registers from the chunk below, and (eventually) continue.  In a
   full PAR implementation the StkO might first have to be fetched
   from a remote processor -- see the ToDo below. */
373 STGFUN(CommonUnderflow)
378 temp = STKO_LINK(StkOReg);
380 /* fprintf(stderr,"Stk Underflow from: %lx to: %lx size abandoned: %d\n",StkOReg,temp,STKO_CLOSURE_CTS_SIZE(StkOReg)); */
382 /* change the guy we are abandoning into something
383 that will not be "interesting" on the mutables
384 list. (As long as it is there, it will be
385 scavenged in GC, and we cannot guarantee that
386 it is still a "sane" StkO object). (And, besides,
387 why continue to keep it [and all it pts to] alive?)
390 FREEZE_MUT_HDR(StkOReg, ImMutArrayOfPtrs_info);
391 MUTUPLE_CLOSURE_SIZE(StkOReg) = MUTUPLE_VHS;
394 /* ToDo: Fetch the remote stack object here! */
395 RestoreStackStgRegs();
/* Parallel-world underflow return points.  Each computes the exact
   continuation address into RetReg FIRST (doing any vector-table
   lookup now, while the dying chunk's saved return address is at
   hand -- note that `cont' is evaluated after RetReg has been loaded
   from STKO_RETURN, so ((P_)RetReg)[RVREL(i)] indexes the saved
   return vector), then tail-calls CommonUnderflow to switch chunks. */
400 #define DO_RETURN_TEMPLATE(label, cont) \
404 RetReg = STKO_RETURN(StkOReg); \
405 RetReg = (StgRetAddr)(cont); \
406 LivenessReg = INFO_LIVENESS(InfoPtr); \
407 JMP_(CommonUnderflow); \
411 DO_RETURN_TEMPLATE(UnderflowDirectReturn, DIRECT(((P_)RetReg)))
412 DO_RETURN_TEMPLATE(UnderflowVect0, ((P_)RetReg)[RVREL(0)])
413 DO_RETURN_TEMPLATE(UnderflowVect1, ((P_)RetReg)[RVREL(1)])
414 DO_RETURN_TEMPLATE(UnderflowVect2, ((P_)RetReg)[RVREL(2)])
415 DO_RETURN_TEMPLATE(UnderflowVect3, ((P_)RetReg)[RVREL(3)])
416 DO_RETURN_TEMPLATE(UnderflowVect4, ((P_)RetReg)[RVREL(4)])
417 DO_RETURN_TEMPLATE(UnderflowVect5, ((P_)RetReg)[RVREL(5)])
418 DO_RETURN_TEMPLATE(UnderflowVect6, ((P_)RetReg)[RVREL(6)])
419 DO_RETURN_TEMPLATE(UnderflowVect7, ((P_)RetReg)[RVREL(7)])
/* PrimUnderflow: underflow return point that forces a direct return
   with no STG registers live (NO_LIVENESS) -- presumably used for
   primitive/unboxed returns; confirm against callers in the full
   source. */
421 STGFUN(PrimUnderflow)
424 RetReg = STKO_RETURN(StkOReg);
425 RetReg = (StgRetAddr)DIRECT(((P_)RetReg));
426 LivenessReg = NO_LIVENESS;
427 JMP_(CommonUnderflow);
432 * This one is similar, but isn't part of the return vector. It's only used
433 * when we fall off of a stack chunk and want to enter Node rather than
434 * returning through RetReg. (This occurs during UpdatePAP, when the updatee
435 * isn't on the current stack chunk.) It can't be done with the template,
436 * because R2 is dead, and R1 points to a PAP. Only R1 is live.
/* StackUnderflowEnterNode: see the comment above -- used when we fall
   off a stack chunk and must enter Node (R1, which points to a PAP)
   rather than return through RetReg.  Only R1 is live, which is why
   the template cannot be used. */
439 STGFUN(StackUnderflowEnterNode)
442 RetReg = (StgRetAddr)(EnterNodeCode);
443 LivenessReg = LIVENESS_R1;
444 JMP_(CommonUnderflow);
452 /* "MAX_VECTORED_RTN" elements (see GhcConstants.lh) */
/* seqDirectReturn: direct-return routine for the seq return vector
   below.  The caller's RetReg and the real continuation were pushed
   on the B stack; pop them back out.  NOTE(review): the SpB
   adjustment is commented out here -- confirm the stack pointer is
   advanced elsewhere (the relevant lines are elided from this view). */
467 IFN_(seqDirectReturn) {
471 RetReg = (StgRetAddr) SpB[BREL(0)];
472 cont = (void *) SpB[BREL(1)];
473 /* SpB += BREL(2); */
479 NB: For direct returns to work properly, the name of the routine must be
480 the same as the name of the vector table with vtbl_ removed and DirectReturn
481 appended. This is all the mangler understands.
/* Return-vector entries: every vectored return funnels through
   seqDirectReturn.  Per the note above, the table's name (with vtbl_
   stripped and DirectReturn appended) must match the routine's name
   for the mangler to handle direct returns. */
486 (W_) seqDirectReturn,
487 (W_) seqDirectReturn,
488 (W_) seqDirectReturn,
489 (W_) seqDirectReturn,
490 (W_) seqDirectReturn,
491 (W_) seqDirectReturn,
492 (W_) seqDirectReturn,
496 #endif /* CONCURRENT */