2 % (c) The GRASP Project, Glasgow University, 1994-1995
4 \section[Thread]{Thread support macros used in \tr{.hc} files}
/* GrAnSim instrumentation stubs: when granularity simulation is
 * disabled these expand to nothing, so compiled .hc code can invoke
 * them unconditionally. */
13 #define GRAN_ALLOC_HEAP(n,liveness) /* nothing */
14 #define GRAN_UNALLOC_HEAP(n,liveness) /* nothing */
15 #define GRAN_FETCH() /* nothing */
16 #define GRAN_FETCH_AND_RESCHEDULE(liveness) /* nothing */
17 #define GRAN_RESCHEDULE(liveness, reenter) /* nothing */
18 #define GRAN_EXEC(arith,branch,loads,stores,floats) /* nothing */
19 #define GRAN_SPARK() /* nothing */
/* In this configuration the context-switch test contributes nothing
 * to the conditions it is spliced into. */
26 #define OR_CONTEXT_SWITCH
/* Upper bound on the number of live threads. */
30 #define DEFAULT_MAX_THREADS (32)
32 extern I_ do_gr_sim; /* Are we simulating granularity? */
35 extern I_ do_qp_prof; /* Are we quasi-parallel profiling? */
/* Alias so generated code can test the profiling flag uniformly. */
41 #define DO_QP_PROF do_qp_prof
46 extern I_ context_switch; /* Flag set by signal handler */
47 extern I_ contextSwitchTime;
48 #if defined(USE_COST_CENTRES) || defined(GUM)
49 extern I_ contextSwitchTicks;
/* Timer-driven switching: cap the switch rate and derive the minimum
 * time-slice length from it. */
52 #define CS_MAX_FREQUENCY 100 /* context switches per second */
53 #define CS_MIN_MILLISECS (1000/CS_MAX_FREQUENCY) /* milliseconds per slice */
55 #ifdef __STG_GCC_REGS__
/* With STG registers, checks also test the context_switch flag.
 * NOTE(review): the two OR_CONTEXT_SWITCH definitions below read like
 * the two arms of this #ifdef; the #else line is not visible here --
 * confirm against the full file. */
56 #define OR_CONTEXT_SWITCH || context_switch
58 #define OR_CONTEXT_SWITCH /* in miniInterpret */
/* Indices into the spark pool arrays: required sparks must be run,
 * advisory sparks are discretionary parallelism. */
61 #define REQUIRED_POOL 0
62 #define ADVISORY_POOL 1
/* Spark pool bounds and the runnable/waiting thread queues.
 * NOTE(review): the scalar queue variables here and the per-processor
 * arrays further down appear to belong to alternative build
 * configurations; the separating conditionals are not visible here. */
67 extern PP_ PendingSparksBase[SPARK_POOLS], PendingSparksLim[SPARK_POOLS];
68 extern PP_ PendingSparksHd[SPARK_POOLS], PendingSparksTl[SPARK_POOLS];
70 extern I_ SparkLimit[SPARK_POOLS];
72 extern P_ RunnableThreadsHd, RunnableThreadsTl;
73 extern P_ WaitingThreadsHd, WaitingThreadsTl;
75 #define DEFAULT_MAX_LOCAL_SPARKS 100
77 extern I_ MaxLocalSparks;
79 IF_RTS(extern void AwaitEvent(I_);)
/* GrAnSim variant: one queue set per simulated processor. */
83 extern sparkq PendingSparksHd[][SPARK_POOLS], PendingSparksTl[][SPARK_POOLS];
84 extern P_ RunnableThreadsHd[], RunnableThreadsTl[],
85 WaitThreadsHd[], WaitThreadsTl[];
/* Convenience views of the current processor's queues. */
87 #define SparkQueueHd PendingSparksHd[CurrentProc][ADVISORY_POOL]
88 #define SparkQueueTl PendingSparksTl[CurrentProc][ADVISORY_POOL]
89 #define ThreadQueueHd RunnableThreadsHd[CurrentProc]
90 #define ThreadQueueTl RunnableThreadsTl[CurrentProc]
91 #define WaitingThreadsHd WaitThreadsHd[CurrentProc]
92 #define WaitingThreadsTl WaitThreadsTl[CurrentProc]
96 IF_RTS(extern void PruneSparks(STG_NO_ARGS);)
100 /* Codes that can be used as params for ReSchedule */
101 /* I distinguish them from the values 0/1 in the -UGRAN setup for security */
103 #define FIND_THREAD 10
104 #define SAME_THREAD 11
/* NEW_THREAD deliberately aliases SAME_THREAD (12 is left unused);
 * CHANGE_THREAD follows at 13. */
105 #define NEW_THREAD SAME_THREAD
106 #define CHANGE_THREAD 13
/* One machine word of bitmask bounds the number of simulated PEs. */
108 #define MAX_PROC (BITS_IN(W_)) /* Maximum number of PEs that can be simulated */
111 extern W_ IdleProcs, Idlers;
113 extern unsigned CurrentProc; /* PE currently being simulated */
114 extern W_ CurrentTime[]; /* simulated clock; presumably indexed by PE -- confirm */
115 extern W_ SparksAvail, SurplusThreads;
/* Processor numbers to bitmasks and vice-versa.
 *
 * Macro arguments are parenthesised in the expansions so that a
 * compound argument (e.g. PE_NUMBER(p|1)) binds as a unit; the
 * previous unparenthesised form let the argument's operators capture
 * neighbouring operators of the expansion. */
#define PE_NUMBER(n) (1l << (long)(n))
#define ThisPE PE_NUMBER(CurrentProc)
#define MainPE PE_NUMBER(MainProc)

/* Non-zero iff the bit for processor `proc' is set in bitmask `ga'. */
#define IS_LOCAL_TO(ga,proc) ((1l << (long)(proc)) & (ga))
126 /* These constants should eventually be program parameters */
128 /* Communication Cost Model (EDS-like), max_proc > 2. */
/* Costs are in abstract simulated-time units.  FETCHTIME and
 * GLOBALUNBLOCKTIME expand MSGUNPACKTIME, which is defined further
 * down -- legal, since macro expansion happens at the point of use. */
130 #define LATENCY 1000 /* Latency for single packet */
131 #define ADDITIONAL_LATENCY 100 /* Latency for additional packets */
132 #define BASICBLOCKTIME 10
133 #define FETCHTIME (LATENCY*2+MSGUNPACKTIME)
134 #define LOCALUNBLOCKTIME 10
135 #define GLOBALUNBLOCKTIME (LATENCY+MSGUNPACKTIME)
/* Presumably the runtime-tunable counterparts of the constants above
 * -- confirm where these are set. */
137 extern W_ gran_latency, gran_additional_latency, gran_fetchtime,
138 gran_lunblocktime, gran_gunblocktime;
140 #define MSGPACKTIME 0 /* Cost of creating a packet */
141 #define MSGUNPACKTIME 0 /* Cost of receiving a packet */
143 extern W_ gran_mpacktime, gran_mtidytime, gran_munpacktime;
145 /* Thread cost model */
146 #define THREADCREATETIME (25+THREADSCHEDULETIME)
147 #define THREADQUEUETIME 12 /* Cost of adding a thread to the running/runnable queue */
148 #define THREADDESCHEDULETIME 75 /* Cost of descheduling a thread */
149 #define THREADSCHEDULETIME 75 /* Cost of scheduling a thread */
150 #define THREADCONTEXTSWITCHTIME (THREADDESCHEDULETIME+THREADSCHEDULETIME)
152 extern W_ gran_threadcreatetime, gran_threadqueuetime,
153 gran_threadscheduletime, gran_threaddescheduletime,
154 gran_threadcontextswitchtime;
156 /* Instruction Cost model (SPARC, including cache misses) */
158 #define BRANCH_COST 2
161 #define FLOAT_COST 1 /* ? */
/* NOTE(review): this declaration still ends with a comma -- it
 * continues on a line not visible here. */
163 extern W_ gran_arith_cost, gran_branch_cost,
164 gran_load_cost, gran_store_cost, gran_float_cost,
167 /* Miscellaneous Parameters */
/* Scheduler/fetch policy switches; presumably set from RTS options
 * (cf. the -bD? debug options mentioned below). */
168 extern I_ DoFairSchedule;
169 extern I_ DoReScheduleOnFetch;
170 extern I_ SimplifiedFetch;
171 extern I_ DoStealThreadsFirst;
172 extern I_ DoAlwaysCreateThreads;
173 extern I_ DoThreadMigration;
174 extern I_ DoGUMMFetching;
175 extern I_ FetchStrategy;
176 extern I_ PreferSparksOfLocalNodes;
177 /* These come from debug options -bD? */
179 extern I_ PrintFetchMisses, fetch_misses;
/* Update/blocking-queue statistics counters. */
181 extern I_ nUPDs, nUPDs_old, nUPDs_new, nUPDs_BQ, nPAPs, BQ_lens;
/* Profiling output selection flags. */
184 extern I_ do_gr_binary;
185 extern I_ do_gr_profile;
186 extern I_ no_gr_profile;
187 extern I_ do_sp_profile;
189 extern I_ NeedToReSchedule;
/* GrAnSim primitives implemented in the RTS proper.
 * NOTE(review): the macros below call GranSimUnallocate (lower-case
 * 'a') while the prototype here is GranSimUnAllocate -- confirm which
 * spelling the RTS actually defines. */
191 extern void GranSimAllocate PROTO((I_, P_, W_));
192 extern void GranSimUnAllocate PROTO((I_, P_, W_));
193 extern I_ GranSimFetch PROTO((P_));
194 extern void GranSimExec PROTO((W_,W_,W_,W_,W_));
195 extern void GranSimSpark PROTO((W_,P_));
196 extern void GranSimBlock PROTO(());
197 extern void PerformReschedule PROTO((W_, W_));
/* Disabled (#if 0) variant: instrumentation routed through STGCALL
 * wrappers. */
199 #if 0 /* 'ngo Dochmey */
201 #define GRAN_ALLOC_HEAP(n,liveness) STGCALL3(void,(),GranSimAllocate,n,0,0)
202 #define GRAN_UNALLOC_HEAP(n,liveness) STGCALL3(void,(),GranSimUnallocate,n,0,0)
204 #define GRAN_FETCH() STGCALL1(I_,(),GranSimFetch,Node)
/* Fetch Node if it is live, then reschedule (macro body continues on
 * lines not visible here). */
206 #define GRAN_FETCH_AND_RESCHEDULE(liveness_mask) \
207 do { if(liveness_mask&LIVENESS_R1) \
208 STGCALL1(I_,(),GranSimFetch,Node); \
209 GRAN_RESCHEDULE(liveness_mask,1); \
212 #define GRAN_RESCHEDULE(liveness_mask,reenter) \
213 STGCALL2_GC(void,(), \
214 PerformReschedule,liveness_mask,reenter)
/* Reschedule only when the timer signal has set context_switch. */
216 #define THREAD_CONTEXT_SWITCH(liveness_mask,reenter) \
218 if (context_switch /* OR_INTERVAL_EXPIRED */) { \
219 GRAN_RESCHEDULE(liveness_mask,reenter); \
222 #define GRAN_EXEC(arith,branch,load,store,floats) \
223 STGCALL5(void,(),GranSimExec,arith,branch,load,store,floats)
/* Live (#else) variant: direct calls into the GrAnSim routines.
 * Several of the macro bodies below continue on lines not visible
 * here.  NOTE(review): GranSimUnallocate here vs. the GranSimUnAllocate
 * prototype above -- confirm spelling.  ADD_TO_SPARK_QUEUE appends
 * `spark' to the tail of the current PE's advisory pool; its expansion
 * is several statements NOT wrapped in do { } while(0), so it is
 * unsafe after a bare `if', and the else-arm of its internal `if'
 * appears to be on a line not shown here. */
226 #else /* 1 */ /* chu' Dochmey */
228 #define GRAN_ALLOC_HEAP(n,liveness) \
230 GranSimAllocate(n,0,0); \
233 #define GRAN_UNALLOC_HEAP(n,liveness) \
235 GranSimUnallocate(n,0,0); \
238 #define GRAN_FETCH() \
240 GranSimFetch(Node); \
243 #define GRAN_FETCH_AND_RESCHEDULE(liveness_mask) \
244 do { if(liveness_mask&LIVENESS_R1) \
246 GranSimFetch(Node); \
247 RestoreAllStgRegs(); \
248 GRAN_RESCHEDULE(liveness_mask,1); \
251 #define GRAN_RESCHEDULE(liveness_mask,reenter) \
252 PerformReschedule_wrapper(liveness_mask,reenter)
/* Reschedule only when the timer signal has set context_switch. */
254 #define THREAD_CONTEXT_SWITCH(liveness_mask,reenter) \
256 if (context_switch /* OR_INTERVAL_EXPIRED */) { \
257 GRAN_RESCHEDULE(liveness_mask,reenter); \
260 #define GRAN_EXEC(arith,branch,load,store,floats) \
262 GranSimExec(arith,branch,load,store,floats); \
268 #define ADD_TO_SPARK_QUEUE(spark) \
269 SPARK_NEXT(spark) = NULL; \
270 SPARK_PREV(spark) = PendingSparksTl[CurrentProc][ADVISORY_POOL]; \
271 if (PendingSparksHd[CurrentProc][ADVISORY_POOL] == NULL) \
272 PendingSparksHd[CurrentProc][ADVISORY_POOL] = spark; \
274 SPARK_NEXT(PendingSparksTl[CurrentProc][ADVISORY_POOL]) = spark; \
275 PendingSparksTl[CurrentProc][ADVISORY_POOL] = spark; \
279 extern P_ CurrentTSO; /* thread state object now in use */
/* Presumably recycling lists for stack space and TSOs -- confirm. */
281 extern P_ AvailableStack;
282 extern P_ AvailableTSO;
/* Scheduler entry points.
 * NOTE(review): ReSchedule is declared twice with different parameter
 * names -- presumably alternative configuration branches whose
 * conditionals are not visible here. */
286 void ScheduleThreads PROTO((P_ topClosure));
288 void ReSchedule PROTO((int what_next)) STG_NORETURN;
290 void ReSchedule PROTO((int again)) STG_NORETURN;
292 void EndThread(STG_NO_ARGS) STG_NORETURN;
/* Quasi-parallel profiling event hooks. */
294 void QP_Event0 PROTO((I_, P_));
295 void QP_Event1 PROTO((char *, P_));
296 void QP_Event2 PROTO((char *, P_, P_));
297 long qp_elapsed_time(STG_NO_ARGS);
300 %************************************************************************
302 \subsection[thread-heap-objs]{Special threads-only heap objects (`closures')}
304 %************************************************************************
306 %************************************************************************
308 \subsubsection[TSO-closures]{@TSO@ (thread state object) heap objects}
310 %************************************************************************
312 We now enter the realm of the Deeply Magical.
314 Reduction threads come and go, resume and suspend, etc., in the threaded
315 world. Obviously, there must be a place to squirrel away state information
316 when a thread is suspended. Hence these {\em thread state objects} (TSOs).
318 Rather than manage TSOs' alloc/dealloc, etc., in some {\em ad hoc} way, we
319 instead alloc/dealloc/etc them in the heap; then we can use all the
320 standard garbage-collection/fetching/flushing/etc machinery on them.
321 So that's why TSOs are ``heap objects,'' albeit very special ones.
323 We use all the standard heap-object/closure jargon... (e.g.,
324 @SET_TSO_HDR@, fixed headers, variable-hdr size, ...).
326 A TSO is a fixed-size object with (post-header) words arranged like
327 the main register table, and enough slop so that the register table
328 can be properly aligned. The last header word of the TSO is
329 a pointer to the (internal) start of the interesting data.
331 Note that the heap and stack pointers in the TSO are only valid while
332 the thread is executing, and only if the corresponding values are not
333 stored in machine registers (i.e. the TSO becomes the backing register
334 table for those values).
/* TSO layout: number of words in the fixed info area. */
337 #define TSO_INFO_WORDS 10
339 #ifdef DO_REDN_COUNTING
340 #define TSO_REDN_WORDS 2
342 #define TSO_REDN_WORDS 0
/* Extra per-thread bookkeeping words for GrAnSim/GUM builds. */
345 #if defined(GRAN) || defined(PAR)
346 #define TSO_GRAN_WORDS 15
348 #define TSO_GRAN_WORDS 0
/* NOTE(review): the parenthesised sum below reads like the body of a
 * TSO_VHS definition whose #define line is not visible here --
 * confirm against the full file. */
352 (GC_MUT_RESERVED_WORDS + TSO_INFO_WORDS + TSO_REDN_WORDS + TSO_GRAN_WORDS)
354 #define TSO_HS (FIXED_HS + TSO_VHS)
/* Contents size: a register table plus slop for double alignment. */
355 #define TSO_CTS_SIZE (BYTES_TO_STGWORDS(sizeof(STGRegisterTable) + sizeof(StgDouble)))
357 #define TSO_PTRS (MAX_VANILLA_REG + 2)
359 /* std start-filling-in macro: */
360 #define SET_TSO_HDR(closure,infolbl,cc) \
361 { SET_FIXED_HDR(closure,infolbl,cc); \
362 SET_MUT_RESERVED_WORDS(closure); \
365 #define TSO_INFO_START (FIXED_HS + GC_MUT_RESERVED_WORDS) /* word offsets of the TSO info fields follow */
366 #define TSO_LINK_LOCN (TSO_INFO_START + 0)
367 #define TSO_CCC_LOCN (TSO_INFO_START + 1)
368 #define TSO_NAME_LOCN (TSO_INFO_START + 2)
369 #define TSO_ID_LOCN (TSO_INFO_START + 3)
370 #define TSO_TYPE_LOCN (TSO_INFO_START + 4)
371 #define TSO_PC1_LOCN (TSO_INFO_START + 5)
372 #define TSO_PC2_LOCN (TSO_INFO_START + 6)
373 #define TSO_ARG1_LOCN (TSO_INFO_START + 7)
374 #define TSO_EVENT_LOCN (TSO_INFO_START + 8)
375 #define TSO_SWITCH_LOCN (TSO_INFO_START + 9)
/* Reduction-count words (present only under DO_REDN_COUNTING). */
377 #define TSO_REDN_START (TSO_INFO_START + TSO_INFO_WORDS)
378 #ifdef DO_REDN_COUNTING
379 #define TSO_AHWM_LOCN (TSO_REDN_START + 0)
380 #define TSO_BHWM_LOCN (TSO_REDN_START + 1)
/* GrAnSim/GUM per-thread statistics slots. */
383 #define TSO_GRAN_START (TSO_REDN_START + TSO_REDN_WORDS)
384 #if defined(GRAN) || defined(PAR)
385 #define TSO_LOCKED_LOCN (TSO_GRAN_START + 0)
386 #define TSO_SPARKNAME_LOCN (TSO_GRAN_START + 1)
387 #define TSO_STARTEDAT_LOCN (TSO_GRAN_START + 2)
388 #define TSO_EXPORTED_LOCN (TSO_GRAN_START + 3)
389 #define TSO_BASICBLOCKS_LOCN (TSO_GRAN_START + 4)
390 #define TSO_ALLOCS_LOCN (TSO_GRAN_START + 5)
391 #define TSO_EXECTIME_LOCN (TSO_GRAN_START + 6)
392 #define TSO_FETCHTIME_LOCN (TSO_GRAN_START + 7)
393 #define TSO_FETCHCOUNT_LOCN (TSO_GRAN_START + 8)
394 #define TSO_BLOCKTIME_LOCN (TSO_GRAN_START + 9)
395 #define TSO_BLOCKCOUNT_LOCN (TSO_GRAN_START + 10)
396 #define TSO_BLOCKEDAT_LOCN (TSO_GRAN_START + 11)
397 #define TSO_GLOBALSPARKS_LOCN (TSO_GRAN_START + 12)
398 #define TSO_LOCALSPARKS_LOCN (TSO_GRAN_START + 13)
399 #define TSO_QUEUE_LOCN (TSO_GRAN_START + 14)
/* Field accessors: each indexes the TSO as an array of the field's
 * type at the corresponding _LOCN offset.  (TSO_CCC indexes as
 * CostCentre[], unlike the word-sized views used by the others.) */
402 #define TSO_LINK(closure) (((PP_)closure)[TSO_LINK_LOCN])
403 #define TSO_CCC(closure) (((CostCentre *)closure)[TSO_CCC_LOCN])
404 #define TSO_NAME(closure) (((PP_)closure)[TSO_NAME_LOCN])
405 #define TSO_ID(closure) (((P_)closure)[TSO_ID_LOCN])
406 #define TSO_TYPE(closure) (((P_)closure)[TSO_TYPE_LOCN])
407 #define TSO_PC1(closure) (((FP_)closure)[TSO_PC1_LOCN])
408 #define TSO_PC2(closure) (((FP_)closure)[TSO_PC2_LOCN])
409 #define TSO_ARG1(closure) (((P_)closure)[TSO_ARG1_LOCN])
410 #define TSO_EVENT(closure) (((P_)closure)[TSO_EVENT_LOCN])
411 #define TSO_SWITCH(closure) (((FP_)closure)[TSO_SWITCH_LOCN])
413 #define TSO_AHWM(closure) (((I_ *)closure)[TSO_AHWM_LOCN])
414 #define TSO_BHWM(closure) (((I_ *)closure)[TSO_BHWM_LOCN])
416 #define TSO_LOCKED(closure) (((P_)closure)[TSO_LOCKED_LOCN])
417 #define TSO_SPARKNAME(closure) (((P_)closure)[TSO_SPARKNAME_LOCN])
418 #define TSO_STARTEDAT(closure) (((P_)closure)[TSO_STARTEDAT_LOCN])
419 #define TSO_EXPORTED(closure) (((P_)closure)[TSO_EXPORTED_LOCN])
420 #define TSO_BASICBLOCKS(closure) (((P_)closure)[TSO_BASICBLOCKS_LOCN])
421 #define TSO_ALLOCS(closure) (((P_)closure)[TSO_ALLOCS_LOCN])
422 #define TSO_EXECTIME(closure) (((P_)closure)[TSO_EXECTIME_LOCN])
423 #define TSO_FETCHTIME(closure) (((P_)closure)[TSO_FETCHTIME_LOCN])
424 #define TSO_FETCHCOUNT(closure) (((P_)closure)[TSO_FETCHCOUNT_LOCN])
425 #define TSO_BLOCKTIME(closure) (((P_)closure)[TSO_BLOCKTIME_LOCN])
426 #define TSO_BLOCKCOUNT(closure) (((P_)closure)[TSO_BLOCKCOUNT_LOCN])
427 #define TSO_BLOCKEDAT(closure) (((P_)closure)[TSO_BLOCKEDAT_LOCN])
428 #define TSO_GLOBALSPARKS(closure) (((P_)closure)[TSO_GLOBALSPARKS_LOCN])
429 #define TSO_LOCALSPARKS(closure) (((P_)closure)[TSO_LOCALSPARKS_LOCN])
430 #define TSO_QUEUE(closure) (((P_)closure)[TSO_QUEUE_LOCN])
/* Address of the register table inside the TSO, masked down to a
 * StgDouble boundary (the extra StgDouble in TSO_CTS_SIZE provides
 * the slop that makes this rounding safe). */
432 #define TSO_INTERNAL_PTR(closure) \
433 ((STGRegisterTable *)(((W_)(((P_)closure) \
434 + TSO_HS + BYTES_TO_STGWORDS(sizeof(StgDouble)))) & ~(sizeof(StgDouble) - 1)))
436 #if defined(CONCURRENT) && defined(GRAN) /* HWL */
437 /* Per definitionem a tso is really awake if it has met a first */
438 /* GRAN_RESCHEDULE macro after having been rescheduled. */
/* The flag is encoded in whether TSO_SWITCH equals TSO_PC2. */
439 #define REALLY_AWAKE(tso) (TSO_SWITCH(tso) != TSO_PC2(tso))
440 #define SET_AWAKE_FLAG(tso) TSO_SWITCH(tso) = NULL
441 #define RESET_AWAKE_FLAG(tso) TSO_SWITCH(tso) = TSO_PC2(tso)
446 The types of threads (TSO_TYPE):
448 #define T_MAIN 0 /* Must be executed locally */
449 #define T_REQUIRED 1 /* A required thread -- may be exported */
450 #define T_ADVISORY 2 /* An advisory thread -- may be exported */
451 #define T_FAIL 3 /* A failure thread -- may be exported */
454 The total space required to start a new thread (See NewThread in
/* TSO (header + register-table contents) plus one initial stack
 * object chunk. */
457 #define THREAD_SPACE_REQUIRED (TSO_HS + TSO_CTS_SIZE + STKO_HS + StkOChunkSize)
460 Here are the various queues for GrAnSim-type events.
/* Single-character tags identifying each queue. */
462 #define Q_RUNNING 'G'
463 #define Q_RUNNABLE 'A'
464 #define Q_BLOCKED 'R'
465 #define Q_FETCHING 'Y'
468 %************************************************************************
470 \subsubsection[spark-closures]{Pending Sparks}
472 %************************************************************************
/* Spark management interface.
 * NOTE(review): DisposeSpark is declared twice (P_ vs. sparkq
 * argument) -- presumably alternative configurations whose separating
 * conditionals are not visible here. */
477 P_ FindLocalSpark PROTO((rtsBool forexport));
479 void DisposeSpark PROTO((P_ spark));
480 rtsBool Spark PROTO((P_ closure, rtsBool required));
484 #ifdef GRAN /* For GrAnSim sparks are currently mallocated -- HWL */
486 void DisposeSpark PROTO((sparkq spark));
488 # define MAX_SPARKS 2000 /* For GC Roots Purposes */
/* GRAN_TNG builds pass an extra (gran_info) argument to NewSpark. */
489 # if defined(GRAN_TNG)
490 extern sparkq NewSpark PROTO((P_,I_,I_,I_));
491 # else /* !GRAN_TNG */
492 extern sparkq NewSpark PROTO((P_,I_,I_));
493 # endif /* GRAN_TNG */
/* Field accessors for the malloc'ed spark records.  The macro
 * argument and the full expansions are parenthesised so that a
 * compound argument (e.g. SPARK_PREV(*p)) and member access on a cast
 * result both bind as intended; the previous unparenthesised forms
 * misparsed in those cases. */
# define SPARK_PREV(spark) ((spark)->prev)
# define SPARK_NEXT(spark) ((sparkq)((spark)->next))
# define SPARK_NODE(spark) ((P_)((spark)->node))
# define SPARK_NAME(spark) ((spark)->name)
# if defined(GRAN_TNG)
# define SPARK_GRAN_INFO(spark) ((spark)->gran_info)
# endif /* GRAN_TNG */
# define SPARK_GLOBAL(spark) ((spark)->global)
# define SPARK_EXPORTED(spark) (SPARK_GLOBAL(spark) > 1)
508 %************************************************************************
510 \subsubsection[STKO-closures]{@STKO@ (stack object) heap objects}
512 %************************************************************************
514 We linger in the Deeply Magical...
516 Each reduction thread has to have its own stack space. As there may
517 be many such threads, and as any given one may need quite a big stack,
518 a naive give-'em-a-big-stack-and-let-'em-run approach will cost a {\em
521 Our approach is to give a thread a small stack space, and then link
522 on/off extra ``chunks'' as the need arises. Again, this is a
523 storage-management problem, and, yet again, we choose to graft the
524 whole business onto the existing heap-management machinery. So stack
525 objects will live in the heap, be garbage collected, etc., etc..
527 So, as with TSOs, we use the standard heap-object (`closure') jargon.
529 Here is the picture of how a stack object is arranged:
531 <----- var hdr --------> v ---- FirstPtr --- v
532 ---------------------------------------------------------------------
533 ...|| SpB | SuB | SpA | SuA || B stk -> ... | ... <- A stk || PREV ||
534 ---------------------------------------------------------------------
538 We keep the following state-of-stack info in the {\em variable-header}
541 SpB, SuB & their {\em offsets} from 1st non-hdr word (marked \tr{XX} above)\\
542 SpA, SuA & their {\em offsets} from the next-to-last word (marked \tr{YY} above)\\
543 ctr field??? & (GC\_GEN\_WHATNOT may serve instead)\\
546 The stack-pointer offsets are from the points indicated and are {\em
547 non-negative} for pointers to this chunk of the stack space.
549 At the {\em end} of the stack object, we have a {\em link} to the
550 previous part of the overall stack. The link is \tr{NULL} if this is
551 the bottom of the overall stack.
553 After the header, we have @STKO_CHUNK_SIZE-1@ words of actual stack
554 stuff. The B-stack part begins at the lowest address and grows
555 upwards; the A-stack parts begins at the highest address and grows
558 From a storage-manager point of view, these are {\em very special}
/* Stack object (STKO) variable-header size: two extra words when
 * reduction counting records the A/B stack depths. */
562 #ifdef DO_REDN_COUNTING
563 #define STKO_VHS (GC_MUT_RESERVED_WORDS + 9)
565 #define STKO_VHS (GC_MUT_RESERVED_WORDS + 7)
567 #define STKO_HS (FIXED_HS + STKO_VHS)
569 #define DEFAULT_STKO_CHUNK_SIZE 1024
571 #define MIN_STKO_CHUNK_SIZE 16 /* Rather arbitrary */
573 extern I_ StkOChunkSize;
/* Size and addressing of an STKO's contents; offsets are 1-based
 * from the first word after the header. */
575 #define STKO_CLOSURE_SIZE(closure) STKO_SIZE(closure)
577 #define STKO_CLOSURE_CTS_SIZE(closure) (STKO_CLOSURE_SIZE(closure) - STKO_VHS)
578 #define STKO_CLOSURE_PTR(closure, no) (*STKO_CLOSURE_ADDR(closure, no))
580 #define STKO_CLOSURE_ADDR(s, n) (((P_)(s)) + STKO_HS + (n) - 1)
581 #define STKO_CLOSURE_OFFSET(s, p) (((P_)(p) - (P_)(s)) - STKO_HS + 1)
583 /* std start-filling-in macro: */
584 #define SET_STKO_HDR(s,infolbl,cc) \
585 { SET_FIXED_HDR(s,infolbl,cc); \
586 SET_MUT_RESERVED_WORDS(s); \
587 /* the other header words filled in some other way */ }
589 /* now we have the STKO-specific stuff
591 Note: The S[pu][AB] registers are put in this order so that
592 they will appear in monotonically increasing order in
593 the StkO...just as an aid to the poor wee soul who has
/* Word offsets (negative, relative to STKO_HS) of the STKO header
 * fields; ADEP/BDEP exist only under DO_REDN_COUNTING. */
597 #ifdef DO_REDN_COUNTING
598 #define STKO_ADEP_LOCN (STKO_HS - 9)
599 #define STKO_BDEP_LOCN (STKO_HS - 8)
601 #define STKO_SIZE_LOCN (STKO_HS - 7)
602 #define STKO_RETURN_LOCN (STKO_HS - 6)
603 #define STKO_LINK_LOCN (STKO_HS - 5)
604 #define STKO_SuB_LOCN (STKO_HS - 4)
605 #define STKO_SpB_LOCN (STKO_HS - 3)
606 #define STKO_SpA_LOCN (STKO_HS - 2)
607 #define STKO_SuA_LOCN (STKO_HS - 1)
/* Typed accessors for the header fields above. */
609 #define STKO_ADEP(s) (((I_ *)(s))[STKO_ADEP_LOCN])
610 #define STKO_BDEP(s) (((I_ *)(s))[STKO_BDEP_LOCN])
611 #define STKO_SIZE(s) (((P_)(s))[STKO_SIZE_LOCN])
612 #define STKO_RETURN(s) (((StgRetAddr *)(s))[STKO_RETURN_LOCN])
613 #define STKO_LINK(s) (((PP_)(s))[STKO_LINK_LOCN])
614 #define STKO_SpB(s) (((PP_)(s))[STKO_SpB_LOCN])
615 #define STKO_SuB(s) (((PP_)(s))[STKO_SuB_LOCN])
616 #define STKO_SpA(s) (((PP_ *)(s))[STKO_SpA_LOCN])
617 #define STKO_SuA(s) (((PP_ *)(s))[STKO_SuA_LOCN])
/* Per the description above: the B stack starts just after the header
 * and grows upwards; the A stack starts at the last word and grows
 * downwards. */
619 #define STKO_BSTK_OFFSET(closure) (STKO_HS)
620 #define STKO_ASTK_OFFSET(closure) (FIXED_HS + STKO_CLOSURE_SIZE(closure) - 1)
621 #define STKO_BSTK_BOT(closure) (((P_)(closure)) + STKO_BSTK_OFFSET(closure))
622 #define STKO_ASTK_BOT(closure) (((PP_)(closure)) + STKO_ASTK_OFFSET(closure))
625 These are offsets into the stack object proper (starting at 1 for
626 the first word after the header).
629 #define STKO_SpA_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SpA(s)))
630 #define STKO_SuA_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SuA(s)))
631 #define STKO_SpB_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SpB(s)))
632 #define STKO_SuB_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SuB(s)))
635 %************************************************************************
637 \subsubsection[BQ-closures]{@BQ@ (blocking queue) heap objects (`closures')}
639 %************************************************************************
641 Blocking queues are built in the parallel system when a local thread
642 enters a non-global node. They are similar to black holes, except
643 that when they are updated, the blocking queue must be enlivened
644 too. A blocking queue closure thus has the following structure.
649 \begin{tabular}{||l|l|l|l||}\hline
650 GA & Info ptr. & $\ldots$ & Blocking Queue \\ \hline
656 The blocking queue itself is a pointer to a list of blocking queue entries.
657 The list is formed from TSO closures. For the generational garbage collectors,
658 the BQ must have the same structure as an IND, with the blocking queue hanging
659 off of the indirection pointer. (This has to do with treating the BQ as an old
660 root if it gets updated while in the old generation.)
/* A BQ shares the IND layout so that an updated BQ can be handled
 * like an indirection by the generational collectors (see the prose
 * above). */
663 #define BQ_VHS IND_VHS
666 #define BQ_CLOSURE_SIZE(closure) IND_CLOSURE_SIZE(closure)
667 #define BQ_CLOSURE_NoPTRS(closure) IND_CLOSURE_NoPTRS(closure)
668 #define BQ_CLOSURE_NoNONPTRS(closure) IND_CLOSURE_NoNONPTRS(closure)
669 #define BQ_CLOSURE_PTR(closure, no) (((P_)(closure))[BQ_HS + (no) - 1])
672 Blocking queues store a pointer to a list of blocking queue entries.
675 #define BQ_ENTRIES(closure) IND_CLOSURE_PTR(closure)
676 #define BQ_LINK(closure) IND_CLOSURE_LINK(closure)
679 We have only one kind of blocking queue closure, so we test the info pointer
680 for a specific value rather than looking in the info table for a special bit.
685 #define IS_BQ_CLOSURE(closure) (INFO_PTR(closure) == (W_) BQ_info)
688 %************************************************************************
690 \subsubsection[TSO_ITBL]{@TSO_ITBL@}
692 %************************************************************************
694 The special info table used for thread state objects (TSOs).
/* TSO info table and GC representation table.
 * NOTE(review): these backslash-continued lines read like the bodies
 * of TSO_ITBL()/TSO_RTBL() whose #define lines are not visible here;
 * the rep-table entries name the TSO-specific copying and compacting
 * GC routines -- confirm against the full file. */
699 CAT_DECLARE(TSO,INTERNAL_KIND,"TSO","<TSO>") \
701 EXTDATA_RO(MK_REP_LBL(TSO,,)); \
702 const W_ TSO_info[] = { \
704 ,(W_) INFO_OTHER_TAG \
705 ,(W_) MK_REP_REF(TSO,,) \
706 INCLUDE_PROFILING_INFO(TSO) \
710 const W_ MK_REP_LBL(TSO,,)[] = { \
711 INCLUDE_TYPE_INFO(TSO) \
712 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
714 INCLUDE_COPYING_INFO(_Evacuate_TSO,_Scavenge_TSO) \
715 INCLUDE_COMPACTING_INFO(_ScanLink_TSO,_PRStart_TSO,_ScanMove_TSO,_PRIn_TSO) \
720 %************************************************************************
722 \subsubsection[STKO_ITBL]{@STKO_ITBL@}
724 %************************************************************************
726 The special info table used for stack objects (STKOs).
/* STKO info table and rep table; several continuation lines of these
 * macro bodies are not visible here.  The rep table routes GC through
 * the StkO-specific copying/compacting routines. */
729 #define STKO_ITBL() \
730 CAT_DECLARE(StkO,INTERNAL_KIND,"STKO","<STKO>") \
731 EXTFUN(StkO_entry); \
732 EXTDATA_RO(MK_REP_LBL(StkO,,)); \
733 const W_ StkO_info[] = { \
735 ,(W_) INFO_OTHER_TAG \
736 ,(W_) MK_REP_REF(StkO,,) \
737 INCLUDE_PROFILING_INFO(StkO) \
740 #define STKO_RTBL() \
741 const W_ MK_REP_LBL(StkO,,)[] = { \
742 INCLUDE_TYPE_INFO(STKO_DYNAMIC) \
743 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
745 INCLUDE_COPYING_INFO(_Evacuate_StkO,_Scavenge_StkO) \
746 INCLUDE_COMPACTING_INFO(_ScanLink_StkO,_PRStart_StkO,_ScanMove_StkO,_PRIn_StkO) \
/* Static STKO variant: typed STKO_STATIC and routed through the
 * generic static-closure GC routines; the compacting entries use
 * _Dummy_Static_entry/_PRIn_Error.  Continuation lines of these
 * macro bodies are not all visible here. */
749 #define STKO_STATIC_ITBL() \
750 CAT_DECLARE(StkO_static,INTERNAL_KIND,"STKO","<STKO>") \
751 EXTFUN(StkO_static_entry); \
752 EXTDATA_RO(MK_REP_LBL(StkO_static,,)); \
753 const W_ StkO_static_info[] = { \
754 (W_) StkO_static_entry \
755 ,(W_) INFO_OTHER_TAG \
756 ,(W_) MK_REP_REF(StkO_static,,) \
757 INCLUDE_PROFILING_INFO(StkO_static) \
760 #define STKO_STATIC_RTBL() \
761 const W_ MK_REP_LBL(StkO_static,,)[] = { \
762 INCLUDE_TYPE_INFO(STKO_STATIC) \
763 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
765 INCLUDE_COPYING_INFO(_Evacuate_Static,_Scavenge_Static) \
766 INCLUDE_COMPACTING_INFO(_Dummy_Static_entry,_PRStart_Static, \
767 _Dummy_Static_entry,_PRIn_Error) \
772 %************************************************************************
774 \subsubsection[BQ_ITBL]{@BQ_ITBL@}
776 %************************************************************************
778 Special info-table for local blocking queues.
/* BQ info table and rep table (define headers and some continuation
 * lines are not visible here).  Unlike the tables above, the size
 * entry uses MIN_UPD_SIZE and the compacting entry SPEC_COMPACTING_INFO
 * -- presumably because a BQ is overwritten in place when updated;
 * confirm against the storage manager. */
782 CAT_DECLARE(BQ,INTERNAL_KIND,"BQ","<BQ>") \
784 EXTDATA_RO(MK_REP_LBL(BQ,,)); \
785 const W_ BQ_info[] = { \
787 ,(W_) INFO_OTHER_TAG \
788 ,(W_) MK_REP_REF(BQ,,) \
789 INCLUDE_PROFILING_INFO(BQ) \
793 const W_ MK_REP_LBL(BQ,,)[] = { \
794 INCLUDE_TYPE_INFO(BQ) \
795 INCLUDE_SIZE_INFO(MIN_UPD_SIZE,INFO_UNUSED) \
797 INCLUDE_COPYING_INFO(_Evacuate_BQ,_Scavenge_BQ) \
798 SPEC_COMPACTING_INFO(_ScanLink_BQ,_PRStart_BQ,_ScanMove_BQ,_PRIn_BQ) \
804 #endif /* CONCURRENT */
807 Even the sequential system gets to play with SynchVars, though it really
808 doesn't make too much sense (if any). Okay; maybe it makes some sense.
809 (See the 1.3 I/O stuff.)
811 %************************************************************************
813 \subsubsection[SVar-closures]{@SynchVar@ heap objects}
815 %************************************************************************
/* SynchVar (SVar): a mutuple of three pointer fields -- value first,
 * then head and tail (see the comment below on why value leads). */
818 #define SVAR_HS (MUTUPLE_HS)
820 #define SVAR_CLOSURE_SIZE(closure) 3
822 #define SET_SVAR_HDR(closure,infolbl,cc) \
823 SET_MUTUPLE_HDR(closure,infolbl,cc,MUTUPLE_VHS+3,3)
825 /* The value must come first, because we shrink the other two fields off
826 when writing an IVar */
/* Word offsets of the three fields, then their lvalue accessors. */
828 #define SVAR_VALUE_LOCN (SVAR_HS+0)
829 #define SVAR_HEAD_LOCN (SVAR_HS+1)
830 #define SVAR_TAIL_LOCN (SVAR_HS+2)
832 #define SVAR_VALUE(closure) ((PP_)(closure))[SVAR_VALUE_LOCN]
833 #define SVAR_HEAD(closure) ((PP_)(closure))[SVAR_HEAD_LOCN]
834 #define SVAR_TAIL(closure) ((PP_)(closure))[SVAR_TAIL_LOCN]
837 End multi-slurp protection:
840 #endif /* THREADS_H */