2 % (c) The GRASP Project, Glasgow University, 1994-1995
4 \section[Thread]{Thread support macros used in \tr{.hc} files}
15 #define TYPE_OF_SPARK struct spark
16 #define TYPE_OF_SPARK_PTR sparkq
17 #define SIZE_OF_SPARK (sizeof(TYPE_OF_SPARK))
/* prev/next links of the doubly-linked spark queue (members of struct
   spark; the remainder of the struct is not visible in this chunk) */
21 struct spark *prev, *next;
/* default: no extra context-switch condition (redefined under
   __STG_GCC_REGS__ below) */
31 #define OR_CONTEXT_SWITCH
35 extern I_ do_gr_sim; /* Are we simulating granularity? */
38 extern I_ do_qp_prof; /* Are we quasi-parallel profiling? */
44 #define DO_QP_PROF do_qp_prof
47 extern I_ context_switch; /* Flag set by signal handler */
49 #define CS_MAX_FREQUENCY 100 /* context switches per second */
50 #define CS_MIN_MILLISECS (1000/CS_MAX_FREQUENCY)/* milliseconds per slice */
52 #ifdef __STG_GCC_REGS__
53 #define OR_CONTEXT_SWITCH || context_switch
/* NOTE(review): the #else separating the two OR_CONTEXT_SWITCH
   definitions is not visible in this chunk -- confirm against the
   original file */
55 #define OR_CONTEXT_SWITCH /* in miniInterpret */
/* indices into the spark pool arrays */
58 #define REQUIRED_POOL 0
59 #define ADVISORY_POOL 1
/* NOTE(review): alternative spark representation (a spark is just a
   closure pointer); the conditional separating this from the struct
   representation above is not visible in this chunk */
64 #define TYPE_OF_SPARK PP_
65 #define SIZE_OF_SPARK (sizeof(TYPE_OF_SPARK))
/* base/limit and head/tail cursors of each spark pool */
67 extern TYPE_OF_SPARK PendingSparksBase[SPARK_POOLS],
68 PendingSparksLim[SPARK_POOLS];
69 extern TYPE_OF_SPARK PendingSparksHd[SPARK_POOLS],
70 PendingSparksTl[SPARK_POOLS];
72 extern I_ SparkLimit[SPARK_POOLS];
/* queues of runnable threads and of threads blocked waiting */
74 extern P_ RunnableThreadsHd, RunnableThreadsTl;
75 extern P_ WaitingThreadsHd, WaitingThreadsTl;
77 extern I_ sparksIgnored;
79 IF_RTS(extern void AwaitEvent(I_);)
/* per-processor variants: pools and thread queues are additionally
   indexed by processor */
83 extern TYPE_OF_SPARK_PTR PendingSparksBase[][SPARK_POOLS],
84 PendingSparksLim[][SPARK_POOLS];
85 extern TYPE_OF_SPARK_PTR PendingSparksHd[][SPARK_POOLS],
86 PendingSparksTl[][SPARK_POOLS];
87 extern P_ RunnableThreadsHd[], RunnableThreadsTl[],
88 WaitThreadsHd[], WaitThreadsTl[];
/* shorthands for the current processor's queues */
90 #define SparkQueueHd PendingSparksHd[CurrentProc][ADVISORY_POOL]
91 #define SparkQueueTl PendingSparksTl[CurrentProc][ADVISORY_POOL]
92 #define ThreadQueueHd RunnableThreadsHd[CurrentProc]
93 #define ThreadQueueTl RunnableThreadsTl[CurrentProc]
94 #define WaitingThreadsHd WaitThreadsHd[CurrentProc]
95 #define WaitingThreadsTl WaitThreadsTl[CurrentProc]
99 IF_RTS(extern void PruneSparks(STG_NO_ARGS);)
103 /* Codes that can be used as params for ReSchedule */
104 /* I distinguish them from the values 0/1 in the -UGRAN setup for security */
106 #define FIND_THREAD 10
107 #define SAME_THREAD 11
108 #define NEW_THREAD SAME_THREAD
109 #define CHANGE_THREAD 13
110 #define END_OF_WORLD 14
/* global scheduler state and statistics */
112 extern W_ SparksAvail, SurplusThreads;
114 extern W_ CurrentTime[];
115 extern I_ OutstandingFetches[], OutstandingFishes[];
116 extern enum proc_status procStatus[];
118 # if defined(GRAN_CHECK) && defined(GRAN) /* Just for testing */
119 # define FETCH_MASK_TSO 0x08000000 /* only bits 0, 1, 2 should be used */
121 extern P_ BlockedOnFetch[];
126 extern P_ CurrentTSO; /* thread state object now in use */
/* spare stack object / TSO -- presumably recycled when starting new
   threads; confirm against NewThread */
128 extern P_ AvailableStack;
129 extern P_ AvailableTSO;
133 void ScheduleThreads PROTO((P_ topClosure));
/* masks selecting the old/new halves of a packed spark-name word */
136 #define OLD_SPARKNAME_MASK 0xffff0000
137 #define NEW_SPARKNAME_MASK 0x0000ffff
139 void ReSchedule PROTO((int what_next)) STG_NORETURN;
140 void add_to_spark_queue PROTO((sparkq));
141 int set_sparkname PROTO((P_, int));
142 int reset_sparkname PROTO((P_));
143 I_ spark_queue_len PROTO((PROC, I_));
144 sparkq delete_from_spark_queue PROTO((sparkq, sparkq));
145 I_ thread_queue_len PROTO((PROC));
146 void DisposeSparkQ PROTO((sparkq));
/* NOTE(review): second ReSchedule prototype -- presumably an alternate
   #if branch whose conditional is not visible in this chunk */
150 void ReSchedule PROTO((int again)) STG_NORETURN;
154 void EndThread(STG_NO_ARGS) STG_NORETURN;
156 /* ToDo: Check if these are still needed -- HWL */
157 void QP_Event0 PROTO((I_, P_));
158 void QP_Event1 PROTO((char *, P_));
159 void QP_Event2 PROTO((char *, P_, P_));
160 long qp_elapsed_time(STG_NO_ARGS);
163 %************************************************************************
165 \subsection[thread-heap-objs]{Special threads-only heap objects (`closures')}
167 %************************************************************************
169 %************************************************************************
171 \subsubsection[TSO-closures]{@TSO@ (thread state object) heap objects}
173 %************************************************************************
175 We now enter the realm of the Deeply Magical.
177 Reduction threads come and go, resume and suspend, etc., in the threaded
178 world. Obviously, there must be a place to squirrel away state information
179 when a thread is suspended. Hence these {\em thread state objects} (TSOs).
181 Rather than manage TSOs' alloc/dealloc, etc., in some {\em ad hoc} way, we
182 instead alloc/dealloc/etc them in the heap; then we can use all the
183 standard garbage-collection/fetching/flushing/etc machinery on them.
184 So that's why TSOs are ``heap objects,'' albeit very special ones.
186 We use all the standard heap-object/closure jargon... (e.g.,
187 @SET_TSO_HDR@, fixed headers, variable-hdr size, ...).
189 A TSO is a fixed-size object with (post-header) words arranged like
190 the main register table, and enough slop so that the register table
191 can be properly aligned. The last header word of the TSO is
192 a pointer to the (internal) start of the interesting data.
194 Note that the heap and stack pointers in the TSO are only valid while
195 the thread is executing, and only if the corresponding values are not
196 stored in machine registers (i.e. the TSO becomes the backing register
197 table for those values).
/* number of bookkeeping ("info") words in a TSO; must match the
   TSO_*_LOCN offsets below */
200 #define TSO_INFO_WORDS 10
203 #define TSO_REDN_WORDS 2
/* NOTE(review): duplicate definition -- the conditional separating the
   two TSO_REDN_WORDS settings is not visible in this chunk */
205 #define TSO_REDN_WORDS 0
208 #if defined(GRAN) || defined(PAR)
209 /* do we really need a whole statistics buffer in PAR setup? -- HWL */
210 #define TSO_GRAN_WORDS 17
212 #define TSO_GRAN_WORDS 0
/* NOTE(review): the "#define TSO_VHS \" line introducing the following
   expression is not visible in this chunk */
216 (GC_MUT_RESERVED_WORDS + TSO_INFO_WORDS + TSO_REDN_WORDS + TSO_GRAN_WORDS)
218 #define TSO_HS (FIXED_HS + TSO_VHS)
219 #define TSO_CTS_SIZE (BYTES_TO_STGWORDS(sizeof(STGRegisterTable) + sizeof(StgDouble)))
221 #define TSO_PTRS (MAX_VANILLA_REG + 2)
223 /* std start-filling-in macro: */
224 #define SET_TSO_HDR(closure,infolbl,cc) \
225 { SET_FIXED_HDR(closure,infolbl,cc); \
226 SET_MUT_RESERVED_WORDS(closure); \
229 #define TSO_INFO_START (FIXED_HS + GC_MUT_RESERVED_WORDS)
/* word offsets of the TSO info fields (TSO_INFO_WORDS of them) */
230 #define TSO_LINK_LOCN (TSO_INFO_START + 0)
231 #define TSO_CCC_LOCN (TSO_INFO_START + 1)
232 #define TSO_NAME_LOCN (TSO_INFO_START + 2)
233 #define TSO_ID_LOCN (TSO_INFO_START + 3)
234 #define TSO_TYPE_LOCN (TSO_INFO_START + 4)
235 #define TSO_PC1_LOCN (TSO_INFO_START + 5)
236 #define TSO_PC2_LOCN (TSO_INFO_START + 6)
237 #define TSO_ARG1_LOCN (TSO_INFO_START + 7)
238 #define TSO_EVENT_LOCN (TSO_INFO_START + 8)
239 #define TSO_SWITCH_LOCN (TSO_INFO_START + 9)
/* the TSO_REDN_WORDS group follows the info words; AHWM/BHWM are
   presumably A-/B-stack high-water marks -- confirm */
241 #define TSO_REDN_START (TSO_INFO_START + TSO_INFO_WORDS)
243 #define TSO_AHWM_LOCN (TSO_REDN_START + 0)
244 #define TSO_BHWM_LOCN (TSO_REDN_START + 1)
247 #define TSO_GRAN_START (TSO_REDN_START + TSO_REDN_WORDS)
/* offsets of the TSO_GRAN_WORDS (17) GRAN/PAR statistics words */
248 #if defined(GRAN) || defined(PAR)
249 #define TSO_LOCKED_LOCN (TSO_GRAN_START + 0)
250 #define TSO_SPARKNAME_LOCN (TSO_GRAN_START + 1)
251 #define TSO_STARTEDAT_LOCN (TSO_GRAN_START + 2)
252 #define TSO_EXPORTED_LOCN (TSO_GRAN_START + 3)
253 #define TSO_BASICBLOCKS_LOCN (TSO_GRAN_START + 4)
254 #define TSO_ALLOCS_LOCN (TSO_GRAN_START + 5)
255 #define TSO_EXECTIME_LOCN (TSO_GRAN_START + 6)
256 #define TSO_FETCHTIME_LOCN (TSO_GRAN_START + 7)
257 #define TSO_FETCHCOUNT_LOCN (TSO_GRAN_START + 8)
258 #define TSO_BLOCKTIME_LOCN (TSO_GRAN_START + 9)
259 #define TSO_BLOCKCOUNT_LOCN (TSO_GRAN_START + 10)
260 #define TSO_BLOCKEDAT_LOCN (TSO_GRAN_START + 11)
261 #define TSO_GLOBALSPARKS_LOCN (TSO_GRAN_START + 12)
262 #define TSO_LOCALSPARKS_LOCN (TSO_GRAN_START + 13)
263 #define TSO_QUEUE_LOCN (TSO_GRAN_START + 14)
264 #define TSO_PRI_LOCN (TSO_GRAN_START + 15)
265 #define TSO_CLOCK_LOCN (TSO_GRAN_START + 16)
/* field accessors: each casts the closure pointer and indexes the
   corresponding *_LOCN word */
268 #define TSO_LINK(closure) (((PP_)closure)[TSO_LINK_LOCN])
269 #define TSO_CCC(closure) (((CostCentre *)closure)[TSO_CCC_LOCN])
270 #define TSO_NAME(closure) (((PP_)closure)[TSO_NAME_LOCN])
271 #define TSO_ID(closure) (((P_)closure)[TSO_ID_LOCN])
272 #define TSO_TYPE(closure) (((P_)closure)[TSO_TYPE_LOCN])
273 #define TSO_PC1(closure) (((FP_)closure)[TSO_PC1_LOCN])
274 #define TSO_PC2(closure) (((FP_)closure)[TSO_PC2_LOCN])
275 #define TSO_ARG1(closure) (((P_)closure)[TSO_ARG1_LOCN])
276 #define TSO_EVENT(closure) (((P_)closure)[TSO_EVENT_LOCN])
277 #define TSO_SWITCH(closure) (((FP_)closure)[TSO_SWITCH_LOCN])
279 #define TSO_AHWM(closure) (((I_ *)closure)[TSO_AHWM_LOCN])
280 #define TSO_BHWM(closure) (((I_ *)closure)[TSO_BHWM_LOCN])
/* accessors for the GRAN/PAR statistics words */
282 #define TSO_LOCKED(closure) (((P_)closure)[TSO_LOCKED_LOCN])
283 #define TSO_SPARKNAME(closure) (((P_)closure)[TSO_SPARKNAME_LOCN])
284 #define TSO_STARTEDAT(closure) (((P_)closure)[TSO_STARTEDAT_LOCN])
285 #define TSO_EXPORTED(closure) (((P_)closure)[TSO_EXPORTED_LOCN])
286 #define TSO_BASICBLOCKS(closure) (((P_)closure)[TSO_BASICBLOCKS_LOCN])
287 #define TSO_ALLOCS(closure) (((P_)closure)[TSO_ALLOCS_LOCN])
288 #define TSO_EXECTIME(closure) (((P_)closure)[TSO_EXECTIME_LOCN])
289 #define TSO_FETCHTIME(closure) (((P_)closure)[TSO_FETCHTIME_LOCN])
290 #define TSO_FETCHCOUNT(closure) (((P_)closure)[TSO_FETCHCOUNT_LOCN])
291 #define TSO_BLOCKTIME(closure) (((P_)closure)[TSO_BLOCKTIME_LOCN])
292 #define TSO_BLOCKCOUNT(closure) (((P_)closure)[TSO_BLOCKCOUNT_LOCN])
293 #define TSO_BLOCKEDAT(closure) (((P_)closure)[TSO_BLOCKEDAT_LOCN])
294 #define TSO_GLOBALSPARKS(closure) (((P_)closure)[TSO_GLOBALSPARKS_LOCN])
295 #define TSO_LOCALSPARKS(closure) (((P_)closure)[TSO_LOCALSPARKS_LOCN])
296 #define TSO_QUEUE(closure) (((P_)closure)[TSO_QUEUE_LOCN])
297 #define TSO_PRI(closure) (((P_)closure)[TSO_PRI_LOCN])
298 /* TSO_CLOCK is only needed in GrAnSim-Light */
299 #define TSO_CLOCK(closure) (((P_)closure)[TSO_CLOCK_LOCN])
/* address of the embedded register table: skip the header, then mask
   down to an StgDouble boundary (slop was added so this stays inside
   the object) */
301 #define TSO_INTERNAL_PTR(closure) \
302 ((STGRegisterTable *)(((W_)(((P_)closure) \
303 + TSO_HS + BYTES_TO_STGWORDS(sizeof(StgDouble)))) & ~(sizeof(StgDouble) - 1)))
305 #if defined(CONCURRENT) && defined(GRAN) /* HWL */
306 /* Per definitionem a tso is really awake if it has met a first */
307 /* GRAN_RESCHEDULE macro after having been rescheduled. */
308 #define REALLY_AWAKE(tso) (TSO_SWITCH(tso) != TSO_PC2(tso))
309 #define SET_AWAKE_FLAG(tso) TSO_SWITCH(tso) = NULL
310 #define RESET_AWAKE_FLAG(tso) TSO_SWITCH(tso) = TSO_PC2(tso)
315 The types of threads (TSO_TYPE):
/* values stored in the TSO_TYPE field */
317 #define T_MAIN 0 /* Must be executed locally */
318 #define T_REQUIRED 1 /* A required thread -- may be exported */
319 #define T_ADVISORY 2 /* An advisory thread -- may be exported */
320 #define T_FAIL 3 /* A failure thread -- may be exported */
323 The total space required to start a new thread (See NewThread in
326 #define THREAD_SPACE_REQUIRED (TSO_HS + TSO_CTS_SIZE + STKO_HS + RTSflags.ConcFlags.stkChunkSize)
329 Here are the various queues for GrAnSim-type events.
/* single-character queue codes; presumably colour codes for the
   GrAnSim visualisation (Green/Amber/Red/Yellow/Blue) -- confirm
   against the GrAnSim trace format */
331 #define Q_RUNNING 'G'
332 #define Q_RUNNABLE 'A'
333 #define Q_BLOCKED 'R'
334 #define Q_FETCHING 'Y'
335 #define Q_MIGRATING 'B'
338 %************************************************************************
340 \subsubsection[spark-closures]{Pending Sparks}
342 %************************************************************************
347 P_ FindLocalSpark PROTO((rtsBool forexport));
349 void DisposeSpark PROTO((P_ spark));
350 rtsBool Spark PROTO((P_ closure, rtsBool required));
354 #ifdef GRAN /* For GrAnSim sparks are currently mallocated -- HWL */
/* GRAN variant: sparks are heap-allocated records handled via sparkq */
356 void DisposeSpark PROTO((sparkq spark));
357 sparkq NewSpark PROTO((P_,I_,I_,I_,I_,I_));
359 /* # define MAX_EVENTS 1000 */ /* For GC Roots Purposes */
360 # define MAX_SPARKS 0 /* i.e. infinite */
362 #if defined(GRAN_JSM_SPARKS)
363 /* spark is a pointer into some sparkq (which is for JSM sparls just an
364 array of (struct sparks) */
366 # define SPARK_PREV(spark) { fprintf(stderr,"Error: SPARK_PREV not supported for JSM sparks") \
367 EXIT(EXIT_FAILURE); }
368 /* NB: SPARK_NEXT may only be used as a rhs but NOT as a lhs */
369 # define SPARK_NEXT(spark) (spark++)
370 # define SPARK_NODE(spark) (P_)(spark->node)
371 # define SPARK_NAME(spark) (spark->name)
372 # define SPARK_GRAN_INFO(spark) (spark->gran_info)
373 # define SPARK_GLOBAL(spark) (spark->global)
374 # define SPARK_EXPORTED(spark) (SPARK_GLOBAL(spark) > 1)
/* linked-record sparks (struct spark): direct field access */
376 # define SPARK_PREV(spark) (spark->prev)
377 # define SPARK_NEXT(spark) (sparkq)(spark->next)
378 # define SPARK_NODE(spark) (spark->node)
379 # define SPARK_NAME(spark) (spark->name)
380 # define SPARK_GRAN_INFO(spark) (spark->gran_info)
381 # define SPARK_GLOBAL(spark) (spark->global)
382 # define SPARK_EXPORTED(spark) (SPARK_GLOBAL(spark) > 1)
388 %************************************************************************
390 \subsubsection[STKO-closures]{@STKO@ (stack object) heap objects}
392 %************************************************************************
394 We linger in the Deeply Magical...
396 Each reduction thread has to have its own stack space. As there may
397 be many such threads, and as any given one may need quite a big stack,
398 a naive give-'em-a-big-stack-and-let-'em-run approach will cost a {\em
401 Our approach is to give a thread a small stack space, and then link
402 on/off extra ``chunks'' as the need arises. Again, this is a
403 storage-management problem, and, yet again, we choose to graft the
404 whole business onto the existing heap-management machinery. So stack
405 objects will live in the heap, be garbage collected, etc., etc..
407 So, as with TSOs, we use the standard heap-object (`closure') jargon.
409 Here is the picture of how a stack object is arranged:
411 <----- var hdr --------> v ---- FirstPtr --- v
412 ---------------------------------------------------------------------
413 ...|| SpB | SuB | SpA | SuA || B stk -> ... | ... <- A stk || PREV ||
414 ---------------------------------------------------------------------
418 We keep the following state-of-stack info in the {\em variable-header}
421 SpB, SuB & their {\em offsets} from 1st non-hdr word (marked \tr{XX} above)\\
422 SpA, SuA & their {\em offsets} from the next-to-last word (marked \tr{YY} above)\\
423 ctr field??? & (GC\_GEN\_WHATNOT may serve instead)\\
426 The stack-pointer offsets are from the points indicated and are {\em
427 non-negative} for pointers to this chunk of the stack space.
429 At the {\em end} of the stack object, we have a {\em link} to the
430 previous part of the overall stack. The link is \tr{NULL} if this is
431 the bottom of the overall stack.
433 After the header, we have @STKO_CHUNK_SIZE-1@ words of actual stack
434 stuff. The B-stack part begins at the lowest address and grows
435 upwards; the A-stack part begins at the highest address and grows
438 From a storage-manager point of view, these are {\em very special}
/* variable-header size of a stack object (bookkeeping words; see the
   STKO_*_LOCN offsets below) */
443 #define STKO_VHS (GC_MUT_RESERVED_WORDS + 9)
/* NOTE(review): duplicate definition -- the conditional separating the
   9- and 7-word variants is not visible in this chunk; confirm against
   the original file */
445 #define STKO_VHS (GC_MUT_RESERVED_WORDS + 7)
447 #define STKO_HS (FIXED_HS + STKO_VHS)
449 #define MIN_STKO_CHUNK_SIZE 16 /* Rather arbitrary */
451 #define STKO_CLOSURE_SIZE(closure) STKO_SIZE(closure)
453 #define STKO_CLOSURE_CTS_SIZE(closure) (STKO_CLOSURE_SIZE(closure) - STKO_VHS)
454 #define STKO_CLOSURE_PTR(closure, no) (*STKO_CLOSURE_ADDR(closure, no))
/* in-object offsets are 1-based: word 1 is the first word after the
   header (see the prose below) */
456 #define STKO_CLOSURE_ADDR(s, n) (((P_)(s)) + STKO_HS + (n) - 1)
457 #define STKO_CLOSURE_OFFSET(s, p) (((P_)(p) - (P_)(s)) - STKO_HS + 1)
459 /* std start-filling-in macro: */
460 #define SET_STKO_HDR(s,infolbl,cc) \
461 { SET_FIXED_HDR(s,infolbl,cc); \
462 SET_MUT_RESERVED_WORDS(s); \
463 /* the other header words filled in some other way */ }
465 /* now we have the STKO-specific stuff
467 Note: The S[pu][AB] registers are put in this order so that
468 they will appear in monotonically increasing order in
469 the StkO...just as an aid to the poor wee soul who has
/* negative offsets back from STKO_HS into the variable header */
474 #define STKO_ADEP_LOCN (STKO_HS - 9)
475 #define STKO_BDEP_LOCN (STKO_HS - 8)
477 #define STKO_SIZE_LOCN (STKO_HS - 7)
478 #define STKO_RETURN_LOCN (STKO_HS - 6)
479 #define STKO_LINK_LOCN (STKO_HS - 5)
480 #define STKO_SuB_LOCN (STKO_HS - 4)
481 #define STKO_SpB_LOCN (STKO_HS - 3)
482 #define STKO_SpA_LOCN (STKO_HS - 2)
483 #define STKO_SuA_LOCN (STKO_HS - 1)
/* typed accessors for the header fields above */
485 #define STKO_ADEP(s) (((I_ *)(s))[STKO_ADEP_LOCN])
486 #define STKO_BDEP(s) (((I_ *)(s))[STKO_BDEP_LOCN])
487 #define STKO_SIZE(s) (((P_)(s))[STKO_SIZE_LOCN])
488 #define STKO_RETURN(s) (((StgRetAddr *)(s))[STKO_RETURN_LOCN])
489 #define STKO_LINK(s) (((PP_)(s))[STKO_LINK_LOCN])
490 #define STKO_SpB(s) (((PP_)(s))[STKO_SpB_LOCN])
491 #define STKO_SuB(s) (((PP_)(s))[STKO_SuB_LOCN])
492 #define STKO_SpA(s) (((PP_ *)(s))[STKO_SpA_LOCN])
493 #define STKO_SuA(s) (((PP_ *)(s))[STKO_SuA_LOCN])
/* bottoms of the B stack (low end, grows upwards) and A stack (high
   end, grows downwards) within a stack object -- see the prose above */
495 #define STKO_BSTK_OFFSET(closure) (STKO_HS)
496 #define STKO_ASTK_OFFSET(closure) (FIXED_HS + STKO_CLOSURE_SIZE(closure) - 1)
497 #define STKO_BSTK_BOT(closure) (((P_)(closure)) + STKO_BSTK_OFFSET(closure))
498 #define STKO_ASTK_BOT(closure) (((PP_)(closure)) + STKO_ASTK_OFFSET(closure))
501 These are offsets into the stack object proper (starting at 1 for
502 the first word after the header).
/* saved stack pointers expressed in the 1-based in-object scheme */
505 #define STKO_SpA_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SpA(s)))
506 #define STKO_SuA_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SuA(s)))
507 #define STKO_SpB_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SpB(s)))
508 #define STKO_SuB_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SuB(s)))
511 %************************************************************************
513 \subsubsection[BQ-closures]{@BQ@ (blocking queue) heap objects (`closures')}
515 %************************************************************************
517 Blocking queues are built in the parallel system when a local thread
518 enters a non-global node. They are similar to black holes, except
519 that when they are updated, the blocking queue must be enlivened
520 too. A blocking queue closure thus has the following structure.
525 \begin{tabular}{||l|l|l|l||}\hline
526 GA & Info ptr. & $\ldots$ & Blocking Queue \\ \hline
532 The blocking queue itself is a pointer to a list of blocking queue entries.
533 The list is formed from TSO closures. For the generational garbage collectors,
534 the BQ must have the same structure as an IND, with the blocking queue hanging
535 off of the indirection pointer. (This has to do with treating the BQ as an old
536 root if it gets updated while in the old generation.)
/* a BQ shares the IND layout, with the blocked-TSO list hanging off
   the indirection pointer (described in the prose above) */
539 #define BQ_VHS IND_VHS
542 #define BQ_CLOSURE_SIZE(closure) IND_CLOSURE_SIZE(closure)
543 #define BQ_CLOSURE_NoPTRS(closure) IND_CLOSURE_NoPTRS(closure)
544 #define BQ_CLOSURE_NoNONPTRS(closure) IND_CLOSURE_NoNONPTRS(closure)
545 #define BQ_CLOSURE_PTR(closure, no) (((P_)(closure))[BQ_HS + (no) - 1])
548 Blocking queues store a pointer to a list of blocking queue entries.
551 #define BQ_ENTRIES(closure) IND_CLOSURE_PTR(closure)
552 #define BQ_LINK(closure) IND_CLOSURE_LINK(closure)
555 We have only one kind of blocking queue closure, so we test the info pointer
556 for a specific value rather than looking in the info table for a special bit.
561 #define IS_BQ_CLOSURE(closure) (INFO_PTR(closure) == (W_) BQ_info)
564 %************************************************************************
566 \subsubsection[TSO_ITBL]{@TSO_ITBL@}
568 %************************************************************************
570 The special info table used for thread state objects (TSOs).
/* NOTE(review): the "#define TSO_ITBL()" / "#define TSO_RTBL()" header
   lines and several interior lines of these info-table macros are not
   visible in this chunk; code left untouched */
575 CAT_DECLARE(TSO,INTERNAL_KIND,"TSO","<TSO>") \
577 EXTDATA_RO(MK_REP_LBL(TSO,,)); \
578 const W_ TSO_info[] = { \
580 ,(W_) INFO_OTHER_TAG \
581 ,(W_) MK_REP_REF(TSO,,) \
582 INCLUDE_PROFILING_INFO(TSO) \
586 const W_ MK_REP_LBL(TSO,,)[] = { \
587 INCLUDE_TYPE_INFO(TSO) \
588 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
590 INCLUDE_COPYING_INFO(_Evacuate_TSO,_Scavenge_TSO) \
591 INCLUDE_COMPACTING_INFO(_ScanLink_TSO,_PRStart_TSO,_ScanMove_TSO,_PRIn_TSO) \
596 %************************************************************************
598 \subsubsection[STKO_ITBL]{@STKO_ITBL@}
600 %************************************************************************
602 The special info table used for stack objects (STKOs).
/* NOTE(review): interior lines of these info-table macros (e.g. the
   first array element and closing braces) are not visible in this
   chunk; code left untouched */
605 #define STKO_ITBL() \
606 CAT_DECLARE(StkO,INTERNAL_KIND,"STKO","<STKO>") \
607 EXTFUN(StkO_entry); \
608 EXTDATA_RO(MK_REP_LBL(StkO,,)); \
609 const W_ StkO_info[] = { \
611 ,(W_) INFO_OTHER_TAG \
612 ,(W_) MK_REP_REF(StkO,,) \
613 INCLUDE_PROFILING_INFO(StkO) \
616 #define STKO_RTBL() \
617 const W_ MK_REP_LBL(StkO,,)[] = { \
618 INCLUDE_TYPE_INFO(STKO_DYNAMIC) \
619 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
621 INCLUDE_COPYING_INFO(_Evacuate_StkO,_Scavenge_StkO) \
622 INCLUDE_COMPACTING_INFO(_ScanLink_StkO,_PRStart_StkO,_ScanMove_StkO,_PRIn_StkO) \
625 #define STKO_STATIC_ITBL() \
626 CAT_DECLARE(StkO_static,INTERNAL_KIND,"STKO","<STKO>") \
627 EXTFUN(StkO_static_entry); \
628 EXTDATA_RO(MK_REP_LBL(StkO_static,,)); \
629 const W_ StkO_static_info[] = { \
630 (W_) StkO_static_entry \
631 ,(W_) INFO_OTHER_TAG \
632 ,(W_) MK_REP_REF(StkO_static,,) \
633 INCLUDE_PROFILING_INFO(StkO_static) \
636 #define STKO_STATIC_RTBL() \
637 const W_ MK_REP_LBL(StkO_static,,)[] = { \
638 INCLUDE_TYPE_INFO(STKO_STATIC) \
639 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
641 INCLUDE_COPYING_INFO(_Evacuate_Static,_Dummy_Static_entry) \
642 INCLUDE_COMPACTING_INFO(_Dummy_Static_entry,_PRStart_Static, \
643 _Dummy_Static_entry,_PRIn_Error) \
648 %************************************************************************
650 \subsubsection[BQ_ITBL]{@BQ_ITBL@}
652 %************************************************************************
654 Special info-table for local blocking queues.
658 CAT_DECLARE(BQ,INTERNAL_KIND,"BQ","<BQ>") \
660 EXTDATA_RO(MK_REP_LBL(BQ,,)); \
661 const W_ BQ_info[] = { \
663 ,(W_) INFO_OTHER_TAG \
664 ,(W_) MK_REP_REF(BQ,,) \
665 INCLUDE_PROFILING_INFO(BQ) \
669 const W_ MK_REP_LBL(BQ,,)[] = { \
670 INCLUDE_TYPE_INFO(BQ) \
671 INCLUDE_SIZE_INFO(MIN_UPD_SIZE,INFO_UNUSED) \
673 INCLUDE_COPYING_INFO(_Evacuate_BQ,_Scavenge_BQ) \
674 SPEC_COMPACTING_INFO(_ScanLink_BQ,_PRStart_BQ,_ScanMove_BQ,_PRIn_BQ) \
680 #endif /* CONCURRENT */
683 Even the sequential system gets to play with SynchVars, though it really
684 doesn't make too much sense (if any). Okay; maybe it makes some sense.
685 (See the 1.3 I/O stuff.)
687 %************************************************************************
689 \subsubsection[SVar-closures]{@SynchVar@ heap objects}
691 %************************************************************************
/* a SynchVar closure is a mutable tuple of three (pointer) words */
694 #define SVAR_HS (MUTUPLE_HS)
696 #define SVAR_CLOSURE_SIZE(closure) 3
698 #define SET_SVAR_HDR(closure,infolbl,cc) \
699 SET_MUTUPLE_HDR(closure,infolbl,cc,MUTUPLE_VHS+3,3)
701 /* The value must come first, because we shrink the other two fields off
702 when writing an IVar */
704 #define SVAR_VALUE_LOCN (SVAR_HS+0)
705 #define SVAR_HEAD_LOCN (SVAR_HS+1)
706 #define SVAR_TAIL_LOCN (SVAR_HS+2)
/* accessors: value plus head/tail fields -- presumably the head/tail
   of a queue of blocked threads; confirm against the SVar code */
708 #define SVAR_VALUE(closure) ((PP_)(closure))[SVAR_VALUE_LOCN]
709 #define SVAR_HEAD(closure) ((PP_)(closure))[SVAR_HEAD_LOCN]
710 #define SVAR_TAIL(closure) ((PP_)(closure))[SVAR_TAIL_LOCN]
713 End multi-slurp protection:
716 #endif /* THREADS_H */