2 % (c) The GRASP Project, Glasgow University, 1994-1995
4 \section[Thread]{Thread support macros used in \tr{.hc} files}
15 #define TYPE_OF_SPARK struct spark
16 #define TYPE_OF_SPARK_PTR sparkq
17 #define SIZE_OF_SPARK (sizeof(TYPE_OF_SPARK))
21 struct spark *prev, *next;
31 #define OR_CONTEXT_SWITCH
35 extern I_ do_gr_sim; /* Are we simulating granularity? */
38 extern I_ do_qp_prof; /* Are we quasi-parallel profiling? */
44 #define DO_QP_PROF do_qp_prof
47 extern I_ context_switch; /* Flag set by signal handler */
49 #define CS_MAX_FREQUENCY 100 /* context switches per second */
50 #define CS_MIN_MILLISECS (1000/CS_MAX_FREQUENCY)/* milliseconds per slice */
52 #ifdef __STG_GCC_REGS__
53 #define OR_CONTEXT_SWITCH || context_switch
55 #define OR_CONTEXT_SWITCH /* in miniInterpret */
58 #define REQUIRED_POOL 0
59 #define ADVISORY_POOL 1
64 #define TYPE_OF_SPARK PP_
65 #define SIZE_OF_SPARK (sizeof(TYPE_OF_SPARK))
67 extern TYPE_OF_SPARK PendingSparksBase[SPARK_POOLS],
68 PendingSparksLim[SPARK_POOLS];
69 extern TYPE_OF_SPARK PendingSparksHd[SPARK_POOLS],
70 PendingSparksTl[SPARK_POOLS];
72 extern I_ SparkLimit[SPARK_POOLS];
74 extern P_ RunnableThreadsHd, RunnableThreadsTl;
75 extern P_ WaitingThreadsHd, WaitingThreadsTl;
77 extern I_ sparksIgnored;
79 IF_RTS(extern void AwaitEvent(I_);)
83 extern TYPE_OF_SPARK_PTR PendingSparksBase[][SPARK_POOLS],
84 PendingSparksLim[][SPARK_POOLS];
85 extern TYPE_OF_SPARK_PTR PendingSparksHd[][SPARK_POOLS],
86 PendingSparksTl[][SPARK_POOLS];
87 extern P_ RunnableThreadsHd[], RunnableThreadsTl[],
88 WaitThreadsHd[], WaitThreadsTl[];
/* Convenience aliases: select the queue head/tail slots belonging to the
   processor currently being simulated (CurrentProc).
   NOTE(review): only the ADVISORY_POOL spark queue gets an alias here --
   presumably required sparks are handled through the pool arrays directly;
   confirm against the scheduler code. */
#define SparkQueueHd PendingSparksHd[CurrentProc][ADVISORY_POOL]
#define SparkQueueTl PendingSparksTl[CurrentProc][ADVISORY_POOL]
#define ThreadQueueHd RunnableThreadsHd[CurrentProc]
#define ThreadQueueTl RunnableThreadsTl[CurrentProc]
#define WaitingThreadsHd WaitThreadsHd[CurrentProc]
#define WaitingThreadsTl WaitThreadsTl[CurrentProc]
99 IF_RTS(extern void PruneSparks(STG_NO_ARGS);)
103 /* Codes that can be used as params for ReSchedule */
104 /* I distinguish them from the values 0/1 in the -UGRAN setup for security */
106 #define FIND_THREAD 10
107 #define SAME_THREAD 11
108 #define NEW_THREAD SAME_THREAD
109 #define CHANGE_THREAD 13
110 #define END_OF_WORLD 14
112 extern W_ SparksAvail, SurplusThreads;
114 extern W_ CurrentTime[];
115 extern I_ OutstandingFetches[], OutstandingFishes[];
116 extern enum proc_status procStatus[];
118 # if defined(GRAN_CHECK) && defined(GRAN) /* Just for testing */
119 # define FETCH_MASK_TSO 0x08000000 /* only bits 0, 1, 2 should be used */
121 extern P_ BlockedOnFetch[];
126 extern P_ CurrentTSO; /* thread state object now in use */
128 extern P_ AvailableStack;
129 extern P_ AvailableTSO;
133 void ScheduleThreads PROTO((P_ topClosure));
136 void ReSchedule PROTO((int what_next)) STG_NORETURN;
137 void add_to_spark_queue PROTO((sparkq));
138 int set_sparkname PROTO((P_, int));
139 int reset_sparkname PROTO((P_));
140 I_ spark_queue_len PROTO((PROC, I_));
141 sparkq delete_from_spark_queue PROTO((sparkq, sparkq));
142 I_ thread_queue_len PROTO((PROC));
143 void DisposeSparkQ PROTO((sparkq));
147 void ReSchedule PROTO((int again)) STG_NORETURN;
151 void EndThread(STG_NO_ARGS) STG_NORETURN;
153 /* ToDo: Check if these are still needed -- HWL */
154 void QP_Event0 PROTO((I_, P_));
155 void QP_Event1 PROTO((char *, P_));
156 void QP_Event2 PROTO((char *, P_, P_));
157 long qp_elapsed_time(STG_NO_ARGS);
160 %************************************************************************
162 \subsection[thread-heap-objs]{Special threads-only heap objects (`closures')}
164 %************************************************************************
166 %************************************************************************
168 \subsubsection[TSO-closures]{@TSO@ (thread state object) heap objects}
170 %************************************************************************
172 We now enter the realm of the Deeply Magical.
174 Reduction threads come and go, resume and suspend, etc., in the threaded
175 world. Obviously, there must be a place to squirrel away state information
176 when a thread is suspended. Hence these {\em thread state objects} (TSOs).
178 Rather than manage TSOs' alloc/dealloc, etc., in some {\em ad hoc} way, we
179 instead alloc/dealloc/etc them in the heap; then we can use all the
180 standard garbage-collection/fetching/flushing/etc machinery on them.
181 So that's why TSOs are ``heap objects,'' albeit very special ones.
183 We use all the standard heap-object/closure jargon... (e.g.,
184 @SET_TSO_HDR@, fixed headers, variable-hdr size, ...).
186 A TSO is a fixed-size object with (post-header) words arranged like
187 the main register table, and enough slop so that the register table
188 can be properly aligned. The last header word of the TSO is
189 a pointer to the (internal) start of the interesting data.
191 Note that the heap and stack pointers in the TSO are only valid while
192 the thread is executing, and only if the corresponding values are not
193 stored in machine registers (i.e. the TSO becomes the backing register
194 table for those values).
197 #define TSO_INFO_WORDS 10
200 #define TSO_REDN_WORDS 2
202 #define TSO_REDN_WORDS 0
205 #if defined(GRAN) || defined(PAR)
206 /* do we really need a whole statistics buffer in PAR setup? HWL*/
207 #define TSO_GRAN_WORDS 17
209 #define TSO_GRAN_WORDS 0
213 (GC_MUT_RESERVED_WORDS + TSO_INFO_WORDS + TSO_REDN_WORDS + TSO_GRAN_WORDS)
215 #define TSO_HS (FIXED_HS + TSO_VHS)
216 #define TSO_CTS_SIZE (BYTES_TO_STGWORDS(sizeof(STGRegisterTable) + sizeof(StgDouble)))
218 #define TSO_PTRS (MAX_VANILLA_REG + 2)
220 /* std start-filling-in macro: */
221 #define SET_TSO_HDR(closure,infolbl,cc) \
222 { SET_FIXED_HDR(closure,infolbl,cc); \
223 SET_MUT_RESERVED_WORDS(closure); \
226 #define TSO_INFO_START (FIXED_HS + GC_MUT_RESERVED_WORDS)
227 #define TSO_LINK_LOCN (TSO_INFO_START + 0)
228 #define TSO_CCC_LOCN (TSO_INFO_START + 1)
229 #define TSO_NAME_LOCN (TSO_INFO_START + 2)
230 #define TSO_ID_LOCN (TSO_INFO_START + 3)
231 #define TSO_TYPE_LOCN (TSO_INFO_START + 4)
232 #define TSO_PC1_LOCN (TSO_INFO_START + 5)
233 #define TSO_PC2_LOCN (TSO_INFO_START + 6)
234 #define TSO_ARG1_LOCN (TSO_INFO_START + 7)
235 #define TSO_EVENT_LOCN (TSO_INFO_START + 8)
236 #define TSO_SWITCH_LOCN (TSO_INFO_START + 9)
238 #define TSO_REDN_START (TSO_INFO_START + TSO_INFO_WORDS)
240 #define TSO_AHWM_LOCN (TSO_REDN_START + 0)
241 #define TSO_BHWM_LOCN (TSO_REDN_START + 1)
244 #define TSO_GRAN_START (TSO_REDN_START + TSO_REDN_WORDS)
245 #if defined(GRAN) || defined(PAR)
246 #define TSO_LOCKED_LOCN (TSO_GRAN_START + 0)
247 #define TSO_SPARKNAME_LOCN (TSO_GRAN_START + 1)
248 #define TSO_STARTEDAT_LOCN (TSO_GRAN_START + 2)
249 #define TSO_EXPORTED_LOCN (TSO_GRAN_START + 3)
250 #define TSO_BASICBLOCKS_LOCN (TSO_GRAN_START + 4)
251 #define TSO_ALLOCS_LOCN (TSO_GRAN_START + 5)
252 #define TSO_EXECTIME_LOCN (TSO_GRAN_START + 6)
253 #define TSO_FETCHTIME_LOCN (TSO_GRAN_START + 7)
254 #define TSO_FETCHCOUNT_LOCN (TSO_GRAN_START + 8)
255 #define TSO_BLOCKTIME_LOCN (TSO_GRAN_START + 9)
256 #define TSO_BLOCKCOUNT_LOCN (TSO_GRAN_START + 10)
257 #define TSO_BLOCKEDAT_LOCN (TSO_GRAN_START + 11)
258 #define TSO_GLOBALSPARKS_LOCN (TSO_GRAN_START + 12)
259 #define TSO_LOCALSPARKS_LOCN (TSO_GRAN_START + 13)
260 #define TSO_QUEUE_LOCN (TSO_GRAN_START + 14)
261 #define TSO_PRI_LOCN (TSO_GRAN_START + 15)
262 #define TSO_CLOCK_LOCN (TSO_GRAN_START + 16)
/* Field accessors for TSO closures.  Each macro casts the closure to a
   pointer of the field's element type and indexes by the *_LOCN offset
   defined above.  The expansions are fully parenthesized, so they can be
   used both as rvalues and as assignment targets (lvalues). */
#define TSO_LINK(closure) (((PP_)closure)[TSO_LINK_LOCN])
#define TSO_CCC(closure) (((CostCentre *)closure)[TSO_CCC_LOCN])
#define TSO_NAME(closure) (((PP_)closure)[TSO_NAME_LOCN])
#define TSO_ID(closure) (((P_)closure)[TSO_ID_LOCN])
#define TSO_TYPE(closure) (((P_)closure)[TSO_TYPE_LOCN])
#define TSO_PC1(closure) (((FP_)closure)[TSO_PC1_LOCN])
#define TSO_PC2(closure) (((FP_)closure)[TSO_PC2_LOCN])
#define TSO_ARG1(closure) (((P_)closure)[TSO_ARG1_LOCN])
#define TSO_EVENT(closure) (((P_)closure)[TSO_EVENT_LOCN])
#define TSO_SWITCH(closure) (((FP_)closure)[TSO_SWITCH_LOCN])
/* Stack high-water marks (reduction-statistics words). */
#define TSO_AHWM(closure) (((I_ *)closure)[TSO_AHWM_LOCN])
#define TSO_BHWM(closure) (((I_ *)closure)[TSO_BHWM_LOCN])
/* Granularity/parallelism statistics fields.
   NOTE(review): the *_GRAN_* offsets are only meaningful when
   TSO_GRAN_WORDS > 0, i.e. in GRAN/PAR builds -- confirm the guarding
   #if around this group (not visible in this chunk). */
#define TSO_LOCKED(closure) (((P_)closure)[TSO_LOCKED_LOCN])
#define TSO_SPARKNAME(closure) (((P_)closure)[TSO_SPARKNAME_LOCN])
#define TSO_STARTEDAT(closure) (((P_)closure)[TSO_STARTEDAT_LOCN])
#define TSO_EXPORTED(closure) (((P_)closure)[TSO_EXPORTED_LOCN])
#define TSO_BASICBLOCKS(closure) (((P_)closure)[TSO_BASICBLOCKS_LOCN])
#define TSO_ALLOCS(closure) (((P_)closure)[TSO_ALLOCS_LOCN])
#define TSO_EXECTIME(closure) (((P_)closure)[TSO_EXECTIME_LOCN])
#define TSO_FETCHTIME(closure) (((P_)closure)[TSO_FETCHTIME_LOCN])
#define TSO_FETCHCOUNT(closure) (((P_)closure)[TSO_FETCHCOUNT_LOCN])
#define TSO_BLOCKTIME(closure) (((P_)closure)[TSO_BLOCKTIME_LOCN])
#define TSO_BLOCKCOUNT(closure) (((P_)closure)[TSO_BLOCKCOUNT_LOCN])
#define TSO_BLOCKEDAT(closure) (((P_)closure)[TSO_BLOCKEDAT_LOCN])
#define TSO_GLOBALSPARKS(closure) (((P_)closure)[TSO_GLOBALSPARKS_LOCN])
#define TSO_LOCALSPARKS(closure) (((P_)closure)[TSO_LOCALSPARKS_LOCN])
#define TSO_QUEUE(closure) (((P_)closure)[TSO_QUEUE_LOCN])
#define TSO_PRI(closure) (((P_)closure)[TSO_PRI_LOCN])
/* TSO_CLOCK is only needed in GrAnSim-Light */
#define TSO_CLOCK(closure) (((P_)closure)[TSO_CLOCK_LOCN])
298 #define TSO_INTERNAL_PTR(closure) \
299 ((STGRegisterTable *)(((W_)(((P_)closure) \
300 + TSO_HS + BYTES_TO_STGWORDS(sizeof(StgDouble)))) & ~(sizeof(StgDouble) - 1)))
302 #if defined(CONCURRENT) && defined(GRAN) /* HWL */
303 /* Per definitionem a tso is really awake if it has met a first */
304 /* GRAN_RESCHEDULE macro after having been rescheduled. */
305 #define REALLY_AWAKE(tso) (TSO_SWITCH(tso) != TSO_PC2(tso))
306 #define SET_AWAKE_FLAG(tso) TSO_SWITCH(tso) = NULL
307 #define RESET_AWAKE_FLAG(tso) TSO_SWITCH(tso) = TSO_PC2(tso)
312 The types of threads (TSO_TYPE):
314 #define T_MAIN 0 /* Must be executed locally */
315 #define T_REQUIRED 1 /* A required thread -- may be exported */
316 #define T_ADVISORY 2 /* An advisory thread -- may be exported */
317 #define T_FAIL 3 /* A failure thread -- may be exported */
320 The total space required to start a new thread (See NewThread in
323 #define THREAD_SPACE_REQUIRED (TSO_HS + TSO_CTS_SIZE + STKO_HS + RTSflags.ConcFlags.stkChunkSize)
326 Here are the various queues for GrAnSim-type events.
328 #define Q_RUNNING 'G'
329 #define Q_RUNNABLE 'A'
330 #define Q_BLOCKED 'R'
331 #define Q_FETCHING 'Y'
332 #define Q_MIGRATING 'B'
335 %************************************************************************
337 \subsubsection[spark-closures]{Pending Sparks}
339 %************************************************************************
344 P_ FindLocalSpark PROTO((rtsBool forexport));
346 void DisposeSpark PROTO((P_ spark));
347 rtsBool Spark PROTO((P_ closure, rtsBool required));
351 #ifdef GRAN /* For GrAnSim sparks are currently mallocated -- HWL */
353 void DisposeSpark PROTO((sparkq spark));
354 sparkq NewSpark PROTO((P_,I_,I_,I_,I_,I_));
356 /* # define MAX_EVENTS 1000 */ /* For GC Roots Purposes */
357 # define MAX_SPARKS 0 /* i.e. infinite */
359 #if defined(GRAN_JSM_SPARKS)
/* spark is a pointer into some sparkq (which, for JSM sparks, is just
   an array of struct spark) */
/* JSM sparks live in a flat array, so there is no back-link to follow.
   Any use of SPARK_PREV in this configuration is a bug: report and die.
   Fixed: the original expansion was missing the semicolon between
   fprintf() and EXIT(), so it did not even compile when used; it is now
   wrapped in do { ... } while (0) so it behaves as a single statement
   inside if/else, and the message is newline-terminated. */
# define SPARK_PREV(spark) do { \
                fprintf(stderr, "Error: SPARK_PREV not supported for JSM sparks\n"); \
                EXIT(EXIT_FAILURE); } while (0)
365 /* NB: SPARK_NEXT may only be used as a rhs but NOT as a lhs */
366 # define SPARK_NEXT(spark) (spark++)
367 # define SPARK_NODE(spark) (P_)(spark->node)
368 # define SPARK_NAME(spark) (spark->name)
369 # define SPARK_GRAN_INFO(spark) (spark->gran_info)
370 # define SPARK_GLOBAL(spark) (spark->global)
371 # define SPARK_EXPORTED(spark) (SPARK_GLOBAL(spark) > 1)
373 # define SPARK_PREV(spark) (spark->prev)
374 # define SPARK_NEXT(spark) (sparkq)(spark->next)
375 # define SPARK_NODE(spark) (spark->node)
376 # define SPARK_NAME(spark) (spark->name)
377 # define SPARK_GRAN_INFO(spark) (spark->gran_info)
378 # define SPARK_GLOBAL(spark) (spark->global)
379 # define SPARK_EXPORTED(spark) (SPARK_GLOBAL(spark) > 1)
385 %************************************************************************
387 \subsubsection[STKO-closures]{@STKO@ (stack object) heap objects}
389 %************************************************************************
391 We linger in the Deeply Magical...
393 Each reduction thread has to have its own stack space. As there may
394 be many such threads, and as any given one may need quite a big stack,
395 a naive give-'em-a-big-stack-and-let-'em-run approach will cost a {\em
398 Our approach is to give a thread a small stack space, and then link
399 on/off extra ``chunks'' as the need arises. Again, this is a
400 storage-management problem, and, yet again, we choose to graft the
401 whole business onto the existing heap-management machinery. So stack
402 objects will live in the heap, be garbage collected, etc., etc..
404 So, as with TSOs, we use the standard heap-object (`closure') jargon.
406 Here is the picture of how a stack object is arranged:
408 <----- var hdr --------> v ---- FirstPtr --- v
409 ---------------------------------------------------------------------
410 ...|| SpB | SuB | SpA | SuA || B stk -> ... | ... <- A stk || PREV ||
411 ---------------------------------------------------------------------
415 We keep the following state-of-stack info in the {\em variable-header}
418 SpB, SuB & their {\em offsets} from 1st non-hdr word (marked \tr{XX} above)\\
419 SpA, SuA & their {\em offsets} from the next-to-last word (marked \tr{YY} above)\\
420 ctr field??? & (GC\_GEN\_WHATNOT may serve instead)\\
423 The stack-pointer offsets are from the points indicated and are {\em
424 non-negative} for pointers to this chunk of the stack space.
426 At the {\em end} of the stack object, we have a {\em link} to the
427 previous part of the overall stack. The link is \tr{NULL} if this is
428 the bottom of the overall stack.
430 After the header, we have @STKO_CHUNK_SIZE-1@ words of actual stack
431 stuff. The B-stack part begins at the lowest address and grows
upwards; the A-stack part begins at the highest address and grows
435 From a storage-manager point of view, these are {\em very special}
440 #define STKO_VHS (GC_MUT_RESERVED_WORDS + 9)
442 #define STKO_VHS (GC_MUT_RESERVED_WORDS + 7)
444 #define STKO_HS (FIXED_HS + STKO_VHS)
446 #define MIN_STKO_CHUNK_SIZE 16 /* Rather arbitrary */
448 #define STKO_CLOSURE_SIZE(closure) STKO_SIZE(closure)
450 #define STKO_CLOSURE_CTS_SIZE(closure) (STKO_CLOSURE_SIZE(closure) - STKO_VHS)
451 #define STKO_CLOSURE_PTR(closure, no) (*STKO_CLOSURE_ADDR(closure, no))
453 #define STKO_CLOSURE_ADDR(s, n) (((P_)(s)) + STKO_HS + (n) - 1)
454 #define STKO_CLOSURE_OFFSET(s, p) (((P_)(p) - (P_)(s)) - STKO_HS + 1)
456 /* std start-filling-in macro: */
457 #define SET_STKO_HDR(s,infolbl,cc) \
458 { SET_FIXED_HDR(s,infolbl,cc); \
459 SET_MUT_RESERVED_WORDS(s); \
460 /* the other header words filled in some other way */ }
462 /* now we have the STKO-specific stuff
464 Note: The S[pu][AB] registers are put in this order so that
465 they will appear in monotonically increasing order in
466 the StkO...just as an aid to the poor wee soul who has
471 #define STKO_ADEP_LOCN (STKO_HS - 9)
472 #define STKO_BDEP_LOCN (STKO_HS - 8)
474 #define STKO_SIZE_LOCN (STKO_HS - 7)
475 #define STKO_RETURN_LOCN (STKO_HS - 6)
476 #define STKO_LINK_LOCN (STKO_HS - 5)
477 #define STKO_SuB_LOCN (STKO_HS - 4)
478 #define STKO_SpB_LOCN (STKO_HS - 3)
479 #define STKO_SpA_LOCN (STKO_HS - 2)
480 #define STKO_SuA_LOCN (STKO_HS - 1)
482 #define STKO_ADEP(s) (((I_ *)(s))[STKO_ADEP_LOCN])
483 #define STKO_BDEP(s) (((I_ *)(s))[STKO_BDEP_LOCN])
484 #define STKO_SIZE(s) (((P_)(s))[STKO_SIZE_LOCN])
485 #define STKO_RETURN(s) (((StgRetAddr *)(s))[STKO_RETURN_LOCN])
486 #define STKO_LINK(s) (((PP_)(s))[STKO_LINK_LOCN])
487 #define STKO_SpB(s) (((PP_)(s))[STKO_SpB_LOCN])
488 #define STKO_SuB(s) (((PP_)(s))[STKO_SuB_LOCN])
489 #define STKO_SpA(s) (((PP_ *)(s))[STKO_SpA_LOCN])
490 #define STKO_SuA(s) (((PP_ *)(s))[STKO_SuA_LOCN])
492 #define STKO_BSTK_OFFSET(closure) (STKO_HS)
493 #define STKO_ASTK_OFFSET(closure) (FIXED_HS + STKO_CLOSURE_SIZE(closure) - 1)
494 #define STKO_BSTK_BOT(closure) (((P_)(closure)) + STKO_BSTK_OFFSET(closure))
495 #define STKO_ASTK_BOT(closure) (((PP_)(closure)) + STKO_ASTK_OFFSET(closure))
498 These are offsets into the stack object proper (starting at 1 for
499 the first word after the header).
502 #define STKO_SpA_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SpA(s)))
503 #define STKO_SuA_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SuA(s)))
504 #define STKO_SpB_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SpB(s)))
505 #define STKO_SuB_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SuB(s)))
508 %************************************************************************
510 \subsubsection[BQ-closures]{@BQ@ (blocking queue) heap objects (`closures')}
512 %************************************************************************
514 Blocking queues are built in the parallel system when a local thread
515 enters a non-global node. They are similar to black holes, except
516 that when they are updated, the blocking queue must be enlivened
517 too. A blocking queue closure thus has the following structure.
522 \begin{tabular}{||l|l|l|l||}\hline
523 GA & Info ptr. & $\ldots$ & Blocking Queue \\ \hline
529 The blocking queue itself is a pointer to a list of blocking queue entries.
530 The list is formed from TSO closures. For the generational garbage collectors,
531 the BQ must have the same structure as an IND, with the blocking queue hanging
532 off of the indirection pointer. (This has to do with treating the BQ as an old
533 root if it gets updated while in the old generation.)
536 #define BQ_VHS IND_VHS
539 #define BQ_CLOSURE_SIZE(closure) IND_CLOSURE_SIZE(closure)
540 #define BQ_CLOSURE_NoPTRS(closure) IND_CLOSURE_NoPTRS(closure)
541 #define BQ_CLOSURE_NoNONPTRS(closure) IND_CLOSURE_NoNONPTRS(closure)
542 #define BQ_CLOSURE_PTR(closure, no) (((P_)(closure))[BQ_HS + (no) - 1])
545 Blocking queues store a pointer to a list of blocking queue entries.
548 #define BQ_ENTRIES(closure) IND_CLOSURE_PTR(closure)
549 #define BQ_LINK(closure) IND_CLOSURE_LINK(closure)
552 We have only one kind of blocking queue closure, so we test the info pointer
553 for a specific value rather than looking in the info table for a special bit.
558 #define IS_BQ_CLOSURE(closure) (INFO_PTR(closure) == (W_) BQ_info)
561 %************************************************************************
563 \subsubsection[TSO_ITBL]{@TSO_ITBL@}
565 %************************************************************************
567 The special info table used for thread state objects (TSOs).
572 CAT_DECLARE(TSO,INTERNAL_KIND,"TSO","<TSO>") \
574 EXTDATA_RO(MK_REP_LBL(TSO,,)); \
575 const W_ TSO_info[] = { \
577 ,(W_) INFO_OTHER_TAG \
578 ,(W_) MK_REP_REF(TSO,,) \
579 INCLUDE_PROFILING_INFO(TSO) \
583 const W_ MK_REP_LBL(TSO,,)[] = { \
584 INCLUDE_TYPE_INFO(TSO) \
585 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
587 INCLUDE_COPYING_INFO(_Evacuate_TSO,_Scavenge_TSO) \
588 INCLUDE_COMPACTING_INFO(_ScanLink_TSO,_PRStart_TSO,_ScanMove_TSO,_PRIn_TSO) \
593 %************************************************************************
595 \subsubsection[STKO_ITBL]{@STKO_ITBL@}
597 %************************************************************************
599 The special info table used for stack objects (STKOs).
602 #define STKO_ITBL() \
603 CAT_DECLARE(StkO,INTERNAL_KIND,"STKO","<STKO>") \
604 EXTFUN(StkO_entry); \
605 EXTDATA_RO(MK_REP_LBL(StkO,,)); \
606 const W_ StkO_info[] = { \
608 ,(W_) INFO_OTHER_TAG \
609 ,(W_) MK_REP_REF(StkO,,) \
610 INCLUDE_PROFILING_INFO(StkO) \
613 #define STKO_RTBL() \
614 const W_ MK_REP_LBL(StkO,,)[] = { \
615 INCLUDE_TYPE_INFO(STKO_DYNAMIC) \
616 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
618 INCLUDE_COPYING_INFO(_Evacuate_StkO,_Scavenge_StkO) \
619 INCLUDE_COMPACTING_INFO(_ScanLink_StkO,_PRStart_StkO,_ScanMove_StkO,_PRIn_StkO) \
622 #define STKO_STATIC_ITBL() \
623 CAT_DECLARE(StkO_static,INTERNAL_KIND,"STKO","<STKO>") \
624 EXTFUN(StkO_static_entry); \
625 EXTDATA_RO(MK_REP_LBL(StkO_static,,)); \
626 const W_ StkO_static_info[] = { \
627 (W_) StkO_static_entry \
628 ,(W_) INFO_OTHER_TAG \
629 ,(W_) MK_REP_REF(StkO_static,,) \
630 INCLUDE_PROFILING_INFO(StkO_static) \
633 #define STKO_STATIC_RTBL() \
634 const W_ MK_REP_LBL(StkO_static,,)[] = { \
635 INCLUDE_TYPE_INFO(STKO_STATIC) \
636 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
638 INCLUDE_COPYING_INFO(_Evacuate_Static,_Dummy_Static_entry) \
639 INCLUDE_COMPACTING_INFO(_Dummy_Static_entry,_PRStart_Static, \
640 _Dummy_Static_entry,_PRIn_Error) \
645 %************************************************************************
647 \subsubsection[BQ_ITBL]{@BQ_ITBL@}
649 %************************************************************************
651 Special info-table for local blocking queues.
655 CAT_DECLARE(BQ,INTERNAL_KIND,"BQ","<BQ>") \
657 EXTDATA_RO(MK_REP_LBL(BQ,,)); \
658 const W_ BQ_info[] = { \
660 ,(W_) INFO_OTHER_TAG \
661 ,(W_) MK_REP_REF(BQ,,) \
662 INCLUDE_PROFILING_INFO(BQ) \
666 const W_ MK_REP_LBL(BQ,,)[] = { \
667 INCLUDE_TYPE_INFO(BQ) \
668 INCLUDE_SIZE_INFO(MIN_UPD_SIZE,INFO_UNUSED) \
670 INCLUDE_COPYING_INFO(_Evacuate_BQ,_Scavenge_BQ) \
671 SPEC_COMPACTING_INFO(_ScanLink_BQ,_PRStart_BQ,_ScanMove_BQ,_PRIn_BQ) \
677 #endif /* CONCURRENT */
680 Even the sequential system gets to play with SynchVars, though it really
681 doesn't make too much sense (if any). Okay; maybe it makes some sense.
682 (See the 1.3 I/O stuff.)
684 %************************************************************************
686 \subsubsection[SVar-closures]{@SynchVar@ heap objects}
688 %************************************************************************
691 #define SVAR_HS (MUTUPLE_HS)
693 #define SVAR_CLOSURE_SIZE(closure) 3
695 #define SET_SVAR_HDR(closure,infolbl,cc) \
696 SET_MUTUPLE_HDR(closure,infolbl,cc,MUTUPLE_VHS+3,3)
/* The value must come first, because we shrink the other two fields off
   when writing an IVar */
#define SVAR_VALUE_LOCN (SVAR_HS+0)
#define SVAR_HEAD_LOCN (SVAR_HS+1)
#define SVAR_TAIL_LOCN (SVAR_HS+2)

/* SynchVar field accessors; usable as lvalues as well as rvalues.
   Fixed: the expansions are now fully parenthesized, consistent with
   every other accessor family in this file (TSO_*, STKO_*, SPARK_*)
   and safe against mis-binding when the macro appears inside a larger
   expression. */
#define SVAR_VALUE(closure) (((PP_)(closure))[SVAR_VALUE_LOCN])
#define SVAR_HEAD(closure) (((PP_)(closure))[SVAR_HEAD_LOCN])
#define SVAR_TAIL(closure) (((PP_)(closure))[SVAR_TAIL_LOCN])
710 End multi-slurp protection:
713 #endif /* THREADS_H */