2 % (c) The GRASP Project, Glasgow University, 1994-1995
4 \section[Thread]{Thread support macros used in \tr{.hc} files}
/* No-op stubs for the granularity-simulation (GranSim) hooks: with this
   branch selected, the hooks in compiled .hc code expand to nothing.
   NOTE(review): presumably the !GRAN branch of a conditional whose
   #if/#else lines are outside this view -- confirm against the full file. */
#define GRAN_ALLOC_HEAP(n,liveness) /* nothing */
#define GRAN_UNALLOC_HEAP(n,liveness) /* nothing */
#define GRAN_FETCH() /* nothing */
#define GRAN_FETCH_AND_RESCHEDULE(liveness) /* nothing */
#define GRAN_RESCHEDULE(liveness, reenter) /* nothing */
#define GRAN_EXEC(arith,branch,loads,stores,floats) /* nothing */
#define GRAN_SPARK() /* nothing */
26 #define OR_CONTEXT_SWITCH
30 extern I_ do_gr_sim; /* Are we simulating granularity? */
33 extern I_ do_qp_prof; /* Are we quasi-parallel profiling? */
39 #define DO_QP_PROF do_qp_prof
42 extern I_ context_switch; /* Flag set by signal handler */
44 #define CS_MAX_FREQUENCY 100 /* context switches per second */
45 #define CS_MIN_MILLISECS (1000/CS_MAX_FREQUENCY)/* milliseconds per slice */
47 #ifdef __STG_GCC_REGS__
48 #define OR_CONTEXT_SWITCH || context_switch
50 #define OR_CONTEXT_SWITCH /* in miniInterpret */
/* Indices into the per-processor spark pool arrays
   (PendingSparksHd/Tl below are indexed [proc][pool]). */
#define REQUIRED_POOL 0
#define ADVISORY_POOL 1
59 extern PP_ PendingSparksBase[SPARK_POOLS], PendingSparksLim[SPARK_POOLS];
60 extern PP_ PendingSparksHd[SPARK_POOLS], PendingSparksTl[SPARK_POOLS];
62 extern I_ SparkLimit[SPARK_POOLS];
64 extern P_ RunnableThreadsHd, RunnableThreadsTl;
65 extern P_ WaitingThreadsHd, WaitingThreadsTl;
67 extern I_ sparksIgnored;
69 IF_RTS(extern void AwaitEvent(I_);)
73 extern sparkq PendingSparksHd[][SPARK_POOLS], PendingSparksTl[][SPARK_POOLS];
74 extern P_ RunnableThreadsHd[], RunnableThreadsTl[],
75 WaitThreadsHd[], WaitThreadsTl[];
/* Convenience aliases for the current processor's queues: each expands
   to the CurrentProc slot of the corresponding per-PE array declared above. */
#define SparkQueueHd PendingSparksHd[CurrentProc][ADVISORY_POOL]
#define SparkQueueTl PendingSparksTl[CurrentProc][ADVISORY_POOL]
#define ThreadQueueHd RunnableThreadsHd[CurrentProc]
#define ThreadQueueTl RunnableThreadsTl[CurrentProc]
#define WaitingThreadsHd WaitThreadsHd[CurrentProc]
#define WaitingThreadsTl WaitThreadsTl[CurrentProc]
86 IF_RTS(extern void PruneSparks(STG_NO_ARGS);)
90 /* Codes that can be used as params for ReSchedule */
91 /* I distinguish them from the values 0/1 in the -UGRAN setup for security */
/* Parameter codes for ReSchedule; deliberately not 0/1 (see comment above). */
#define FIND_THREAD 10
#define SAME_THREAD 11
#define NEW_THREAD SAME_THREAD /* NOTE(review): alias of SAME_THREAD and the value 12 is unused -- confirm this is intended */
#define CHANGE_THREAD 13
98 #define MAX_PROC (BITS_IN(W_)) /* Maximum number of PEs that can be simulated */
101 extern W_ IdleProcs, Idlers;
103 extern unsigned CurrentProc;
104 extern W_ CurrentTime[];
105 extern W_ SparksAvail, SurplusThreads;
107 /* Processor numbers to bitmasks and vice-versa */
110 #define PE_NUMBER(n) (1l << (long)n)
111 #define ThisPE PE_NUMBER(CurrentProc)
112 #define MainPE PE_NUMBER(MainProc)
/* Test whether the bit for processor `proc` is set in global-address mask `ga`.
   Fix: parenthesize both macro arguments -- previously IS_LOCAL_TO(2|4, 1)
   expanded to ((1l << (long) 1) & 2|4), and since & binds tighter than |
   that is ((2 & 2) | 4) == 6 rather than the intended (2 & 6) == 2. */
#define IS_LOCAL_TO(ga,proc) ((1l << (long)(proc)) & (ga))
116 /* These constants should eventually be program parameters */
118 /* Communication Cost Model (EDS-like), max_proc > 2. */
#define LATENCY 1000 /* Latency for single packet */
#define ADDITIONAL_LATENCY 100 /* Latency for additional packets */
#define BASICBLOCKTIME 10
/* NB: MSGUNPACKTIME is #defined further down in this file; that is fine,
   since macros are expanded at their use sites, not here. */
#define FETCHTIME (LATENCY*2+MSGUNPACKTIME)
#define LOCALUNBLOCKTIME 10
#define GLOBALUNBLOCKTIME (LATENCY+MSGUNPACKTIME)
127 extern W_ gran_latency, gran_additional_latency, gran_fetchtime,
128 gran_lunblocktime, gran_gunblocktime;
130 #define MSGPACKTIME 0 /* Cost of creating a packet */
131 #define MSGUNPACKTIME 0 /* Cost of receiving a packet */
133 extern W_ gran_mpacktime, gran_mtidytime, gran_munpacktime;
/* Thread cost model */
/* NB: THREADSCHEDULETIME is #defined below its first textual use here;
   legal, as macros are expanded where used, not where defined. */
#define THREADCREATETIME (25+THREADSCHEDULETIME)
#define THREADQUEUETIME 12 /* Cost of adding a thread to the running/runnable queue */
#define THREADDESCHEDULETIME 75 /* Cost of descheduling a thread */
#define THREADSCHEDULETIME 75 /* Cost of scheduling a thread */
#define THREADCONTEXTSWITCHTIME (THREADDESCHEDULETIME+THREADSCHEDULETIME)
142 extern W_ gran_threadcreatetime, gran_threadqueuetime,
143 gran_threadscheduletime, gran_threaddescheduletime,
144 gran_threadcontextswitchtime;
146 /* Instruction Cost model (SPARC, including cache misses) */
148 #define BRANCH_COST 2
151 #define FLOAT_COST 1 /* ? */
153 extern W_ gran_arith_cost, gran_branch_cost,
154 gran_load_cost, gran_store_cost, gran_float_cost,
157 /* Miscellaneous Parameters */
158 extern I_ DoFairSchedule;
159 extern I_ DoReScheduleOnFetch;
160 extern I_ SimplifiedFetch;
161 extern I_ DoStealThreadsFirst;
162 extern I_ DoAlwaysCreateThreads;
163 extern I_ DoThreadMigration;
164 extern I_ DoGUMMFetching;
165 extern I_ FetchStrategy;
166 extern I_ PreferSparksOfLocalNodes;
167 /* These come from debug options -bD? */
169 extern I_ PrintFetchMisses, fetch_misses;
171 extern I_ nUPDs, nUPDs_old, nUPDs_new, nUPDs_BQ, nPAPs, BQ_lens;
174 extern I_ no_gr_profile;
175 extern I_ do_sp_profile;
177 extern I_ NeedToReSchedule;
179 extern void GranSimAllocate PROTO((I_, P_, W_));
180 extern void GranSimUnAllocate PROTO((I_, P_, W_));
181 extern I_ GranSimFetch PROTO((P_));
182 extern void GranSimExec PROTO((W_,W_,W_,W_,W_));
183 extern void GranSimSpark PROTO((W_,P_));
184 extern void GranSimBlock (STG_NO_ARGS);
185 extern void PerformReschedule PROTO((W_, W_));
187 #if 0 /* 'ngo Dochmey */
189 #define GRAN_ALLOC_HEAP(n,liveness) STGCALL3(void,(),GranSimAllocate,n,0,0)
190 #define GRAN_UNALLOC_HEAP(n,liveness) STGCALL3(void,(),GranSimUnallocate,n,0,0)
192 #define GRAN_FETCH() STGCALL1(I_,(),GranSimFetch,Node)
194 #define GRAN_FETCH_AND_RESCHEDULE(liveness_mask) \
195 do { if(liveness_mask&LIVENESS_R1) \
196 STGCALL1(I_,(),GranSimFetch,Node); \
197 GRAN_RESCHEDULE(liveness_mask,1); \
200 #define GRAN_RESCHEDULE(liveness_mask,reenter) \
201 STGCALL2_GC(void,(), \
202 PerformReschedule,liveness_mask,reenter)
204 #define THREAD_CONTEXT_SWITCH(liveness_mask,reenter) \
206 if (context_switch /* OR_INTERVAL_EXPIRED */) { \
207 GRAN_RESCHEDULE(liveness_mask,reenter); \
210 #define GRAN_EXEC(arith,branch,load,store,floats) \
211 STGCALL5(void,(),GranSimExec,arith,branch,load,store,floats)
214 #else /* 1 */ /* chu' Dochmey */
216 #define GRAN_ALLOC_HEAP(n,liveness) \
218 GranSimAllocate(n,0,0); \
221 #define GRAN_UNALLOC_HEAP(n,liveness) \
223 GranSimUnallocate(n,0,0); \
226 #define GRAN_FETCH() \
228 GranSimFetch(Node); \
231 #define GRAN_FETCH_AND_RESCHEDULE(liveness_mask) \
232 do { if(liveness_mask&LIVENESS_R1) \
234 GranSimFetch(Node); \
235 RestoreAllStgRegs(); \
236 GRAN_RESCHEDULE(liveness_mask,1); \
239 #define GRAN_RESCHEDULE(liveness_mask,reenter) \
240 PerformReschedule_wrapper(liveness_mask,reenter)
242 #define THREAD_CONTEXT_SWITCH(liveness_mask,reenter) \
244 if (context_switch /* OR_INTERVAL_EXPIRED */) { \
245 GRAN_RESCHEDULE(liveness_mask,reenter); \
248 #define GRAN_EXEC(arith,branch,load,store,floats) \
250 GranSimExec(arith,branch,load,store,floats); \
256 #define ADD_TO_SPARK_QUEUE(spark) \
257 SPARK_NEXT(spark) = NULL; \
258 SPARK_PREV(spark) = PendingSparksTl[CurrentProc][ADVISORY_POOL]; \
259 if (PendingSparksHd[CurrentProc][ADVISORY_POOL] == NULL) \
260 PendingSparksHd[CurrentProc][ADVISORY_POOL] = spark; \
262 SPARK_NEXT(PendingSparksTl[CurrentProc][ADVISORY_POOL]) = spark; \
263 PendingSparksTl[CurrentProc][ADVISORY_POOL] = spark; \
267 extern P_ CurrentTSO; /* thread state object now in use */
269 extern P_ AvailableStack;
270 extern P_ AvailableTSO;
274 void ScheduleThreads PROTO((P_ topClosure));
276 void ReSchedule PROTO((int what_next)) STG_NORETURN;
278 void ReSchedule PROTO((int again)) STG_NORETURN;
280 void EndThread(STG_NO_ARGS) STG_NORETURN;
282 void QP_Event0 PROTO((I_, P_));
283 void QP_Event1 PROTO((char *, P_));
284 void QP_Event2 PROTO((char *, P_, P_));
285 long qp_elapsed_time(STG_NO_ARGS);
288 %************************************************************************
290 \subsection[thread-heap-objs]{Special threads-only heap objects (`closures')}
292 %************************************************************************
294 %************************************************************************
296 \subsubsection[TSO-closures]{@TSO@ (thread state object) heap objects}
298 %************************************************************************
300 We now enter the realm of the Deeply Magical.
302 Reduction threads come and go, resume and suspend, etc., in the threaded
303 world. Obviously, there must be a place to squirrel away state information
304 when a thread is suspended. Hence these {\em thread state objects} (TSOs).
306 Rather than manage TSOs' alloc/dealloc, etc., in some {\em ad hoc} way, we
307 instead alloc/dealloc/etc them in the heap; then we can use all the
308 standard garbage-collection/fetching/flushing/etc machinery on them.
309 So that's why TSOs are ``heap objects,'' albeit very special ones.
311 We use all the standard heap-object/closure jargon... (e.g.,
312 @SET_TSO_HDR@, fixed headers, variable-hdr size, ...).
314 A TSO is a fixed-size object with (post-header) words arranged like
315 the main register table, and enough slop so that the register table
316 can be properly aligned. The last header word of the TSO is
317 a pointer to the (internal) start of the interesting data.
319 Note that the heap and stack pointers in the TSO are only valid while
320 the thread is executing, and only if the corresponding values are not
321 stored in machine registers (i.e. the TSO becomes the backing register
322 table for those values).
325 #define TSO_INFO_WORDS 10
328 #define TSO_REDN_WORDS 2
330 #define TSO_REDN_WORDS 0
333 #if defined(GRAN) || defined(PAR)
334 #define TSO_GRAN_WORDS 15
336 #define TSO_GRAN_WORDS 0
340 (GC_MUT_RESERVED_WORDS + TSO_INFO_WORDS + TSO_REDN_WORDS + TSO_GRAN_WORDS)
342 #define TSO_HS (FIXED_HS + TSO_VHS)
343 #define TSO_CTS_SIZE (BYTES_TO_STGWORDS(sizeof(STGRegisterTable) + sizeof(StgDouble)))
345 #define TSO_PTRS (MAX_VANILLA_REG + 2)
347 /* std start-filling-in macro: */
348 #define SET_TSO_HDR(closure,infolbl,cc) \
349 { SET_FIXED_HDR(closure,infolbl,cc); \
350 SET_MUT_RESERVED_WORDS(closure); \
/* Word offsets (relative to the start of the closure) of the TSO
   bookkeeping fields; the ten slots (+0 .. +9) below account exactly
   for TSO_INFO_WORDS. */
#define TSO_INFO_START (FIXED_HS + GC_MUT_RESERVED_WORDS)
#define TSO_LINK_LOCN (TSO_INFO_START + 0)
#define TSO_CCC_LOCN (TSO_INFO_START + 1)
#define TSO_NAME_LOCN (TSO_INFO_START + 2)
#define TSO_ID_LOCN (TSO_INFO_START + 3)
#define TSO_TYPE_LOCN (TSO_INFO_START + 4)
#define TSO_PC1_LOCN (TSO_INFO_START + 5)
#define TSO_PC2_LOCN (TSO_INFO_START + 6)
#define TSO_ARG1_LOCN (TSO_INFO_START + 7)
#define TSO_EVENT_LOCN (TSO_INFO_START + 8)
#define TSO_SWITCH_LOCN (TSO_INFO_START + 9)
365 #define TSO_REDN_START (TSO_INFO_START + TSO_INFO_WORDS)
367 #define TSO_AHWM_LOCN (TSO_REDN_START + 0)
368 #define TSO_BHWM_LOCN (TSO_REDN_START + 1)
371 #define TSO_GRAN_START (TSO_REDN_START + TSO_REDN_WORDS)
372 #if defined(GRAN) || defined(PAR)
373 #define TSO_LOCKED_LOCN (TSO_GRAN_START + 0)
374 #define TSO_SPARKNAME_LOCN (TSO_GRAN_START + 1)
375 #define TSO_STARTEDAT_LOCN (TSO_GRAN_START + 2)
376 #define TSO_EXPORTED_LOCN (TSO_GRAN_START + 3)
377 #define TSO_BASICBLOCKS_LOCN (TSO_GRAN_START + 4)
378 #define TSO_ALLOCS_LOCN (TSO_GRAN_START + 5)
379 #define TSO_EXECTIME_LOCN (TSO_GRAN_START + 6)
380 #define TSO_FETCHTIME_LOCN (TSO_GRAN_START + 7)
381 #define TSO_FETCHCOUNT_LOCN (TSO_GRAN_START + 8)
382 #define TSO_BLOCKTIME_LOCN (TSO_GRAN_START + 9)
383 #define TSO_BLOCKCOUNT_LOCN (TSO_GRAN_START + 10)
384 #define TSO_BLOCKEDAT_LOCN (TSO_GRAN_START + 11)
385 #define TSO_GLOBALSPARKS_LOCN (TSO_GRAN_START + 12)
386 #define TSO_LOCALSPARKS_LOCN (TSO_GRAN_START + 13)
387 #define TSO_QUEUE_LOCN (TSO_GRAN_START + 14)
/* TSO field accessors: each casts the closure to an array of the field's
   type and indexes it by the word offset defined above.
   NOTE(review): TSO_CCC indexes through (CostCentre *) -- that is only
   correct if sizeof(CostCentre) equals the word size used by the other
   casts (P_/PP_/FP_); confirm against the CostCentre declaration. */
#define TSO_LINK(closure) (((PP_)closure)[TSO_LINK_LOCN])
#define TSO_CCC(closure) (((CostCentre *)closure)[TSO_CCC_LOCN])
#define TSO_NAME(closure) (((PP_)closure)[TSO_NAME_LOCN])
#define TSO_ID(closure) (((P_)closure)[TSO_ID_LOCN])
#define TSO_TYPE(closure) (((P_)closure)[TSO_TYPE_LOCN])
#define TSO_PC1(closure) (((FP_)closure)[TSO_PC1_LOCN])
#define TSO_PC2(closure) (((FP_)closure)[TSO_PC2_LOCN])
#define TSO_ARG1(closure) (((P_)closure)[TSO_ARG1_LOCN])
#define TSO_EVENT(closure) (((P_)closure)[TSO_EVENT_LOCN])
#define TSO_SWITCH(closure) (((FP_)closure)[TSO_SWITCH_LOCN])
401 #define TSO_AHWM(closure) (((I_ *)closure)[TSO_AHWM_LOCN])
402 #define TSO_BHWM(closure) (((I_ *)closure)[TSO_BHWM_LOCN])
/* Accessors for the GRAN/PAR statistics fields, using the TSO_GRAN_START
   offsets defined above. */
#define TSO_LOCKED(closure) (((P_)closure)[TSO_LOCKED_LOCN])
#define TSO_SPARKNAME(closure) (((P_)closure)[TSO_SPARKNAME_LOCN])
#define TSO_STARTEDAT(closure) (((P_)closure)[TSO_STARTEDAT_LOCN])
#define TSO_EXPORTED(closure) (((P_)closure)[TSO_EXPORTED_LOCN])
#define TSO_BASICBLOCKS(closure) (((P_)closure)[TSO_BASICBLOCKS_LOCN])
#define TSO_ALLOCS(closure) (((P_)closure)[TSO_ALLOCS_LOCN])
#define TSO_EXECTIME(closure) (((P_)closure)[TSO_EXECTIME_LOCN])
#define TSO_FETCHTIME(closure) (((P_)closure)[TSO_FETCHTIME_LOCN])
#define TSO_FETCHCOUNT(closure) (((P_)closure)[TSO_FETCHCOUNT_LOCN])
#define TSO_BLOCKTIME(closure) (((P_)closure)[TSO_BLOCKTIME_LOCN])
#define TSO_BLOCKCOUNT(closure) (((P_)closure)[TSO_BLOCKCOUNT_LOCN])
#define TSO_BLOCKEDAT(closure) (((P_)closure)[TSO_BLOCKEDAT_LOCN])
#define TSO_GLOBALSPARKS(closure) (((P_)closure)[TSO_GLOBALSPARKS_LOCN])
#define TSO_LOCALSPARKS(closure) (((P_)closure)[TSO_LOCALSPARKS_LOCN])
#define TSO_QUEUE(closure) (((P_)closure)[TSO_QUEUE_LOCN])
420 #define TSO_INTERNAL_PTR(closure) \
421 ((STGRegisterTable *)(((W_)(((P_)closure) \
422 + TSO_HS + BYTES_TO_STGWORDS(sizeof(StgDouble)))) & ~(sizeof(StgDouble) - 1)))
424 #if defined(CONCURRENT) && defined(GRAN) /* HWL */
/* By definition, a TSO counts as "really awake" only once it has passed */
/* its first GRAN_RESCHEDULE macro after being rescheduled. */
427 #define REALLY_AWAKE(tso) (TSO_SWITCH(tso) != TSO_PC2(tso))
428 #define SET_AWAKE_FLAG(tso) TSO_SWITCH(tso) = NULL
429 #define RESET_AWAKE_FLAG(tso) TSO_SWITCH(tso) = TSO_PC2(tso)
434 The types of threads (TSO_TYPE):
436 #define T_MAIN 0 /* Must be executed locally */
437 #define T_REQUIRED 1 /* A required thread -- may be exported */
438 #define T_ADVISORY 2 /* An advisory thread -- may be exported */
439 #define T_FAIL 3 /* A failure thread -- may be exported */
442 The total space required to start a new thread (See NewThread in
445 #define THREAD_SPACE_REQUIRED (TSO_HS + TSO_CTS_SIZE + STKO_HS + RTSflags.ConcFlags.stkChunkSize)
448 Here are the various queues for GrAnSim-type events.
450 #define Q_RUNNING 'G'
451 #define Q_RUNNABLE 'A'
452 #define Q_BLOCKED 'R'
453 #define Q_FETCHING 'Y'
456 %************************************************************************
458 \subsubsection[spark-closures]{Pending Sparks}
460 %************************************************************************
465 P_ FindLocalSpark PROTO((rtsBool forexport));
467 void DisposeSpark PROTO((P_ spark));
468 rtsBool Spark PROTO((P_ closure, rtsBool required));
472 #ifdef GRAN /* For GrAnSim sparks are currently mallocated -- HWL */
474 void DisposeSpark PROTO((sparkq spark));
476 # define MAX_SPARKS 2000 /* For GC Roots Purposes */
477 # if defined(GRAN_TNG)
478 extern sparkq NewSpark PROTO((P_,I_,I_,I_));
479 # else /* !GRAN_TNG */
480 extern sparkq NewSpark PROTO((P_,I_,I_));
481 # endif /* GRAN_TNG */
483 # define SPARK_PREV(spark) (spark->prev)
484 # define SPARK_NEXT(spark) (sparkq)(spark->next)
485 # define SPARK_NODE(spark) (P_)(spark->node)
486 # define SPARK_NAME(spark) (spark->name)
487 # if defined(GRAN_TNG)
488 # define SPARK_GRAN_INFO(spark) (spark->gran_info)
489 # endif /* GRAN_TNG */
490 # define SPARK_GLOBAL(spark) (spark->global)
491 # define SPARK_EXPORTED(spark) (SPARK_GLOBAL(spark) > 1)
496 %************************************************************************
498 \subsubsection[STKO-closures]{@STKO@ (stack object) heap objects}
500 %************************************************************************
502 We linger in the Deeply Magical...
504 Each reduction thread has to have its own stack space. As there may
505 be many such threads, and as any given one may need quite a big stack,
506 a naive give-'em-a-big-stack-and-let-'em-run approach will cost a {\em
509 Our approach is to give a thread a small stack space, and then link
510 on/off extra ``chunks'' as the need arises. Again, this is a
511 storage-management problem, and, yet again, we choose to graft the
512 whole business onto the existing heap-management machinery. So stack
513 objects will live in the heap, be garbage collected, etc., etc..
515 So, as with TSOs, we use the standard heap-object (`closure') jargon.
517 Here is the picture of how a stack object is arranged:
519 <----- var hdr --------> v ---- FirstPtr --- v
520 ---------------------------------------------------------------------
521 ...|| SpB | SuB | SpA | SuA || B stk -> ... | ... <- A stk || PREV ||
522 ---------------------------------------------------------------------
526 We keep the following state-of-stack info in the {\em variable-header}
529 SpB, SuB & their {\em offsets} from 1st non-hdr word (marked \tr{XX} above)\\
530 SpA, SuA & their {\em offsets} from the next-to-last word (marked \tr{YY} above)\\
531 ctr field??? & (GC\_GEN\_WHATNOT may serve instead)\\
534 The stack-pointer offsets are from the points indicated and are {\em
535 non-negative} for pointers to this chunk of the stack space.
537 At the {\em end} of the stack object, we have a {\em link} to the
538 previous part of the overall stack. The link is \tr{NULL} if this is
539 the bottom of the overall stack.
541 After the header, we have @STKO_CHUNK_SIZE-1@ words of actual stack
542 stuff. The B-stack part begins at the lowest address and grows
upwards; the A-stack part begins at the highest address and grows
546 From a storage-manager point of view, these are {\em very special}
551 #define STKO_VHS (GC_MUT_RESERVED_WORDS + 9)
553 #define STKO_VHS (GC_MUT_RESERVED_WORDS + 7)
555 #define STKO_HS (FIXED_HS + STKO_VHS)
557 #define MIN_STKO_CHUNK_SIZE 16 /* Rather arbitrary */
559 #define STKO_CLOSURE_SIZE(closure) STKO_SIZE(closure)
561 #define STKO_CLOSURE_CTS_SIZE(closure) (STKO_CLOSURE_SIZE(closure) - STKO_VHS)
562 #define STKO_CLOSURE_PTR(closure, no) (*STKO_CLOSURE_ADDR(closure, no))
564 #define STKO_CLOSURE_ADDR(s, n) (((P_)(s)) + STKO_HS + (n) - 1)
565 #define STKO_CLOSURE_OFFSET(s, p) (((P_)(p) - (P_)(s)) - STKO_HS + 1)
567 /* std start-filling-in macro: */
568 #define SET_STKO_HDR(s,infolbl,cc) \
569 { SET_FIXED_HDR(s,infolbl,cc); \
570 SET_MUT_RESERVED_WORDS(s); \
571 /* the other header words filled in some other way */ }
573 /* now we have the STKO-specific stuff
575 Note: The S[pu][AB] registers are put in this order so that
576 they will appear in monotonically increasing order in
577 the StkO...just as an aid to the poor wee soul who has
582 #define STKO_ADEP_LOCN (STKO_HS - 9)
583 #define STKO_BDEP_LOCN (STKO_HS - 8)
585 #define STKO_SIZE_LOCN (STKO_HS - 7)
586 #define STKO_RETURN_LOCN (STKO_HS - 6)
587 #define STKO_LINK_LOCN (STKO_HS - 5)
588 #define STKO_SuB_LOCN (STKO_HS - 4)
589 #define STKO_SpB_LOCN (STKO_HS - 3)
590 #define STKO_SpA_LOCN (STKO_HS - 2)
591 #define STKO_SuA_LOCN (STKO_HS - 1)
/* STKO field accessors (negative offsets from STKO_HS, see above).
   NOTE(review): SpA/SuA are accessed through (PP_ *) while SpB/SuB use
   plain PP_ -- i.e. the A-stack pointers point at words that themselves
   hold pointers; confirm this asymmetry is intended. */
#define STKO_ADEP(s) (((I_ *)(s))[STKO_ADEP_LOCN])
#define STKO_BDEP(s) (((I_ *)(s))[STKO_BDEP_LOCN])
#define STKO_SIZE(s) (((P_)(s))[STKO_SIZE_LOCN])
#define STKO_RETURN(s) (((StgRetAddr *)(s))[STKO_RETURN_LOCN])
#define STKO_LINK(s) (((PP_)(s))[STKO_LINK_LOCN])
#define STKO_SpB(s) (((PP_)(s))[STKO_SpB_LOCN])
#define STKO_SuB(s) (((PP_)(s))[STKO_SuB_LOCN])
#define STKO_SpA(s) (((PP_ *)(s))[STKO_SpA_LOCN])
#define STKO_SuA(s) (((PP_ *)(s))[STKO_SuA_LOCN])
603 #define STKO_BSTK_OFFSET(closure) (STKO_HS)
604 #define STKO_ASTK_OFFSET(closure) (FIXED_HS + STKO_CLOSURE_SIZE(closure) - 1)
605 #define STKO_BSTK_BOT(closure) (((P_)(closure)) + STKO_BSTK_OFFSET(closure))
606 #define STKO_ASTK_BOT(closure) (((PP_)(closure)) + STKO_ASTK_OFFSET(closure))
609 These are offsets into the stack object proper (starting at 1 for
610 the first word after the header).
613 #define STKO_SpA_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SpA(s)))
614 #define STKO_SuA_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SuA(s)))
615 #define STKO_SpB_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SpB(s)))
616 #define STKO_SuB_OFFSET(s) (STKO_CLOSURE_OFFSET(s,STKO_SuB(s)))
619 %************************************************************************
621 \subsubsection[BQ-closures]{@BQ@ (blocking queue) heap objects (`closures')}
623 %************************************************************************
625 Blocking queues are built in the parallel system when a local thread
626 enters a non-global node. They are similar to black holes, except
627 that when they are updated, the blocking queue must be enlivened
628 too. A blocking queue closure thus has the following structure.
633 \begin{tabular}{||l|l|l|l||}\hline
634 GA & Info ptr. & $\ldots$ & Blocking Queue \\ \hline
640 The blocking queue itself is a pointer to a list of blocking queue entries.
641 The list is formed from TSO closures. For the generational garbage collectors,
642 the BQ must have the same structure as an IND, with the blocking queue hanging
643 off of the indirection pointer. (This has to do with treating the BQ as an old
644 root if it gets updated while in the old generation.)
647 #define BQ_VHS IND_VHS
650 #define BQ_CLOSURE_SIZE(closure) IND_CLOSURE_SIZE(closure)
651 #define BQ_CLOSURE_NoPTRS(closure) IND_CLOSURE_NoPTRS(closure)
652 #define BQ_CLOSURE_NoNONPTRS(closure) IND_CLOSURE_NoNONPTRS(closure)
653 #define BQ_CLOSURE_PTR(closure, no) (((P_)(closure))[BQ_HS + (no) - 1])
656 Blocking queues store a pointer to a list of blocking queue entries.
659 #define BQ_ENTRIES(closure) IND_CLOSURE_PTR(closure)
660 #define BQ_LINK(closure) IND_CLOSURE_LINK(closure)
663 We have only one kind of blocking queue closure, so we test the info pointer
664 for a specific value rather than looking in the info table for a special bit.
669 #define IS_BQ_CLOSURE(closure) (INFO_PTR(closure) == (W_) BQ_info)
672 %************************************************************************
674 \subsubsection[TSO_ITBL]{@TSO_ITBL@}
676 %************************************************************************
678 The special info table used for thread state objects (TSOs).
683 CAT_DECLARE(TSO,INTERNAL_KIND,"TSO","<TSO>") \
685 EXTDATA_RO(MK_REP_LBL(TSO,,)); \
686 const W_ TSO_info[] = { \
688 ,(W_) INFO_OTHER_TAG \
689 ,(W_) MK_REP_REF(TSO,,) \
690 INCLUDE_PROFILING_INFO(TSO) \
694 const W_ MK_REP_LBL(TSO,,)[] = { \
695 INCLUDE_TYPE_INFO(TSO) \
696 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
698 INCLUDE_COPYING_INFO(_Evacuate_TSO,_Scavenge_TSO) \
699 INCLUDE_COMPACTING_INFO(_ScanLink_TSO,_PRStart_TSO,_ScanMove_TSO,_PRIn_TSO) \
704 %************************************************************************
706 \subsubsection[STKO_ITBL]{@STKO_ITBL@}
708 %************************************************************************
710 The special info table used for stack objects (STKOs).
713 #define STKO_ITBL() \
714 CAT_DECLARE(StkO,INTERNAL_KIND,"STKO","<STKO>") \
715 EXTFUN(StkO_entry); \
716 EXTDATA_RO(MK_REP_LBL(StkO,,)); \
717 const W_ StkO_info[] = { \
719 ,(W_) INFO_OTHER_TAG \
720 ,(W_) MK_REP_REF(StkO,,) \
721 INCLUDE_PROFILING_INFO(StkO) \
724 #define STKO_RTBL() \
725 const W_ MK_REP_LBL(StkO,,)[] = { \
726 INCLUDE_TYPE_INFO(STKO_DYNAMIC) \
727 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
729 INCLUDE_COPYING_INFO(_Evacuate_StkO,_Scavenge_StkO) \
730 INCLUDE_COMPACTING_INFO(_ScanLink_StkO,_PRStart_StkO,_ScanMove_StkO,_PRIn_StkO) \
733 #define STKO_STATIC_ITBL() \
734 CAT_DECLARE(StkO_static,INTERNAL_KIND,"STKO","<STKO>") \
735 EXTFUN(StkO_static_entry); \
736 EXTDATA_RO(MK_REP_LBL(StkO_static,,)); \
737 const W_ StkO_static_info[] = { \
738 (W_) StkO_static_entry \
739 ,(W_) INFO_OTHER_TAG \
740 ,(W_) MK_REP_REF(StkO_static,,) \
741 INCLUDE_PROFILING_INFO(StkO_static) \
744 #define STKO_STATIC_RTBL() \
745 const W_ MK_REP_LBL(StkO_static,,)[] = { \
746 INCLUDE_TYPE_INFO(STKO_STATIC) \
747 INCLUDE_SIZE_INFO(INFO_UNUSED,INFO_UNUSED) \
749 INCLUDE_COPYING_INFO(_Evacuate_Static,_Dummy_Static_entry) \
750 INCLUDE_COMPACTING_INFO(_Dummy_Static_entry,_PRStart_Static, \
751 _Dummy_Static_entry,_PRIn_Error) \
756 %************************************************************************
758 \subsubsection[BQ_ITBL]{@BQ_ITBL@}
760 %************************************************************************
762 Special info-table for local blocking queues.
766 CAT_DECLARE(BQ,INTERNAL_KIND,"BQ","<BQ>") \
768 EXTDATA_RO(MK_REP_LBL(BQ,,)); \
769 const W_ BQ_info[] = { \
771 ,(W_) INFO_OTHER_TAG \
772 ,(W_) MK_REP_REF(BQ,,) \
773 INCLUDE_PROFILING_INFO(BQ) \
777 const W_ MK_REP_LBL(BQ,,)[] = { \
778 INCLUDE_TYPE_INFO(BQ) \
779 INCLUDE_SIZE_INFO(MIN_UPD_SIZE,INFO_UNUSED) \
781 INCLUDE_COPYING_INFO(_Evacuate_BQ,_Scavenge_BQ) \
782 SPEC_COMPACTING_INFO(_ScanLink_BQ,_PRStart_BQ,_ScanMove_BQ,_PRIn_BQ) \
788 #endif /* CONCURRENT */
791 Even the sequential system gets to play with SynchVars, though it really
792 doesn't make too much sense (if any). Okay; maybe it makes some sense.
793 (See the 1.3 I/O stuff.)
795 %************************************************************************
797 \subsubsection[SVar-closures]{@SynchVar@ heap objects}
799 %************************************************************************
802 #define SVAR_HS (MUTUPLE_HS)
804 #define SVAR_CLOSURE_SIZE(closure) 3
806 #define SET_SVAR_HDR(closure,infolbl,cc) \
807 SET_MUTUPLE_HDR(closure,infolbl,cc,MUTUPLE_VHS+3,3)
809 /* The value must come first, because we shrink the other two fields off
810 when writing an IVar */
812 #define SVAR_VALUE_LOCN (SVAR_HS+0)
813 #define SVAR_HEAD_LOCN (SVAR_HS+1)
814 #define SVAR_TAIL_LOCN (SVAR_HS+2)
816 #define SVAR_VALUE(closure) ((PP_)(closure))[SVAR_VALUE_LOCN]
817 #define SVAR_HEAD(closure) ((PP_)(closure))[SVAR_HEAD_LOCN]
818 #define SVAR_TAIL(closure) ((PP_)(closure))[SVAR_TAIL_LOCN]
821 End multi-slurp protection:
824 #endif /* THREADS_H */