2 % (c) The GRASP/AQUA Project, Glasgow University, 1993-1994
4 \section[StgMacros]{C macros used in GHC-generated \tr{.hc} files}
11 %************************************************************************
13 \subsection[StgMacros-abbrev]{Abbreviatory(?) and general macros}
15 %************************************************************************
/* Shorthand macros for declaring STG functions (type F_) and data
   (arrays of machine words, W_).  Used throughout GHC-generated .hc code. */
19 /* for function declarations */
20 #define STGFUN(f) F_ f(STG_NO_ARGS)
21 #define STATICFUN(f) static F_ f(STG_NO_ARGS)
23 /* for functions/data that are really external to this module */
24 #define EXTFUN(f) extern F_ f(STG_NO_ARGS)
25 #define EXTDATA(d) extern W_ d[]
26 #define EXTDATA_RO(d) extern const W_ d[] /* read-only */
28 /* for fwd decls to functions/data somewhere else in this module */
29 /* (identical for the mo') */
30 #define INTFUN(f) static F_ f(STG_NO_ARGS)
31 #define INTDATA(d) extern W_ d[]
32 #define INTDATA_RO(d) extern const W_ d[] /* read-only */
34 /* short forms of most of the above */
36 #define FN_(f) F_ f(STG_NO_ARGS)
37 #define IFN_(f) static F_ f(STG_NO_ARGS)
38 #define EF_(f) extern F_ f(STG_NO_ARGS)
39 #define ED_(d) extern W_ d[]
40 #define ED_RO_(d) extern const W_ d[] /* read-only */
41 #define IF_(f) static F_ f(STG_NO_ARGS)
43 /* GCC is uncooperative about the next one: */
44 /* But, the "extern" prevents initialisation... ADR */
46 #define ID_(d) extern W_ d[]
47 #define ID_RO_(d) extern const W_ d[] /* read-only */
/* NOTE(review): ID_/ID_RO_ are defined twice below -- the enclosing
   #if/#else that selects between the "extern" and "static" variants
   appears to have been elided here; confirm against the full file. */
49 #define ID_(d) static W_ d[]
50 #define ID_RO_(d) static const W_ d[] /* read-only */
54 General things; note: general-but-``machine-dependent'' macros are
55 given in \tr{StgMachDeps.lh}.
57 #define STG_MAX(a,b) (((a)>=(b)) ? (a) : (b))
60 Macros to combine two short words into a single
61 word and split such a word back into two.
63 Dependent on machine word size :-)
66 #define COMBINE_WORDS(word,short1,short2) \
68 ((packed_shorts *)&(word))->wu.s1 = short1; \
69 ((packed_shorts *)&(word))->wu.s2 = short2; \
72 #define SPLIT_WORD(word,short1,short2) \
74 short1 = ((packed_shorts *)&(word))->wu.s1; \
75 short2 = ((packed_shorts *)&(word))->wu.s2; \
80 %************************************************************************
82 \subsection[StgMacros-gen-stg]{General STGish macros}
84 %************************************************************************
86 Common sizes of vector-return tables.
88 Claim: don't need fwd decls for return pts in \tr{VECTBL*}, because
89 the AbsC flattener ensures that things come out sufficiently
93 #ifdef __STG_REV_TBLS__
94 #define UNVECTBL(staticp,label,a) /* nothing */
96 #define UNVECTBL(staticp,label,a) \
98 staticp const W_ label[] = { \
105 #if defined(USE_SPLIT_MARKERS)
106 #define __STG_SPLIT_MARKER(n) FN_(CAT2(__stg_split_marker,n)){ }
108 #define __STG_SPLIT_MARKER(n) /* nothing */
112 %************************************************************************
114 \subsection[StgMacros-exceptions]{Exception-checking macros}
116 %************************************************************************
118 Argument-satisfaction check, stack(s) overflow check, heap overflow
121 The @SUBTRACT(upper, lower)@ macros return a positive result in words
122 indicating the amount by which upper is above lower on the stack.
/* Positive distance, in words, by which `upper' is above `lower' on the
   A or B stack.  AREL/BREL account for the growth direction of each stack. */
125 #define SUBTRACT_A_STK( upper, lower ) AREL( (lower) - (upper) )
126 #define SUBTRACT_B_STK( upper, lower ) BREL( (lower) - (upper) )
129 %************************************************************************
131 \subsubsection[StgMacros-arg-satis]{Argument-satisfaction checks}
133 %************************************************************************
135 @ARGS_CHK(n)@ sees if there are @n@ words of args on the A/B stack.
136 If not, it jumps to @UpdatePAP@.
138 @ARGS_CHK@ args are pre-directionified.
139 Notice that we do the comparisons in the form (x < a+n), for
140 some constant n. This generates more efficient code (with GCC at least)
144 #define ARGS_CHK_A(n) \
145 if (SuA /*SUBTRACT_A_STK( SpA, SuA )*/ < (SpA+(n))) { \
149 #define ARGS_CHK_A_LOAD_NODE(n, closure_addr) \
150 if (SuA /*SUBTRACT_A_STK( SpA, SuA )*/ < (SpA+(n))) { \
151 Node = (P_) closure_addr; \
155 #define ARGS_CHK_B(n) \
156 if (SpB /*SUBTRACT_B_STK( SpB, SuB )*/ < (SuB-(n))) { \
161 #define ARGS_CHK_B_LOAD_NODE(n, closure_addr) \
162 if (SpB /*SUBTRACT_B_STK( SpB, SuB )*/ < (SuB-(n))) { \
163 Node = (P_) closure_addr; \
168 %************************************************************************
170 \subsubsection[StgMacros-stk-chks]{Stack-overflow check}
172 %************************************************************************
174 @STK_CHK(a,b)@ [misc args omitted...] checks that we can allocate @a@
175 words of A stack and @b@ words of B stack. If not, it calls
176 @StackOverflow@ (which dies).
178 (It will be different in the parallel case.)
180 NB: args @a@ and @b@ are pre-direction-ified!
/* Out-of-line helpers used by the stack-check machinery. */
182 extern I_ SqueezeUpdateFrames PROTO((P_, P_, P_));
183 int sanityChk_StkO (P_ stko); /* ToDo: move to a sane place */
185 #if ! defined(CONCURRENT)
187 extern void StackOverflow(STG_NO_ARGS) STG_NORETURN;
189 #if STACK_CHECK_BY_PAGE_FAULT
191 #define STACK_OVERFLOW(liveness,hda,hdb,spa,spb,rtype,reenter) \
192 /* use memory protection instead; still need ticky-ness */
196 #define STACK_OVERFLOW(liveness,hda,hdb,spa,spb,rtype,reenter) \
197 ULTRASAFESTGCALL0(void,(void *),StackOverflow)
199 #endif /* not using page-faulting */
203 extern I_ StackOverflow PROTO((W_, W_));
206 * On a uniprocessor, we do *NOT* context switch on a stack overflow
207 * (though we may GC). Therefore, we never have to reenter node.
210 #define STACK_OVERFLOW(liveness,hda,hdb,spa,spb,rtype,reenter) \
211 DO_STACKOVERFLOW((hda+hdb)<<2|((rtype)<<1)|(reenter),((spa)<<20)|((spb)<<8)|(liveness))
/* Decoders matching the argument packing performed by STACK_OVERFLOW above:
   first word  = (hda+hdb)<<2 | rtype<<1 | reenter,
   second word = spa<<20 | spb<<8 | liveness (8-bit liveness mask). */
213 #define STACK_OVERFLOW_HEADROOM(args,y) ((args) >> 2)
214 #define STACK_OVERFLOW_PRIM_RETURN(args,y) ((args) & 2)
215 #define STACK_OVERFLOW_REENTER(args,y) ((args) & 1)
217 #define STACK_OVERFLOW_AWORDS(x,args) (((args) >> 20) & 0x0fff)
218 #define STACK_OVERFLOW_BWORDS(x,args) (((args) >> 8) & 0x0fff)
219 #define STACK_OVERFLOW_LIVENESS(x,args) ((args) & 0xff)
221 #endif /* CONCURRENT */
223 #define STK_CHK(liveness_mask,a_headroom,b_headroom,spa,spb,ret_type,reenter)\
225 DO_ASTK_HWM(); /* ticky-ticky profiling */ \
227 if (STKS_OVERFLOW_OP((a_headroom) + 1, (b_headroom) + 1)) { \
228 STACK_OVERFLOW(liveness_mask,a_headroom,b_headroom,spa,spb,ret_type,reenter);\
233 %************************************************************************
235 \subsubsection[StgMacros-arity-chks]{Arity checks (for debugging)}
237 %************************************************************************
239 This is a debugging feature. Each call to fast-entry-point code sets
240 @ExpectedArity@ to some value, and the callee then checks that the
241 value is as expected.
244 #if defined(__DO_ARITY_CHKS__)
246 extern I_ ExpectedArity;
247 extern void ArityError PROTO((I_)) STG_NORETURN;
249 #define SET_ARITY(n) do { ExpectedArity = (n); } while(0)
250 #define CHK_ARITY(n) \
252 if (ExpectedArity != (n)) { \
253 ULTRASAFESTGCALL1(void,(void *, I_),ArityError,n); \
256 #else /* ! __DO_ARITY_CHKS__: normal case */
258 #define SET_ARITY(n) /* nothing */
259 #define CHK_ARITY(n) /* nothing */
261 #endif /* ! __DO_ARITY_CHKS__ */
264 %************************************************************************
266 \subsubsection[StgMacros-heap-chks]{Heap-overflow checks}
268 %************************************************************************
270 Please see the general discussion/commentary about ``what really
271 happens in a GC,'' in \tr{SMinterface.lh}.
/* Garbage-collector entry points invoked (directly or via DO_GC) by the
   heap-check macros below. */
274 extern void PerformGC PROTO((W_));
275 void RealPerformGC PROTO((W_ liveness, W_ reqsize, W_ always_reenter_node, rtsBool do_full_collection));
276 void checkInCCallGC(STG_NO_ARGS);
279 void StgPerformGarbageCollection(STG_NO_ARGS);
284 #define OR_MSG_PENDING /* never */
286 #define HEAP_OVERFLOW(liveness,n,reenter) \
288 DO_GC((((W_)n)<<8)|(liveness)); \
291 #define REQSIZE_BITMASK ((1L << ((BITS_IN(W_) - 8 + 1))) - 1)
/* Decoders for the packing done by HEAP_OVERFLOW above: args = n<<8 | liveness.
   REENTER is constantly 0: on a uniprocessor we never reenter node (see note). */
292 #define HEAP_OVERFLOW_REQSIZE(args) (((args) >> 8) & REQSIZE_BITMASK)
293 #define HEAP_OVERFLOW_REENTER(args) 0
294 #define HEAP_OVERFLOW_LIVENESS(args) ((args) & 0xff)
296 #else /* CONCURRENT */
298 extern void ReallyPerformThreadGC PROTO((W_, rtsBool));
300 #define HEAP_OVERFLOW(liveness,n,reenter) \
302 DO_GC((((W_)(n))<<9)|((reenter)<<8)|(liveness)); \
305 #define REQSIZE_BITMASK ((1L << ((BITS_IN(W_) - 9 + 1))) - 1)
/* Concurrent variant: HEAP_OVERFLOW packs args = n<<9 | reenter<<8 | liveness.
   NOTE(review): REQSIZE_BITMASK is also defined above -- the two definitions
   presumably sit in opposite branches of an elided #if CONCURRENT; confirm. */
306 #define HEAP_OVERFLOW_REQSIZE(args) (((args) >> 9) & REQSIZE_BITMASK)
307 #define HEAP_OVERFLOW_REENTER(args) (((args) >> 8) & 0x1)
308 #define HEAP_OVERFLOW_LIVENESS(args) ((args) & 0xff)
/* OR_MSG_PENDING: extra heap-check trigger for incoming messages (parallel
   builds).  Currently expands to nothing everywhere; the commented-out
   definition below shows the intended message-probing version.
   NOTE(review): multiple identical definitions appear here -- the
   conditional (#if) branches selecting them seem to have been elided. */
312 #define OR_MSG_PENDING /* never */
316 extern int PacketsWaiting; /*Probes for incoming messages*/
317 extern int heapChkCounter; /*Not currently used! We check for messages when*/
318 /*a thread is rescheduled PWT*/
319 /* #define OR_MSG_PENDING || (--heapChkCounter == 0 && PacketsWaiting())*/
320 #define OR_MSG_PENDING /* never */
323 #endif /* CONCURRENT */
325 #if 0 /* alpha_TARGET_ARCH */
326 #define CACHE_LINE 4 /* words */
327 #define LINES_AHEAD 3
328 #define PRE_FETCH(n) \
331 j = ((STG_VOLATILE StgInt *) Hp)[LINES_AHEAD * CACHE_LINE]; \
333 #define EXTRA_HEAP_WORDS (CACHE_LINE * LINES_AHEAD)
335 #define PRE_FETCH(reg)
336 #define EXTRA_HEAP_WORDS 0
340 #define HEAP_CHK(liveness_mask,n,reenter) \
342 /* TICKY_PARANOIA(__FILE__, __LINE__); */ \
343 /* THREAD_CONTEXT_SWITCH(liveness_mask,reenter); */ \
344 ALLOC_HEAP(n); /* ticky profiling */ \
345 GRAN_ALLOC_HEAP(n,liveness_mask); /* Granularity Simulation */ \
346 if (((Hp = Hp + (n)) > HpLim)) { \
347 /* Old: STGCALL3_GC(PerformGC,liveness_mask,n,StgFalse); */\
348 HEAP_OVERFLOW(liveness_mask,n,StgFalse); \
353 #define HEAP_CHK(liveness_mask,n,reenter) \
355 /* TICKY_PARANOIA(__FILE__, __LINE__); */ \
357 ALLOC_HEAP(n); /* ticky profiling */ \
358 if (((Hp = Hp + (n)) > HpLim) OR_INTERVAL_EXPIRED OR_CONTEXT_SWITCH OR_MSG_PENDING) { \
359 HEAP_OVERFLOW(liveness_mask,n,reenter); \
367 #define HEAP_CHK_AND_RESTORE_N(liveness_mask,n,reenter) \
369 /* TICKY_PARANOIA(__FILE__, __LINE__); */ \
371 ALLOC_HEAP(n); /* ticky profiling */ \
372 if (((Hp = Hp + (n)) > HpLim) OR_INTERVAL_EXPIRED OR_CONTEXT_SWITCH OR_MSG_PENDING) { \
373 HEAP_OVERFLOW(liveness_mask,n,reenter); \
374 n = TSO_ARG1(CurrentTSO); \
379 #define HEAP_CHK_AND_RESTORE_N(liveness_mask,n,reenter) \
380 HEAP_CHK(liveness_mask,n,reenter)
387 %************************************************************************
389 \subsection[StgMacros-prim-ops]{Primitive operations}
391 %************************************************************************
393 One thing to be {\em very careful about} with these macros that assign
394 to results is that the assignment must come {\em last}. Some of the
395 other arguments may be in terms of addressing modes that get clobbered
396 by the assignment. (Dirty imperative programming RULES!)
398 The order here is roughly that in \tr{compiler/prelude/PrimOps.lhs}.
400 %************************************************************************
402 \subsubsection[StgMacros-compare-primops]{Primitive comparison ops on basic types}
404 %************************************************************************
406 We cast the chars in case one of them is a literal (so C things work right
407 even for 8-bit chars).
/* Comparison primops: each assigns an I_ boolean (0 or 1) to r.
   The assignment to r comes last, per the note above about clobbering. */
409 #define gtCharZh(r,a,b) r=(I_)((a)> (b))
410 #define geCharZh(r,a,b) r=(I_)((a)>=(b))
411 #define eqCharZh(r,a,b) r=(I_)((a)==(b))
412 #define neCharZh(r,a,b) r=(I_)((a)!=(b))
413 #define ltCharZh(r,a,b) r=(I_)((a)< (b))
414 #define leCharZh(r,a,b) r=(I_)((a)<=(b))
416 #define gtIntZh(r,a,b) r=(I_)((a) >(b))
417 #define geIntZh(r,a,b) r=(I_)((a)>=(b))
418 #define eqIntZh(r,a,b) r=(I_)((a)==(b))
419 #define neIntZh(r,a,b) r=(I_)((a)!=(b))
420 #define ltIntZh(r,a,b) r=(I_)((a) <(b))
421 #define leIntZh(r,a,b) r=(I_)((a)<=(b))
423 #define gtWordZh(r,a,b) r=(I_)((a) >(b))
424 #define geWordZh(r,a,b) r=(I_)((a)>=(b))
425 #define eqWordZh(r,a,b) r=(I_)((a)==(b))
426 #define neWordZh(r,a,b) r=(I_)((a)!=(b))
427 #define ltWordZh(r,a,b) r=(I_)((a) <(b))
428 #define leWordZh(r,a,b) r=(I_)((a)<=(b))
430 #define gtAddrZh(r,a,b) r=(I_)((a) >(b))
431 #define geAddrZh(r,a,b) r=(I_)((a)>=(b))
432 #define eqAddrZh(r,a,b) r=(I_)((a)==(b))
433 #define neAddrZh(r,a,b) r=(I_)((a)!=(b))
434 #define ltAddrZh(r,a,b) r=(I_)((a) <(b))
435 #define leAddrZh(r,a,b) r=(I_)((a)<=(b))
437 #define gtFloatZh(r,a,b) r=(I_)((a)> (b))
438 #define geFloatZh(r,a,b) r=(I_)((a)>=(b))
439 #define eqFloatZh(r,a,b) r=(I_)((a)==(b))
440 #define neFloatZh(r,a,b) r=(I_)((a)!=(b))
441 #define ltFloatZh(r,a,b) r=(I_)((a)< (b))
442 #define leFloatZh(r,a,b) r=(I_)((a)<=(b))
444 #define gtDoubleZh(r,a,b) r=(I_)((a)> (b))
445 #define geDoubleZh(r,a,b) r=(I_)((a)>=(b))
446 #define eqDoubleZh(r,a,b) r=(I_)((a)==(b))
447 #define neDoubleZh(r,a,b) r=(I_)((a)!=(b))
448 #define ltDoubleZh(r,a,b) r=(I_)((a)< (b))
449 #define leDoubleZh(r,a,b) r=(I_)((a)<=(b))
452 %************************************************************************
454 \subsubsection[StgMacros-char-primops]{Primitive @Char#@ ops (and @LitString#@ish things, too)}
456 %************************************************************************
458 We cast the chars in case one of them is a literal (so C things work right
459 even for 8-bit chars).
/* Char <-> Int conversions: go via W_ so literal (int) chars work too. */
461 #define ordZh(r,a) r=(I_)((W_) (a))
462 #define chrZh(r,a) r=(StgChar)((W_)(a))
465 %************************************************************************
467 \subsubsection[StgMacros-int-primops]{Primitive @Int#@ ops}
469 %************************************************************************
/* stg_div: out-of-line integer division used by divIntZh (presumably
   implements a rounding mode different from C's `/` -- confirm in its
   definition).  NB: pre-C99 C leaves `/` and `%` rounding for negative
   operands implementation-defined. */
472 I_ stg_div PROTO((I_ a, I_ b));
474 #define plusIntZh(r,a,b) r=(a)+(b)
475 #define minusIntZh(r,a,b) r=(a)-(b)
476 #define timesIntZh(r,a,b) r=(a)*(b)
477 #define quotIntZh(r,a,b) r=(a)/(b)
478 #define divIntZh(r,a,b) r=ULTRASAFESTGCALL2(I_,(void *, I_, I_),stg_div,(a),(b))
479 #define remIntZh(r,a,b) r=(a)%(b)
480 #define negateIntZh(r,a) r=-(a)
483 %************************************************************************
485 \subsubsection[StgMacros-word-primops]{Primitive @Word#@ ops}
487 %************************************************************************
/* Bitwise and shift primops.  shiftRAZh/shiftRLZh (and the iShift* pair)
   currently expand identically to C's `>>`.
   NOTE(review): on signed operands `>>` is an arithmetic shift on most
   compilers (implementation-defined in C), so iShiftRLZh as written does
   NOT give a logical shift for negative Ints -- an unsigned cast of (a)
   looks intended; confirm operand types before changing. */
490 #define andZh(r,a,b) r=(a)&(b)
491 #define orZh(r,a,b) r=(a)|(b)
492 #define notZh(r,a) r=~(a)
494 #define shiftLZh(r,a,b) r=(a)<<(b)
495 #define shiftRAZh(r,a,b) r=(a)>>(b)
496 #define shiftRLZh(r,a,b) r=(a)>>(b)
497 #define iShiftLZh(r,a,b) r=(a)<<(b)
498 #define iShiftRAZh(r,a,b) r=(a)>>(b)
499 #define iShiftRLZh(r,a,b) r=(a)>>(b)
501 #define int2WordZh(r,a) r=(W_)(a)
502 #define word2IntZh(r,a) r=(I_)(a)
505 %************************************************************************
507 \subsubsection[StgMacros-addr-primops]{Primitive @Addr#@ ops}
509 %************************************************************************
/* Addr <-> Int casts (A_ is the address type). */
512 #define int2AddrZh(r,a) r=(A_)(a)
513 #define addr2IntZh(r,a) r=(I_)(a)
516 %************************************************************************
518 \subsubsection[StgMacros-float-primops]{Primitive @Float#@ ops}
520 %************************************************************************
/* Float# arithmetic.  Transcendental ops call the C double-precision
   libm functions via SAFESTGCALL and narrow the result back to StgFloat. */
523 #define plusFloatZh(r,a,b) r=(a)+(b)
524 #define minusFloatZh(r,a,b) r=(a)-(b)
525 #define timesFloatZh(r,a,b) r=(a)*(b)
526 #define divideFloatZh(r,a,b) r=(a)/(b)
527 #define negateFloatZh(r,a) r=-(a)
529 #define int2FloatZh(r,a) r=(StgFloat)(a)
530 #define float2IntZh(r,a) r=(I_)(a)
532 #define expFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),exp,a)
533 #define logFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),log,a)
534 #define sqrtFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),sqrt,a)
535 #define sinFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),sin,a)
536 #define cosFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),cos,a)
537 #define tanFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),tan,a)
538 #define asinFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),asin,a)
539 #define acosFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),acos,a)
540 #define atanFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),atan,a)
541 #define sinhFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),sinh,a)
542 #define coshFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),cosh,a)
543 #define tanhFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),tanh,a)
544 #define powerFloatZh(r,a,b) r=(StgFloat) SAFESTGCALL2(StgDouble,(void *, StgDouble,StgDouble),pow,a,b)
546 /* encoding/decoding given w/ Integer stuff */
549 %************************************************************************
551 \subsubsection[StgMacros-double-primops]{Primitive @Double#@ ops}
553 %************************************************************************
/* Double# arithmetic.  Transcendental ops call the C libm functions
   via SAFESTGCALL; no narrowing needed here. */
556 #define plusDoubleZh(r,a,b) r=(a)+(b)
557 #define minusDoubleZh(r,a,b) r=(a)-(b)
558 #define timesDoubleZh(r,a,b) r=(a)*(b)
559 #define divideDoubleZh(r,a,b) r=(a)/(b)
560 #define negateDoubleZh(r,a) r=-(a)
562 #define int2DoubleZh(r,a) r=(StgDouble)(a)
563 #define double2IntZh(r,a) r=(I_)(a)
565 #define float2DoubleZh(r,a) r=(StgDouble)(a)
566 #define double2FloatZh(r,a) r=(StgFloat)(a)
568 #define expDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),exp,a)
569 #define logDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),log,a)
570 #define sqrtDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),sqrt,a)
571 #define sinDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),sin,a)
572 #define cosDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),cos,a)
573 #define tanDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),tan,a)
574 #define asinDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),asin,a)
575 #define acosDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),acos,a)
576 #define atanDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),atan,a)
577 #define sinhDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),sinh,a)
578 #define coshDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),cosh,a)
579 #define tanhDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),tanh,a)
580 #define powerDoubleZh(r,a,b) r=(StgDouble) SAFESTGCALL2(StgDouble,(void *, StgDouble,StgDouble),pow,a,b)
583 %************************************************************************
585 \subsubsection[StgMacros-integer-primops]{Primitive @Integer@-related ops (GMP stuff)}
587 %************************************************************************
589 Dirty macros we use for the real business.
591 INVARIANT: When one of these macros is called, the only live data is
592 tidily on the STG stacks or in the STG registers (the code generator
593 ensures this). If there are any pointer-arguments, they will be in
594 the first \tr{Ret*} registers (e.g., \tr{da} arg of \tr{gmpTake1Return1}).
596 OK, here are the real macros:
598 #define gmpTake1Return1(size_chk_macro, liveness, mpz_op, ar,sr,dr, aa,sa,da) \
601 I_ space = size_chk_macro(sa); \
603 /* Check that there will be enough heap & make Hp visible to GMP allocator */ \
604 GMP_HEAP_LOOKAHEAD(liveness,space); \
606 /* Now we can initialise (post possible GC) */ \
609 arg.d = (unsigned long int *) (BYTE_ARR_CTS(da)); \
611 SAFESTGCALL1(void,(void *, MP_INT *),mpz_init,&result); \
613 /* Perform the operation */ \
614 SAFESTGCALL2(void,(void *, MP_INT *, MP_INT *),mpz_op,&result,&arg); \
616 GMP_HEAP_HANDBACK(); /* restore Hp */ \
617 (ar) = result.alloc; \
618 (sr) = result.size; \
619 (dr) = (B_) (result.d - DATA_HS); \
620 /* pt to *beginning* of object (GMP has been monkeying around in the middle) */ \
624 #define gmpTake2Return1(size_chk_macro, liveness, mpz_op, ar,sr,dr, a1,s1,d1, a2,s2,d2)\
628 I_ space = size_chk_macro(s1,s2); \
630 /* Check that there will be enough heap & make Hp visible to GMP allocator */ \
631 GMP_HEAP_LOOKAHEAD(liveness,space); \
633 /* Now we can initialise (post possible GC) */ \
636 arg1.d = (unsigned long int *) (BYTE_ARR_CTS(d1)); \
639 arg2.d = (unsigned long int *) (BYTE_ARR_CTS(d2)); \
641 SAFESTGCALL1(void,(void *, MP_INT *),mpz_init,&result); \
643 /* Perform the operation */ \
644 SAFESTGCALL3(void,(void *, MP_INT *, MP_INT *, MP_INT *),mpz_op,&result,&arg1,&arg2); \
646 GMP_HEAP_HANDBACK(); /* restore Hp */ \
647 (ar) = result.alloc; \
648 (sr) = result.size; \
649 (dr) = (B_) (result.d - DATA_HS); \
650 /* pt to *beginning* of object (GMP has been monkeying around in the middle) */ \
653 #define gmpTake2Return2(size_chk_macro, liveness, mpz_op, ar1,sr1,dr1, ar2,sr2,dr2, a1,s1,d1, a2,s2,d2) \
658 I_ space = size_chk_macro(s1,s2); \
660 /* Check that there will be enough heap & make Hp visible to GMP allocator */ \
661 GMP_HEAP_LOOKAHEAD(liveness,space); \
663 /* Now we can initialise (post possible GC) */ \
666 arg1.d = (unsigned long int *) (BYTE_ARR_CTS(d1)); \
669 arg2.d = (unsigned long int *) (BYTE_ARR_CTS(d2)); \
671 SAFESTGCALL1(void,(void *, MP_INT *),mpz_init,&result1); \
672 SAFESTGCALL1(void,(void *, MP_INT *),mpz_init,&result2); \
674 /* Perform the operation */ \
675 SAFESTGCALL4(void,(void *, MP_INT *, MP_INT *, MP_INT *, MP_INT *),mpz_op,&result1,&result2,&arg1,&arg2); \
677 GMP_HEAP_HANDBACK(); /* restore Hp */ \
678 (ar1) = result1.alloc; \
679 (sr1) = result1.size; \
680 (dr1) = (B_) (result1.d - DATA_HS); \
681 (ar2) = result2.alloc; \
682 (sr2) = result2.size; \
683 (dr2) = (B_) (result2.d - DATA_HS); \
687 Some handy size-munging macros: sometimes gratuitously {\em conservative}.
688 The \tr{+16} is to allow for the initial allocation of \tr{MP_INT} results.
689 The \tr{__abs} stuff is because negative-ness of GMP things is encoded
/* Conservative heap-size estimates (in words) for GMP results; the +16
   allows for the initial allocation of MP_INT results (see note above).
   NB: __abs is a macro and evaluates its argument twice -- the size
   macros below may therefore evaluate a/b several times. */
692 #define __abs(a) (( (a) >= 0 ) ? (a) : (-(a)))
693 #define GMP_SIZE_ONE() (2 + DATA_HS + 16)
694 #define GMP_SAME_SIZE(a) (__abs(a) + DATA_HS + 16)
695 #define GMP_MAX_SIZE(a,b) ((__abs(a) > __abs(b) ? __abs(a) : __abs(b)) + 1 + DATA_HS + 16)
696 /* NB: the +1 is for the carry (or whatever) */
697 #define GMP_2MAX_SIZE(a,b) (2 * GMP_MAX_SIZE(a,b))
698 #define GMP_ADD_SIZES(a,b) (__abs(a) + __abs(b) + 1 + DATA_HS + 16)
699 /* the +1 may just be paranoia */
702 For the Integer/GMP stuff, we have macros that {\em look ahead} for
703 some space, but don't actually grab it.
705 If there are live pointers at the time of the lookahead, the caller
706 must make sure they are in \tr{Ret1}, \tr{Ret2}, ..., so they can be
707 handled normally. We achieve this by having the code generator {\em
708 always} pass args to may-invoke-GC primitives in registers, using the
709 normal pointers-first policy. This means that, if we do go to garbage
710 collection, everything is already in the Right Place.
712 Saving and restoring the Hp register so the MP allocator can see them. If we are
713 performing lifetime profiling we need to save and restore HpLim as well so that
714 it can be bumped if allocation occurs.
716 The second argument to @GMP_HEAP_LOOKAHEAD@ must be an lvalue so that
717 it can be restored from @TSO_ARG1@ after a failed @HEAP_CHK@ in
721 #define GMP_HEAP_LOOKAHEAD(liveness,n) \
723 HEAP_CHK_AND_RESTORE_N(liveness,n,0); \
725 UN_ALLOC_HEAP(n); /* Undo ticky-ticky */ \
726 SAVE_Hp = Hp; /* Hand over the hp */ \
727 DEBUG_SetGMPAllocBudget(n) \
730 #define GMP_HEAP_HANDBACK() \
732 DEBUG_ResetGMPAllocBudget()
/* Custom GMP memory hooks: allocate GMP temporaries from the STG heap
   budget handed over by GMP_HEAP_LOOKAHEAD / GMP_HEAP_HANDBACK. */
736 void *stgAllocForGMP PROTO((size_t size_in_bytes));
737 void *stgReallocForGMP PROTO((void *ptr, size_t old_size, size_t new_size));
738 void stgDeallocForGMP PROTO((void *ptr, size_t size));
/* Debug accounting of the allocation budget.
   NOTE(review): both the active and the /*nothing*/ definitions appear
   here -- the debug-build #if/#else selecting them looks elided; confirm. */
741 extern StgInt DEBUG_GMPAllocBudget;
742 #define DEBUG_SetGMPAllocBudget(n) DEBUG_GMPAllocBudget = (n);
743 #define DEBUG_ResetGMPAllocBudget() DEBUG_GMPAllocBudget = 0;
745 #define DEBUG_SetGMPAllocBudget(n) /*nothing*/
746 #define DEBUG_ResetGMPAllocBudget() /*nothing*/
750 The real business (defining Integer primops):
/* Integer primops: each pairs a size-estimate macro (above) with the
   GMP mpz_* routine that does the work, routed through the gmpTake*
   wrappers.  NOTE(review): mpz_divmod/mpz_mdivmod are the GMP-1.x
   entry points for quotRem/divMod -- confirm against the GMP version
   actually linked. */
752 #define negateIntegerZh(ar,sr,dr, liveness, aa,sa,da) \
753 gmpTake1Return1(GMP_SAME_SIZE, liveness, mpz_neg, ar,sr,dr, aa,sa,da)
755 #define plusIntegerZh(ar,sr,dr, liveness, a1,s1,d1, a2,s2,d2) \
756 gmpTake2Return1(GMP_MAX_SIZE, liveness, mpz_add, ar,sr,dr, a1,s1,d1, a2,s2,d2)
757 #define minusIntegerZh(ar,sr,dr, liveness, a1,s1,d1, a2,s2,d2) \
758 gmpTake2Return1(GMP_MAX_SIZE, liveness, mpz_sub, ar,sr,dr, a1,s1,d1, a2,s2,d2)
759 #define timesIntegerZh(ar,sr,dr, liveness, a1,s1,d1, a2,s2,d2) \
760 gmpTake2Return1(GMP_ADD_SIZES, liveness, mpz_mul, ar,sr,dr, a1,s1,d1, a2,s2,d2)
762 /* div, mod, quot, rem are defined w/ quotRem & divMod */
764 #define quotRemIntegerZh(ar1,sr1,dr1, ar2,sr2,dr2, liveness, a1,s1,d1, a2,s2,d2) \
765 gmpTake2Return2(GMP_2MAX_SIZE, liveness, mpz_divmod, ar1,sr1,dr1, ar2,sr2,dr2, a1,s1,d1, a2,s2,d2)
766 #define divModIntegerZh(ar1,sr1,dr1, ar2,sr2,dr2, liveness, a1,s1,d1, a2,s2,d2) \
767 gmpTake2Return2(GMP_2MAX_SIZE, liveness, mpz_mdivmod, ar1,sr1,dr1, ar2,sr2,dr2, a1,s1,d1, a2,s2,d2)
770 Comparison ops (@<@, @>=@, etc.) are defined in terms of the cmp
771 fellow (returns -ve, 0, or +ve).
773 #define cmpIntegerZh(r, hp, a1,s1,d1, a2,s2,d2) /* calls mpz_cmp */ \
776 /* Does not allocate memory */ \
780 arg1.d = (unsigned long int *) (BYTE_ARR_CTS(d1)); \
783 arg2.d = (unsigned long int *) (BYTE_ARR_CTS(d2)); \
785 (r) = SAFESTGCALL2(I_,(void *, MP_INT *, MP_INT *),mpz_cmp,&arg1,&arg2); \
792 #define integer2IntZh(r, hp, aa,sa,da) \
794 /* Does not allocate memory */ \
798 arg.d = (unsigned long int *) (BYTE_ARR_CTS(da)); \
800 (r) = SAFESTGCALL1(I_,(void *, MP_INT *),mpz_get_si,&arg); \
803 /* Since we're forced to know a little bit about MP_INT layout to do this with
804 pre-allocated heap, we just inline the whole of mpz_init_set_si here.
805 ** DIRE WARNING. if mpz_init_set_si changes, so does this! ***
808 #define int2IntegerZh(ar,sr,dr, hp, i) \
809 { StgInt val; /* to snaffle arg to avoid aliasing */ \
811 val = (i); /* snaffle... */ \
813 SET_DATA_HDR((hp),ArrayOfData_info,CCC,DATA_VHS+MIN_MP_INT_SIZE,0); \
815 if ((val) < 0) { (sr) = -1; (hp)[DATA_HS] = -(val); } \
816 else if ((val) > 0) { (sr) = 1; (hp)[DATA_HS] = (val); } \
817 else /* val==0 */ { (sr) = 0; } \
819 (dr) = (B_)(hp); /* dr is an StgByteArray */ \
822 #define word2IntegerZh(ar,sr,dr, hp, i) \
823 { StgWord val; /* to snaffle arg to avoid aliasing */ \
825 val = (i); /* snaffle... */ \
827 SET_DATA_HDR((hp),ArrayOfData_info,CCC,DATA_VHS+MIN_MP_INT_SIZE,0); \
829 if ((val) != 0) { (sr) = 1; (hp)[DATA_HS] = (val); } \
830 else /* val==0 */ { (sr) = 0; } \
832 (dr) = (B_)(hp); /* dr is an StgByteArray */ \
837 Then there are a few oddments to make life easier:
841 The "str" argument must be a literal C string.
843 addr2Integer( ..., "foo") OK!
846 addr2Integer( ..., x) NO! NO!
849 #define addr2IntegerZh(ar,sr,dr, liveness, str) \
851 /* taking the number of bytes/8 as the number of words of lookahead \
852 is plenty conservative */ \
853 I_ space = GMP_SAME_SIZE(sizeof(str) / 8 + 1); \
855 GMP_HEAP_LOOKAHEAD(liveness, space); \
857 /* Perform the operation */ \
858 if (SAFESTGCALL3(I_,(void *, MP_INT *, char *, int), mpz_init_set_str,&result,(str),/*base*/10)) \
861 GMP_HEAP_HANDBACK(); /* restore Hp */ \
862 (ar) = result.alloc; \
863 (sr) = result.size; \
864 (dr) = (B_) (result.d - DATA_HS); \
865 /* pt to *beginning* of object (GMP has been monkeying around in the middle) */ \
869 Encoding and decoding float-ish things is pretty Integer-ish. We use
870 these pretty magical support functions, essentially stolen from Lennart:
872 StgFloat __encodeFloat PROTO((MP_INT *, I_));
873 void __decodeFloat PROTO((MP_INT * /*result1*/,
877 StgDouble __encodeDouble PROTO((MP_INT *, I_));
878 void __decodeDouble PROTO((MP_INT * /*result1*/,
883 Some floating-point format info, made with the \tr{enquire} program
884 (version~4.3) [comes with gcc].
886 /* this should be done by CPU architecture, insofar as possible [WDP] */
888 #if sparc_TARGET_ARCH \
889 || alpha_TARGET_ARCH \
890 || hppa1_1_TARGET_ARCH \
891 || i386_TARGET_ARCH \
892 || m68k_TARGET_ARCH \
893 || mipsel_TARGET_ARCH \
894 || mipseb_TARGET_ARCH \
895 || powerpc_TARGET_ARCH
897 /* yes, it is IEEE floating point */
898 #include "ieee-flpt.h"
900 #if alpha_dec_osf1_TARGET \
901 || i386_TARGET_ARCH \
902 || mipsel_TARGET_ARCH
904 #undef BIGENDIAN /* little-endian weirdos... */
909 #else /* unknown floating-point format */
911 ******* ERROR *********** Any ideas about floating-point format?
913 #endif /* unknown floating-point */
917 #if alpha_dec_osf1_TARGET
918 #define encodeFloatZh(r, hp, aa,sa,da, expon) encodeDoubleZh(r, hp, aa,sa,da, expon)
920 #define encodeFloatZh(r, hp, aa,sa,da, expon) \
922 /* Does not allocate memory */ \
926 arg.d = (unsigned long int *) (BYTE_ARR_CTS(da)); \
928 r = SAFESTGCALL2(StgFloat,(void *, MP_INT *, I_), __encodeFloat,&arg,(expon)); \
932 #define encodeDoubleZh(r, hp, aa,sa,da, expon) \
934 /* Does not allocate memory */ \
938 arg.d = (unsigned long int *) (BYTE_ARR_CTS(da)); \
940 r = SAFESTGCALL2(StgDouble,(void *, MP_INT *, I_), __encodeDouble,&arg,(expon));\
943 #if alpha_dec_osf1_TARGET
944 #define decodeFloatZh(exponr, ar,sr,dr, hp, f) decodeDoubleZh(exponr, ar,sr,dr, hp, f)
946 #define decodeFloatZh(exponr, ar,sr,dr, hp, f) \
949 StgFloat arg = (f); \
951 /* Be prepared to tell Lennart-coded __decodeFloat */ \
952 /* where mantissa.d can be put (it does not care about the rest) */ \
953 SET_DATA_HDR(hp,ArrayOfData_info,CCC,DATA_VHS+MIN_MP_INT_SIZE,0); \
954 mantissa.d = (hp) + DATA_HS; \
956 /* Perform the operation */ \
957 SAFESTGCALL3(void,(void *, MP_INT *, I_ *, StgFloat),__decodeFloat,&mantissa,&exponent,arg); \
959 ar = mantissa.alloc; \
960 sr = mantissa.size; \
965 #define decodeDoubleZh(exponr, ar,sr,dr, hp, f) \
968 StgDouble arg = (f); \
970 /* Be prepared to tell Lennart-coded __decodeDouble */ \
971 /* where mantissa.d can be put (it does not care about the rest) */ \
972 SET_DATA_HDR(hp,ArrayOfData_info,CCC,DATA_VHS+MIN_MP_INT_SIZE,0); \
973 mantissa.d = (hp) + DATA_HS; \
975 /* Perform the operation */ \
976 SAFESTGCALL3(void,(void *, MP_INT *, I_ *, StgDouble),__decodeDouble,&mantissa,&exponent,arg); \
978 ar = mantissa.alloc; \
979 sr = mantissa.size; \
984 %************************************************************************
986 \subsubsection[StgMacros-mv-floats]{Moving floats and doubles around (e.g., to/from stacks)}
988 %************************************************************************
990 With GCC, we use magic non-standard inlining; for other compilers, we
991 just use functions (see also \tr{runtime/prims/PrimArith.lc}).
993 (The @OMIT_...@ is only used in compiling some of the RTS, none of
994 which uses these anyway.)
997 #if alpha_TARGET_ARCH \
998 || i386_TARGET_ARCH \
1001 #define ASSIGN_FLT(dst, src) *(StgFloat *)(dst) = (src);
1002 #define PK_FLT(src) (*(StgFloat *)(src))
/* Direct store/load versions: used on architectures without the
   alignment troubles handled in the #else branch below. */
1004 #define ASSIGN_DBL(dst, src) *(StgDouble *)(dst) = (src);
1005 #define PK_DBL(src) (*(StgDouble *)(src))
1007 #else /* not m68k || alpha || i[34]86 */
1009 /* Special handling for machines with troublesome alignment constraints */
1011 #define FLOAT_ALIGNMENT_TROUBLES TRUE
1013 #if ! defined(__GNUC__) || ! defined(__STG_GCC_REGS__)
/* Out-of-line versions for non-GCC builds (see PrimArith.lc): move
   floats/doubles through word arrays on alignment-fussy machines. */
1015 extern void ASSIGN_DBL PROTO((W_ [], StgDouble));
1016 extern StgDouble PK_DBL PROTO((W_ []));
1017 extern void ASSIGN_FLT PROTO((W_ [], StgFloat));
1018 extern StgFloat PK_FLT PROTO((W_ []));
1020 #else /* yes, its __GNUC__ && we really want them */
1022 #if sparc_TARGET_ARCH
1024 #define ASSIGN_FLT(dst, src) *(StgFloat *)(dst) = (src);
1025 #define PK_FLT(src) (*(StgFloat *)(src))
1027 #define ASSIGN_DBL(dst,src) \
1028 __asm__("st %2,%0\n\tst %R2,%1" : "=m" (((P_)(dst))[0]), \
1029 "=m" (((P_)(dst))[1]) : "f" (src));
1031 #define PK_DBL(src) \
1032 ( { register double d; \
1033 __asm__("ld %1,%0\n\tld %2,%R0" : "=f" (d) : \
1034 "m" (((P_)(src))[0]), "m" (((P_)(src))[1])); d; \
1041 ASSIGN_DBL(W_ p_dest[], StgDouble src)
1045 p_dest[0] = y.du.dhi;
1046 p_dest[1] = y.du.dlo;
1049 /* GCC also works with this version, but it generates
1050 the same code as the previous one, and is not ANSI
1052 #define ASSIGN_DBL( p_dest, src ) \
1053 *p_dest = ((double_thing) src).du.dhi; \
1054 *(p_dest+1) = ((double_thing) src).du.dlo \
1062 y.du.dhi = p_src[0];
1063 y.du.dlo = p_src[1];
1069 ASSIGN_FLT(W_ p_dest[], StgFloat src)
1085 #endif /* ! sparc */
1087 #endif /* __GNUC__ */
1089 #endif /* not __m68k__ */
1092 %************************************************************************
1094 \subsubsection[StgMacros-array-primops]{Primitive arrays}
1096 %************************************************************************
1098 We regularly use this macro to fish the ``contents'' part
1099 out of a DATA or TUPLE closure, which is what is used for
1100 non-ptr and ptr arrays (respectively).
1102 BYTE_ARR_CTS returns a @C_ *@!
1104 We {\em ASSUME} we can use the same macro for both!!
/* Debugging versions: GCC statement-expressions that ASSERT the
   closure really is the expected flavour of array before handing
   back a pointer to its contents. */
1108 #define BYTE_ARR_CTS(a) \
1109 ({ ASSERT(INFO_PTR(a) == (W_) ArrayOfData_info); \
1110 ((C_ *) (((StgPtr) (a))+DATA_HS)); })
1111 #define PTRS_ARR_CTS(a) \
1112 ({ ASSERT((INFO_PTR(a) == (W_) ArrayOfPtrs_info) \
1113 || (INFO_PTR(a) == (W_) ImMutArrayOfPtrs_info));\
1114 ((a)+MUTUPLE_HS);} )
/* Non-debugging versions: just step past the header words. */
1116 #define BYTE_ARR_CTS(a) ((char *) (((StgPtr) (a))+DATA_HS))
1117 #define PTRS_ARR_CTS(a) ((a)+MUTUPLE_HS)
/* Unique-supply primitives (implemented out-of-line in the RTS). */
1121 extern I_ genSymZh(STG_NO_ARGS);
1122 extern I_ resetGenSymZh(STG_NO_ARGS);
1123 extern I_ incSeqWorldZh(STG_NO_ARGS);
1125 /* sigh again: without these some (notably "float") willnae work */
/* presumably: marshal a numeric value into the supplied byte buffer
   -- confirm against their definitions in the RTS. */
1126 extern I_ long2bytes__ PROTO((long, unsigned char *));
1127 extern I_ int2bytes__ PROTO((int, unsigned char *));
1128 extern I_ short2bytes__ PROTO((short, unsigned char *));
1129 extern I_ float2bytes__ PROTO((float, unsigned char *));
1130 extern I_ double2bytes__ PROTO((double, unsigned char *));
1132 /* these may not be necessary; and they create warnings (WDP) */
/* inverse direction: unmarshal bytes into *result. */
1133 extern I_ bytes2long__ PROTO((P_, I_ *));
1134 extern I_ bytes2int__ PROTO((P_, I_ *));
1135 extern I_ bytes2short__ PROTO((P_, I_ *));
1136 extern I_ bytes2float__ PROTO((P_, StgFloat *));
1137 extern I_ bytes2double__ PROTO((P_, StgDouble *));
/* does the first I_ bytes of the string contain a NUL? */
1139 extern I_ byteArrayHasNUL__ PROTO((const char *, I_));
1142 OK, the easy ops first (all except \tr{newArr*}):
1144 (OLD:) VERY IMPORTANT! The read/write/index primitive ops
1145 on @ByteArray#@s index the array using a {\em BYTE} offset, even
1146 if the thing being gotten out is a multi-byte @Int#@, @Float#@ etc.
1147 This is because you might be trying to take apart a C struct, where
1148 the offset from the start of the struct isn't a multiple of the
1149 size of the thing you're getting. Hence the @(char *)@ casts.
1151 EVEN MORE IMPORTANT! The above is a lie. The offsets for BlahArrays
1152 are in Blahs. WDP 95/08
1154 In the case of messing with @StgAddrs@ (@A_@), which are really \tr{void *},
1155 we cast to @P_@, because you can't index off an uncast \tr{void *}.
1157 In the case of @Array#@ (which contain pointers), the offset is in units
1158 of one ptr (not bytes).
/* Identity test on mutable arrays: same heap object <=> "same". */
1161 #define sameMutableArrayZh(r,a,b) r=(I_)((a)==(b))
1162 #define sameMutableByteArrayZh(r,a,b) r=(I_)((B_)(a)==(B_)(b))
/* Ptr arrays: "i" is in units of one pointer (see prose above). */
1164 #define readArrayZh(r,a,i) r=((PP_) PTRS_ARR_CTS(a))[(i)]
/* Byte arrays: offsets are in units of the ELEMENT type, not bytes
   (see the "EVEN MORE IMPORTANT" note above, WDP 95/08). */
1166 #define readCharArrayZh(r,a,i) indexCharOffAddrZh(r,BYTE_ARR_CTS(a),i)
1167 #define readIntArrayZh(r,a,i) indexIntOffAddrZh(r,BYTE_ARR_CTS(a),i)
1168 #define readAddrArrayZh(r,a,i) indexAddrOffAddrZh(r,BYTE_ARR_CTS(a),i)
1169 #define readFloatArrayZh(r,a,i) indexFloatOffAddrZh(r,BYTE_ARR_CTS(a),i)
1170 #define readDoubleArrayZh(r,a,i) indexDoubleOffAddrZh(r,BYTE_ARR_CTS(a),i)
1172 /* result ("r") arg ignored in write macros! */
1173 #define writeArrayZh(a,i,v) ((PP_) PTRS_ARR_CTS(a))[(i)]=(v)
1175 #define writeCharArrayZh(a,i,v) ((C_ *)(BYTE_ARR_CTS(a)))[i] = (v)
1176 #define writeIntArrayZh(a,i,v) ((I_ *)(BYTE_ARR_CTS(a)))[i] = (v)
1177 #define writeAddrArrayZh(a,i,v) ((PP_)(BYTE_ARR_CTS(a)))[i] = (v)
/* Floats/doubles go through ASSIGN_FLT/ASSIGN_DBL so targets with
   alignment troubles (see above) store them safely. */
1178 #define writeFloatArrayZh(a,i,v) \
1179 ASSIGN_FLT((P_) (((StgFloat *)(BYTE_ARR_CTS(a))) + i),v)
1180 #define writeDoubleArrayZh(a,i,v) \
1181 ASSIGN_DBL((P_) (((StgDouble *)(BYTE_ARR_CTS(a))) + i),v)
/* index* are the immutable-read variants; same indexing rules. */
1183 #define indexArrayZh(r,a,i) r=((PP_) PTRS_ARR_CTS(a))[(i)]
1185 #define indexCharArrayZh(r,a,i) indexCharOffAddrZh(r,BYTE_ARR_CTS(a),i)
1186 #define indexIntArrayZh(r,a,i) indexIntOffAddrZh(r,BYTE_ARR_CTS(a),i)
1187 #define indexAddrArrayZh(r,a,i) indexAddrOffAddrZh(r,BYTE_ARR_CTS(a),i)
1188 #define indexFloatArrayZh(r,a,i) indexFloatOffAddrZh(r,BYTE_ARR_CTS(a),i)
1189 #define indexDoubleArrayZh(r,a,i) indexDoubleOffAddrZh(r,BYTE_ARR_CTS(a),i)
/* ...OffAddr: index straight off a raw address, same units. */
1191 #define indexCharOffAddrZh(r,a,i) r= ((C_ *)(a))[i]
1192 #define indexIntOffAddrZh(r,a,i) r= ((I_ *)(a))[i]
1193 #define indexAddrOffAddrZh(r,a,i) r= ((PP_)(a))[i]
1194 #define indexFloatOffAddrZh(r,a,i) r= PK_FLT((P_) (((StgFloat *)(a)) + i))
1195 #define indexDoubleOffAddrZh(r,a,i) r= PK_DBL((P_) (((StgDouble *)(a)) + i))
1197 /* Freezing arrays-of-ptrs requires changing an info table, for the
1198 benefit of the generational collector. It needs to scavenge mutable
1199 objects, even if they are in old space. When they become immutable,
1200 they can be removed from this scavenge list. */
1201 #define unsafeFreezeArrayZh(r,a) \
1205 FREEZE_MUT_HDR(result,ImMutArrayOfPtrs_info); \
/* Unlike ptr arrays (cf. unsafeFreezeArrayZh above), freezing a byte
   array needs no info-table change: just cast and return it. */
1209 #define unsafeFreezeByteArrayZh(r,a) r=(B_)(a)
1212 Now the \tr{newArr*} ops:
1216 --------------------
1217 Will: ToDo: we need to find suitable places to put this comment, and the
1218 "in-general" one which follows.
1220 ************ Nota Bene. The "n" in this macro is guaranteed to
1221 be a register, *not* (say) Node[1]. That means that it is guaranteed
1222 to survive GC, provided only that the register is kept unaltered.
1223 This is important, because "n" is used after the HEAP_CHK.
1225 In general, *all* parameters to these primitive-op macros are always
1226 registers. (Will: For exactly *which* primitive-op macros is this guaranteed?
1227 Exactly those which can trigger GC?)
1228 ------------------------
1230 NOTE: the above may now be OLD (WDP 94/02/10)
1234 For char arrays, the size is in {\em BYTES}.
/* newByteArray takes a size in BYTES; scale the element count "n"
   by the element size for each flavour of array. */
1237 #define newCharArrayZh(r,liveness,n) newByteArray(r,liveness,(n) * sizeof(C_))
1238 #define newIntArrayZh(r,liveness,n) newByteArray(r,liveness,(n) * sizeof(I_))
1239 #define newAddrArrayZh(r,liveness,n) newByteArray(r,liveness,(n) * sizeof(P_))
1240 #define newFloatArrayZh(r,liveness,n) newByteArray(r,liveness,(n) * sizeof(StgFloat))
1241 #define newDoubleArrayZh(r,liveness,n) newByteArray(r,liveness,(n) * sizeof(StgDouble))
1243 #define newByteArray(r,liveness,n) \
1248 HEAP_CHK(liveness,DATA_HS+BYTES_TO_STGWORDS(n),0); \
1249 size = BYTES_TO_STGWORDS(n); \
1250 ALLOC_PRIM(DATA_HS,size,0,DATA_HS+size) /* ticky ticky */; \
1251 CC_ALLOC(CCC,DATA_HS+size,ARR_K); \
1253 result = Hp-(DATA_HS+size)+1; \
1254 SET_DATA_HDR(result,ArrayOfData_info,CCC,DATA_VHS+size,0); \
1259 Arrays of pointers need to be initialised; uses \tr{TUPLES}!
1260 The initialisation value is guaranteed to be in a register,
1261 and will be indicated by the liveness mask, so it's ok to do
1262 a \tr{HEAP_CHK}, which may trigger GC.
1265 /* The new array initialization routine for the NCG */
/* result: freshly-allocated ptr-array closure; n: element count;
   init: closure every slot is initialised to. */
1266 void newArrZh_init PROTO((P_ result, I_ n, P_ init));
1268 #define newArrayZh(r,liveness,n,init) \
1273 HEAP_CHK(liveness, MUTUPLE_HS+(n),0); \
1274 ALLOC_PRIM(MUTUPLE_HS,(n),0,MUTUPLE_HS+(n)) /* ticky ticky */; \
1275 CC_ALLOC(CCC,MUTUPLE_HS+(n),ARR_K); /* cc prof */ \
1277 result = Hp + 1 - (MUTUPLE_HS+(n)); \
1278 SET_MUTUPLE_HDR(result,ArrayOfPtrs_info,CCC,MUTUPLE_VHS+(n),0) \
1279 for (p = result+MUTUPLE_HS; p < (result+MUTUPLE_HS+(n)); p++) { \
1287 %************************************************************************
1289 \subsubsection[StgMacros-SynchVar-primops]{Synchronizing Variables PrimOps}
1291 %************************************************************************
1296 #define newSynchVarZh(r, hp) \
1298 ALLOC_PRIM(MUTUPLE_HS,3,0,MUTUPLE_HS+3) /* ticky ticky */; \
1299 CC_ALLOC(CCC,MUTUPLE_HS+3,ARR_K); /* cc prof */ \
1300 SET_SVAR_HDR(hp,EmptySVar_info,CCC); \
1301 SVAR_HEAD(hp) = SVAR_TAIL(hp) = SVAR_VALUE(hp) = Nil_closure; \
1309 extern void Yield PROTO((W_));
1311 #define takeMVarZh(r, liveness, node) \
1313 while (INFO_PTR(node) != (W_) FullSVar_info) { \
1314 if (SVAR_HEAD(node) == Nil_closure) \
1315 SVAR_HEAD(node) = CurrentTSO; \
1317 TSO_LINK(SVAR_TAIL(node)) = CurrentTSO; \
1318 TSO_LINK(CurrentTSO) = (P_) Nil_closure; \
1319 SVAR_TAIL(node) = CurrentTSO; \
1320 DO_YIELD(liveness << 1); \
1322 SET_INFO_PTR(node, EmptySVar_info); \
1323 r = SVAR_VALUE(node); \
1324 SVAR_VALUE(node) = Nil_closure; \
1329 #define takeMVarZh(r, liveness, node) \
1331 if (INFO_PTR(node) != (W_) FullSVar_info) { \
1332 /* Don't wrap the calls; we're done with STG land */\
1334 fprintf(stderr, "takeMVar#: MVar is empty.\n"); \
1335 EXIT(EXIT_FAILURE); \
1337 SET_INFO_PTR(node, EmptySVar_info); \
1338 r = SVAR_VALUE(node); \
1339 SVAR_VALUE(node) = Nil_closure; \
1350 /* Only difference to the !GRAN def: RunnableThreadsHd has been replaced by */
1351 /* ThreadQueueHd i.e. the tso is added at the end of the thread queue on */
1352 /* the CurrentProc. This means we have an implicit context switch after */
1353 /* putMVar even if unfair scheduling is used in GranSim (default)! -- HWL */
1355 #define putMVarZh(node, value) \
1358 if (INFO_PTR(node) == (W_) FullSVar_info) { \
1359 /* Don't wrap the calls; we're done with STG land */\
1361 fprintf(stderr, "putMVar#: MVar already full.\n"); \
1362 EXIT(EXIT_FAILURE); \
1364 SET_INFO_PTR(node, FullSVar_info); \
1365 SVAR_VALUE(node) = value; \
1366 tso = SVAR_HEAD(node); \
1367 if (tso != (P_) Nil_closure) { \
1369 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1370 if (ThreadQueueHd == Nil_closure) \
1371 ThreadQueueHd = tso; \
1373 TSO_LINK(ThreadQueueTl) = tso; \
1374 ThreadQueueTl = tso; \
1375 SVAR_HEAD(node) = TSO_LINK(tso); \
1376 TSO_LINK(tso) = (P_) Nil_closure; \
1377 if(SVAR_HEAD(node) == (P_) Nil_closure) \
1378 SVAR_TAIL(node) = (P_) Nil_closure; \
1384 #define putMVarZh(node, value) \
1387 if (INFO_PTR(node) == (W_) FullSVar_info) { \
1388 /* Don't wrap the calls; we're done with STG land */\
1390 fprintf(stderr, "putMVar#: MVar already full.\n"); \
1391 EXIT(EXIT_FAILURE); \
1393 SET_INFO_PTR(node, FullSVar_info); \
1394 SVAR_VALUE(node) = value; \
1395 tso = SVAR_HEAD(node); \
1396 if (tso != (P_) Nil_closure) { \
1398 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1399 if (RunnableThreadsHd == Nil_closure) \
1400 RunnableThreadsHd = tso; \
1402 TSO_LINK(RunnableThreadsTl) = tso; \
1403 RunnableThreadsTl = tso; \
1404 SVAR_HEAD(node) = TSO_LINK(tso); \
1405 TSO_LINK(tso) = (P_) Nil_closure; \
1406 if(SVAR_HEAD(node) == (P_) Nil_closure) \
1407 SVAR_TAIL(node) = (P_) Nil_closure; \
1415 #define putMVarZh(node, value) \
1418 if (INFO_PTR(node) == (W_) FullSVar_info) { \
1419 /* Don't wrap the calls; we're done with STG land */\
1421 fprintf(stderr, "putMVar#: MVar already full.\n"); \
1422 EXIT(EXIT_FAILURE); \
1424 SET_INFO_PTR(node, FullSVar_info); \
1425 SVAR_VALUE(node) = value; \
1434 #define readIVarZh(r, liveness, node) \
1436 if (INFO_PTR(node) != (W_) ImMutArrayOfPtrs_info) { \
1437 if (SVAR_HEAD(node) == Nil_closure) \
1438 SVAR_HEAD(node) = CurrentTSO; \
1440 TSO_LINK(SVAR_TAIL(node)) = CurrentTSO; \
1441 TSO_LINK(CurrentTSO) = (P_) Nil_closure; \
1442 SVAR_TAIL(node) = CurrentTSO; \
1443 DO_YIELD(liveness << 1); \
1445 r = SVAR_VALUE(node); \
1450 #define readIVarZh(r, liveness, node) \
1452 if (INFO_PTR(node) != (W_) ImMutArrayOfPtrs_info) { \
1453 /* Don't wrap the calls; we're done with STG land */\
1455 fprintf(stderr, "readIVar#: IVar is empty.\n"); \
1456 EXIT(EXIT_FAILURE); \
1458 r = SVAR_VALUE(node); \
1469 /* Only difference to the !GRAN def: RunnableThreadsHd has been replaced by */
1470 /* ThreadQueueHd i.e. the tso is added at the end of the thread queue on */
1471 /* the CurrentProc. This means we have an implicit context switch after */
1472 /* writeIVar even if unfair scheduling is used in GranSim (default)! -- HWL */
1474 #define writeIVarZh(node, value) \
1477 if (INFO_PTR(node) == (W_) ImMutArrayOfPtrs_info) { \
1478 /* Don't wrap the calls; we're done with STG land */\
1480 fprintf(stderr, "writeIVar#: IVar already full.\n");\
1481 EXIT(EXIT_FAILURE); \
1483 tso = SVAR_HEAD(node); \
1484 if (tso != (P_) Nil_closure) { \
1485 if (ThreadQueueHd == Nil_closure) \
1486 ThreadQueueHd = tso; \
1488 TSO_LINK(ThreadQueueTl) = tso; \
1489 while(TSO_LINK(tso) != Nil_closure) { \
1491 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1492 tso = TSO_LINK(tso); \
1495 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1496 ThreadQueueTl = tso; \
1498 /* Don't use freeze, since it's conditional on GC */ \
1499 SET_INFO_PTR(node, ImMutArrayOfPtrs_info); \
1500 MUTUPLE_CLOSURE_SIZE(node) = (MUTUPLE_VHS+1); \
1501 SVAR_VALUE(node) = value; \
1506 #define writeIVarZh(node, value) \
1509 if (INFO_PTR(node) == (W_) ImMutArrayOfPtrs_info) { \
1510 /* Don't wrap the calls; we're done with STG land */\
1512 fprintf(stderr, "writeIVar#: IVar already full.\n");\
1513 EXIT(EXIT_FAILURE); \
1515 tso = SVAR_HEAD(node); \
1516 if (tso != (P_) Nil_closure) { \
1517 if (RunnableThreadsHd == Nil_closure) \
1518 RunnableThreadsHd = tso; \
1520 TSO_LINK(RunnableThreadsTl) = tso; \
1521 while(TSO_LINK(tso) != Nil_closure) { \
1523 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1524 tso = TSO_LINK(tso); \
1527 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1528 RunnableThreadsTl = tso; \
1530 /* Don't use freeze, since it's conditional on GC */ \
1531 SET_INFO_PTR(node, ImMutArrayOfPtrs_info); \
1532 MUTUPLE_CLOSURE_SIZE(node) = (MUTUPLE_VHS+1); \
1533 SVAR_VALUE(node) = value; \
1540 #define writeIVarZh(node, value) \
1543 if (INFO_PTR(node) == (W_) ImMutArrayOfPtrs_info) { \
1544 /* Don't wrap the calls; we're done with STG land */\
1546 fprintf(stderr, "writeIVar#: IVar already full.\n");\
1547 EXIT(EXIT_FAILURE); \
1549 /* Don't use freeze, since it's conditional on GC */ \
1550 SET_INFO_PTR(node, ImMutArrayOfPtrs_info); \
1551 MUTUPLE_CLOSURE_SIZE(node) = (MUTUPLE_VHS+1); \
1552 SVAR_VALUE(node) = value; \
1558 %************************************************************************
1560 \subsubsection[StgMacros-Wait-primops]{Delay/Wait PrimOps}
1562 %************************************************************************
1567 /* ToDo: for GRAN */
1569 #define delayZh(liveness, us) \
1571 if (WaitingThreadsTl == Nil_closure) \
1572 WaitingThreadsHd = CurrentTSO; \
1574 TSO_LINK(WaitingThreadsTl) = CurrentTSO; \
1575 WaitingThreadsTl = CurrentTSO; \
1576 TSO_LINK(CurrentTSO) = Nil_closure; \
1577 TSO_EVENT(CurrentTSO) = (W_) ((us) < 1 ? 1 : (us)); \
1578 DO_YIELD(liveness << 1); \
1583 #define delayZh(liveness, us) \
1586 fprintf(stderr, "delay#: unthreaded build.\n"); \
1587 EXIT(EXIT_FAILURE); \
1594 /* ToDo: something for GRAN */
1596 #define waitZh(liveness, fd) \
1598 if (WaitingThreadsTl == Nil_closure) \
1599 WaitingThreadsHd = CurrentTSO; \
1601 TSO_LINK(WaitingThreadsTl) = CurrentTSO; \
1602 WaitingThreadsTl = CurrentTSO; \
1603 TSO_LINK(CurrentTSO) = Nil_closure; \
1604 TSO_EVENT(CurrentTSO) = (W_) (-(fd)); \
1605 DO_YIELD(liveness << 1); \
1610 #define waitZh(liveness, fd) \
1613 fprintf(stderr, "wait#: unthreaded build.\n"); \
1614 EXIT(EXIT_FAILURE); \
1621 %************************************************************************
1623 \subsubsection[StgMacros-IO-primops]{Primitive I/O, error-handling primops}
1625 %************************************************************************
/* Support for the errorIO# primop (see errorIOZh below): the closure
   to run and the RTS entry points jumped to. */
1628 extern P_ TopClosure;
1629 EXTFUN(ErrorIO_innards);
1630 EXTFUN(__std_entry_error__);
1632 #define errorIOZh(a) \
1633 do { TopClosure=(a); \
1634 (void) SAFESTGCALL1(I_,(void *, FILE *),fflush,stdout); \
1635 (void) SAFESTGCALL1(I_,(void *, FILE *),fflush,stderr); \
1636 JMP_(ErrorIO_innards); \
/* Character I/O on a FILE* passed around as a word.  When system
   registers are caller-saves, the getc/putc macros are unsafe (they
   contain embedded C calls), so go out-of-line via SAFESTGCALL. */
1639 #if !defined(CALLER_SAVES_SYSTEM)
1640 /* can use the macros */
1641 #define stg_getc(stream) getc((FILE *) (stream))
1642 #define stg_putc(c,stream) putc((c),((FILE *) (stream)))
1644 /* must not use the macros (they contain embedded calls to _filbuf/whatnot) */
1645 #define stg_getc(stream) SAFESTGCALL1(I_,(void *, FILE *),fgetc,(FILE *) (stream))
1646 #define stg_putc(c,stream) SAFESTGCALL2(I_,(void *, char, FILE *),fputc,(c),((FILE *) (stream)))
/* RTS timer and signal-handler setup hooks (defined in the RTS). */
1649 int initialize_virtual_timer(int us);
1650 int install_segv_handler(STG_NO_ARGS);
1651 int install_vtalrm_handler(STG_NO_ARGS);
1652 void initUserSignals(STG_NO_ARGS);
1653 void blockUserSignals(STG_NO_ARGS);
1654 void unblockUserSignals(STG_NO_ARGS);
1655 IF_RTS(void blockVtAlrmSignal(STG_NO_ARGS);)
1656 IF_RTS(void unblockVtAlrmSignal(STG_NO_ARGS);)
1657 IF_RTS(void AwaitEvent(I_ delta);)
/* Signal installation.  With POSIX we can also carry a signal mask;
   the non-POSIX variant takes (and its wrappers ignore) no mask. */
1659 #ifdef _POSIX_SOURCE
1660 extern I_ sig_install PROTO((I_, I_, sigset_t *));
1661 #define stg_sig_ignore(s,m) SAFESTGCALL3(I_,(void *, I_, I_),sig_install,s,STG_SIG_IGN,(sigset_t *)m)
1662 #define stg_sig_default(s,m) SAFESTGCALL3(I_,(void *, I_, I_),sig_install,s,STG_SIG_DFL,(sigset_t *)m)
1663 #define stg_sig_catch(s,sp,m) SAFESTGCALL3(I_,(void *, I_, I_),sig_install,s,sp,(sigset_t *)m)
1665 extern I_ sig_install PROTO((I_, I_));
1666 #define stg_sig_ignore(s,m) SAFESTGCALL2(I_,(void *, I_, I_),sig_install,s,STG_SIG_IGN)
1667 #define stg_sig_default(s,m) SAFESTGCALL2(I_,(void *, I_, I_),sig_install,s,STG_SIG_DFL)
1668 #define stg_sig_catch(s,sp,m) SAFESTGCALL2(I_,(void *, I_, I_),sig_install,s,sp)
/* Distinguished "action" values (and error return) for sig_install;
   negative so they cannot clash with a genuine handler index. */
1671 #define STG_SIG_DFL (-1)
1672 #define STG_SIG_IGN (-2)
1673 #define STG_SIG_ERR (-3)
/* Haskell-side error-handler support (handlers held as stable ptrs). */
1675 StgInt getErrorHandler(STG_NO_ARGS);
1677 void raiseError PROTO((StgStablePtr handler));
1678 StgInt catchError PROTO((StgStablePtr newErrorHandler));
1680 void decrementErrorCount(STG_NO_ARGS);
/* STG-land wrappers around the two C functions above. */
1682 #define stg_catchError(sp) SAFESTGCALL1(I_,(void *, StgStablePtr),catchError,sp)
1683 #define stg_decrementErrorCount() SAFESTGCALL0(void,(void *),decrementErrorCount)
1686 %************************************************************************
1688 \subsubsection[StgMacros-stable-ptr]{Primitive ops for manipulating stable pointers}
1690 %************************************************************************
1693 The type of these should be:
1696 makeStablePointer# :: a -> State# _RealWorld -> StateAndStablePtr# _RealWorld a
1697 deRefStablePointer# :: StablePtr# a -> State# _RealWorld -> StateAndPtr _RealWorld a
1700 Since world-tokens are no longer explicitly passed around, the
1701 implementations have a few fewer arguments/results.
1703 The simpler one is @deRefStablePointer#@ (which is only a primop
1704 because it is more polymorphic than is allowed of a ccall).
1709 #define deRefStablePtrZh(ri,sp) \
1712 fprintf(stderr, "deRefStablePtr#: no stable pointer support.\n");\
1713 EXIT(EXIT_FAILURE); \
1718 extern StgPtr _deRefStablePointer PROTO((StgInt, StgPtr));
/* Dereference stable pointer "sp": look it up in the storage
   manager's stable-pointer table.  Out-of-line because (per the prose
   above) it is more polymorphic than a plain ccall may be. */
1720 #define deRefStablePtrZh(ri,sp) \
1721 ri = SAFESTGCALL2(I_,(void *, I_, P_),_deRefStablePointer,sp,StorageMgrInfo.StablePointerTable);
1725 Declarations for other stable pointer operations.
/* C-side entry points for the remaining stable-pointer operations. */
1728 void freeStablePointer PROTO((I_ stablePtr));
1730 void enterStablePtr PROTO((StgStablePtr, StgFunPtr));
1731 void performIO PROTO((StgStablePtr));
1732 I_ enterInt PROTO((StgStablePtr));
1733 I_ enterFloat PROTO((StgStablePtr));
1734 P_ deRefStablePointer PROTO((StgStablePtr));
1735 IF_RTS(I_ catchSoftHeapOverflow PROTO((StgStablePtr, I_));)
1736 IF_RTS(I_ getSoftHeapOverflowHandler(STG_NO_ARGS);)
1737 IF_RTS(extern StgStablePtr softHeapOverflowHandler;)
1738 IF_RTS(void shutdownHaskell(STG_NO_ARGS);)
/* Code labels used when entering Haskell from C via a stable pointer
   (start*) and when control returns again (stop*DirectReturn). */
1740 EXTFUN(stopPerformIODirectReturn);
1741 EXTFUN(startPerformIO);
1742 EXTFUN(stopEnterIntDirectReturn);
1743 EXTFUN(startEnterInt);
1744 EXTFUN(stopEnterFloatDirectReturn);
1745 EXTFUN(startEnterFloat);
1747 void enterStablePtr PROTO((StgStablePtr stableIndex, StgFunPtr startCode));
1751 IF_RTS(extern I_ ErrorIO_call_count;)
1754 Somewhat harder is @makeStablePointer#@ --- it is usually simple but
1755 if we're unlucky, it will have to allocate a new table and copy the
1756 old bit over. Since we might, very occasionally, have to call the
1757 garbage collector, this has to be a macro... sigh!
1759 NB @newSP@ is required because it is entirely possible that
1760 @stablePtr@ and @unstablePtr@ are aliases and so we can't do the
1761 assignment to @stablePtr@ until we've finished with @unstablePtr@.
1763 Another obscure piece of coding is the recalculation of the size of
1764 the table. We do this just in case Jim's threads decide they want to
1765 context switch---in which case any stack-allocated variables may get
1766 trashed. (If only there was a special heap check which didn't
1767 consider context switching...)
1772 /* Calculate SP Table size from number of pointers */
/* DYN_VHS header + 1 admin word + 2 words per pointer (presumably
   the pointer slot plus a free-stack entry -- confirm in the SM). */
1773 #define SPTSizeFromNoPtrs( newNP ) (DYN_VHS + 1 + 2 * (newNP))
1775 /* Calculate number of pointers in new table from number in old table:
1776 any strictly increasing expression will do here */
1777 #define CalcNewNoSPtrs( i ) ((i)*2 + 100)
/* Copy the old stable-pointer table's contents into the new, larger
   table (used by makeStablePtrZh below). */
1779 extern void enlargeSPTable PROTO((P_, P_));
1781 #define makeStablePtrZh(stablePtr,liveness,unstablePtr) \
1783 EXTDATA_RO(StablePointerTable_info); \
1784 EXTDATA(UnusedSP); \
1785 StgStablePtr newSP; \
1787 if (SPT_EMPTY(StorageMgrInfo.StablePointerTable)) { /* free stack is empty */ \
1788 { /* Variables used before the heap check */ \
1789 I_ OldNoPtrs = SPT_NoPTRS( StorageMgrInfo.StablePointerTable ); \
1790 I_ NewNoPtrs = CalcNewNoSPtrs( OldNoPtrs ); \
1791 I_ NewSize = SPTSizeFromNoPtrs( NewNoPtrs ); \
1792 HEAP_CHK(liveness, _FHS+NewSize, 0); \
1794 { /* Variables used after the heap check - same values */ \
1795 I_ OldNoPtrs = SPT_NoPTRS( StorageMgrInfo.StablePointerTable ); \
1796 I_ NewNoPtrs = CalcNewNoSPtrs( OldNoPtrs ); \
1797 I_ NewSize = SPTSizeFromNoPtrs( NewNoPtrs ); \
1798 P_ SPTable = Hp + 1 - (_FHS + NewSize); \
1800 CC_ALLOC(CCC, _FHS+NewSize, SPT_K); /* cc prof */ \
1801 SET_DYN_HDR(SPTable,StablePointerTable_info,CCC,NewSize,NewNoPtrs);\
1802 SAFESTGCALL2(void, (void *, P_, P_), enlargeSPTable, SPTable, StorageMgrInfo.StablePointerTable); \
1803 StorageMgrInfo.StablePointerTable = SPTable; \
1807 newSP = SPT_POP(StorageMgrInfo.StablePointerTable); \
1808 SPT_SPTR(StorageMgrInfo.StablePointerTable, newSP) = unstablePtr; \
1809 stablePtr = newSP; \
1814 #define makeStablePtrZh(stablePtr,liveness,unstablePtr) \
1817 fprintf(stderr, "makeStablePtr#: no stable pointer support.\n");\
1818 EXIT(EXIT_FAILURE); \
1824 %************************************************************************
1826 \subsubsection[StgMacros-unsafePointerEquality]{Primitive `op' for breaking referential transparency}
1828 %************************************************************************
1830 The type of this is @reallyUnsafePtrEquality :: a -> a -> Int#@ so we
1831 can expect three parameters: the two arguments and a "register" to put
1834 Message to Will: This primop breaks referential transparency so badly
1835 you might want to leave it out. On the other hand, if you hide it
1836 away in an appropriate monad, it's perfectly safe. [ADR]
1838 Note that this primop is non-deterministic: different results can be
1839 obtained depending on just what the garbage collector (and code
1840 optimiser??) has done. However, we can guarantee that if two objects
1841 are pointer-equal, they have the same denotation --- the converse most
1842 certainly doesn't hold.
1844 ToDo ADR: The degree of non-determinism could be greatly reduced by
1845 following indirections.
/* Pointer identity on two closures.  Non-deterministic w.r.t. GC (see
   note above): equal pointers imply equal denotations; the converse
   does not hold. */
1848 #define reallyUnsafePtrEqualityZh(r,a,b) r=((StgPtr)(a) == (StgPtr)(b))
1851 %************************************************************************
1853 \subsubsection[StgMacros-parallel-primop]{Primitive `op' for sparking (etc)}
1855 %************************************************************************
1857 Assuming local sparking in some form, we can now inline the spark request.
1859 We build a doubly-linked list in the heap, so that we can handle FIFO
1860 or LIFO scheduling as we please.
1862 Anything with tag >= 0 is in WHNF, so we discard it.
/* Spark primops: thin wrappers that funnel into PARZh/parATZh below.
   The final argument is the "local" flag: parAtZh sparks with
   local=1, parAtForNowZh with local=0. */
1871 #define parZh(r,hp,node,rest) \
1872 PARZh(r,hp,node,rest,0,0)
1874 #define parAtZh(r,hp,node,where,identifier,rest) \
1875 parATZh(r,hp,node,where,identifier,rest,1)
1877 #define parAtForNowZh(r,hp,node,where,identifier,rest) \
1878 parATZh(r,hp,node,where,identifier,rest,0)
1880 #define parATZh(r,hp,node,where,identifier,rest,local) \
1883 if (SHOULD_SPARK(node)) { \
1884 result = NewSpark((P_)node,identifier,local); \
1885 SAFESTGCALL3(void,(W_),GranSimSparkAt,result,where,identifier); \
1886 } else if (do_qp_prof) { \
1887 I_ tid = threadId++; \
1888 SAFESTGCALL2(void,(I_, P_),QP_Event0,tid,node); \
1893 #define parLocalZh(r,hp,node,identifier,rest) \
1894 PARZh(r,hp,node,rest,identifier,1)
1896 #define parGlobalZh(r,hp,node,identifier,rest) \
1897 PARZh(r,hp,node,rest,identifier,0)
1899 #define PARZh(r,hp,node,rest,identifier,local) \
1902 if (SHOULD_SPARK(node)) { \
1903 result = NewSpark((P_)node,identifier,local); \
1904 ADD_TO_SPARK_QUEUE(result); \
1905 SAFESTGCALL2(void,(W_),GranSimSpark,local,(P_)node); \
1906 /* context_switch = 1; not needed any more -- HWL */ \
1907 } else if (do_qp_prof) { \
1908 I_ tid = threadId++; \
1909 SAFESTGCALL2(void,(I_, P_),QP_Event0,tid,node); \
/* Number of threads the scheduler is still obliged to create (bumped
   by forkZh below, before the thread actually exists). */
1916 extern I_ required_thread_count;
/* Bump the current TSO's global-spark counter. */
1919 #define COUNT_SPARK TSO_GLOBALSPARKS(CurrentTSO)++
1925 Note that we must bump the required thread count NOW, rather
1926 than when the thread is actually created.
1929 #define forkZh(r,liveness,node) \
1931 while (PendingSparksTl[REQUIRED_POOL] == PendingSparksLim[REQUIRED_POOL]) \
1932 DO_YIELD((liveness << 1) | 1); \
1934 if (SHOULD_SPARK(node)) { \
1935 *PendingSparksTl[REQUIRED_POOL]++ = (P_)(node); \
1936 } else if (DO_QP_PROF) { \
1937 I_ tid = threadId++; \
1938 SAFESTGCALL2(void,(I_, P_),QP_Event0,tid,node); \
1940 required_thread_count++; \
1941 context_switch = 1; \
1942 r = 1; /* Should not be necessary */ \
1945 #define parZh(r,node) \
1948 if (SHOULD_SPARK(node) && \
1949 PendingSparksTl[ADVISORY_POOL] < PendingSparksLim[ADVISORY_POOL]) { \
1950 *PendingSparksTl[ADVISORY_POOL]++ = (P_)(node); \
1954 I_ tid = threadId++; \
1955 SAFESTGCALL2(void,(I_, P_),QP_Event0,tid,node); \
1958 r = 1; /* Should not be necessary */ \
1963 The following seq# code should only be used in unoptimized code.
1964 Be warned: it's a potential bug-farm.
1966 First we push two words on the B stack: the current value of RetReg
1967 (which may or may not be live), and a continuation snatched largely out
1968 of thin air (it's a point within this code block). Then we set RetReg
1969 to the special polymorphic return code for seq, load up Node with the
1970 closure to be evaluated, and we're off. When the eval returns to the
1971 polymorphic seq return point, the two words are popped off the B stack,
1972 RetReg is restored, and we jump to the continuation, completing the
1973 primop and going on our merry way.
1979 #define seqZh(r,liveness,node) \
1982 STK_CHK(liveness,0,2,0,0,0,0); \
1984 SpB[BREL(0)] = (W_) RetReg; \
1985 SpB[BREL(1)] = (W_) &&cont; \
1986 RetReg = (StgRetAddr) vtbl_seq; \
1989 InfoPtr = (D_)(INFO_PTR(Node)); \
1990 JMP_(ENTRY_CODE(InfoPtr)); \
1992 r = 1; /* Should be unnecessary */ \
1996 #endif /* CONCURRENT */
1999 %************************************************************************
2001 \subsubsection[StgMacros-malloc-ptrs]{Malloc Pointers}
2003 %************************************************************************
2005 This macro is used to construct a MallocPtr on the heap after a ccall.
2006 Since MallocPtrs are like arrays in many ways, this is heavily based
2007 on the stuff for arrays above.
2009 What this does is plug the pointer (which will be in a local
2010 variable), into a fresh heap object and then sets a result (which will
2011 be a register) to point to the fresh heap object.
2013 Question: what's this "SET_ACTIVITY" stuff - should I be doing this
2014 too? (It's if you want to use the SPAT profiling tools to
2015 characterize program behavior by ``activity'' -- tail-calling,
2016 heap-checking, etc. -- see Ticky.lh. It is quite specialized.
/* C helpers for MallocPtr (foreign object) closures: equality on the
   underlying pointers, and releasing one. */
2022 StgInt eqMallocPtr PROTO((StgMallocPtr p1, StgMallocPtr p2));
2023 void FreeMallocPtr PROTO((StgMallocPtr mp));
2025 #define constructMallocPtr(liveness, r, mptr) \
2029 HEAP_CHK(liveness, _FHS + MallocPtr_SIZE,0); \
2030 CC_ALLOC(CCC,_FHS + MallocPtr_SIZE,MallocPtr_K); /* cc prof */ \
2032 result = Hp + 1 - (_FHS + MallocPtr_SIZE); \
2033 SET_MallocPtr_HDR(result,MallocPtr_info,CCC,_FHS + MallocPtr_SIZE,0); \
2034 MallocPtr_CLOSURE_DATA(result) = mptr; \
2035 MallocPtr_CLOSURE_LINK(result) = StorageMgrInfo.MallocPtrList; \
2036 StorageMgrInfo.MallocPtrList = result; \
2039 printf("DEBUG: MallocPtr(0x%x) = <0x%x, 0x%x, 0x%x, 0x%x>\n", \
2041 result[0],result[1], \
2042 result[2],result[3]); \
2044 CHECK_MallocPtr_CLOSURE( result ); \
2045 VALIDATE_MallocPtrList( StorageMgrInfo.MallocPtrList ); \
2047 (r) = (P_) result; \
2051 #define constructMallocPtr(liveness, r, mptr) \
2054 fprintf(stderr, "constructMallocPtr: no malloc pointer support.\n");\
2055 EXIT(EXIT_FAILURE); \
2062 End-of-file's multi-slurp protection:
2064 #endif /* ! STGMACROS_H */