2 % (c) The GRASP/AQUA Project, Glasgow University, 1993-1994
4 \section[StgMacros]{C macros used in GHC-generated \tr{.hc} files}
11 %************************************************************************
13 \subsection[StgMacros-abbrev]{Abbreviatory(?) and general macros}
15 %************************************************************************
/* Shorthand macros for declaring and defining STG-level functions and
   data.  F_ and W_ are STG type abbreviations declared elsewhere in
   the RTS headers (not visible in this excerpt); STG_NO_ARGS is the
   empty parameter list. */
19 /* for function declarations */
20 #define STGFUN(f) F_ f(STG_NO_ARGS)
21 #define STATICFUN(f) static F_ f(STG_NO_ARGS)
23 /* for functions/data that are really external to this module */
24 #define EXTFUN(f) extern F_ f(STG_NO_ARGS)
25 #define EXTDATA(d) extern W_ d[]
26 #define EXTDATA_RO(d) extern const W_ d[] /* read-only */
28 /* for fwd decls to functions/data somewhere else in this module */
29 /* (identical for the mo') */
30 #define INTFUN(f) static F_ f(STG_NO_ARGS)
31 #define INTDATA(d) extern W_ d[]
32 #define INTDATA_RO(d) extern const W_ d[] /* read-only */
34 /* short forms of most of the above */
36 #define FN_(f) F_ f(STG_NO_ARGS)
37 #define IFN_(f) static F_ f(STG_NO_ARGS)
38 #define EF_(f) extern F_ f(STG_NO_ARGS)
39 #define ED_(d) extern W_ d[]
40 #define ED_RO_(d) extern const W_ d[] /* read-only */
41 #define IF_(f) static F_ f(STG_NO_ARGS)
43 /* GCC is uncooperative about the next one: */
44 /* But, the "extern" prevents initialisation... ADR */
/* NOTE(review): ID_/ID_RO_ are defined twice below (extern vs static
   variants).  The full source presumably selects one via an
   #if/#else/#endif that this excerpt has lost -- confirm against the
   complete file. */
46 #define ID_(d) extern W_ d[]
47 #define ID_RO_(d) extern const W_ d[] /* read-only */
49 #define ID_(d) static W_ d[]
50 #define ID_RO_(d) static const W_ d[] /* read-only */
54 General things; note: general-but-``machine-dependent'' macros are
55 given in \tr{StgMachDeps.lh}.
/* Max of two I_ values, provided as a real function rather than a
   CPP macro -- see the NB comment below for why. */
57 I_ STG_MAX PROTO((I_, I_)); /* GCC -Wall loves prototypes */
/* NOTE(review): the definition below is missing its storage class /
   return type; those tokens are presumably on lines elided from this
   excerpt -- confirm against the complete file. */
61 STG_MAX(I_ a, I_ b) { return((a >= b) ? a : b); }
62 /* NB: the naive #define macro version of STG_MAX
63 can lead to exponential CPP explosion, if you
64 have very-nested STG_MAXes.
68 Macros to combine two short words into a single
69 word and split such a word back into two.
71 Dependent on machine word size :-)
/* Pack two shorts into one machine word, and split one back out,
   by punning the word through the packed_shorts union (declared
   elsewhere).  NOTE(review): the surrounding brace/statement wrapper
   lines of both macros are missing from this excerpt (line-number
   gaps); do not edit these bodies without the complete file. */
74 #define COMBINE_WORDS(word,short1,short2) \
76 ((packed_shorts *)&(word))->wu.s1 = short1; \
77 ((packed_shorts *)&(word))->wu.s2 = short2; \
80 #define SPLIT_WORD(word,short1,short2) \
82 short1 = ((packed_shorts *)&(word))->wu.s1; \
83 short2 = ((packed_shorts *)&(word))->wu.s2; \
88 %************************************************************************
90 \subsection[StgMacros-gen-stg]{General STGish macros}
92 %************************************************************************
94 Common sizes of vector-return tables.
96 Claim: don't need fwd decls for return pts in \tr{VECTBL*}, because
97 the AbsC flattener ensures that things come out sufficiently
/* Un-vectored return-table stubs and (optional) code-splitting marker
   functions.  NOTE(review): the #else/#endif lines pairing these
   alternative definitions are missing from this excerpt (line-number
   gaps) -- consult the complete file before editing. */
101 #ifdef __STG_REV_TBLS__
102 #define UNVECTBL(staticp,label,a) /* nothing */
104 #define UNVECTBL(staticp,label,a) \
106 staticp const W_ label[] = { \
113 #if defined(USE_SPLIT_MARKERS)
114 #define __STG_SPLIT_MARKER(n) FN_(CAT2(__stg_split_marker,n)){ }
116 #define __STG_SPLIT_MARKER(n) /* nothing */
120 %************************************************************************
122 \subsection[StgMacros-exceptions]{Exception-checking macros}
124 %************************************************************************
126 Argument-satisfaction check, stack(s) overflow check, heap overflow
129 The @SUBTRACT(upper, lower)@ macros return a positive result in words
130 indicating the amount by which upper is above lower on the stack.
/* Positive distance (in words) by which `upper' is above `lower' on
   the A/B stack respectively; AREL/BREL (defined elsewhere) supply
   the per-stack growth direction. */
133 #define SUBTRACT_A_STK( upper, lower ) AREL( (lower) - (upper) )
134 #define SUBTRACT_B_STK( upper, lower ) BREL( (lower) - (upper) )
137 %************************************************************************
139 \subsubsection[StgMacros-arg-satis]{Argument-satisfaction checks}
141 %************************************************************************
143 @ARGS_CHK(n)@ sees if there are @n@ words of args on the A/B stack.
144 If not, it jumps to @UpdatePAP@.
146 @ARGS_CHK@ args are pre-directionified.
147 Notice that we do the comparisons in the form (x < a+n), for
148 some constant n. This generates more efficient code (with GCC at least)
/* Argument-satisfaction checks: test whether n words of args are on
   the A (resp. B) stack; the _LOAD_NODE variants also load Node with
   the closure address on the failure path.  The prose above notes the
   (x < a+n) comparison form is chosen for better GCC code.
   NOTE(review): the failure-branch bodies / closing braces of all
   four macros are on lines elided from this excerpt. */
152 #define ARGS_CHK_A(n) \
153 if (SuA /*SUBTRACT_A_STK( SpA, SuA )*/ < (SpA+(n))) { \
157 #define ARGS_CHK_A_LOAD_NODE(n, closure_addr) \
158 if (SuA /*SUBTRACT_A_STK( SpA, SuA )*/ < (SpA+(n))) { \
159 Node = (P_) closure_addr; \
163 #define ARGS_CHK_B(n) \
164 if (SpB /*SUBTRACT_B_STK( SpB, SuB )*/ < (SuB-(n))) { \
169 #define ARGS_CHK_B_LOAD_NODE(n, closure_addr) \
170 if (SpB /*SUBTRACT_B_STK( SpB, SuB )*/ < (SuB-(n))) { \
171 Node = (P_) closure_addr; \
176 %************************************************************************
178 \subsubsection[StgMacros-stk-chks]{Stack-overflow check}
180 %************************************************************************
182 @STK_CHK(a,b)@ [misc args omitted...] checks that we can allocate @a@
183 words of A stack and @b@ words of B stack. If not, it calls
184 @StackOverflow@ (which dies).
186 (It will be different in the parallel case.)
188 NB: args @a@ and @b@ are pre-direction-ified!
/* Stack-overflow handling.  STACK_OVERFLOW is defined differently for
   the sequential case (die, or rely on page-fault protection) and the
   concurrent case (pack the arguments into words for
   DO_STACKOVERFLOW, with companion unpacking macros). */
190 I_ SqueezeUpdateFrames PROTO((P_, P_, P_));
191 int sanityChk_StkO (P_ stko); /* ToDo: move to a sane place */
/* Sequential RTS: overflow is fatal. */
193 #if ! defined(CONCURRENT)
195 extern void StackOverflow(STG_NO_ARGS) STG_NORETURN;
197 #if STACK_CHECK_BY_PAGE_FAULT
199 #define STACK_OVERFLOW(liveness,hda,hdb,spa,spb,rtype,reenter) \
200 /* use memory protection instead; still need ticky-ness */
204 #define STACK_OVERFLOW(liveness,hda,hdb,spa,spb,rtype,reenter) \
205 ULTRASAFESTGCALL0(void,(void *),StackOverflow)
207 #endif /* not using page-faulting */
/* Concurrent RTS: overflow grows/moves the stack object. */
211 I_ StackOverflow PROTO((W_, W_));
214 * On a uniprocessor, we do *NOT* context switch on a stack overflow
215 * (though we may GC). Therefore, we never have to reenter node.
218 #define STACK_OVERFLOW(liveness,hda,hdb,spa,spb,rtype,reenter) \
219 DO_STACKOVERFLOW((hda+hdb)<<2|((rtype)<<1)|(reenter),((spa)<<20)|((spb)<<8)|(liveness))
/* Unpack the two packed argument words built above:
   word1 = headroom<<2 | rtype<<1 | reenter;
   word2 = spa<<20 | spb<<8 | liveness. */
221 #define STACK_OVERFLOW_HEADROOM(args,y) ((args) >> 2)
222 #define STACK_OVERFLOW_PRIM_RETURN(args,y) ((args) & 2)
223 #define STACK_OVERFLOW_REENTER(args,y) ((args) & 1)
225 #define STACK_OVERFLOW_AWORDS(x,args) (((args) >> 20) & 0x0fff)
226 #define STACK_OVERFLOW_BWORDS(x,args) (((args) >> 8) & 0x0fff)
227 #define STACK_OVERFLOW_LIVENESS(x,args) ((args) & 0xff)
229 #endif /* CONCURRENT */
/* The check itself: +1 headroom words of slop on each stack.
   NOTE(review): closing lines of this macro are elided here. */
231 #define STK_CHK(liveness_mask,a_headroom,b_headroom,spa,spb,ret_type,reenter)\
233 DO_ASTK_HWM(); /* ticky-ticky profiling */ \
235 if (STKS_OVERFLOW_OP(((a_headroom) + 1), ((b_headroom) + 1))) { \
236 STACK_OVERFLOW(liveness_mask,a_headroom,b_headroom,spa,spb,ret_type,reenter);\
241 %************************************************************************
243 \subsubsection[StgMacros-heap-chks]{Heap-overflow checks}
245 %************************************************************************
247 Please see the general discussion/commentary about ``what really
248 happens in a GC,'' in \tr{SMinterface.lh}.
/* Sequential (non-CONCURRENT) heap-overflow entry: DO_GC takes a
   single word packing reqsize<<8 | liveness, unpacked by the
   HEAP_OVERFLOW_* macros below.  reenter is always 0 sequentially. */
251 void PerformGC PROTO((W_));
252 void RealPerformGC PROTO((W_ liveness, W_ reqsize, W_ always_reenter_node, rtsBool do_full_collection));
253 void checkInCCallGC(STG_NO_ARGS);
256 void StgPerformGarbageCollection(STG_NO_ARGS);
261 #define OR_MSG_PENDING /* never */
263 #define HEAP_OVERFLOW(liveness,n,reenter) \
265 DO_GC((((W_)n)<<8)|(liveness)); \
268 #define REQSIZE_BITMASK ((1L << ((BITS_IN(W_) - 8 + 1))) - 1)
269 #define HEAP_OVERFLOW_REQSIZE(args) (((args) >> 8) & REQSIZE_BITMASK)
270 #define HEAP_OVERFLOW_REENTER(args) 0
271 #define HEAP_OVERFLOW_LIVENESS(args) ((args) & 0xff)
/* Concurrent variant: the packed word also carries the reenter bit
   (reqsize<<9 | reenter<<8 | liveness). */
273 #else /* CONCURRENT */
275 void ReallyPerformThreadGC PROTO((W_, rtsBool));
277 #define HEAP_OVERFLOW(liveness,n,reenter) \
279 DO_GC((((W_)(n))<<9)|((reenter)<<8)|(liveness)); \
282 #define REQSIZE_BITMASK ((1L << ((BITS_IN(W_) - 9 + 1))) - 1)
283 #define HEAP_OVERFLOW_REQSIZE(args) (((args) >> 9) & REQSIZE_BITMASK)
284 #define HEAP_OVERFLOW_REENTER(args) (((args) >> 8) & 0x1)
285 #define HEAP_OVERFLOW_LIVENESS(args) ((args) & 0xff)
289 #define OR_MSG_PENDING /* never */
293 extern int PacketsWaiting; /*Probes for incoming messages*/
294 extern int heapChkCounter; /*Not currently used! We check for messages when*/
295 /*a thread is rescheduled PWT*/
296 /* #define OR_MSG_PENDING || (--heapChkCounter == 0 && PacketsWaiting())*/
297 #define OR_MSG_PENDING /* never */
300 #endif /* CONCURRENT */
/* Disabled (#if 0) Alpha cache-prefetch experiment: touch a word a
   few cache lines ahead of Hp, and over-allocate correspondingly.
   The live definitions are the empty ones below. */
302 #if 0 /* alpha_TARGET_ARCH */
303 #define CACHE_LINE 4 /* words */
304 #define LINES_AHEAD 3
305 #define PRE_FETCH(n) \
308 j = ((STG_VOLATILE StgInt *) Hp)[LINES_AHEAD * CACHE_LINE]; \
310 #define EXTRA_HEAP_WORDS (CACHE_LINE * LINES_AHEAD)
312 #define PRE_FETCH(reg)
313 #define EXTRA_HEAP_WORDS 0
/* Heap checks: bump Hp by n and call HEAP_OVERFLOW if past HpLim
   (or, in the second variant, if an interval/context-switch/message
   condition fires).  HEAP_CHK_AND_RESTORE_N additionally reloads n
   from TSO_ARG1 after a GC, for callers whose n is an lvalue (see
   GMP_HEAP_LOOKAHEAD).  NOTE(review): #if/#else/#endif and closing
   brace lines selecting between these variants are elided from this
   excerpt -- consult the complete file before editing. */
317 #define HEAP_CHK(liveness_mask,n,reenter) \
319 /* TICKY_PARANOIA(__FILE__, __LINE__); */ \
320 /* THREAD_CONTEXT_SWITCH(liveness_mask,reenter); */ \
321 ALLOC_HEAP(n); /* ticky profiling */ \
322 GRAN_ALLOC_HEAP(n,liveness_mask); /* Granularity Simulation */ \
323 if (((Hp = Hp + (n)) > HpLim)) { \
324 /* Old: STGCALL3_GC(PerformGC,liveness_mask,n,StgFalse); */\
325 HEAP_OVERFLOW(liveness_mask,n,StgFalse); \
330 #define HEAP_CHK(liveness_mask,n,reenter) \
332 /* TICKY_PARANOIA(__FILE__, __LINE__); */ \
334 ALLOC_HEAP(n); /* ticky profiling */ \
335 if (((Hp = Hp + (n)) > HpLim) OR_INTERVAL_EXPIRED OR_CONTEXT_SWITCH OR_MSG_PENDING) { \
336 HEAP_OVERFLOW(liveness_mask,n,reenter); \
344 #define HEAP_CHK_AND_RESTORE_N(liveness_mask,n,reenter) \
346 /* TICKY_PARANOIA(__FILE__, __LINE__); */ \
348 ALLOC_HEAP(n); /* ticky profiling */ \
349 if (((Hp = Hp + (n)) > HpLim) OR_INTERVAL_EXPIRED OR_CONTEXT_SWITCH OR_MSG_PENDING) { \
350 HEAP_OVERFLOW(liveness_mask,n,reenter); \
351 n = TSO_ARG1(CurrentTSO); \
356 #define HEAP_CHK_AND_RESTORE_N(liveness_mask,n,reenter) \
357 HEAP_CHK(liveness_mask,n,reenter)
364 %************************************************************************
366 \subsection[StgMacros-prim-ops]{Primitive operations}
368 %************************************************************************
370 One thing to be {\em very careful about} with these macros that assign
371 to results is that the assignment must come {\em last}. Some of the
372 other arguments may be in terms of addressing modes that get clobbered
373 by the assignment. (Dirty imperative programming RULES!)
375 The order here is roughly that in \tr{compiler/prelude/PrimOps.lhs}.
377 %************************************************************************
379 \subsubsection[StgMacros-compare-primops]{Primitive comparison ops on basic types}
381 %************************************************************************
383 We cast the chars in case one of them is a literal (so C things work right
384 even for 8-bit chars).
/* Comparison primops.  Convention throughout this file: the first
   macro argument r is the result lvalue; comparisons yield an I_
   (0 or 1).  The ZxZy names are Z-encoded operator names (Zg = >,
   Zl = <, Ze = =, Zd = /, Zh = #), so e.g. ZgZeZh is >=#. */
386 #define gtCharZh(r,a,b) r=(I_)((a)> (b))
387 #define geCharZh(r,a,b) r=(I_)((a)>=(b))
388 #define eqCharZh(r,a,b) r=(I_)((a)==(b))
389 #define neCharZh(r,a,b) r=(I_)((a)!=(b))
390 #define ltCharZh(r,a,b) r=(I_)((a)< (b))
391 #define leCharZh(r,a,b) r=(I_)((a)<=(b))
393 /* Int comparisons: >#, >=# etc */
394 #define ZgZh(r,a,b) r=(I_)((a) >(b))
395 #define ZgZeZh(r,a,b) r=(I_)((a)>=(b))
396 #define ZeZeZh(r,a,b) r=(I_)((a)==(b))
397 #define ZdZeZh(r,a,b) r=(I_)((a)!=(b))
398 #define ZlZh(r,a,b) r=(I_)((a) <(b))
399 #define ZlZeZh(r,a,b) r=(I_)((a)<=(b))
/* Word# comparisons (unsignedness comes from the operands' types). */
401 #define gtWordZh(r,a,b) r=(I_)((a) >(b))
402 #define geWordZh(r,a,b) r=(I_)((a)>=(b))
403 #define eqWordZh(r,a,b) r=(I_)((a)==(b))
404 #define neWordZh(r,a,b) r=(I_)((a)!=(b))
405 #define ltWordZh(r,a,b) r=(I_)((a) <(b))
406 #define leWordZh(r,a,b) r=(I_)((a)<=(b))
/* Addr# comparisons. */
408 #define gtAddrZh(r,a,b) r=(I_)((a) >(b))
409 #define geAddrZh(r,a,b) r=(I_)((a)>=(b))
410 #define eqAddrZh(r,a,b) r=(I_)((a)==(b))
411 #define neAddrZh(r,a,b) r=(I_)((a)!=(b))
412 #define ltAddrZh(r,a,b) r=(I_)((a) <(b))
413 #define leAddrZh(r,a,b) r=(I_)((a)<=(b))
/* Float# comparisons. */
415 #define gtFloatZh(r,a,b) r=(I_)((a)> (b))
416 #define geFloatZh(r,a,b) r=(I_)((a)>=(b))
417 #define eqFloatZh(r,a,b) r=(I_)((a)==(b))
418 #define neFloatZh(r,a,b) r=(I_)((a)!=(b))
419 #define ltFloatZh(r,a,b) r=(I_)((a)< (b))
420 #define leFloatZh(r,a,b) r=(I_)((a)<=(b))
422 /* Double comparisons: >##, >=## etc */
423 #define ZgZhZh(r,a,b) r=(I_)((a) >(b))
424 #define ZgZeZhZh(r,a,b) r=(I_)((a)>=(b))
425 #define ZeZeZhZh(r,a,b) r=(I_)((a)==(b))
426 #define ZdZeZhZh(r,a,b) r=(I_)((a)!=(b))
427 #define ZlZhZh(r,a,b) r=(I_)((a) <(b))
428 #define ZlZeZhZh(r,a,b) r=(I_)((a)<=(b))
431 %************************************************************************
433 \subsubsection[StgMacros-char-primops]{Primitive @Char#@ ops (and @LitString#@ish things, too)}
435 %************************************************************************
437 We cast the chars in case one of them is a literal (so C things work right
438 even for 8-bit chars).
/* Char# <-> Int# conversions, routed through W_ so that, per the
   prose above, literal chars behave correctly even for 8-bit chars. */
440 #define ordZh(r,a) r=(I_)((W_) (a))
441 #define chrZh(r,a) r=(StgChar)((W_)(a))
444 %************************************************************************
446 \subsubsection[StgMacros-int-primops]{Primitive @Int#@ ops}
448 %************************************************************************
/* Int# arithmetic.  quotIntZh truncates toward zero (C `/`);
   ZdZh (the `/#` primop) instead calls out to stg_div via the
   safe-call wrapper.  remIntZh is C `%`. */
451 I_ stg_div PROTO((I_ a, I_ b));
453 #define ZpZh(r,a,b) r=(a)+(b)
454 #define ZmZh(r,a,b) r=(a)-(b)
455 #define ZtZh(r,a,b) r=(a)*(b)
456 #define quotIntZh(r,a,b) r=(a)/(b)
457 #define ZdZh(r,a,b) r=ULTRASAFESTGCALL2(I_,(void *, I_, I_),stg_div,(a),(b))
458 #define remIntZh(r,a,b) r=(a)%(b)
459 #define negateIntZh(r,a) r=-(a)
460 /* Ever used ? -- SOF */
/* absIntZh: absolute value of an Int#.
   Fixed to take the result lvalue r explicitly, matching the r-first
   convention of every other primop macro in this file: the original
   `#define absIntZh(a) r=...` assigned to a FREE variable `r`, which
   only worked if the call site happened to name its result `r`.
   The "Ever used ? -- SOF" remark above indicates no callers depend
   on the old one-argument form, so this arity fix is safe. */
#define absIntZh(r,a) r=(( (a) >= 0 ) ? (a) : (-(a)))
464 %************************************************************************
466 \subsubsection[StgMacros-word-primops]{Primitive @Word#@ ops}
468 %************************************************************************
/* Word#/Int# bit operations and shifts.
   NOTE(review): shiftRAZh and shiftRLZh (and the iShift* pair) expand
   to the identical `>>` expression; the arithmetic-vs-logical
   distinction can only come from the signedness of the operands at
   the call site -- confirm against how the code generator uses them. */
471 #define andZh(r,a,b) r=(a)&(b)
472 #define orZh(r,a,b) r=(a)|(b)
473 #define xorZh(r,a,b) r=(a)^(b)
474 #define notZh(r,a) r=~(a)
476 #define shiftLZh(r,a,b) r=(a)<<(b)
477 #define shiftRAZh(r,a,b) r=(a)>>(b)
478 #define shiftRLZh(r,a,b) r=(a)>>(b)
479 #define iShiftLZh(r,a,b) r=(a)<<(b)
480 #define iShiftRAZh(r,a,b) r=(a)>>(b)
481 #define iShiftRLZh(r,a,b) r=(a)>>(b)
483 #define int2WordZh(r,a) r=(W_)(a)
484 #define word2IntZh(r,a) r=(I_)(a)
487 %************************************************************************
489 \subsubsection[StgMacros-addr-primops]{Primitive @Addr#@ ops}
491 %************************************************************************
/* Addr# <-> Int# conversions (plain casts). */
494 #define int2AddrZh(r,a) r=(A_)(a)
495 #define addr2IntZh(r,a) r=(I_)(a)
498 %************************************************************************
500 \subsubsection[StgMacros-float-primops]{Primitive @Float#@ ops}
502 %************************************************************************
/* Float# arithmetic and conversions. */
505 #define plusFloatZh(r,a,b) r=(a)+(b)
506 #define minusFloatZh(r,a,b) r=(a)-(b)
507 #define timesFloatZh(r,a,b) r=(a)*(b)
508 #define divideFloatZh(r,a,b) r=(a)/(b)
509 #define negateFloatZh(r,a) r=-(a)
511 #define int2FloatZh(r,a) r=(StgFloat)(a)
512 #define float2IntZh(r,a) r=(I_)(a)
/* Transcendentals: each goes through the double-precision libm
   function via the SAFESTGCALL wrapper, then narrows to StgFloat. */
514 #define expFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),exp,a)
515 #define logFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),log,a)
516 #define sqrtFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),sqrt,a)
517 #define sinFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),sin,a)
518 #define cosFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),cos,a)
519 #define tanFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),tan,a)
520 #define asinFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),asin,a)
521 #define acosFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),acos,a)
522 #define atanFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),atan,a)
523 #define sinhFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),sinh,a)
524 #define coshFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),cosh,a)
525 #define tanhFloatZh(r,a) r=(StgFloat) SAFESTGCALL1(StgDouble,(void *, StgDouble),tanh,a)
526 #define powerFloatZh(r,a,b) r=(StgFloat) SAFESTGCALL2(StgDouble,(void *, StgDouble,StgDouble),pow,a,b)
528 /* encoding/decoding given w/ Integer stuff */
531 %************************************************************************
533 \subsubsection[StgMacros-double-primops]{Primitive @Double#@ ops}
535 %************************************************************************
/* Double# arithmetic, conversions, and libm transcendentals.
   ZtZtZhZh is the **## (power) primop via pow(3). */
538 #define ZpZhZh(r,a,b) r=(a)+(b)
539 #define ZmZhZh(r,a,b) r=(a)-(b)
540 #define ZtZhZh(r,a,b) r=(a)*(b)
541 #define ZdZhZh(r,a,b) r=(a)/(b)
542 #define negateDoubleZh(r,a) r=-(a)
544 #define int2DoubleZh(r,a) r=(StgDouble)(a)
545 #define double2IntZh(r,a) r=(I_)(a)
547 #define float2DoubleZh(r,a) r=(StgDouble)(a)
548 #define double2FloatZh(r,a) r=(StgFloat)(a)
550 #define expDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),exp,a)
551 #define logDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),log,a)
552 #define sqrtDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),sqrt,a)
553 #define sinDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),sin,a)
554 #define cosDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),cos,a)
555 #define tanDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),tan,a)
556 #define asinDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),asin,a)
557 #define acosDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),acos,a)
558 #define atanDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),atan,a)
559 #define sinhDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),sinh,a)
560 #define coshDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),cosh,a)
561 #define tanhDoubleZh(r,a) r=(StgDouble) SAFESTGCALL1(StgDouble,(void *, StgDouble),tanh,a)
563 #define ZtZtZhZh(r,a,b) r=(StgDouble) SAFESTGCALL2(StgDouble,(void *, StgDouble,StgDouble),pow,a,b)
566 %************************************************************************
568 \subsubsection[StgMacros-integer-primops]{Primitive @Integer@-related ops (GMP stuff)}
570 %************************************************************************
572 Dirty macros we use for the real business.
574 INVARIANT: When one of these macros is called, the only live data is
575 tidily on the STG stacks or in the STG registers (the code generator
576 ensures this). If there are any pointer-arguments, they will be in
577 the first \tr{Ret*} registers (e.g., \tr{da} arg of \tr{gmpTake1Return1}).
579 OK, here are the real macros:
581 #define gmpTake1Return1(size_chk_macro, liveness, mpz_op, ar,sr,dr, aa,sa,da) \
584 I_ space = size_chk_macro(sa); \
586 /* Check that there will be enough heap & make Hp visible to GMP allocator */ \
587 GMP_HEAP_LOOKAHEAD(liveness,space); \
589 /* Now we can initialise (post possible GC) */ \
592 arg.d = (unsigned long int *) (BYTE_ARR_CTS(da)); \
594 SAFESTGCALL1(void,(void *, MP_INT *),mpz_init,&result); \
596 /* Perform the operation */ \
597 SAFESTGCALL2(void,(void *, MP_INT *, MP_INT *),mpz_op,&result,&arg); \
599 GMP_HEAP_HANDBACK(); /* restore Hp */ \
600 (ar) = result.alloc; \
601 (sr) = result.size; \
602 (dr) = (B_) (result.d - DATA_HS); \
603 /* pt to *beginning* of object (GMP has been monkeying around in the middle) */ \
607 #define gmpTake2Return1(size_chk_macro, liveness, mpz_op, ar,sr,dr, a1,s1,d1, a2,s2,d2)\
611 I_ space = size_chk_macro(s1,s2); \
613 /* Check that there will be enough heap & make Hp visible to GMP allocator */ \
614 GMP_HEAP_LOOKAHEAD(liveness,space); \
616 /* Now we can initialise (post possible GC) */ \
619 arg1.d = (unsigned long int *) (BYTE_ARR_CTS(d1)); \
622 arg2.d = (unsigned long int *) (BYTE_ARR_CTS(d2)); \
624 SAFESTGCALL1(void,(void *, MP_INT *),mpz_init,&result); \
626 /* Perform the operation */ \
627 SAFESTGCALL3(void,(void *, MP_INT *, MP_INT *, MP_INT *),mpz_op,&result,&arg1,&arg2); \
629 GMP_HEAP_HANDBACK(); /* restore Hp */ \
630 (ar) = result.alloc; \
631 (sr) = result.size; \
632 (dr) = (B_) (result.d - DATA_HS); \
633 /* pt to *beginning* of object (GMP has been monkeying around in the middle) */ \
636 #define gmpTake2Return2(size_chk_macro, liveness, mpz_op, ar1,sr1,dr1, ar2,sr2,dr2, a1,s1,d1, a2,s2,d2) \
641 I_ space = size_chk_macro(s1,s2); \
643 /* Check that there will be enough heap & make Hp visible to GMP allocator */ \
644 GMP_HEAP_LOOKAHEAD(liveness,space); \
646 /* Now we can initialise (post possible GC) */ \
649 arg1.d = (unsigned long int *) (BYTE_ARR_CTS(d1)); \
652 arg2.d = (unsigned long int *) (BYTE_ARR_CTS(d2)); \
654 SAFESTGCALL1(void,(void *, MP_INT *),mpz_init,&result1); \
655 SAFESTGCALL1(void,(void *, MP_INT *),mpz_init,&result2); \
657 /* Perform the operation */ \
658 SAFESTGCALL4(void,(void *, MP_INT *, MP_INT *, MP_INT *, MP_INT *),mpz_op,&result1,&result2,&arg1,&arg2); \
660 GMP_HEAP_HANDBACK(); /* restore Hp */ \
661 (ar1) = result1.alloc; \
662 (sr1) = result1.size; \
663 (dr1) = (B_) (result1.d - DATA_HS); \
664 (ar2) = result2.alloc; \
665 (sr2) = result2.size; \
666 (dr2) = (B_) (result2.d - DATA_HS); \
670 Some handy size-munging macros: sometimes gratuitously {\em conservative}.
671 The \tr{+16} is to allow for the initial allocation of \tr{MP_INT} results.
672 The \tr{__abs} stuff is because negative-ness of GMP things is encoded
/* Conservative heap-size estimates (in words) for GMP results; the
   prose above explains the +16 (initial MP_INT allocation) and that
   GMP encodes sign in the size field, hence __abs. */
675 #define __abs(a) (( (a) >= 0 ) ? (a) : (-(a)))
676 #define GMP_SIZE_ONE() (2 + DATA_HS + 16)
677 #define GMP_SAME_SIZE(a) (__abs(a) + DATA_HS + 16)
678 #define GMP_MAX_SIZE(a,b) ((__abs(a) > __abs(b) ? __abs(a) : __abs(b)) + 1 + DATA_HS + 16)
679 /* NB: the +1 is for the carry (or whatever) */
680 #define GMP_2MAX_SIZE(a,b) (2 * GMP_MAX_SIZE(a,b))
681 #define GMP_ADD_SIZES(a,b) (__abs(a) + __abs(b) + 1 + DATA_HS + 16)
682 /* the +1 may just be paranoia */
685 For the Integer/GMP stuff, we have macros that {\em look ahead} for
686 some space, but don't actually grab it.
688 If there are live pointers at the time of the lookahead, the caller
689 must make sure they are in \tr{Ret1}, \tr{Ret2}, ..., so they can be
690 handled normally. We achieve this by having the code generator {\em
691 always} pass args to may-invoke-GC primitives in registers, using the
692 normal pointers-first policy. This means that, if we do go to garbage
693 collection, everything is already in the Right Place.
695 Saving and restoring Hp register so the MP allocator can see them. If we are
696 performing lifetime profiling we need to save and restore HpLim as well so that
697 it can be bumped if allocation occurs.
699 The second argument to @GMP_HEAP_LOOKAHEAD@ must be an lvalue so that
700 it can be restored from @TSO_ARG1@ after a failed @HEAP_CHK@ in
/* Reserve n words of heap for the GMP allocator without actually
   allocating: heap-check (restoring n after any GC), undo the ticky
   count, and publish Hp via SAVE_Hp.  NOTE(review): wrapper lines of
   both macros are elided from this excerpt. */
704 #define GMP_HEAP_LOOKAHEAD(liveness,n) \
706 HEAP_CHK_AND_RESTORE_N(liveness,n,0); \
708 UN_ALLOC_HEAP(n); /* Undo ticky-ticky */ \
709 SAVE_Hp = Hp; /* Hand over the hp */ \
710 DEBUG_SetGMPAllocBudget(n) \
713 #define GMP_HEAP_HANDBACK() \
715 DEBUG_ResetGMPAllocBudget()
/* The custom GMP allocation hooks that carve from the STG heap. */
719 void *stgAllocForGMP PROTO((size_t size_in_bytes));
720 void *stgReallocForGMP PROTO((void *ptr, size_t old_size, size_t new_size));
721 void stgDeallocForGMP PROTO((void *ptr, size_t size));
/* Debug-build budget tracking for the hooks above.
   NOTE(review): the #if/#else selecting the real vs empty budget
   macros is elided from this excerpt -- confirm against the full file. */
724 extern StgInt DEBUG_GMPAllocBudget;
725 #define DEBUG_SetGMPAllocBudget(n) DEBUG_GMPAllocBudget = (n);
726 #define DEBUG_ResetGMPAllocBudget() DEBUG_GMPAllocBudget = 0;
728 #define DEBUG_SetGMPAllocBudget(n) /*nothing*/
729 #define DEBUG_ResetGMPAllocBudget() /*nothing*/
733 The real business (defining Integer primops):
/* Integer (arbitrary-precision) primops: each pairs a heap-size
   estimator with the matching GMP mpz_* routine via the
   gmpTake*Return* driver macros defined earlier in this file. */
735 #define negateIntegerZh(ar,sr,dr, liveness, aa,sa,da) \
736 gmpTake1Return1(GMP_SAME_SIZE, liveness, mpz_neg, ar,sr,dr, aa,sa,da)
738 #define plusIntegerZh(ar,sr,dr, liveness, a1,s1,d1, a2,s2,d2) \
739 gmpTake2Return1(GMP_MAX_SIZE, liveness, mpz_add, ar,sr,dr, a1,s1,d1, a2,s2,d2)
740 #define minusIntegerZh(ar,sr,dr, liveness, a1,s1,d1, a2,s2,d2) \
741 gmpTake2Return1(GMP_MAX_SIZE, liveness, mpz_sub, ar,sr,dr, a1,s1,d1, a2,s2,d2)
742 #define timesIntegerZh(ar,sr,dr, liveness, a1,s1,d1, a2,s2,d2) \
743 gmpTake2Return1(GMP_ADD_SIZES, liveness, mpz_mul, ar,sr,dr, a1,s1,d1, a2,s2,d2)
745 /* div, mod, quot, rem are defined w/ quotRem & divMod */
747 #define quotRemIntegerZh(ar1,sr1,dr1, ar2,sr2,dr2, liveness, a1,s1,d1, a2,s2,d2) \
748 gmpTake2Return2(GMP_2MAX_SIZE, liveness, mpz_divmod, ar1,sr1,dr1, ar2,sr2,dr2, a1,s1,d1, a2,s2,d2)
749 #define divModIntegerZh(ar1,sr1,dr1, ar2,sr2,dr2, liveness, a1,s1,d1, a2,s2,d2) \
750 gmpTake2Return2(GMP_2MAX_SIZE, liveness, mpz_mdivmod, ar1,sr1,dr1, ar2,sr2,dr2, a1,s1,d1, a2,s2,d2)
753 Comparison ops (@<@, @>=@, etc.) are defined in terms of the cmp
754 fellow (returns -ve, 0, or +ve).
756 #define cmpIntegerZh(r, hp, a1,s1,d1, a2,s2,d2) /* calls mpz_cmp */ \
759 /* Does not allocate memory */ \
763 arg1.d = (unsigned long int *) (BYTE_ARR_CTS(d1)); \
766 arg2.d = (unsigned long int *) (BYTE_ARR_CTS(d2)); \
768 (r) = SAFESTGCALL2(I_,(void *, MP_INT *, MP_INT *),mpz_cmp,&arg1,&arg2); \
775 #define integer2IntZh(r, hp, aa,sa,da) \
777 /* Does not allocate memory */ \
781 arg.d = (unsigned long int *) (BYTE_ARR_CTS(da)); \
783 (r) = SAFESTGCALL1(I_,(void *, MP_INT *),mpz_get_si,&arg); \
786 /* Since we're forced to know a little bit about MP_INT layout to do this with
787 pre-allocated heap, we just inline the whole of mpz_init_set_si here.
788 ** DIRE WARNING. if mpz_init_set_si changes, so does this! ***
791 #define int2IntegerZh(ar,sr,dr, hp, i) \
792 { StgInt val; /* to snaffle arg to avoid aliasing */ \
794 val = (i); /* snaffle... */ \
796 SET_DATA_HDR((hp),ArrayOfData_info,CCC,DATA_VHS+MIN_MP_INT_SIZE,0); \
798 if ((val) < 0) { (sr) = -1; (hp)[DATA_HS] = -(val); } \
799 else if ((val) > 0) { (sr) = 1; (hp)[DATA_HS] = (val); } \
800 else /* val==0 */ { (sr) = 0; } \
802 (dr) = (B_)(hp); /* dr is an StgByteArray */ \
805 #define word2IntegerZh(ar,sr,dr, hp, i) \
806 { StgWord val; /* to snaffle arg to avoid aliasing */ \
808 val = (i); /* snaffle... */ \
810 SET_DATA_HDR((hp),ArrayOfData_info,CCC,DATA_VHS+MIN_MP_INT_SIZE,0); \
812 if ((val) != 0) { (sr) = 1; (hp)[DATA_HS] = (val); } \
813 else /* val==0 */ { (sr) = 0; } \
815 (dr) = (B_)(hp); /* dr is an StgByteArray */ \
820 Then there are a few oddments to make life easier:
824 The "str" argument must be a literal C string.
826 addr2Integer( ..., "foo") OK!
829 addr2Integer( ..., x) NO! NO!
832 #define addr2IntegerZh(ar,sr,dr, liveness, str) \
834 /* taking the number of bytes/8 as the number of words of lookahead \
835 is plenty conservative */ \
836 I_ space = GMP_SAME_SIZE(sizeof(str) / 8 + 1); \
838 GMP_HEAP_LOOKAHEAD(liveness, space); \
840 /* Perform the operation */ \
841 if (SAFESTGCALL3(I_,(void *, MP_INT *, char *, int), mpz_init_set_str,&result,(str),/*base*/10)) \
844 GMP_HEAP_HANDBACK(); /* restore Hp */ \
845 (ar) = result.alloc; \
846 (sr) = result.size; \
847 (dr) = (B_) (result.d - DATA_HS); \
848 /* pt to *beginning* of object (GMP has been monkeying around in the middle) */ \
852 Encoding and decoding float-ish things is pretty Integer-ish. We use
853 these pretty magical support functions, essentially stolen from Lennart:
855 StgFloat __encodeFloat PROTO((MP_INT *, I_));
856 void __decodeFloat PROTO((MP_INT * /*result1*/,
860 StgDouble __encodeDouble PROTO((MP_INT *, I_));
861 void __decodeDouble PROTO((MP_INT * /*result1*/,
866 Some floating-point format info, made with the \tr{enquire} program
867 (version~4.3) [comes with gcc].
869 /* this should be done by CPU architecture, insofar as possible [WDP] */
871 #if sparc_TARGET_ARCH \
872 || alpha_TARGET_ARCH \
873 || hppa1_1_TARGET_ARCH \
874 || i386_TARGET_ARCH \
875 || m68k_TARGET_ARCH \
876 || mipsel_TARGET_ARCH \
877 || mipseb_TARGET_ARCH \
878 || powerpc_TARGET_ARCH \
879 || rs6000_TARGET_ARCH
881 /* yes, it is IEEE floating point */
882 #include "ieee-flpt.h"
884 #if alpha_dec_osf1_TARGET \
885 || i386_TARGET_ARCH \
886 || mipsel_TARGET_ARCH
888 #undef BIGENDIAN /* little-endian weirdos... */
893 #else /* unknown floating-point format */
895 ******* ERROR *********** Any ideas about floating-point format?
897 #endif /* unknown floating-point */
901 #if alpha_dec_osf1_TARGET
902 #define encodeFloatZh(r, hp, aa,sa,da, expon) encodeDoubleZh(r, hp, aa,sa,da, expon)
904 #define encodeFloatZh(r, hp, aa,sa,da, expon) \
906 /* Does not allocate memory */ \
910 arg.d = (unsigned long int *) (BYTE_ARR_CTS(da)); \
912 r = SAFESTGCALL2(StgFloat,(void *, MP_INT *, I_), __encodeFloat,&arg,(expon)); \
916 #define encodeDoubleZh(r, hp, aa,sa,da, expon) \
918 /* Does not allocate memory */ \
922 arg.d = (unsigned long int *) (BYTE_ARR_CTS(da)); \
924 r = SAFESTGCALL2(StgDouble,(void *, MP_INT *, I_), __encodeDouble,&arg,(expon));\
927 #if alpha_dec_osf1_TARGET
928 #define decodeFloatZh(exponr, ar,sr,dr, hp, f) decodeDoubleZh(exponr, ar,sr,dr, hp, f)
930 #define decodeFloatZh(exponr, ar,sr,dr, hp, f) \
933 StgFloat arg = (f); \
935 /* Be prepared to tell Lennart-coded __decodeFloat */ \
936 /* where mantissa.d can be put (it does not care about the rest) */ \
937 SET_DATA_HDR(hp,ArrayOfData_info,CCC,DATA_VHS+MIN_MP_INT_SIZE,0); \
938 mantissa.d = (hp) + DATA_HS; \
940 /* Perform the operation */ \
941 SAFESTGCALL3(void,(void *, MP_INT *, I_ *, StgFloat),__decodeFloat,&mantissa,&exponent,arg); \
943 ar = mantissa.alloc; \
944 sr = mantissa.size; \
949 #define decodeDoubleZh(exponr, ar,sr,dr, hp, f) \
952 StgDouble arg = (f); \
954 /* Be prepared to tell Lennart-coded __decodeDouble */ \
955 /* where mantissa.d can be put (it does not care about the rest) */ \
956 SET_DATA_HDR(hp,ArrayOfData_info,CCC,DATA_VHS+MIN_MP_INT_SIZE,0); \
957 mantissa.d = (hp) + DATA_HS; \
959 /* Perform the operation */ \
960 SAFESTGCALL3(void,(void *, MP_INT *, I_ *, StgDouble),__decodeDouble,&mantissa,&exponent,arg); \
962 ar = mantissa.alloc; \
963 sr = mantissa.size; \
968 %************************************************************************
970 \subsubsection[StgMacros-mv-floats]{Moving floats and doubles around (e.g., to/from stacks)}
972 %************************************************************************
974 With GCC, we use magic non-standard inlining; for other compilers, we
975 just use functions (see also \tr{runtime/prims/PrimArith.lc}).
977 (The @OMIT_...@ is only used in compiling some of the RTS, none of
978 which uses these anyway.)
981 #if alpha_TARGET_ARCH \
982 || i386_TARGET_ARCH \
985 #define ASSIGN_FLT(dst, src) *(StgFloat *)(dst) = (src);
986 #define PK_FLT(src) (*(StgFloat *)(src))
988 #define ASSIGN_DBL(dst, src) *(StgDouble *)(dst) = (src);
989 #define PK_DBL(src) (*(StgDouble *)(src))
991 #else /* not m68k || alpha || i[34]86 */
993 /* Special handling for machines with troublesome alignment constraints */
995 #define FLOAT_ALIGNMENT_TROUBLES TRUE
997 #if ! defined(__GNUC__) || ! defined(__STG_GCC_REGS__)
999 void ASSIGN_DBL PROTO((W_ [], StgDouble));
1000 StgDouble PK_DBL PROTO((W_ []));
1001 void ASSIGN_FLT PROTO((W_ [], StgFloat));
1002 StgFloat PK_FLT PROTO((W_ []));
1004 #else /* yes, its __GNUC__ && we really want them */
1006 #if sparc_TARGET_ARCH
1008 #define ASSIGN_FLT(dst, src) *(StgFloat *)(dst) = (src);
1009 #define PK_FLT(src) (*(StgFloat *)(src))
1011 #define ASSIGN_DBL(dst,src) \
1012 __asm__("st %2,%0\n\tst %R2,%1" : "=m" (((P_)(dst))[0]), \
1013 "=m" (((P_)(dst))[1]) : "f" (src));
1015 #define PK_DBL(src) \
1016 ( { register double d; \
1017 __asm__("ld %1,%0\n\tld %2,%R0" : "=f" (d) : \
1018 "m" (((P_)(src))[0]), "m" (((P_)(src))[1])); d; \
1023 /* (not very) forward prototype declarations */
1024 void ASSIGN_DBL PROTO((W_ [], StgDouble));
1025 StgDouble PK_DBL PROTO((W_ []));
1026 void ASSIGN_FLT PROTO((W_ [], StgFloat));
1027 StgFloat PK_FLT PROTO((W_ []));
1031 ASSIGN_DBL(W_ p_dest[], StgDouble src)
1035 p_dest[0] = y.du.dhi;
1036 p_dest[1] = y.du.dlo;
1039 /* GCC also works with this version, but it generates
1040 the same code as the previous one, and is not ANSI
1042 #define ASSIGN_DBL( p_dest, src ) \
1043 *p_dest = ((double_thing) src).du.dhi; \
1044 *(p_dest+1) = ((double_thing) src).du.dlo \
1052 y.du.dhi = p_src[0];
1053 y.du.dlo = p_src[1];
1059 ASSIGN_FLT(W_ p_dest[], StgFloat src)
1075 #endif /* ! sparc */
1077 #endif /* __GNUC__ */
1079 #endif /* not __m68k__ */
1082 %************************************************************************
1084 \subsubsection[StgMacros-array-primops]{Primitive arrays}
1086 %************************************************************************
1088 We regularly use this macro to fish the ``contents'' part
1089 out of a DATA or TUPLE closure, which is what is used for
1090 non-ptr and ptr arrays (respectively).
1092 BYTE_ARR_CTS returns a @C_ *@!
1094 We {\em ASSUME} we can use the same macro for both!!
/* Checked variants (GCC statement-expressions): ASSERT that the closure's
   info pointer really is a data / (im)mutable-pointer array before
   returning its contents pointer. */
#define BYTE_ARR_CTS(a) \
({ ASSERT(INFO_PTR(a) == (W_) ArrayOfData_info); \
((C_ *) (((StgPtr) (a))+DATA_HS)); })
#define PTRS_ARR_CTS(a) \
({ ASSERT((INFO_PTR(a) == (W_) ArrayOfPtrs_info) \
|| (INFO_PTR(a) == (W_) ImMutArrayOfPtrs_info));\
((a)+MUTUPLE_HS);} )
/* Unchecked variants: just skip the closure header (DATA_HS / MUTUPLE_HS
   words) to reach the contents. BYTE_ARR_CTS yields a C_* (byte ptr). */
#define BYTE_ARR_CTS(a) ((char *) (((StgPtr) (a))+DATA_HS))
#define PTRS_ARR_CTS(a) ((a)+MUTUPLE_HS)
/* Unique-supply and sequencing helpers, implemented in the RTS. */
extern I_ genSymZh(STG_NO_ARGS);
extern I_ resetGenSymZh(STG_NO_ARGS);
extern I_ incSeqWorldZh(STG_NO_ARGS);
/* Scan the first "len" bytes of the array for a NUL byte. */
extern I_ byteArrayHasNUL__ PROTO((const char *, I_));
OK, the easy ops first (all except \tr{newArr*}):
1120 (OLD:) VERY IMPORTANT! The read/write/index primitive ops
1121 on @ByteArray#@s index the array using a {\em BYTE} offset, even
if the thing being gotten out is a multi-byte @Int#@, @Float#@ etc.
1123 This is because you might be trying to take apart a C struct, where
1124 the offset from the start of the struct isn't a multiple of the
1125 size of the thing you're getting. Hence the @(char *)@ casts.
1127 EVEN MORE IMPORTANT! The above is a lie. The offsets for BlahArrays
1128 are in Blahs. WDP 95/08
1130 In the case of messing with @StgAddrs@ (@A_@), which are really \tr{void *},
1131 we cast to @P_@, because you can't index off an uncast \tr{void *}.
1133 In the case of @Array#@ (which contain pointers), the offset is in units
1134 of one ptr (not bytes).
/* Pointer-equality tests on mutable arrays; result is an I_ boolean. */
#define sameMutableArrayZh(r,a,b) r=(I_)((a)==(b))
#define sameMutableByteArrayZh(r,a,b) r=(I_)((B_)(a)==(B_)(b))
/* Read from an Array# of pointers; "i" is in units of one pointer. */
#define readArrayZh(r,a,i) r=((PP_) PTRS_ARR_CTS(a))[(i)]
/* ByteArray# reads delegate to the ...OffAddr forms; note that "i" is in
   units of the element type, not bytes (see note above, WDP 95/08). */
#define readCharArrayZh(r,a,i) indexCharOffAddrZh(r,BYTE_ARR_CTS(a),i)
#define readIntArrayZh(r,a,i) indexIntOffAddrZh(r,BYTE_ARR_CTS(a),i)
#define readAddrArrayZh(r,a,i) indexAddrOffAddrZh(r,BYTE_ARR_CTS(a),i)
#define readFloatArrayZh(r,a,i) indexFloatOffAddrZh(r,BYTE_ARR_CTS(a),i)
#define readDoubleArrayZh(r,a,i) indexDoubleOffAddrZh(r,BYTE_ARR_CTS(a),i)
/* result ("r") arg ignored in write macros! */
/* Write "v" at element index "i" (element units, not bytes). */
#define writeArrayZh(a,i,v) ((PP_) PTRS_ARR_CTS(a))[(i)]=(v)
#define writeCharArrayZh(a,i,v) ((C_ *)(BYTE_ARR_CTS(a)))[i] = (v)
#define writeIntArrayZh(a,i,v) ((I_ *)(BYTE_ARR_CTS(a)))[i] = (v)
#define writeAddrArrayZh(a,i,v) ((PP_)(BYTE_ARR_CTS(a)))[i] = (v)
/* Float/double writes go via ASSIGN_* to cope with alignment-troubled
   machines (see the ASSIGN_FLT/ASSIGN_DBL section above). */
#define writeFloatArrayZh(a,i,v) \
ASSIGN_FLT((P_) (((StgFloat *)(BYTE_ARR_CTS(a))) + i),v)
#define writeDoubleArrayZh(a,i,v) \
ASSIGN_DBL((P_) (((StgDouble *)(BYTE_ARR_CTS(a))) + i),v)
/* Indexing immutable arrays: same code shape as the read* forms. */
#define indexArrayZh(r,a,i) r=((PP_) PTRS_ARR_CTS(a))[(i)]
#define indexCharArrayZh(r,a,i) indexCharOffAddrZh(r,BYTE_ARR_CTS(a),i)
#define indexIntArrayZh(r,a,i) indexIntOffAddrZh(r,BYTE_ARR_CTS(a),i)
#define indexAddrArrayZh(r,a,i) indexAddrOffAddrZh(r,BYTE_ARR_CTS(a),i)
#define indexFloatArrayZh(r,a,i) indexFloatOffAddrZh(r,BYTE_ARR_CTS(a),i)
#define indexDoubleArrayZh(r,a,i) indexDoubleOffAddrZh(r,BYTE_ARR_CTS(a),i)
/* ForeignObj indexing: fetch the external data pointer out of the
   ForeignObj closure, then index off it like any Addr#. */
#define indexCharOffForeignObjZh(r,fo,i) indexCharOffAddrZh(r,ForeignObj_CLOSURE_DATA(fo),i)
#define indexIntOffForeignObjZh(r,fo,i) indexIntOffAddrZh(r,ForeignObj_CLOSURE_DATA(fo),i)
#define indexAddrOffForeignObjZh(r,fo,i) indexAddrOffAddrZh(r,ForeignObj_CLOSURE_DATA(fo),i)
#define indexFloatOffForeignObjZh(r,fo,i) indexFloatOffAddrZh(r,ForeignObj_CLOSURE_DATA(fo),i)
#define indexDoubleOffForeignObjZh(r,fo,i) indexDoubleOffAddrZh(r,ForeignObj_CLOSURE_DATA(fo),i)
/* The base case: index off a raw address, "i" in element units. */
#define indexCharOffAddrZh(r,a,i) r= ((C_ *)(a))[i]
#define indexIntOffAddrZh(r,a,i) r= ((I_ *)(a))[i]
#define indexAddrOffAddrZh(r,a,i) r= ((PP_)(a))[i]
/* Float/double go via PK_* for alignment-troubled machines. */
#define indexFloatOffAddrZh(r,a,i) r= PK_FLT((P_) (((StgFloat *)(a)) + i))
#define indexDoubleOffAddrZh(r,a,i) r= PK_DBL((P_) (((StgDouble *)(a)) + i))
1179 /* Freezing arrays-of-ptrs requires changing an info table, for the
1180 benefit of the generational collector. It needs to scavenge mutable
1181 objects, even if they are in old space. When they become immutable,
1182 they can be removed from this scavenge list. */
1183 #define unsafeFreezeArrayZh(r,a) \
1187 FREEZE_MUT_HDR(result,ImMutArrayOfPtrs_info); \
1191 #define unsafeFreezeByteArrayZh(r,a) r=(B_)(a)
1194 Now the \tr{newArr*} ops:
1198 --------------------
1199 Will: ToDo: we need to find suitable places to put this comment, and the
1200 "in-general" one which follows.
1202 ************ Nota Bene. The "n" in this macro is guaranteed to
1203 be a register, *not* (say) Node[1]. That means that it is guaranteed
1204 to survive GC, provided only that the register is kept unaltered.
1205 This is important, because "n" is used after the HEAP_CHK.
1207 In general, *all* parameters to these primitive-op macros are always
1208 registers. (Will: For exactly *which* primitive-op macros is this guaranteed?
1209 Exactly those which can trigger GC?)
1210 ------------------------
1212 NOTE: the above may now be OLD (WDP 94/02/10)
1216 For char arrays, the size is in {\em BYTES}.
/* Allocation of byte arrays: scale the element count "n" up to a byte
   count and hand off to newByteArray. */
#define newCharArrayZh(r,liveness,n) newByteArray(r,liveness,(n) * sizeof(C_))
#define newIntArrayZh(r,liveness,n) newByteArray(r,liveness,(n) * sizeof(I_))
#define newAddrArrayZh(r,liveness,n) newByteArray(r,liveness,(n) * sizeof(P_))
#define newFloatArrayZh(r,liveness,n) newByteArray(r,liveness,(n) * sizeof(StgFloat))
#define newDoubleArrayZh(r,liveness,n) newByteArray(r,liveness,(n) * sizeof(StgDouble))
1225 #define newByteArray(r,liveness,n) \
1230 HEAP_CHK(liveness,DATA_HS+BYTES_TO_STGWORDS(n),0); \
1231 size = BYTES_TO_STGWORDS(n); \
1232 ALLOC_PRIM(DATA_HS,size,0,DATA_HS+size) /* ticky ticky */; \
1233 CC_ALLOC(CCC,DATA_HS+size,ARR_K); \
1235 result = Hp-(DATA_HS+size)+1; \
1236 SET_DATA_HDR(result,ArrayOfData_info,CCC,DATA_VHS+size,0); \
1241 Arrays of pointers need to be initialised; uses \tr{TUPLES}!
1242 The initialisation value is guaranteed to be in a register,
1243 and will be indicated by the liveness mask, so it's ok to do
1244 a \tr{HEAP_CHK}, which may trigger GC.
1247 /* The new array initialization routine for the NCG */
1248 void newArrZh_init PROTO((P_ result, I_ n, P_ init));
1250 #define newArrayZh(r,liveness,n,init) \
1255 HEAP_CHK(liveness, MUTUPLE_HS+(n),0); \
1256 ALLOC_PRIM(MUTUPLE_HS,(n),0,MUTUPLE_HS+(n)) /* ticky ticky */; \
1257 CC_ALLOC(CCC,MUTUPLE_HS+(n),ARR_K); /* cc prof */ \
1259 result = Hp + 1 - (MUTUPLE_HS+(n)); \
1260 SET_MUTUPLE_HDR(result,ArrayOfPtrs_info,CCC,MUTUPLE_VHS+(n),0) \
1261 for (p = result+MUTUPLE_HS; p < (result+MUTUPLE_HS+(n)); p++) { \
1269 %************************************************************************
1271 \subsubsection[StgMacros-SynchVar-primops]{Synchronizing Variables PrimOps}
1273 %************************************************************************
1276 ED_(PrelBase_Z91Z93_closure);
1278 #define newSynchVarZh(r, hp) \
1280 ALLOC_PRIM(MUTUPLE_HS,3,0,MUTUPLE_HS+3) /* ticky ticky */; \
1281 CC_ALLOC(CCC,MUTUPLE_HS+3,ARR_K); /* cc prof */ \
1282 SET_SVAR_HDR(hp,EmptySVar_info,CCC); \
1283 SVAR_HEAD(hp) = SVAR_TAIL(hp) = SVAR_VALUE(hp) = PrelBase_Z91Z93_closure; \
1291 void Yield PROTO((W_));
1293 #define takeMVarZh(r, liveness, node) \
1295 while (INFO_PTR(node) != (W_) FullSVar_info) { \
1296 if (SVAR_HEAD(node) == PrelBase_Z91Z93_closure) \
1297 SVAR_HEAD(node) = CurrentTSO; \
1299 TSO_LINK(SVAR_TAIL(node)) = CurrentTSO; \
1300 TSO_LINK(CurrentTSO) = (P_) PrelBase_Z91Z93_closure; \
1301 SVAR_TAIL(node) = CurrentTSO; \
1302 DO_YIELD(liveness << 1); \
1304 SET_INFO_PTR(node, EmptySVar_info); \
1305 r = SVAR_VALUE(node); \
1306 SVAR_VALUE(node) = PrelBase_Z91Z93_closure; \
1311 #define takeMVarZh(r, liveness, node) \
1313 if (INFO_PTR(node) != (W_) FullSVar_info) { \
1314 /* Don't wrap the calls; we're done with STG land */\
1316 fprintf(stderr, "takeMVar#: MVar is empty.\n"); \
1317 EXIT(EXIT_FAILURE); \
1319 SET_INFO_PTR(node, EmptySVar_info); \
1320 r = SVAR_VALUE(node); \
1321 SVAR_VALUE(node) = PrelBase_Z91Z93_closure; \
1332 /* Only difference to the !GRAN def: RunnableThreadsHd has been replaced by */
1333 /* ThreadQueueHd i.e. the tso is added at the end of the thread queue on */
1334 /* the CurrentProc. This means we have an implicit context switch after */
1335 /* putMVar even if unfair scheduling is used in GranSim (default)! -- HWL */
1337 #define putMVarZh(node, value) \
1340 if (INFO_PTR(node) == (W_) FullSVar_info) { \
1341 /* Don't wrap the calls; we're done with STG land */\
1343 fprintf(stderr, "putMVar#: MVar already full.\n"); \
1344 EXIT(EXIT_FAILURE); \
1346 SET_INFO_PTR(node, FullSVar_info); \
1347 SVAR_VALUE(node) = value; \
1348 tso = SVAR_HEAD(node); \
1349 if (tso != (P_) PrelBase_Z91Z93_closure) { \
1351 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1352 if (ThreadQueueHd == PrelBase_Z91Z93_closure) \
1353 ThreadQueueHd = tso; \
1355 TSO_LINK(ThreadQueueTl) = tso; \
1356 ThreadQueueTl = tso; \
1357 SVAR_HEAD(node) = TSO_LINK(tso); \
1358 TSO_LINK(tso) = (P_) PrelBase_Z91Z93_closure; \
1359 if(SVAR_HEAD(node) == (P_) PrelBase_Z91Z93_closure) \
1360 SVAR_TAIL(node) = (P_) PrelBase_Z91Z93_closure; \
1366 #define putMVarZh(node, value) \
1369 if (INFO_PTR(node) == (W_) FullSVar_info) { \
1370 /* Don't wrap the calls; we're done with STG land */\
1372 fprintf(stderr, "putMVar#: MVar already full.\n"); \
1373 EXIT(EXIT_FAILURE); \
1375 SET_INFO_PTR(node, FullSVar_info); \
1376 SVAR_VALUE(node) = value; \
1377 tso = SVAR_HEAD(node); \
1378 if (tso != (P_) PrelBase_Z91Z93_closure) { \
1380 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1381 if (RunnableThreadsHd == PrelBase_Z91Z93_closure) \
1382 RunnableThreadsHd = tso; \
1384 TSO_LINK(RunnableThreadsTl) = tso; \
1385 RunnableThreadsTl = tso; \
1386 SVAR_HEAD(node) = TSO_LINK(tso); \
1387 TSO_LINK(tso) = (P_) PrelBase_Z91Z93_closure; \
1388 if(SVAR_HEAD(node) == (P_) PrelBase_Z91Z93_closure) \
1389 SVAR_TAIL(node) = (P_) PrelBase_Z91Z93_closure; \
1397 #define putMVarZh(node, value) \
1400 if (INFO_PTR(node) == (W_) FullSVar_info) { \
1401 /* Don't wrap the calls; we're done with STG land */\
1403 fprintf(stderr, "putMVar#: MVar already full.\n"); \
1404 EXIT(EXIT_FAILURE); \
1406 SET_INFO_PTR(node, FullSVar_info); \
1407 SVAR_VALUE(node) = value; \
1416 #define readIVarZh(r, liveness, node) \
1418 if (INFO_PTR(node) != (W_) ImMutArrayOfPtrs_info) { \
1419 if (SVAR_HEAD(node) == PrelBase_Z91Z93_closure) \
1420 SVAR_HEAD(node) = CurrentTSO; \
1422 TSO_LINK(SVAR_TAIL(node)) = CurrentTSO; \
1423 TSO_LINK(CurrentTSO) = (P_) PrelBase_Z91Z93_closure; \
1424 SVAR_TAIL(node) = CurrentTSO; \
1425 DO_YIELD(liveness << 1); \
1427 r = SVAR_VALUE(node); \
1432 #define readIVarZh(r, liveness, node) \
1434 if (INFO_PTR(node) != (W_) ImMutArrayOfPtrs_info) { \
1435 /* Don't wrap the calls; we're done with STG land */\
1437 fprintf(stderr, "readIVar#: IVar is empty.\n"); \
1438 EXIT(EXIT_FAILURE); \
1440 r = SVAR_VALUE(node); \
1451 /* Only difference to the !GRAN def: RunnableThreadsHd has been replaced by */
1452 /* ThreadQueueHd i.e. the tso is added at the end of the thread queue on */
1453 /* the CurrentProc. This means we have an implicit context switch after */
1454 /* writeIVar even if unfair scheduling is used in GranSim (default)! -- HWL */
1456 #define writeIVarZh(node, value) \
1459 if (INFO_PTR(node) == (W_) ImMutArrayOfPtrs_info) { \
1460 /* Don't wrap the calls; we're done with STG land */\
1462 fprintf(stderr, "writeIVar#: IVar already full.\n");\
1463 EXIT(EXIT_FAILURE); \
1465 tso = SVAR_HEAD(node); \
1466 if (tso != (P_) PrelBase_Z91Z93_closure) { \
1467 if (ThreadQueueHd == PrelBase_Z91Z93_closure) \
1468 ThreadQueueHd = tso; \
1470 TSO_LINK(ThreadQueueTl) = tso; \
1471 while(TSO_LINK(tso) != PrelBase_Z91Z93_closure) { \
1473 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1474 tso = TSO_LINK(tso); \
1477 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1478 ThreadQueueTl = tso; \
1480 /* Don't use freeze, since it's conditional on GC */ \
1481 SET_INFO_PTR(node, ImMutArrayOfPtrs_info); \
1482 MUTUPLE_CLOSURE_SIZE(node) = (MUTUPLE_VHS+1); \
1483 SVAR_VALUE(node) = value; \
1488 #define writeIVarZh(node, value) \
1491 if (INFO_PTR(node) == (W_) ImMutArrayOfPtrs_info) { \
1492 /* Don't wrap the calls; we're done with STG land */\
1494 fprintf(stderr, "writeIVar#: IVar already full.\n");\
1495 EXIT(EXIT_FAILURE); \
1497 tso = SVAR_HEAD(node); \
1498 if (tso != (P_) PrelBase_Z91Z93_closure) { \
1499 if (RunnableThreadsHd == PrelBase_Z91Z93_closure) \
1500 RunnableThreadsHd = tso; \
1502 TSO_LINK(RunnableThreadsTl) = tso; \
1503 while(TSO_LINK(tso) != PrelBase_Z91Z93_closure) { \
1505 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1506 tso = TSO_LINK(tso); \
1509 STGCALL3(void,(void *, char *, P_, P_),QP_Event2,do_qp_prof > 1 ? "RA" : "RG",tso,CurrentTSO); \
1510 RunnableThreadsTl = tso; \
1512 /* Don't use freeze, since it's conditional on GC */ \
1513 SET_INFO_PTR(node, ImMutArrayOfPtrs_info); \
1514 MUTUPLE_CLOSURE_SIZE(node) = (MUTUPLE_VHS+1); \
1515 SVAR_VALUE(node) = value; \
1522 #define writeIVarZh(node, value) \
1525 if (INFO_PTR(node) == (W_) ImMutArrayOfPtrs_info) { \
1526 /* Don't wrap the calls; we're done with STG land */\
1528 fprintf(stderr, "writeIVar#: IVar already full.\n");\
1529 EXIT(EXIT_FAILURE); \
1531 /* Don't use freeze, since it's conditional on GC */ \
1532 SET_INFO_PTR(node, ImMutArrayOfPtrs_info); \
1533 MUTUPLE_CLOSURE_SIZE(node) = (MUTUPLE_VHS+1); \
1534 SVAR_VALUE(node) = value; \
1540 %************************************************************************
1542 \subsubsection[StgMacros-Wait-primops]{Delay/Wait PrimOps}
1544 %************************************************************************
1549 /* ToDo: for GRAN */
1551 #define delayZh(liveness, us) \
1553 if (WaitingThreadsTl == PrelBase_Z91Z93_closure) \
1554 WaitingThreadsHd = CurrentTSO; \
1556 TSO_LINK(WaitingThreadsTl) = CurrentTSO; \
1557 WaitingThreadsTl = CurrentTSO; \
1558 TSO_LINK(CurrentTSO) = PrelBase_Z91Z93_closure; \
1559 TSO_EVENT(CurrentTSO) = (W_) ((us) < 1 ? 1 : (us)); \
1560 DO_YIELD(liveness << 1); \
1565 #define delayZh(liveness, us) \
1568 fprintf(stderr, "delay#: unthreaded build.\n"); \
1569 EXIT(EXIT_FAILURE); \
1576 /* ToDo: something for GRAN */
1578 #define waitReadZh(liveness, fd) \
1580 if (WaitingThreadsTl == PrelBase_Z91Z93_closure) \
1581 WaitingThreadsHd = CurrentTSO; \
1583 TSO_LINK(WaitingThreadsTl) = CurrentTSO; \
1584 WaitingThreadsTl = CurrentTSO; \
1585 TSO_LINK(CurrentTSO) = PrelBase_Z91Z93_closure; \
1586 TSO_EVENT(CurrentTSO) = (W_) (-(fd)); \
1587 DO_YIELD(liveness << 1); \
1592 #define waitReadZh(liveness, fd) \
1595 fprintf(stderr, "waitRead#: unthreaded build.\n"); \
1596 EXIT(EXIT_FAILURE); \
1603 /* ToDo: something for GRAN */
/* <sys/types.h> supplies FD_SETSIZE, used by waitWriteZh below. */
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
/* Fixed: the #endif line had stray tokens and an unmatched comment
   terminator ("#endif HAVE_SYS_TYPES_H */"); the guard name must be
   wrapped in a complete comment. */
#endif /* HAVE_SYS_TYPES_H */
1609 #define waitWriteZh(liveness, fd) \
1611 if (WaitingThreadsTl == PrelBase_Z91Z93_closure) \
1612 WaitingThreadsHd = CurrentTSO; \
1614 TSO_LINK(WaitingThreadsTl) = CurrentTSO; \
1615 WaitingThreadsTl = CurrentTSO; \
1616 TSO_LINK(CurrentTSO) = PrelBase_Z91Z93_closure; \
1617 TSO_EVENT(CurrentTSO) = (W_) (-(fd+FD_SETSIZE)); \
1618 DO_YIELD(liveness << 1); \
1623 #define waitWriteZh(liveness, fd) \
1626 fprintf(stderr, "waitWrite#: unthreaded build.\n"); \
1627 EXIT(EXIT_FAILURE); \
1634 %************************************************************************
1636 \subsubsection[StgMacros-IO-primops]{Primitive I/O, error-handling primops}
1638 %************************************************************************
/* Hooks used by the errorIO# machinery (defined in the RTS). */
extern P_ TopClosure;
EXTFUN(ErrorIO_innards);
EXTFUN(__std_entry_error__);
1645 #define errorIOZh(a) \
1646 do { TopClosure=(a); \
1647 (void) SAFESTGCALL1(I_,(void *, FILE *),fflush,stdout); \
1648 (void) SAFESTGCALL1(I_,(void *, FILE *),fflush,stderr); \
1649 JMP_(ErrorIO_innards); \
/* Character I/O on a C FILE* from STG land. */
#if !defined(CALLER_SAVES_SYSTEM)
/* can use the macros */
#define stg_getc(stream) getc((FILE *) (stream))
#define stg_putc(c,stream) putc((c),((FILE *) (stream)))
/* must not use the macros (they contain embedded calls to _filbuf/whatnot) */
/* Instead call fgetc/fputc out-of-line via SAFESTGCALL so the STG
   registers are saved across the call. */
#define stg_getc(stream) SAFESTGCALL1(I_,(void *, FILE *),fgetc,(FILE *) (stream))
#define stg_putc(c,stream) SAFESTGCALL2(I_,(void *, char, FILE *),fputc,(c),((FILE *) (stream)))
/* Timer and signal-handler plumbing, implemented in the RTS. */
int initialize_virtual_timer(int us);      /* period in microseconds */
int install_segv_handler(STG_NO_ARGS);
int install_vtalrm_handler(STG_NO_ARGS);
void initUserSignals(STG_NO_ARGS);
void blockUserSignals(STG_NO_ARGS);
void unblockUserSignals(STG_NO_ARGS);
/* Only present in RTS builds: */
IF_RTS(void blockVtAlrmSignal(STG_NO_ARGS);)
IF_RTS(void unblockVtAlrmSignal(STG_NO_ARGS);)
IF_RTS(void AwaitEvent(I_ delta);)
#if defined(_POSIX_SOURCE) && !defined(nextstep3_TARGET_OS)
/* For nextstep3_TARGET_OS comment see stgdefs.h. CaS */
/* POSIX variant: sig_install takes a signal mask as well. */
extern I_ sig_install PROTO((I_, I_, sigset_t *));
#define stg_sig_ignore(s,m) SAFESTGCALL3(I_,(void *, I_, I_),sig_install,s,STG_SIG_IGN,(sigset_t *)m)
#define stg_sig_default(s,m) SAFESTGCALL3(I_,(void *, I_, I_),sig_install,s,STG_SIG_DFL,(sigset_t *)m)
#define stg_sig_catch(s,sp,m) SAFESTGCALL3(I_,(void *, I_, I_),sig_install,s,sp,(sigset_t *)m)
/* Non-POSIX variant: no signal-mask support, so the "m" argument of the
   stg_sig_* macros is simply ignored. */
extern I_ sig_install PROTO((I_, I_));
#define stg_sig_ignore(s,m) SAFESTGCALL2(I_,(void *, I_, I_),sig_install,s,STG_SIG_IGN)
#define stg_sig_default(s,m) SAFESTGCALL2(I_,(void *, I_, I_),sig_install,s,STG_SIG_DFL)
#define stg_sig_catch(s,sp,m) SAFESTGCALL2(I_,(void *, I_, I_),sig_install,s,sp)
/* Distinguished "handler" codes passed to sig_install (all negative so
   they cannot clash with real stable-pointer handlers). */
#define STG_SIG_DFL (-1)
#define STG_SIG_IGN (-2)
#define STG_SIG_ERR (-3)
/* Error-handler support (RTS side). */
StgInt getErrorHandler(STG_NO_ARGS);
void raiseError PROTO((StgStablePtr handler));
StgInt catchError PROTO((StgStablePtr newErrorHandler));
void decrementErrorCount(STG_NO_ARGS);
/* Wrapped calls so STG registers are saved across them. */
#define stg_catchError(sp) SAFESTGCALL1(I_,(void *, StgStablePtr),catchError,sp)
#define stg_decrementErrorCount() SAFESTGCALL0(void,(void *),decrementErrorCount)
1700 %************************************************************************
1702 \subsubsection[StgMacros-stable-ptr]{Primitive ops for manipulating stable pointers}
1704 %************************************************************************
1707 The type of these should be:
1710 makeStablePointer# :: a -> State# _RealWorld -> StateAndStablePtr# _RealWorld a
1711 deRefStablePointer# :: StablePtr# a -> State# _RealWorld -> StateAndPtr _RealWorld a
1714 Since world-tokens are no longer explicitly passed around, the
implementations have somewhat fewer arguments/results.
1717 The simpler one is @deRefStablePointer#@ (which is only a primop
1718 because it is more polymorphic than is allowed of a ccall).
1723 #define deRefStablePtrZh(ri,sp) \
1726 fprintf(stderr, "deRefStablePtr#: no stable pointer support.\n");\
1727 EXIT(EXIT_FAILURE); \
/* Look up stable pointer "sp" in the stable-pointer table and put the
   referenced closure in "ri"; out-of-line via SAFESTGCALL2 (it is a
   primop only because it is more polymorphic than a ccall allows). */
extern StgPtr _deRefStablePointer PROTO((StgInt, StgPtr));
#define deRefStablePtrZh(ri,sp) \
ri = SAFESTGCALL2(I_,(void *, I_, P_),_deRefStablePointer,sp,StorageMgrInfo.StablePointerTable);
1738 Declarations for other stable pointer operations.
/* Other stable-pointer operations (RTS side). */
void freeStablePointer PROTO((I_ stablePtr));
/* Enter the closure named by a stable pointer at the given start code. */
void enterStablePtr PROTO((StgStablePtr, StgFunPtr));
void performIO PROTO((StgStablePtr));
I_ enterInt PROTO((StgStablePtr));
I_ enterFloat PROTO((StgStablePtr));
P_ deRefStablePointer PROTO((StgStablePtr));
IF_RTS(I_ catchSoftHeapOverflow PROTO((StgStablePtr, I_));)
IF_RTS(I_ getSoftHeapOverflowHandler(STG_NO_ARGS);)
IF_RTS(extern StgStablePtr softHeapOverflowHandler;)
IF_RTS(void shutdownHaskell(STG_NO_ARGS);)
/* Entry/return code fragments used by the enter* drivers above. */
EXTFUN(stopPerformIODirectReturn);
EXTFUN(startPerformIO);
EXTFUN(stopEnterIntDirectReturn);
EXTFUN(startEnterInt);
EXTFUN(stopEnterFloatDirectReturn);
EXTFUN(startEnterFloat);
/* NOTE(review): duplicate of the enterStablePtr prototype above, with
   parameter names added; harmless in C. */
void enterStablePtr PROTO((StgStablePtr stableIndex, StgFunPtr startCode));
IF_RTS(extern I_ ErrorIO_call_count;)
1767 Somewhat harder is @makeStablePointer#@ --- it is usually simple but
1768 if we're unlucky, it will have to allocate a new table and copy the
1769 old bit over. Since we might, very occasionally, have to call the
1770 garbage collector, this has to be a macro... sigh!
1772 NB @newSP@ is required because it is entirely possible that
1773 @stablePtr@ and @unstablePtr@ are aliases and so we can't do the
1774 assignment to @stablePtr@ until we've finished with @unstablePtr@.
1776 Another obscure piece of coding is the recalculation of the size of
1777 the table. We do this just in case Jim's threads decide they want to
1778 context switch---in which case any stack-allocated variables may get
1779 trashed. (If only there was a special heap check which didn't
1780 consider context switching...)
/* Calculate SP Table size from number of pointers */
/* Layout: DYN header + count word + 2 words per pointer
   (the pointer slot itself plus a free-stack slot). */
#define SPTSizeFromNoPtrs( newNP ) (DYN_VHS + 1 + 2 * (newNP))
/* Calculate number of pointers in new table from number in old table:
any strictly increasing expression will do here */
#define CalcNewNoSPtrs( i ) ((i)*2 + 100)
/* Copy old stable-pointer table into the (larger) new one. */
void enlargeSPTable PROTO((P_, P_));
1794 #define makeStablePtrZh(stablePtr,liveness,unstablePtr) \
1796 EXTDATA_RO(StablePointerTable_info); \
1797 EXTDATA(UnusedSP); \
1798 StgStablePtr newSP; \
1800 if (SPT_EMPTY(StorageMgrInfo.StablePointerTable)) { /* free stack is empty */ \
1801 { /* Variables used before the heap check */ \
1802 I_ OldNoPtrs = SPT_NoPTRS( StorageMgrInfo.StablePointerTable ); \
1803 I_ NewNoPtrs = CalcNewNoSPtrs( OldNoPtrs ); \
1804 I_ NewSize = SPTSizeFromNoPtrs( NewNoPtrs ); \
1805 HEAP_CHK(liveness, _FHS+NewSize, 0); \
1807 { /* Variables used after the heap check - same values */ \
1808 I_ OldNoPtrs = SPT_NoPTRS( StorageMgrInfo.StablePointerTable ); \
1809 I_ NewNoPtrs = CalcNewNoSPtrs( OldNoPtrs ); \
1810 I_ NewSize = SPTSizeFromNoPtrs( NewNoPtrs ); \
1811 P_ SPTable = Hp + 1 - (_FHS + NewSize); \
1813 CC_ALLOC(CCC, _FHS+NewSize, SPT_K); /* cc prof */ \
1814 SET_DYN_HDR(SPTable,StablePointerTable_info,CCC,NewSize,NewNoPtrs);\
1815 SAFESTGCALL2(void, (void *, P_, P_), enlargeSPTable, SPTable, StorageMgrInfo.StablePointerTable); \
1816 StorageMgrInfo.StablePointerTable = SPTable; \
1820 newSP = SPT_POP(StorageMgrInfo.StablePointerTable); \
1821 SPT_SPTR(StorageMgrInfo.StablePointerTable, newSP) = unstablePtr; \
1822 CHECK_SPT_CLOSURE( StorageMgrInfo.StablePointerTable ); \
1823 stablePtr = newSP; \
1828 #define makeStablePtrZh(stablePtr,liveness,unstablePtr) \
1831 fprintf(stderr, "makeStablePtr#: no stable pointer support.\n");\
1832 EXIT(EXIT_FAILURE); \
1838 %************************************************************************
1840 \subsubsection[StgMacros-unsafePointerEquality]{Primitive `op' for breaking referential transparency}
1842 %************************************************************************
1844 The type of this is @reallyUnsafePtrEquality :: a -> a -> Int#@ so we
1845 can expect three parameters: the two arguments and a "register" to put
1848 Message to Will: This primop breaks referential transparency so badly
1849 you might want to leave it out. On the other hand, if you hide it
1850 away in an appropriate monad, it's perfectly safe. [ADR]
1852 Note that this primop is non-deterministic: different results can be
1853 obtained depending on just what the garbage collector (and code
1854 optimiser??) has done. However, we can guarantee that if two objects
1855 are pointer-equal, they have the same denotation --- the converse most
1856 certainly doesn't hold.
1858 ToDo ADR: The degree of non-determinism could be greatly reduced by
1859 following indirections.
1862 #define reallyUnsafePtrEqualityZh(r,a,b) r=((StgPtr)(a) == (StgPtr)(b))
1865 %************************************************************************
1867 \subsubsection[StgMacros-parallel-primop]{Primitive `op' for sparking (etc)}
1869 %************************************************************************
1871 Assuming local sparking in some form, we can now inline the spark request.
1873 We build a doubly-linked list in the heap, so that we can handle FIFO
1874 or LIFO scheduling as we please.
1876 Anything with tag >= 0 is in WHNF, so we discard it.
/* The empty-list closure, used throughout as a "nil" marker. */
ED_(PrelBase_Z91Z93_closure);
/* Plain spark: PARZh with rest=1 and all annotation fields zero. */
#define parZh(r,node) \
PARZh(r,node,1,0,0,0,0,0)
/* The parAt* family all funnel into parATZh; the final argument selects
   the placement mode: 1 = at, 2 = at absolute proc, 3 = at relative
   proc, 0 = "for now". */
#define parAtZh(r,node,where,identifier,gran_info,size_info,par_info,rest) \
parATZh(r,node,where,identifier,gran_info,size_info,par_info,rest,1)
#define parAtAbsZh(r,node,proc,identifier,gran_info,size_info,par_info,rest) \
parATZh(r,node,proc,identifier,gran_info,size_info,par_info,rest,2)
#define parAtRelZh(r,node,proc,identifier,gran_info,size_info,par_info,rest) \
parATZh(r,node,proc,identifier,gran_info,size_info,par_info,rest,3)
#define parAtForNowZh(r,node,where,identifier,gran_info,size_info,par_info,rest) \
parATZh(r,node,where,identifier,gran_info,size_info,par_info,rest,0)
1900 #define parATZh(r,node,where,identifier,gran_info,size_info,par_info,rest,local) \
1903 if (SHOULD_SPARK(node)) { \
1906 result = NewSpark((P_)node,identifier,gran_info,size_info,par_info,local); \
1907 if (local==2) { /* special case for parAtAbs */ \
1908 GranSimSparkAtAbs(result,(I_)where,identifier);\
1909 } else if (local==3) { /* special case for parAtRel */ \
1910 GranSimSparkAtAbs(result,(I_)(CurrentProc+where),identifier); \
1912 GranSimSparkAt(result,where,identifier); \
1914 context_switch = 1; \
1916 RestoreAllStgRegs(); \
1917 } else if (do_qp_prof) { \
1918 I_ tid = threadId++; \
1919 SAFESTGCALL2(void,(I_, P_),QP_Event0,tid,node); \
1921 r = 1; /* return code for successful spark -- HWL */ \
/* Local vs global sparking: both delegate to PARZh, differing only in
   the final "local" flag (1 = local, 0 = global). */
#define parLocalZh(r,node,identifier,gran_info,size_info,par_info,rest) \
PARZh(r,node,rest,identifier,gran_info,size_info,par_info,1)
#define parGlobalZh(r,node,identifier,gran_info,size_info,par_info,rest) \
PARZh(r,node,rest,identifier,gran_info,size_info,par_info,0)
1932 #define PARZh(r,node,rest,identifier,gran_info,size_info,par_info,local) \
1934 if (SHOULD_SPARK(node)) { \
1937 result = NewSpark((P_)node,identifier,gran_info,size_info,par_info,local);\
1938 add_to_spark_queue(result); \
1939 GranSimSpark(local,(P_)node); \
1940 context_switch = 1; \
1942 RestoreAllStgRegs(); \
1943 } else if (do_qp_prof) { \
1944 I_ tid = threadId++; \
1945 SAFESTGCALL2(void,(I_, P_),QP_Event0,tid,node); \
1947 r = 1; /* return code for successful spark -- HWL */ \
1952 #define PARZh(r,node,rest,identifier,gran_info,size_info,par_info,local) \
1955 if (SHOULD_SPARK(node)) { \
1956 result = NewSpark((P_)node,identifier,gran_info,size_info,par_info,local);\
1957 ADD_TO_SPARK_QUEUE(result); \
1958 SAFESTGCALL2(void,(W_),GranSimSpark,local,(P_)node); \
1959 /* context_switch = 1; not needed any more -- HWL */ \
1960 } else if (do_qp_prof) { \
1961 I_ tid = threadId++; \
1962 SAFESTGCALL2(void,(I_, P_),QP_Event0,tid,node); \
1964 r = 1; /* return code for successful spark -- HWL */ \
/* Stubs: these primops expand to nothing (the comment is the whole
   macro body). */
#define copyableZh(r,node) \
/* copyable not yet implemented!! */
#define noFollowZh(r,node) \
/* noFollow not yet implemented!! */
/* Count of threads the scheduler is still required to create. */
extern I_ required_thread_count;
/* Bump the per-TSO global-spark statistics counter. */
#define COUNT_SPARK TSO_GLOBALSPARKS(CurrentTSO)++
1986 Note that we must bump the required thread count NOW, rather
1987 than when the thread is actually created.
1990 #define forkZh(r,liveness,node) \
1992 while (PendingSparksTl[REQUIRED_POOL] == PendingSparksLim[REQUIRED_POOL]) \
1993 DO_YIELD((liveness << 1) | 1); \
1995 if (SHOULD_SPARK(node)) { \
1996 *PendingSparksTl[REQUIRED_POOL]++ = (P_)(node); \
1997 } else if (DO_QP_PROF) { \
1998 I_ tid = threadId++; \
1999 SAFESTGCALL2(void,(I_, P_),QP_Event0,tid,node); \
2001 required_thread_count++; \
2002 context_switch = 1; \
2003 r = 1; /* Should not be necessary */ \
2006 #define parZh(r,node) \
2009 if (SHOULD_SPARK(node) && \
2010 PendingSparksTl[ADVISORY_POOL] < PendingSparksLim[ADVISORY_POOL]) { \
2011 *PendingSparksTl[ADVISORY_POOL]++ = (P_)(node); \
2015 I_ tid = threadId++; \
2016 SAFESTGCALL2(void,(I_, P_),QP_Event0,tid,node); \
2019 r = 1; /* Should not be necessary */ \
2024 #endif /* CONCURRENT */
2027 The following seq# code should only be used in unoptimized code.
2028 Be warned: it's a potential bug-farm.
2030 First we push two words on the B stack: the current value of RetReg
2031 (which may or may not be live), and a continuation snatched largely out
2032 of thin air (it's a point within this code block). Then we set RetReg
2033 to the special polymorphic return code for seq, load up Node with the
2034 closure to be evaluated, and we're off. When the eval returns to the
2035 polymorphic seq return point, the two words are popped off the B stack,
2036 RetReg is restored, and we jump to the continuation, completing the
2037 primop and going on our merry way.
2043 #define seqZh(r,liveness,node) \
2046 /* STK_CHK(liveness,0,2,0,0,0,0); */ \
2047 /* SpB -= BREL(2); */ \
2048 SpB[BREL(0)] = (W_) RetReg; \
2049 SpB[BREL(1)] = (W_) &&cont; \
2050 RetReg = (StgRetAddr) vtbl_seq; \
2053 InfoPtr = (D_)(INFO_PTR(Node)); \
2054 JMP_(ENTRY_CODE(InfoPtr)); \
2056 r = 1; /* Should be unnecessary */ \
2061 %************************************************************************
2063 \subsubsection[StgMacros-foreign-objects]{Foreign Objects}
2065 %************************************************************************
2067 [Based on previous MallocPtr comments -- SOF]
2069 This macro is used to construct a ForeignObj on the heap.
2071 What this does is plug the pointer (which will be in a local
2072 variable) together with its finalising/free routine, into a fresh heap
2073 object and then sets a result (which will be a register) to point
2074 to the fresh heap object.
2076 To accommodate per-object finalisation, augment the macro with a
2077 finalisation routine argument. Nothing spectacular, just plug the
2078 pointer to the routine into the ForeignObj -- SOF 4/96
2080 Question: what's this "SET_ACTIVITY" stuff - should I be doing this
2081 too? (It's if you want to use the SPAT profiling tools to
2082 characterize program behavior by ``activity'' -- tail-calling,
2083 heap-checking, etc. -- see Ticky.lh. It is quite specialized.
2086 (Swapped first two arguments to make it come into line with what appears
2087 to be `standard' format, return register then liveness mask. -- SOF 4/96)
2092 StgInt eqForeignObj PROTO((StgForeignObj p1, StgForeignObj p2));
2094 #define makeForeignObjZh(r, liveness, mptr, finalise) \
2098 HEAP_CHK((W_)liveness, _FHS + ForeignObj_SIZE,0); \
2099 CC_ALLOC(CCC,_FHS + ForeignObj_SIZE,ForeignObj_K); /* cc prof */ \
2101 result = Hp + 1 - (_FHS + ForeignObj_SIZE); \
2102 SET_ForeignObj_HDR(result,ForeignObj_info,CCC,_FHS + ForeignObj_SIZE,0); \
2103 ForeignObj_CLOSURE_DATA(result) = (P_)mptr; \
2104 ForeignObj_CLOSURE_FINALISER(result) = (P_)finalise; \
2105 ForeignObj_CLOSURE_LINK(result) = StorageMgrInfo.ForeignObjList; \
2106 StorageMgrInfo.ForeignObjList = result; \
2109 /*fprintf(stderr,"DEBUG: ForeignObj(0x%x) = <0x%x, 0x%x, 0x%x, 0x%x>\n", \
2111 result[0],result[1], \
2112 result[2],result[3]);*/ \
2114 CHECK_ForeignObj_CLOSURE( result ); \
2115 VALIDATE_ForeignObjList( StorageMgrInfo.ForeignObjList ); \
2117 (r) = (P_) result; \
2120 #define writeForeignObjZh(res,datum) ((PP_) ForeignObj_CLOSURE_DATA(res)) = ((P_)datum)
2123 #define makeForeignObjZh(r, liveness, mptr, finalise) \
2126 fprintf(stderr, "makeForeignObj#: no foreign object support.\n");\
2127 EXIT(EXIT_FAILURE); \
2130 #define writeForeignObjZh(res,datum) \
2133 fprintf(stderr, "writeForeignObj#: no foreign object support.\n");\
2134 EXIT(EXIT_FAILURE); \
2141 End-of-file's multi-slurp protection:
2143 #endif /* ! STGMACROS_H */