1 /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
Copyright (C) 1991, 1992, 1993, 1994, 1996, 1997, 1999, 2000 Free Software
Foundation, Inc.
6 This file is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or (at your
9 option) any later version.
11 This file is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
14 License for more details.
16 You should have received a copy of the GNU Lesser General Public License
17 along with this file; see the file COPYING.LIB. If not, write to
18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
19 MA 02111-1307, USA. */
/* You have to define the following before including this file:

   UWtype -- An unsigned type, default type for operations (typically a "word")
   UHWtype -- An unsigned type, at least half the size of UWtype.
   UDWtype -- An unsigned type, at least twice as large as UWtype.
   W_TYPE_SIZE -- size in bits of UWtype

   SItype, USItype -- Signed and unsigned 32 bit types.
   DItype, UDItype -- Signed and unsigned 64 bit types.

   On a 32 bit machine UWtype should typically be USItype;
   on a 64 bit machine, UWtype should typically be UDItype.  */
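#if 0
/* Example only (not part of this file): a minimal sketch of the definitions a
   32-bit user might supply before including longlong.h.  The typedef choices
   below are illustrative; any types of the required widths will do.  */
typedef unsigned int USItype;		/* 32-bit unsigned */
typedef int SItype;			/* 32-bit signed */
typedef unsigned long long UDItype;	/* 64-bit unsigned */
typedef long long DItype;		/* 64-bit signed */
#define UWtype USItype			/* the basic "word" */
#define UHWtype unsigned short		/* at least half a word wide */
#define UDWtype UDItype			/* at least two words wide */
#define W_TYPE_SIZE 32
#include "longlong.h"
#endif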
35 #define __BITS4 (W_TYPE_SIZE / 4)
36 #define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
37 #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
38 #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
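/* __ll_B is the base of a half word, i.e. 2^(W_TYPE_SIZE/2); __ll_lowpart and
   __ll_highpart split a full word into its low and high halves.  For example,
   with W_TYPE_SIZE == 32, __ll_B is 0x10000, __ll_lowpart (0x12345678) is
   0x5678 and __ll_highpart (0x12345678) is 0x1234.  */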
40 /* This is used to make sure no undesirable sharing between different libraries
41 that use this file takes place. */
43 #define __MPN(x) __##x
#if (__STDC__-0) || defined (__cplusplus)
#define _PROTO(x) x
#else
#define _PROTO(x) ()
#endif
/* Define auxiliary asm macros.

   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
   UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
   word product in HIGH_PROD and LOW_PROD.

   2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
   UDWtype product.  This is just a variant of umul_ppmm.

   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator) divides a UDWtype, composed of the UWtype integers
   HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
   in QUOTIENT and the remainder in REMAINDER.  HIGH_NUMERATOR must be less
   than DENOMINATOR for correct operation.  If, in addition, the operation
   requires the most significant bit of DENOMINATOR to be 1, the
   pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.

   4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
   denominator).  Like udiv_qrnnd but the numbers are signed.  The quotient
   is rounded towards 0.

   5) count_leading_zeros(count, x) counts the number of zero-bits from the
   msb to the first non-zero bit in the UWtype X.  This is the number of
   steps X needs to be shifted left to set the msb.  Undefined for X == 0,
   unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.

   6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
   from the least significant end.

   7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
   high_addend_2, low_addend_2) adds two two-word UWtype integers, composed of
   HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
   respectively.  The result is placed in HIGH_SUM and LOW_SUM.  Overflow
   (i.e. carry out) is not stored anywhere, and is lost.

   8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
   high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
   composed of HIGH_MINUEND and LOW_MINUEND, and HIGH_SUBTRAHEND and
   LOW_SUBTRAHEND respectively.  The result is placed in HIGH_DIFFERENCE
   and LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
   and is lost.

   If any of these macros are left undefined for a particular CPU,
   C macros are used.  */
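#if 0
/* Example only (not part of this file): how the macros above are typically
   used, assuming W_TYPE_SIZE == 32 and the example types shown earlier.  The
   macros assign to their result arguments, so plain lvalues must be passed
   for those.  */
static void
example_usage (void)
{
  UWtype ph, pl, q, r, c, sh, sl;

  umul_ppmm (ph, pl, 0x10000, 0x10001);		/* ph = 0x1, pl = 0x10000 */
  udiv_qrnnd (q, r, ph, pl, 0x80000000);	/* q = 2, r = 0x10000 */
  count_leading_zeros (c, (UWtype) 1 << 20);	/* c = 11 */
  add_ssaaaa (sh, sl, 0, ~(UWtype) 0, 0, 1);	/* sh = 1, sl = 0 */
  sub_ddmmss (sh, sl, sh, sl, 0, 1);		/* back to sh = 0, sl = ~0 */
}
#endif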
99 /* The CPUs come in alphabetical order below.
101 Please add support for more CPUs here, or improve the current support
102 for the CPUs below! */
104 #if defined (__alpha) && W_TYPE_SIZE == 64
105 #if defined (__GNUC__)
#define umul_ppmm(ph, pl, m0, m1) \
  do {									\
    UDItype __m0 = (m0), __m1 = (m1);					\
    __asm__ ("umulh %r1,%2,%0"						\
	     : "=r" (ph)						\
	     : "%rJ" (m0), "rI" (m1));					\
    (pl) = __m0 * __m1;							\
  } while (0)
115 #ifndef LONGLONG_STANDALONE
116 #define udiv_qrnnd(q, r, n1, n0, d) \
118 __di = __MPN(invert_limb) (d); \
119 udiv_qrnnd_preinv (q, r, n1, n0, d, __di); \
121 #define UDIV_NEEDS_NORMALIZATION 1
122 #define UDIV_TIME 220
123 long __MPN(count_leading_zeros) ();
124 #define count_leading_zeros(count, x) \
125 ((count) = __MPN(count_leading_zeros) (x))
126 #endif /* LONGLONG_STANDALONE */
127 #else /* ! __GNUC__ */
128 #include <machine/builtins.h>
129 #define umul_ppmm(ph, pl, m0, m1) \
131 UDItype __m0 = (m0), __m1 = (m1); \
132 (ph) = __UMULH (m0, m1); \
133 (pl) = __m0 * __m1; \
138 #if defined (__hppa) && W_TYPE_SIZE == 64
139 /* We put the result pointer parameter last here, since it makes passing
140 of the other parameters more efficient. */
141 #ifndef LONGLONG_STANDALONE
142 #define umul_ppmm(wh, wl, u, v) \
145 (wh) = __MPN(umul_ppmm) (u, v, &__p0); \
148 extern UDItype __MPN(umul_ppmm) _PROTO ((UDItype, UDItype, UDItype *));
149 #define udiv_qrnnd(q, r, n1, n0, d) \
151 (q) = __MPN(udiv_qrnnd) (n1, n0, d, &__r); \
154 extern UDItype __MPN(udiv_qrnnd) _PROTO ((UDItype, UDItype, UDItype, UDItype *));
157 #endif /* LONGLONG_STANDALONE */
160 #if defined (__ia64) && W_TYPE_SIZE == 64
161 #if defined (__GNUC__)
162 #define umul_ppmm(ph, pl, m0, m1) \
164 UDItype __m0 = (m0), __m1 = (m1); \
165 __asm__ ("xma.hu %0 = %1, %2, f0" \
167 : "e" (m0), "e" (m1)); \
168 (pl) = __m0 * __m1; \
174 #if defined (__GNUC__) && !defined (NO_ASM)
176 /* We sometimes need to clobber "cc" with gcc2, but that would not be
177 understood by gcc1. Use cpp to avoid major code duplication. */
#if __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
181 #else /* __GNUC__ >= 2 */
182 #define __CLOBBER_CC : "cc"
183 #define __AND_CLOBBER_CC , "cc"
184 #endif /* __GNUC__ < 2 */
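/* __CLOBBER_CC is appended to an asm statement that declares no other
   clobbers, and __AND_CLOBBER_CC to one that already declares some, so that
   the "cc" clobber is emitted only for gcc 2 and later and omitted for gcc 1,
   which would not understand it.  */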
186 #if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
187 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
188 __asm__ ("add %1,%4,%5\n\taddc %0,%2,%3" \
189 : "=r" (sh), "=&r" (sl) \
190 : "%r" (ah), "rI" (bh), "%r" (al), "rI" (bl))
191 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
192 __asm__ ("sub %1,%4,%5\n\tsubc %0,%2,%3" \
193 : "=r" (sh), "=&r" (sl) \
194 : "r" (ah), "rI" (bh), "r" (al), "rI" (bl))
195 #define umul_ppmm(xh, xl, m0, m1) \
197 USItype __m0 = (m0), __m1 = (m1); \
198 __asm__ ("multiplu %0,%1,%2" \
200 : "r" (__m0), "r" (__m1)); \
201 __asm__ ("multmu %0,%1,%2" \
203 : "r" (__m0), "r" (__m1)); \
205 #define udiv_qrnnd(q, r, n1, n0, d) \
206 __asm__ ("dividu %0,%3,%4" \
207 : "=r" (q), "=q" (r) \
208 : "1" (n1), "r" (n0), "r" (d))
209 #define count_leading_zeros(count, x) \
210 __asm__ ("clz %0,%1" \
213 #define COUNT_LEADING_ZEROS_0 32
214 #endif /* __a29k__ */
216 #if defined (__arm__) && W_TYPE_SIZE == 32
217 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
218 __asm__ ("adds\t%1, %4, %5\n\tadc\t%0, %2, %3" \
219 : "=r" (sh), "=&r" (sl) \
220 : "%r" (ah), "rI" (bh), "%r" (al), "rI" (bl))
221 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
222 __asm__ ("subs\t%1, %4, %5\n\tsbc\t%0, %2, %3" \
223 : "=r" (sh), "=&r" (sl) \
224 : "r" (ah), "rI" (bh), "r" (al), "rI" (bl))
#if 1 || defined (__arm_m__)	/* `M' series has widening multiply support */
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("umull %0,%1,%2,%3" : "=&r" (xl), "=&r" (xh) : "r" (a), "r" (b))
#define smul_ppmm(xh, xl, a, b) \
  __asm__ ("smull %0,%1,%2,%3" : "=&r" (xl), "=&r" (xh) : "r" (a), "r" (b))
#else
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("%@ Inlined umul_ppmm\n"					\
"	mov	%|r0, %2, lsr #16\n"					\
"	mov	%|r2, %3, lsr #16\n"					\
"	bic	%|r1, %2, %|r0, lsl #16\n"				\
"	bic	%|r2, %3, %|r2, lsl #16\n"				\
"	mul	%1, %|r1, %|r2\n"					\
"	mul	%|r2, %|r0, %|r2\n"					\
"	mul	%|r1, %0, %|r1\n"					\
"	mul	%0, %|r0, %0\n"						\
"	adds	%|r1, %|r2, %|r1\n"					\
"	addcs	%0, %0, #65536\n"					\
"	adds	%1, %1, %|r1, lsl #16\n"				\
"	adc	%0, %0, %|r1, lsr #16"					\
	     : "=&r" (xh), "=r" (xl)					\
	     : "r" (a), "r" (b)						\
	     : "r0", "r1", "r2")
#endif
#define UDIV_TIME 100
#endif /* __arm__ */
254 #if defined (__clipper__) && W_TYPE_SIZE == 32
255 #define umul_ppmm(w1, w0, u, v) \
256 ({union {UDItype __ll; \
257 struct {USItype __l, __h;} __i; \
259 __asm__ ("mulwux %2,%0" \
261 : "%0" ((USItype)(u)), "r" ((USItype)(v))); \
262 (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
263 #define smul_ppmm(w1, w0, u, v) \
264 ({union {DItype __ll; \
265 struct {SItype __l, __h;} __i; \
267 __asm__ ("mulwx %2,%0" \
269 : "%0" ((SItype)(u)), "r" ((SItype)(v))); \
270 (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
271 #define __umulsidi3(u, v) \
273 __asm__ ("mulwux %2,%0" \
274 : "=r" (__w) : "%0" ((USItype)(u)), "r" ((USItype)(v))); \
276 #endif /* __clipper__ */
278 /* Fujitsu vector computers. */
279 #if defined (__uxp__) && W_TYPE_SIZE == 32
280 #define umul_ppmm(ph, pl, u, v) \
282 union {UDItype __ll; \
283 struct {USItype __h, __l;} __i; \
285 __asm__ ("mult.lu %1,%2,%0" : "=r" (__x.__ll) : "%r" (u), "rK" (v));\
286 (ph) = __x.__i.__h; \
287 (pl) = __x.__i.__l; \
289 #define smul_ppmm(ph, pl, u, v) \
291 union {UDItype __ll; \
292 struct {USItype __h, __l;} __i; \
294 __asm__ ("mult.l %1,%2,%0" : "=r" (__x.__ll) : "%r" (u), "rK" (v)); \
295 (ph) = __x.__i.__h; \
296 (pl) = __x.__i.__l; \
300 #if defined (__gmicro__) && W_TYPE_SIZE == 32
301 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
302 __asm__ ("add.w %5,%1\n\taddx %3,%0" \
303 : "=g" ((USItype)(sh)), "=&g" ((USItype)(sl)) \
304 : "%0" ((USItype)(ah)), "g" ((USItype)(bh)), \
305 "%1" ((USItype)(al)), "g" ((USItype)(bl)))
306 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
307 __asm__ ("sub.w %5,%1\n\tsubx %3,%0" \
308 : "=g" ((USItype)(sh)), "=&g" ((USItype)(sl)) \
309 : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
310 "1" ((USItype)(al)), "g" ((USItype)(bl)))
311 #define umul_ppmm(ph, pl, m0, m1) \
312 __asm__ ("mulx %3,%0,%1" \
313 : "=g" ((USItype)(ph)), "=r" ((USItype)(pl)) \
314 : "%0" ((USItype)(m0)), "g" ((USItype)(m1)))
315 #define udiv_qrnnd(q, r, nh, nl, d) \
316 __asm__ ("divx %4,%0,%1" \
317 : "=g" ((USItype)(q)), "=r" ((USItype)(r)) \
318 : "1" ((USItype)(nh)), "0" ((USItype)(nl)), "g" ((USItype)(d)))
319 #define count_leading_zeros(count, x) \
320 __asm__ ("bsch/1 %1,%0" \
321 : "=g" (count) : "g" ((USItype)(x)), "0" ((USItype)0))
324 #if defined (__hppa) && W_TYPE_SIZE == 32
325 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
326 __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0" \
327 : "=r" (sh), "=&r" (sl) \
328 : "%rM" (ah), "rM" (bh), "%rM" (al), "rM" (bl))
329 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
330 __asm__ ("sub %4,%5,%1\n\tsubb %2,%3,%0" \
331 : "=r" (sh), "=&r" (sl) \
332 : "rM" (ah), "rM" (bh), "rM" (al), "rM" (bl))
333 #if defined (_PA_RISC1_1)
334 #define umul_ppmm(wh, wl, u, v) \
336 union {UDItype __ll; \
337 struct {USItype __h, __l;} __i; \
339 __asm__ ("xmpyu %1,%2,%0" : "=*f" (__x.__ll) : "*f" (u), "*f" (v)); \
340 (wh) = __x.__i.__h; \
341 (wl) = __x.__i.__l; \
349 #ifndef LONGLONG_STANDALONE
350 #define udiv_qrnnd(q, r, n1, n0, d) \
352 (q) = __MPN(udiv_qrnnd) (&__r, (n1), (n0), (d)); \
355 extern USItype __MPN(udiv_qrnnd) _PROTO ((USItype *, USItype, USItype, USItype));
356 #endif /* LONGLONG_STANDALONE */
357 #define count_leading_zeros(count, x) \
362 "extru,= %1,15,16,%%r0 ; Bits 31..16 zero?\n"
363 "extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n"
364 "ldo 16(%0),%0 ; Yes. Perform add.\n"
365 "extru,= %1,23,8,%%r0 ; Bits 15..8 zero?\n"
366 "extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n"
367 "ldo 8(%0),%0 ; Yes. Perform add.\n"
368 "extru,= %1,27,4,%%r0 ; Bits 7..4 zero?\n"
369 "extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n"
370 "ldo 4(%0),%0 ; Yes. Perform add.\n"
371 "extru,= %1,29,2,%%r0 ; Bits 3..2 zero?\n"
372 "extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n"
373 "ldo 2(%0),%0 ; Yes. Perform add.\n"
374 "extru %1,30,1,%1 ; Extract bit 1.\n"
375 "sub %0,%1,%0 ; Subtract it.\n"
376 : "=r" (count), "=r" (__tmp) : "1" (x)); \
380 #if (defined (__i370__) || defined (__mvs__)) && W_TYPE_SIZE == 32
381 #define smul_ppmm(xh, xl, m0, m1) \
383 union {DItype __ll; \
384 struct {USItype __h, __l;} __i; \
386 __asm__ ("mr %0,%3" \
387 : "=r" (__x.__i.__h), "=r" (__x.__i.__l) \
388 : "%1" (m0), "r" (m1)); \
389 (xh) = __x.__i.__h; (xl) = __x.__i.__l; \
391 #define sdiv_qrnnd(q, r, n1, n0, d) \
393 union {DItype __ll; \
394 struct {USItype __h, __l;} __i; \
396 __x.__i.__h = n1; __x.__i.__l = n0; \
397 __asm__ ("dr %0,%2" \
399 : "0" (__x.__ll), "r" (d)); \
400 (q) = __x.__i.__l; (r) = __x.__i.__h; \
404 #if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
405 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
406 __asm__ ("addl %5,%1\n\tadcl %3,%0" \
407 : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl)) \
408 : "%0" ((USItype)(ah)), "g" ((USItype)(bh)), \
409 "%1" ((USItype)(al)), "g" ((USItype)(bl)))
410 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
411 __asm__ ("subl %5,%1\n\tsbbl %3,%0" \
412 : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl)) \
413 : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
414 "1" ((USItype)(al)), "g" ((USItype)(bl)))
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3"							\
	   : "=a" (w0), "=d" (w1)					\
	   : "%0" ((USItype)(u)), "rm" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divl %4"							\
	   : "=a" (q), "=d" (r)						\
	   : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "rm" ((USItype)(d)))
#define count_leading_zeros(count, x) \
  do {									\
    USItype __cbtmp;							\
    __asm__ ("bsrl %1,%0" : "=r" (__cbtmp) : "rm" ((USItype)(x)));	\
    (count) = __cbtmp ^ 31;						\
  } while (0)
429 #define count_trailing_zeros(count, x) \
430 __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)))
439 #if defined (__i860__) && W_TYPE_SIZE == 32
440 #define rshift_rhlc(r,h,l,c) \
441 __asm__ ("shr %3,r0,r0\;shrd %1,%2,%0" \
442 "=r" (r) : "r" (h), "r" (l), "rn" (c))
445 #if defined (__i960__) && W_TYPE_SIZE == 32
446 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
447 __asm__ ("cmpo 1,0\;addc %5,%4,%1\;addc %3,%2,%0" \
448 : "=r" (sh), "=&r" (sl) \
449 : "%dI" (ah), "dI" (bh), "%dI" (al), "dI" (bl))
450 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
451 __asm__ ("cmpo 0,0\;subc %5,%4,%1\;subc %3,%2,%0" \
452 : "=r" (sh), "=&r" (sl) \
453 : "dI" (ah), "dI" (bh), "dI" (al), "dI" (bl))
454 #define umul_ppmm(w1, w0, u, v) \
455 ({union {UDItype __ll; \
456 struct {USItype __l, __h;} __i; \
458 __asm__ ("emul %2,%1,%0" \
459 : "=d" (__x.__ll) : "%dI" (u), "dI" (v)); \
460 (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
461 #define __umulsidi3(u, v) \
463 __asm__ ("emul %2,%1,%0" : "=d" (__w) : "%dI" (u), "dI" (v)); \
465 #define udiv_qrnnd(q, r, nh, nl, d) \
467 union {UDItype __ll; \
468 struct {USItype __l, __h;} __i; \
470 __nn.__i.__h = (nh); __nn.__i.__l = (nl); \
471 __asm__ ("ediv %d,%n,%0" \
472 : "=d" (__rq.__ll) : "dI" (__nn.__ll), "dI" (d)); \
473 (r) = __rq.__i.__l; (q) = __rq.__i.__h; \
475 #define count_leading_zeros(count, x) \
478 __asm__ ("scanbit %1,%0" : "=r" (__cbtmp) : "r" (x)); \
479 (count) = __cbtmp ^ 31; \
481 #define COUNT_LEADING_ZEROS_0 (-32) /* sic */
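/* The -32 above follows from the count_leading_zeros definition: scanbit
   presumably leaves -1 in __cbtmp when no bit is set, and -1 ^ 31 is -32.  */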
482 #if defined (__i960mx) /* what is the proper symbol to test??? */
483 #define rshift_rhlc(r,h,l,c) \
485 union {UDItype __ll; \
486 struct {USItype __l, __h;} __i; \
488 __nn.__i.__h = (h); __nn.__i.__l = (l); \
489 __asm__ ("shre %2,%1,%0" : "=d" (r) : "dI" (__nn.__ll), "dI" (c)); \
494 #if (defined (__mc68000__) || defined (__mc68020__) || defined(mc68020) \
495 || defined (__m68k__) || defined (__mc5200__) || defined (__mc5206e__) \
496 || defined (__mc5307__)) && W_TYPE_SIZE == 32
497 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
498 __asm__ ("add%.l %5,%1\n\taddx%.l %3,%0" \
499 : "=d" ((USItype)(sh)), "=&d" ((USItype)(sl)) \
500 : "%0" ((USItype)(ah)), "d" ((USItype)(bh)), \
501 "%1" ((USItype)(al)), "g" ((USItype)(bl)))
502 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
503 __asm__ ("sub%.l %5,%1\n\tsubx%.l %3,%0" \
504 : "=d" ((USItype)(sh)), "=&d" ((USItype)(sl)) \
505 : "0" ((USItype)(ah)), "d" ((USItype)(bh)), \
506 "1" ((USItype)(al)), "g" ((USItype)(bl)))
507 /* The '020, '030, '040 and CPU32 have 32x32->64 and 64/32->32q-32r. */
508 #if defined (__mc68020__) || defined(mc68020) \
509 || defined (__mc68030__) || defined (mc68030) \
510 || defined (__mc68040__) || defined (mc68040) \
511 || defined (__mc68332__) || defined (mc68332) \
512 || defined (__NeXT__)
513 #define umul_ppmm(w1, w0, u, v) \
514 __asm__ ("mulu%.l %3,%1:%0" \
515 : "=d" ((USItype)(w0)), "=d" ((USItype)(w1)) \
516 : "%0" ((USItype)(u)), "dmi" ((USItype)(v)))
518 #define udiv_qrnnd(q, r, n1, n0, d) \
519 __asm__ ("divu%.l %4,%1:%0" \
520 : "=d" ((USItype)(q)), "=d" ((USItype)(r)) \
521 : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "dmi" ((USItype)(d)))
523 #define sdiv_qrnnd(q, r, n1, n0, d) \
524 __asm__ ("divs%.l %4,%1:%0" \
525 : "=d" ((USItype)(q)), "=d" ((USItype)(r)) \
526 : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "dmi" ((USItype)(d)))
527 #else /* for other 68k family members use 16x16->32 multiplication */
528 #define umul_ppmm(xh, xl, a, b) \
529 do { USItype __umul_tmp1, __umul_tmp2; \
530 __asm__ ("| Inlined umul_ppmm\n"
543 "add%.l %#0x10000,%0\n"
551 "| End inlined umul_ppmm" \
552 : "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)), \
553 "=d" (__umul_tmp1), "=&d" (__umul_tmp2) \
554 : "%2" ((USItype)(a)), "d" ((USItype)(b))); \
556 #define UMUL_TIME 100
557 #define UDIV_TIME 400
558 #endif /* not mc68020 */
559 /* The '020, '030, '040 and '060 have bitfield insns. */
560 #if defined (__mc68020__) || defined (mc68020) \
561 || defined (__mc68030__) || defined (mc68030) \
562 || defined (__mc68040__) || defined (mc68040) \
563 || defined (__mc68060__) || defined (mc68060) \
564 || defined (__NeXT__)
565 #define count_leading_zeros(count, x) \
566 __asm__ ("bfffo %1{%b2:%b2},%0" \
567 : "=d" ((USItype) (count)) \
568 : "od" ((USItype) (x)), "n" (0))
569 #define COUNT_LEADING_ZEROS_0 32
573 #if defined (__m88000__) && W_TYPE_SIZE == 32
574 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
575 __asm__ ("addu.co %1,%r4,%r5\n\taddu.ci %0,%r2,%r3" \
576 : "=r" (sh), "=&r" (sl) \
577 : "%rJ" (ah), "rJ" (bh), "%rJ" (al), "rJ" (bl))
578 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
579 __asm__ ("subu.co %1,%r4,%r5\n\tsubu.ci %0,%r2,%r3" \
580 : "=r" (sh), "=&r" (sl) \
581 : "rJ" (ah), "rJ" (bh), "rJ" (al), "rJ" (bl))
582 #define count_leading_zeros(count, x) \
585 __asm__ ("ff1 %0,%1" : "=r" (__cbtmp) : "r" (x)); \
586 (count) = __cbtmp ^ 31; \
588 #define COUNT_LEADING_ZEROS_0 63 /* sic */
589 #if defined (__m88110__)
590 #define umul_ppmm(wh, wl, u, v) \
592 union {UDItype __ll; \
593 struct {USItype __h, __l;} __i; \
595 __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v)); \
596 (wh) = __x.__i.__h; \
597 (wl) = __x.__i.__l; \
599 #define udiv_qrnnd(q, r, n1, n0, d) \
600 ({union {UDItype __ll; \
601 struct {USItype __h, __l;} __i; \
603 __x.__i.__h = (n1); __x.__i.__l = (n0); \
604 __asm__ ("divu.d %0,%1,%2" \
605 : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d)); \
606 (r) = (n0) - __q.__l * (d); (q) = __q.__l; })
611 #define UDIV_TIME 150
612 #endif /* __m88110__ */
613 #endif /* __m88000__ */
615 #if defined (__mips) && W_TYPE_SIZE == 32
616 #if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
617 #define umul_ppmm(w1, w0, u, v) \
618 __asm__ ("multu %2,%3" : "=l" (w0), "=h" (w1) : "d" (u), "d" (v))
620 #define umul_ppmm(w1, w0, u, v) \
621 __asm__ ("multu %2,%3\n\tmflo %0\n\tmfhi %1" \
622 : "=d" (w0), "=d" (w1) : "d" (u), "d" (v))
625 #define UDIV_TIME 100
628 #if (defined (__mips) && __mips >= 3) && W_TYPE_SIZE == 64
629 #if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
630 #define umul_ppmm(w1, w0, u, v) \
631 __asm__ ("dmultu %2,%3" : "=l" (w0), "=h" (w1) : "d" (u), "d" (v))
633 #define umul_ppmm(w1, w0, u, v) \
634 __asm__ ("dmultu %2,%3\n\tmflo %0\n\tmfhi %1" \
635 : "=d" (w0), "=d" (w1) : "d" (u), "d" (v))
638 #define UDIV_TIME 140
641 #if defined (__ns32000__) && W_TYPE_SIZE == 32
642 #define umul_ppmm(w1, w0, u, v) \
643 ({union {UDItype __ll; \
644 struct {USItype __l, __h;} __i; \
646 __asm__ ("meid %2,%0" \
648 : "%0" ((USItype)(u)), "g" ((USItype)(v))); \
649 (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
650 #define __umulsidi3(u, v) \
652 __asm__ ("meid %2,%0" \
654 : "%0" ((USItype)(u)), "g" ((USItype)(v))); \
656 #define udiv_qrnnd(q, r, n1, n0, d) \
657 ({union {UDItype __ll; \
658 struct {USItype __l, __h;} __i; \
660 __x.__i.__h = (n1); __x.__i.__l = (n0); \
661 __asm__ ("deid %2,%0" \
663 : "0" (__x.__ll), "g" ((USItype)(d))); \
664 (r) = __x.__i.__l; (q) = __x.__i.__h; })
665 #define count_trailing_zeros(count,x) \
667 __asm__ ("ffsd %2,%0" \
668 : "=r" ((USItype) (count)) \
669 : "0" ((USItype) 0), "r" ((USItype) (x))); \
671 #endif /* __ns32000__ */
/* We should test _IBMR2 here when we add assembly support for the system
   vendor compilers.  */
675 #if (defined (_ARCH_PPC) || defined (_ARCH_PWR) || defined (__powerpc__)) && W_TYPE_SIZE == 32
676 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
678 if (__builtin_constant_p (bh) && (bh) == 0) \
679 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
680 : "=r" (sh), "=&r" (sl) : "%r" (ah), "%r" (al), "rI" (bl));\
681 else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
682 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
683 : "=r" (sh), "=&r" (sl) : "%r" (ah), "%r" (al), "rI" (bl));\
685 __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
686 : "=r" (sh), "=&r" (sl) \
687 : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
689 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
691 if (__builtin_constant_p (ah) && (ah) == 0) \
692 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
693 : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
694 else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
695 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
696 : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
697 else if (__builtin_constant_p (bh) && (bh) == 0) \
698 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
699 : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
700 else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
701 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
702 : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
704 __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
705 : "=r" (sh), "=&r" (sl) \
706 : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
708 #define count_leading_zeros(count, x) \
709 __asm__ ("{cntlz|cntlzw} %0,%1" : "=r" (count) : "r" (x))
710 #define COUNT_LEADING_ZEROS_0 32
711 #if defined (_ARCH_PPC) || defined (__powerpc__)
712 #define umul_ppmm(ph, pl, m0, m1) \
714 USItype __m0 = (m0), __m1 = (m1); \
715 __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
716 (pl) = __m0 * __m1; \
719 #define smul_ppmm(ph, pl, m0, m1) \
721 SItype __m0 = (m0), __m1 = (m1); \
722 __asm__ ("mulhw %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
723 (pl) = __m0 * __m1; \
726 #define UDIV_TIME 120
729 #define smul_ppmm(xh, xl, m0, m1) \
730 __asm__ ("mul %0,%2,%3" : "=r" (xh), "=q" (xl) : "r" (m0), "r" (m1))
732 #define sdiv_qrnnd(q, r, nh, nl, d) \
733 __asm__ ("div %0,%2,%4" : "=r" (q), "=q" (r) : "r" (nh), "1" (nl), "r" (d))
734 #define UDIV_TIME 100
736 #endif /* 32-bit POWER architecture variants. */
/* We should test _IBMR2 here when we add assembly support for the system
   vendor compilers.  */
740 #if (defined (_ARCH_PPC) || defined (__powerpc__)) && W_TYPE_SIZE == 64
741 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
743 if (__builtin_constant_p (bh) && (bh) == 0) \
744 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
745 : "=r" (sh), "=&r" (sl) : "%r" (ah), "%r" (al), "rI" (bl));\
746 else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
747 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
748 : "=r" (sh), "=&r" (sl) : "%r" (ah), "%r" (al), "rI" (bl));\
750 __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
751 : "=r" (sh), "=&r" (sl) \
752 : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
754 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
756 if (__builtin_constant_p (ah) && (ah) == 0) \
757 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
758 : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
759 else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0) \
760 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
761 : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
762 else if (__builtin_constant_p (bh) && (bh) == 0) \
763 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
764 : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
765 else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
766 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
767 : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
769 __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
770 : "=r" (sh), "=&r" (sl) \
771 : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
773 #define count_leading_zeros(count, x) \
774 __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x))
775 #define COUNT_LEADING_ZEROS_0 64
776 #define umul_ppmm(ph, pl, m0, m1) \
778 UDItype __m0 = (m0), __m1 = (m1); \
779 __asm__ ("mulhdu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
780 (pl) = __m0 * __m1; \
783 #define smul_ppmm(ph, pl, m0, m1) \
785 DItype __m0 = (m0), __m1 = (m1); \
786 __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
787 (pl) = __m0 * __m1; \
789 #define SMUL_TIME 14 /* ??? */
790 #define UDIV_TIME 120 /* ??? */
791 #endif /* 64-bit PowerPC. */
793 #if defined (__pyr__) && W_TYPE_SIZE == 32
794 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
795 __asm__ ("addw %5,%1\n\taddwc %3,%0" \
796 : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl)) \
797 : "%0" ((USItype)(ah)), "g" ((USItype)(bh)), \
798 "%1" ((USItype)(al)), "g" ((USItype)(bl)))
799 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
800 __asm__ ("subw %5,%1\n\tsubwb %3,%0" \
801 : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl)) \
802 : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
803 "1" ((USItype)(al)), "g" ((USItype)(bl)))
804 /* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */
805 #define umul_ppmm(w1, w0, u, v) \
806 ({union {UDItype __ll; \
807 struct {USItype __h, __l;} __i; \
809 __asm__ ("movw %1,%R0\n\tuemul %2,%0" \
811 : "g" ((USItype) (u)), "g" ((USItype)(v))); \
812 (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
815 #if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
816 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
817 __asm__ ("a %1,%5\n\tae %0,%3" \
818 : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl)) \
819 : "%0" ((USItype)(ah)), "r" ((USItype)(bh)), \
820 "%1" ((USItype)(al)), "r" ((USItype)(bl)))
821 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
822 __asm__ ("s %1,%5\n\tse %0,%3" \
823 : "=r" ((USItype)(sh)), "=&r" ((USItype)(sl)) \
824 : "0" ((USItype)(ah)), "r" ((USItype)(bh)), \
825 "1" ((USItype)(al)), "r" ((USItype)(bl)))
826 #define smul_ppmm(ph, pl, m0, m1) \
848 : "=r" ((USItype)(ph)), "=r" ((USItype)(pl)) \
849 : "%r" ((USItype)(m0)), "r" ((USItype)(m1)) \
852 #define UDIV_TIME 200
853 #define count_leading_zeros(count, x) \
855 if ((x) >= 0x10000) \
856 __asm__ ("clz %0,%1" \
857 : "=r" ((USItype)(count)) : "r" ((USItype)(x) >> 16)); \
860 __asm__ ("clz %0,%1" \
861 : "=r" ((USItype)(count)) : "r" ((USItype)(x))); \
867 #if defined (__sh2__) && W_TYPE_SIZE == 32
868 #define umul_ppmm(w1, w0, u, v) \
869 __asm__ ("dmulu.l %2,%3\n\tsts macl,%1\n\tsts mach,%0" \
870 : "=r" (w1), "=r" (w0) : "r" (u), "r" (v) : "macl", "mach")
874 #if defined (__sparc__) && W_TYPE_SIZE == 32
875 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
876 __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0" \
877 : "=r" (sh), "=&r" (sl) \
878 : "%rJ" (ah), "rI" (bh),"%rJ" (al), "rI" (bl) \
880 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
881 __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0" \
882 : "=r" (sh), "=&r" (sl) \
883 : "rJ" (ah), "rI" (bh), "rJ" (al), "rI" (bl) \
885 #if defined (__sparc_v9__) || defined (__sparcv9)
886 /* Perhaps we should use floating-point operations here? */
#if 0
/* Triggers a bug making mpz/tests/t-gcd.c fail.
   Perhaps we simply need to explicitly zero-extend the inputs?  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mulx %2,%3,%%g1; srl %%g1,0,%1; srlx %%g1,32,%0" :		\
	   "=r" (w1), "=r" (w0) : "r" (u), "r" (v) : "g1")
#else
/* Use v8 umul until above bug is fixed.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
#endif
898 /* Use a plain v8 divide for v9. */
899 #define udiv_qrnnd(q, r, n1, n0, d) \
902 __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \
903 : "=r" (__q) : "r" (n1), "r" (n0), "r" (d)); \
904 (r) = (n0) - __q * (d); \
908 #if defined (__sparc_v8__)
/* Don't match immediate range because: 1) it is not often useful,
910 2) the 'I' flag thinks of the range as a 13 bit signed interval,
911 while we want to match a 13 bit interval, sign extended to 32 bits,
912 but INTERPRETED AS UNSIGNED. */
913 #define umul_ppmm(w1, w0, u, v) \
914 __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
916 #ifndef SUPERSPARC /* SuperSPARC's udiv only handles 53 bit dividends */
917 #define udiv_qrnnd(q, r, n1, n0, d) \
920 __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \
921 : "=r" (__q) : "r" (n1), "r" (n0), "r" (d)); \
922 (r) = (n0) - __q * (d); \
927 #define UDIV_TIME 60 /* SuperSPARC timing */
928 #endif /* SUPERSPARC */
929 #else /* ! __sparc_v8__ */
930 #if defined (__sparclite__)
931 /* This has hardware multiply but not divide. It also has two additional
932 instructions scan (ffs from high bit) and divscc. */
933 #define umul_ppmm(w1, w0, u, v) \
934 __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
936 #define udiv_qrnnd(q, r, n1, n0, d) \
937 __asm__ ("! Inlined udiv_qrnnd\n"
938 "wr %%g0,%2,%%y ! Not a delayed write for sparclite\n"
940 "divscc %3,%4,%%g1\n"
941 "divscc %%g1,%4,%%g1\n"
942 "divscc %%g1,%4,%%g1\n"
943 "divscc %%g1,%4,%%g1\n"
944 "divscc %%g1,%4,%%g1\n"
945 "divscc %%g1,%4,%%g1\n"
946 "divscc %%g1,%4,%%g1\n"
947 "divscc %%g1,%4,%%g1\n"
948 "divscc %%g1,%4,%%g1\n"
949 "divscc %%g1,%4,%%g1\n"
950 "divscc %%g1,%4,%%g1\n"
951 "divscc %%g1,%4,%%g1\n"
952 "divscc %%g1,%4,%%g1\n"
953 "divscc %%g1,%4,%%g1\n"
954 "divscc %%g1,%4,%%g1\n"
955 "divscc %%g1,%4,%%g1\n"
956 "divscc %%g1,%4,%%g1\n"
957 "divscc %%g1,%4,%%g1\n"
958 "divscc %%g1,%4,%%g1\n"
959 "divscc %%g1,%4,%%g1\n"
960 "divscc %%g1,%4,%%g1\n"
961 "divscc %%g1,%4,%%g1\n"
962 "divscc %%g1,%4,%%g1\n"
963 "divscc %%g1,%4,%%g1\n"
964 "divscc %%g1,%4,%%g1\n"
965 "divscc %%g1,%4,%%g1\n"
966 "divscc %%g1,%4,%%g1\n"
967 "divscc %%g1,%4,%%g1\n"
968 "divscc %%g1,%4,%%g1\n"
969 "divscc %%g1,%4,%%g1\n"
970 "divscc %%g1,%4,%%g1\n"
971 "divscc %%g1,%4,%0\n"
975 "1: ! End of inline udiv_qrnnd" \
976 : "=r" (q), "=r" (r) : "r" (n1), "r" (n0), "rI" (d)
977 : "%g1" __AND_CLOBBER_CC)
979 #define count_leading_zeros(count, x) \
980 __asm__ ("scan %1,0,%0" : "=r" (x) : "r" (count))
981 /* Early sparclites return 63 for an argument of 0, but they warn that future
982 implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0
984 #endif /* __sparclite__ */
985 #endif /* __sparc_v8__ */
986 #endif /* __sparc_v9__ */
987 /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */
989 #define umul_ppmm(w1, w0, u, v) \
990 __asm__ ("! Inlined umul_ppmm\n"
991 "wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n"
992 "sra %3,31,%%g2 ! Don't move this insn\n"
993 "and %2,%%g2,%%g2 ! Don't move this insn\n"
994 "andcc %%g0,0,%%g1 ! Don't move this insn\n"
995 "mulscc %%g1,%3,%%g1\n"
996 "mulscc %%g1,%3,%%g1\n"
997 "mulscc %%g1,%3,%%g1\n"
998 "mulscc %%g1,%3,%%g1\n"
999 "mulscc %%g1,%3,%%g1\n"
1000 "mulscc %%g1,%3,%%g1\n"
1001 "mulscc %%g1,%3,%%g1\n"
1002 "mulscc %%g1,%3,%%g1\n"
1003 "mulscc %%g1,%3,%%g1\n"
1004 "mulscc %%g1,%3,%%g1\n"
1005 "mulscc %%g1,%3,%%g1\n"
1006 "mulscc %%g1,%3,%%g1\n"
1007 "mulscc %%g1,%3,%%g1\n"
1008 "mulscc %%g1,%3,%%g1\n"
1009 "mulscc %%g1,%3,%%g1\n"
1010 "mulscc %%g1,%3,%%g1\n"
1011 "mulscc %%g1,%3,%%g1\n"
1012 "mulscc %%g1,%3,%%g1\n"
1013 "mulscc %%g1,%3,%%g1\n"
1014 "mulscc %%g1,%3,%%g1\n"
1015 "mulscc %%g1,%3,%%g1\n"
1016 "mulscc %%g1,%3,%%g1\n"
1017 "mulscc %%g1,%3,%%g1\n"
1018 "mulscc %%g1,%3,%%g1\n"
1019 "mulscc %%g1,%3,%%g1\n"
1020 "mulscc %%g1,%3,%%g1\n"
1021 "mulscc %%g1,%3,%%g1\n"
1022 "mulscc %%g1,%3,%%g1\n"
1023 "mulscc %%g1,%3,%%g1\n"
1024 "mulscc %%g1,%3,%%g1\n"
1025 "mulscc %%g1,%3,%%g1\n"
1026 "mulscc %%g1,%3,%%g1\n"
1027 "mulscc %%g1,0,%%g1\n"
1028 "add %%g1,%%g2,%0\n"
1030 : "=r" (w1), "=r" (w0) : "%rI" (u), "r" (v) \
1031 : "%g1", "%g2" __AND_CLOBBER_CC)
1032 #define UMUL_TIME 39 /* 39 instructions */
1035 #ifndef LONGLONG_STANDALONE
1036 #define udiv_qrnnd(q, r, n1, n0, d) \
1038 (q) = __MPN(udiv_qrnnd) (&__r, (n1), (n0), (d)); \
1041 extern USItype __MPN(udiv_qrnnd) _PROTO ((USItype *, USItype, USItype, USItype));
1043 #define UDIV_TIME 140
1045 #endif /* LONGLONG_STANDALONE */
1046 #endif /* udiv_qrnnd */
1047 #endif /* __sparc__ */
1049 #if defined (__vax__) && W_TYPE_SIZE == 32
1050 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1051 __asm__ ("addl2 %5,%1\n\tadwc %3,%0" \
1052 : "=g" ((USItype)(sh)), "=&g" ((USItype)(sl)) \
1053 : "%0" ((USItype)(ah)), "g" ((USItype)(bh)), \
1054 "%1" ((USItype)(al)), "g" ((USItype)(bl)))
1055 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1056 __asm__ ("subl2 %5,%1\n\tsbwc %3,%0" \
1057 : "=g" ((USItype)(sh)), "=&g" ((USItype)(sl)) \
1058 : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
1059 "1" ((USItype)(al)), "g" ((USItype)(bl)))
1060 #define smul_ppmm(xh, xl, m0, m1) \
1062 union {UDItype __ll; \
1063 struct {USItype __l, __h;} __i; \
1065 USItype __m0 = (m0), __m1 = (m1); \
1066 __asm__ ("emul %1,%2,$0,%0" \
1067 : "=g" (__x.__ll) : "g" (__m0), "g" (__m1)); \
1068 (xh) = __x.__i.__h; (xl) = __x.__i.__l; \
1070 #define sdiv_qrnnd(q, r, n1, n0, d) \
1072 union {DItype __ll; \
1073 struct {SItype __l, __h;} __i; \
1075 __x.__i.__h = n1; __x.__i.__l = n0; \
1076 __asm__ ("ediv %3,%2,%0,%1" \
1077 : "=g" (q), "=g" (r) : "g" (__x.__ll), "g" (d)); \
1079 #endif /* __vax__ */
1081 #if defined (__z8000__) && W_TYPE_SIZE == 16
1082 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1083 __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \
1084 : "=r" ((unsigned int)(sh)), "=&r" ((unsigned int)(sl)) \
1085 : "%0" ((unsigned int)(ah)), "r" ((unsigned int)(bh)), \
1086 "%1" ((unsigned int)(al)), "rQR" ((unsigned int)(bl)))
1087 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1088 __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \
1089 : "=r" ((unsigned int)(sh)), "=&r" ((unsigned int)(sl)) \
1090 : "0" ((unsigned int)(ah)), "r" ((unsigned int)(bh)), \
1091 "1" ((unsigned int)(al)), "rQR" ((unsigned int)(bl)))
1092 #define umul_ppmm(xh, xl, m0, m1) \
1094 union {long int __ll; \
1095 struct {unsigned int __h, __l;} __i; \
1097 unsigned int __m0 = (m0), __m1 = (m1); \
1098 __asm__ ("mult %S0,%H3" \
1099 : "=r" (__x.__i.__h), "=r" (__x.__i.__l) \
1100 : "%1" (m0), "rQR" (m1)); \
1101 (xh) = __x.__i.__h; (xl) = __x.__i.__l; \
1102 (xh) += ((((signed int) __m0 >> 15) & __m1) \
1103 + (((signed int) __m1 >> 15) & __m0)); \
1105 #endif /* __z8000__ */
1107 #endif /* __GNUC__ */
1110 #if !defined (umul_ppmm) && defined (__umulsidi3)
1111 #define umul_ppmm(ph, pl, m0, m1) \
1113 UDWtype __ll = __umulsidi3 (m0, m1); \
1114 ph = (UWtype) (__ll >> W_TYPE_SIZE); \
1115 pl = (UWtype) __ll; \
1119 #if !defined (__umulsidi3)
1120 #define __umulsidi3(u, v) \
1121 ({UWtype __hi, __lo; \
1122 umul_ppmm (__hi, __lo, u, v); \
1123 ((UDWtype) __hi << W_TYPE_SIZE) | __lo; })
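/* For instance, with W_TYPE_SIZE == 32, __umulsidi3 (0xFFFFFFFF, 2) yields
   the UDWtype value 0x1FFFFFFFE.  */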
1127 /* Note the prototypes are under !define(umul_ppmm) etc too, since the HPPA
1128 versions above are different and we don't want to conflict. */
1130 #if ! defined (umul_ppmm) && HAVE_NATIVE_mpn_umul_ppmm
1131 #define mpn_umul_ppmm __MPN(umul_ppmm)
1132 extern mp_limb_t mpn_umul_ppmm _PROTO ((mp_limb_t *, mp_limb_t, mp_limb_t));
1133 #define umul_ppmm(wh, wl, u, v) \
1135 mp_limb_t __umul_ppmm__p0; \
1136 (wh) = __MPN(umul_ppmm) (&__umul_ppmm__p0, \
1137 (mp_limb_t) (u), (mp_limb_t) (v)); \
1138 (wl) = __umul_ppmm__p0; \
1142 #if ! defined (udiv_qrnnd) && HAVE_NATIVE_mpn_udiv_qrnnd
1143 #define mpn_udiv_qrnnd __MPN(udiv_qrnnd)
1144 extern mp_limb_t mpn_udiv_qrnnd _PROTO ((mp_limb_t *,
1145 mp_limb_t, mp_limb_t, mp_limb_t));
1146 #define udiv_qrnnd(q, r, n1, n0, d) \
1148 mp_limb_t __udiv_qrnnd__r; \
1149 (q) = mpn_udiv_qrnnd (&__udiv_qrnnd__r, \
1150 (mp_limb_t) (n1), (mp_limb_t) (n0), (mp_limb_t) d); \
1151 (r) = __udiv_qrnnd__r; \
1156 /* If this machine has no inline assembler, use C macros. */
1158 #if !defined (add_ssaaaa)
1159 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1162 __x = (al) + (bl); \
1163 (sh) = (ah) + (bh) + (__x < (al)); \
1168 #if !defined (sub_ddmmss)
1169 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1172 __x = (al) - (bl); \
1173 (sh) = (ah) - (bh) - (__x > (al)); \
/* If we lack umul_ppmm but have smul_ppmm, define umul_ppmm in terms of
   smul_ppmm.  */
1180 #if !defined (umul_ppmm) && defined (smul_ppmm)
1181 #define umul_ppmm(w1, w0, u, v) \
1184 UWtype __xm0 = (u), __xm1 = (v); \
1185 smul_ppmm (__w1, w0, __xm0, __xm1); \
1186 (w1) = __w1 + (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1) \
1187 + (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0); \
1191 /* If we still don't have umul_ppmm, define it using plain C. */
1192 #if !defined (umul_ppmm)
1193 #define umul_ppmm(w1, w0, u, v) \
1195 UWtype __x0, __x1, __x2, __x3; \
1196 UHWtype __ul, __vl, __uh, __vh; \
1197 UWtype __u = (u), __v = (v); \
1199 __ul = __ll_lowpart (__u); \
1200 __uh = __ll_highpart (__u); \
1201 __vl = __ll_lowpart (__v); \
1202 __vh = __ll_highpart (__v); \
1204 __x0 = (UWtype) __ul * __vl; \
1205 __x1 = (UWtype) __ul * __vh; \
1206 __x2 = (UWtype) __uh * __vl; \
1207 __x3 = (UWtype) __uh * __vh; \
1209 __x1 += __ll_highpart (__x0);/* this can't give carry */ \
1210 __x1 += __x2; /* but this indeed can */ \
1211 if (__x1 < __x2) /* did we get it? */ \
1212 __x3 += __ll_B; /* yes, add it in the proper pos. */ \
1214 (w1) = __x3 + __ll_highpart (__x1); \
1215 (w0) = (__x1 << W_TYPE_SIZE/2) + __ll_lowpart (__x0); \
1219 /* If we don't have smul_ppmm, define it using umul_ppmm (which surely will
   exist in one form or another).  */
1221 #if !defined (smul_ppmm)
1222 #define smul_ppmm(w1, w0, u, v) \
1225 UWtype __xm0 = (u), __xm1 = (v); \
1226 umul_ppmm (__w1, w0, __xm0, __xm1); \
1227 (w1) = __w1 - (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1) \
1228 - (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0); \
1232 /* Define this unconditionally, so it can be used for debugging. */
1233 #define __udiv_qrnnd_c(q, r, n1, n0, d) \
1235 UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \
1236 __d1 = __ll_highpart (d); \
1237 __d0 = __ll_lowpart (d); \
1239 __q1 = (n1) / __d1; \
1240 __r1 = (n1) - __q1 * __d1; \
1241 __m = (UWtype) __q1 * __d0; \
1242 __r1 = __r1 * __ll_B | __ll_highpart (n0); \
1245 __q1--, __r1 += (d); \
1246 if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
1248 __q1--, __r1 += (d); \
1252 __q0 = __r1 / __d1; \
1253 __r0 = __r1 - __q0 * __d1; \
1254 __m = (UWtype) __q0 * __d0; \
1255 __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
1258 __q0--, __r0 += (d); \
1261 __q0--, __r0 += (d); \
1265 (q) = (UWtype) __q1 * __ll_B | __q0; \
1269 /* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
1270 __udiv_w_sdiv (defined in libgcc or elsewhere). */
1271 #if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
1272 #define udiv_qrnnd(q, r, nh, nl, d) \
1275 (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \
1280 /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
1281 #if !defined (udiv_qrnnd)
1282 #define UDIV_NEEDS_NORMALIZATION 1
1283 #define udiv_qrnnd __udiv_qrnnd_c
1286 #if !defined (count_leading_zeros)
1291 unsigned char __clz_tab[];
1292 #define count_leading_zeros(count, x) \
1294 UWtype __xr = (x); \
1297 if (W_TYPE_SIZE <= 32) \
1299 __a = __xr < ((UWtype) 1 << 2*__BITS4) \
1300 ? (__xr < ((UWtype) 1 << __BITS4) ? 0 : __BITS4) \
1301 : (__xr < ((UWtype) 1 << 3*__BITS4) ? 2*__BITS4 : 3*__BITS4);\
1305 for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
1306 if (((__xr >> __a) & 0xff) != 0) \
1310 (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
1312 /* This version gives a well-defined value for zero. */
1313 #define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
1314 #define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
1317 #if !defined (count_trailing_zeros)
1318 /* Define count_trailing_zeros using count_leading_zeros. The latter might be
1319 defined in asm, but if it is not, the C version above is good enough. */
1320 #define count_trailing_zeros(count, x) \
1322 UWtype __ctz_x = (x); \
1324 count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x); \
1325 (count) = W_TYPE_SIZE - 1 - __ctz_c; \
1329 #ifndef UDIV_NEEDS_NORMALIZATION
1330 #define UDIV_NEEDS_NORMALIZATION 0
1333 /* Give defaults for UMUL_TIME and UDIV_TIME. */
1339 #define UDIV_TIME UMUL_TIME
1342 /* count_trailing_zeros is often on the slow side, so make that the default */
1343 #ifndef COUNT_TRAILING_ZEROS_TIME
1344 #define COUNT_TRAILING_ZEROS_TIME 15 /* cycles */