--- /dev/null
+/* mpn_get_str -- Convert an MSIZE-long limb vector pointed to by MPTR
+   to a printable string in STR in base BASE.
+
+Copyright (C) 1991, 1992, 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+#include "longlong.h"
+
+/* Convert the limb vector pointed to by MPTR and MSIZE long to a
+ char array, using base BASE for the result array. Store the
+ result in the character array STR. STR must point to an array with
+   space for the largest possible number representable by an MSIZE-long
+   limb vector, plus 1 extra character.
+
+   The result is NOT in ASCII; to convert it to printable format, add
+   '0' or 'A' depending on the base and digit range.
+
+ Return the number of digits in the result string.
+ This may include some leading zeros.
+
+ The limb vector pointed to by MPTR is clobbered. */
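+/* For illustration (an assumption about the caller, not part of this
+   file's interface): printable output could be produced afterwards with
+   something like
+
+	for (i = 0; i < len; i++)
+	  str[i] = str[i] < 10 ? '0' + str[i] : 'A' + str[i] - 10;
+
+   mapping digit values 10 and upwards to upper-case letters.  */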
+
+size_t
+#if __STDC__
+mpn_get_str (unsigned char *str, int base, mp_ptr mptr, mp_size_t msize)
+#else
+mpn_get_str (str, base, mptr, msize)
+  unsigned char *str;
+  int base;
+  mp_ptr mptr;
+  mp_size_t msize;
+#endif
+{
+ mp_limb_t big_base;
+#if UDIV_NEEDS_NORMALIZATION || UDIV_TIME > 2 * UMUL_TIME
+ int normalization_steps;
+#endif
+#if UDIV_TIME > 2 * UMUL_TIME
+ mp_limb_t big_base_inverted;
+#endif
+ unsigned int dig_per_u;
+ mp_size_t out_len;
+ register unsigned char *s;
+
+ big_base = __mp_bases[base].big_base;
+
+ s = str;
+
+ /* Special case zero, as the code below doesn't handle it. */
+ if (msize == 0)
+ {
+ s[0] = 0;
+ return 1;
+ }
+
+ if ((base & (base - 1)) == 0)
+ {
+ /* The base is a power of 2. Make conversion from most
+ significant side. */
+ mp_limb_t n1, n0;
+ register int bits_per_digit = big_base;
+ register int x;
+ register int bit_pos;
+ register int i;
+
+ n1 = mptr[msize - 1];
+ count_leading_zeros (x, n1);
+
+      /* BIT_POS should be R when the input ends in the least significant
+	 digit, and R + bits_per_digit * n when the input ends in the n:th
+	 least significant digit.  */
+
+ {
+ int bits;
+
+ bits = BITS_PER_MP_LIMB * msize - x;
+ x = bits % bits_per_digit;
+ if (x != 0)
+ bits += bits_per_digit - x;
+ bit_pos = bits - (msize - 1) * BITS_PER_MP_LIMB;
+ }
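+
+      /* For illustration (hypothetical values): with BITS_PER_MP_LIMB = 32,
+	 msize = 2, base = 16 and 5 leading zero bits in the top limb,
+	 bits = 59, rounding up to a multiple of 4 gives 60, and
+	 bit_pos = 60 - 32 = 28; the first digit extracted below is then
+	 (n1 >> 24) & 0xf, holding the top 3 significant bits.  */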
+
+ /* Fast loop for bit output. */
+ i = msize - 1;
+ for (;;)
+ {
+ bit_pos -= bits_per_digit;
+ while (bit_pos >= 0)
+ {
+ *s++ = (n1 >> bit_pos) & ((1 << bits_per_digit) - 1);
+ bit_pos -= bits_per_digit;
+ }
+ i--;
+ if (i < 0)
+ break;
+ n0 = (n1 << -bit_pos) & ((1 << bits_per_digit) - 1);
+ n1 = mptr[i];
+ bit_pos += BITS_PER_MP_LIMB;
+ *s++ = n0 | (n1 >> bit_pos);
+ }
+
+ *s = 0;
+
+ return s - str;
+ }
+ else
+ {
+ /* General case. The base is not a power of 2. Make conversion
+ from least significant end. */
+
+ /* If udiv_qrnnd only handles divisors with the most significant bit
+ set, prepare BIG_BASE for being a divisor by shifting it to the
+ left exactly enough to set the most significant bit. */
+#if UDIV_NEEDS_NORMALIZATION || UDIV_TIME > 2 * UMUL_TIME
+ count_leading_zeros (normalization_steps, big_base);
+ big_base <<= normalization_steps;
+#if UDIV_TIME > 2 * UMUL_TIME
+ /* Get the fixed-point approximation to 1/(BIG_BASE << NORMALIZATION_STEPS). */
+ big_base_inverted = __mp_bases[base].big_base_inverted;
+#endif
+#endif
+
+ dig_per_u = __mp_bases[base].chars_per_limb;
+ out_len = ((size_t) msize * BITS_PER_MP_LIMB
+ * __mp_bases[base].chars_per_bit_exactly) + 1;
+ s += out_len;
+
+ while (msize != 0)
+ {
+ int i;
+ mp_limb_t n0, n1;
+
+#if UDIV_NEEDS_NORMALIZATION || UDIV_TIME > 2 * UMUL_TIME
+	  /* If we shifted BIG_BASE above, shift the dividend too, to get
+	     the right quotient.  We need to do this on every iteration,
+	     since the quotient from one iteration becomes the dividend of
+	     the next, and that new dividend needs to be up-shifted too.  */
+ if (normalization_steps != 0)
+ {
+ n0 = mpn_lshift (mptr, mptr, msize, normalization_steps);
+
+ /* If the shifting gave a carry out limb, store it and
+ increase the length. */
+ if (n0 != 0)
+ {
+ mptr[msize] = n0;
+ msize++;
+ }
+ }
+#endif
+
+	  /* Divide the number at MPTR by BIG_BASE to get a quotient and a
+	     remainder.  The remainder is our new digit in base BIG_BASE.  */
+ i = msize - 1;
+ n1 = mptr[i];
+
+ if (n1 >= big_base)
+ n1 = 0;
+ else
+ {
+ msize--;
+ i--;
+ }
+
+ for (; i >= 0; i--)
+ {
+ n0 = mptr[i];
+#if UDIV_TIME > 2 * UMUL_TIME
+ udiv_qrnnd_preinv (mptr[i], n1, n1, n0, big_base, big_base_inverted);
+#else
+ udiv_qrnnd (mptr[i], n1, n1, n0, big_base);
+#endif
+ }
+
+#if UDIV_NEEDS_NORMALIZATION || UDIV_TIME > 2 * UMUL_TIME
+      /* If we shifted the dividend above (under the same normalization
+	 tests), the remainder is up-shifted by the same amount here.
+	 Compensate.  */
+ n1 >>= normalization_steps;
+#endif
+
+ /* Convert N1 from BIG_BASE to a string of digits in BASE
+ using single precision operations. */
+ for (i = dig_per_u - 1; i >= 0; i--)
+ {
+ *--s = n1 % base;
+ n1 /= base;
+ if (n1 == 0 && msize == 0)
+ break;
+ }
+ }
+
+ while (s != str)
+ *--s = 0;
+ return out_len;
+ }
+}
--- /dev/null
+/* gmp-mparam.h -- Compiler/machine parameter header file.
+
+Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#define BITS_PER_MP_LIMB 32
+#define BYTES_PER_MP_LIMB 4
+#define BITS_PER_LONGINT 32
+#define BITS_PER_INT 32
+#define BITS_PER_SHORTINT 16
+#define BITS_PER_CHAR 8
--- /dev/null
+/* mpn_hamdist -- Compute the Hamming distance between two limb vectors.
+
+Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+
+#if defined __GNUC__
+#if defined __sparc_v9__ && BITS_PER_MP_LIMB == 64
+#define popc_limb(a) \
+ ({ \
+ DItype __res; \
+ asm ("popc %1,%0" : "=r" (__res) : "rI" (a)); \
+ __res; \
+ })
+#endif
+#endif
+
+#ifndef popc_limb
+
+/* Cool population count of a mp_limb_t.
+ You have to figure out how this works, I won't tell you! */
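+/* (A hint the original comment withholds: each step sums the bit counts
+   of adjacent fields in parallel.  For an 8-bit analogue, x = 0xda =
+   11011010 becomes the pair counts 10 01 01 01, then the nibble counts
+   0011 0010, and finally 3 + 2 = 5, the number of set bits in 0xda.)  */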
+
+static inline unsigned int
+popc_limb (x)
+ mp_limb_t x;
+{
+#if BITS_PER_MP_LIMB == 64
+  /* We have to go to some trouble to define these constants.
+     (For mp_limb_t being `long long'.)  */
+ mp_limb_t cnst;
+ cnst = 0x55555555L | ((mp_limb_t) 0x55555555L << BITS_PER_MP_LIMB/2);
+ x = ((x & ~cnst) >> 1) + (x & cnst);
+ cnst = 0x33333333L | ((mp_limb_t) 0x33333333L << BITS_PER_MP_LIMB/2);
+ x = ((x & ~cnst) >> 2) + (x & cnst);
+ cnst = 0x0f0f0f0fL | ((mp_limb_t) 0x0f0f0f0fL << BITS_PER_MP_LIMB/2);
+ x = ((x >> 4) + x) & cnst;
+ x = ((x >> 8) + x);
+ x = ((x >> 16) + x);
+ x = ((x >> 32) + x) & 0xff;
+#endif
+#if BITS_PER_MP_LIMB == 32
+ x = ((x >> 1) & 0x55555555L) + (x & 0x55555555L);
+ x = ((x >> 2) & 0x33333333L) + (x & 0x33333333L);
+ x = ((x >> 4) + x) & 0x0f0f0f0fL;
+ x = ((x >> 8) + x);
+ x = ((x >> 16) + x) & 0xff;
+#endif
+ return x;
+}
+#endif
+
+unsigned long int
+#if __STDC__
+mpn_hamdist (mp_srcptr up, mp_srcptr vp, mp_size_t size)
+#else
+mpn_hamdist (up, vp, size)
+ register mp_srcptr up;
+ register mp_srcptr vp;
+ register mp_size_t size;
+#endif
+{
+ unsigned long int hamdist;
+ mp_size_t i;
+
+ hamdist = 0;
+ for (i = 0; i < size; i++)
+ hamdist += popc_limb (up[i] ^ vp[i]);
+
+ return hamdist;
+}
--- /dev/null
+#define _FORCE_INLINES
+#define _EXTERN_INLINE /* empty */
+#include "gmp.h"
--- /dev/null
+/* mpn_lshift -- Shift left low level.
+
+Copyright (C) 1991, 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+
+/* Shift U (pointed to by UP and USIZE limbs long) CNT bits to the left
+   and store the USIZE least significant limbs of the result at WP.
+   Return the bits shifted out from the most significant limb.
+
+ Argument constraints:
+ 1. 0 < CNT < BITS_PER_MP_LIMB
+ 2. If the result is to be written over the input, WP must be >= UP.
+*/
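+
+/* For illustration, with hypothetical 8-bit limbs: shifting the 2-limb
+   operand {0x34, 0x12} (least significant limb first, value 0x1234)
+   left by CNT = 4 stores {0x40, 0x23} (value 0x2340) at WP and returns
+   0x01, the bits shifted out at the top.  */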
+
+mp_limb_t
+#if __STDC__
+mpn_lshift (register mp_ptr wp,
+ register mp_srcptr up, mp_size_t usize,
+ register unsigned int cnt)
+#else
+mpn_lshift (wp, up, usize, cnt)
+ register mp_ptr wp;
+ register mp_srcptr up;
+ mp_size_t usize;
+ register unsigned int cnt;
+#endif
+{
+ register mp_limb_t high_limb, low_limb;
+ register unsigned sh_1, sh_2;
+ register mp_size_t i;
+ mp_limb_t retval;
+
+#ifdef DEBUG
+ if (usize == 0 || cnt == 0)
+ abort ();
+#endif
+
+ sh_1 = cnt;
+#if 0
+ if (sh_1 == 0)
+ {
+ if (wp != up)
+ {
+ /* Copy from high end to low end, to allow specified input/output
+ overlapping. */
+ for (i = usize - 1; i >= 0; i--)
+ wp[i] = up[i];
+ }
+ return 0;
+ }
+#endif
+
+ wp += 1;
+ sh_2 = BITS_PER_MP_LIMB - sh_1;
+ i = usize - 1;
+ low_limb = up[i];
+ retval = low_limb >> sh_2;
+ high_limb = low_limb;
+ while (--i >= 0)
+ {
+ low_limb = up[i];
+ wp[i] = (high_limb << sh_1) | (low_limb >> sh_2);
+ high_limb = low_limb;
+ }
+ wp[i] = high_limb << sh_1;
+
+ return retval;
+}
--- /dev/null
+/* mpn_mod_1(dividend_ptr, dividend_size, divisor_limb) --
+ Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
+ Return the single-limb remainder.
+ There are no constraints on the value of the divisor.
+
+Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+#include "longlong.h"
+
+#ifndef UMUL_TIME
+#define UMUL_TIME 1
+#endif
+
+#ifndef UDIV_TIME
+#define UDIV_TIME UMUL_TIME
+#endif
+
+/* FIXME: We should be using invert_limb (or invert_normalized_limb)
+ here (not udiv_qrnnd). */
+
+mp_limb_t
+#if __STDC__
+mpn_mod_1 (mp_srcptr dividend_ptr, mp_size_t dividend_size,
+ mp_limb_t divisor_limb)
+#else
+mpn_mod_1 (dividend_ptr, dividend_size, divisor_limb)
+ mp_srcptr dividend_ptr;
+ mp_size_t dividend_size;
+ mp_limb_t divisor_limb;
+#endif
+{
+ mp_size_t i;
+ mp_limb_t n1, n0, r;
+ int dummy;
+
+ /* Botch: Should this be handled at all? Rely on callers? */
+ if (dividend_size == 0)
+ return 0;
+
+ /* If multiplication is much faster than division, and the
+ dividend is large, pre-invert the divisor, and use
+ only multiplications in the inner loop. */
+
+ /* This test should be read:
+ Does it ever help to use udiv_qrnnd_preinv?
+ && Does what we save compensate for the inversion overhead? */
+ if (UDIV_TIME > (2 * UMUL_TIME + 6)
+ && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME)
+ {
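+      /* For illustration with hypothetical timings: if UDIV_TIME = 20 and
+	 UMUL_TIME = 3, then 2 * UMUL_TIME + 6 = 12, and
+	 (20 - 12) * dividend_size > 20 first holds at dividend_size = 3,
+	 so pre-inversion would be used from three limbs upward.  */
+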
+ int normalization_steps;
+
+ count_leading_zeros (normalization_steps, divisor_limb);
+ if (normalization_steps != 0)
+ {
+ mp_limb_t divisor_limb_inverted;
+
+ divisor_limb <<= normalization_steps;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ most significant bit (with weight 2**N) implicit. */
+
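+	  /* For illustration with a hypothetical N = 8: for DIVISOR_LIMB =
+	     0xb0 (already normalized), (2**16 - 2**8 * 0xb0) / 0xb0 =
+	     20480 / 176 = 116 = 0x74, while the full quotient, 2**16 / 0xb0
+	     = 372 = 0x174, carries the implicit leading 0x100 bit.  */
+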
+ /* Special case for DIVISOR_LIMB == 100...000. */
+ if (divisor_limb << 1 == 0)
+ divisor_limb_inverted = ~(mp_limb_t) 0;
+ else
+ udiv_qrnnd (divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MP_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ if (r == 0
+ && divisor_limb > ((n1 << normalization_steps)
+ | (dividend_ptr[dividend_size - 2] >> ...)))
+ ...one division less... */
+
+ for (i = dividend_size - 2; i >= 0; i--)
+ {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd_preinv (dummy, r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MP_LIMB - normalization_steps))),
+ divisor_limb, divisor_limb_inverted);
+ n1 = n0;
+ }
+ udiv_qrnnd_preinv (dummy, r, r,
+ n1 << normalization_steps,
+ divisor_limb, divisor_limb_inverted);
+ return r >> normalization_steps;
+ }
+ else
+ {
+ mp_limb_t divisor_limb_inverted;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ most significant bit (with weight 2**N) implicit. */
+
+ /* Special case for DIVISOR_LIMB == 100...000. */
+ if (divisor_limb << 1 == 0)
+ divisor_limb_inverted = ~(mp_limb_t) 0;
+ else
+ udiv_qrnnd (divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ i--;
+
+ for (; i >= 0; i--)
+ {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd_preinv (dummy, r, r,
+ n0, divisor_limb, divisor_limb_inverted);
+ }
+ return r;
+ }
+ }
+ else
+ {
+ if (UDIV_NEEDS_NORMALIZATION)
+ {
+ int normalization_steps;
+
+ count_leading_zeros (normalization_steps, divisor_limb);
+ if (normalization_steps != 0)
+ {
+ divisor_limb <<= normalization_steps;
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MP_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ if (r == 0
+ && divisor_limb > ((n1 << normalization_steps)
+ | (dividend_ptr[dividend_size - 2] >> ...)))
+ ...one division less... */
+
+ for (i = dividend_size - 2; i >= 0; i--)
+ {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd (dummy, r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MP_LIMB - normalization_steps))),
+ divisor_limb);
+ n1 = n0;
+ }
+ udiv_qrnnd (dummy, r, r,
+ n1 << normalization_steps,
+ divisor_limb);
+ return r >> normalization_steps;
+ }
+ }
+ /* No normalization needed, either because udiv_qrnnd doesn't require
+ it, or because DIVISOR_LIMB is already normalized. */
+
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ i--;
+
+ for (; i >= 0; i--)
+ {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd (dummy, r, r, n0, divisor_limb);
+ }
+ return r;
+ }
+}
--- /dev/null
+/* mpn_mul -- Multiply two natural numbers.
+
+Copyright (C) 1991, 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+
+/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
+ and v (pointed to by VP, with VSIZE limbs), and store the result at
+   PRODP.  USIZE + VSIZE limbs are always stored, but the most
+   significant limb may be zero even when the input operands are
+   normalized.  Return the most significant limb of the result.
+
+   NOTE: The space pointed to by PRODP is overwritten before U and V
+   are fully read, so overlapping it with either operand is an error.
+
+ Argument constraints:
+ 1. USIZE >= VSIZE.
+ 2. PRODP != UP and PRODP != VP, i.e. the destination
+ must be distinct from the multiplier and the multiplicand. */
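+
+/* For illustration: the product of a 2-limb U and a 1-limb V would be
+   computed as mpn_mul (prodp, up, 2, vp, 1), with PRODP pointing at
+   space for 2 + 1 = 3 limbs; the longer operand must come first.  */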
+
+/* If KARATSUBA_THRESHOLD is not already defined, define it to a
+ value which is good on most machines. */
+#ifndef KARATSUBA_THRESHOLD
+#define KARATSUBA_THRESHOLD 32
+#endif
+
+mp_limb_t
+#if __STDC__
+mpn_mul (mp_ptr prodp,
+ mp_srcptr up, mp_size_t usize,
+ mp_srcptr vp, mp_size_t vsize)
+#else
+mpn_mul (prodp, up, usize, vp, vsize)
+ mp_ptr prodp;
+ mp_srcptr up;
+ mp_size_t usize;
+ mp_srcptr vp;
+ mp_size_t vsize;
+#endif
+{
+ mp_ptr prod_endp = prodp + usize + vsize - 1;
+ mp_limb_t cy;
+ mp_ptr tspace;
+ TMP_DECL (marker);
+
+ if (vsize < KARATSUBA_THRESHOLD)
+ {
+ /* Handle simple cases with traditional multiplication.
+
+ This is the most critical code of the entire function. All
+ multiplies rely on this, both small and huge. Small ones arrive
+ here immediately. Huge ones arrive here as this is the base case
+ for Karatsuba's recursive algorithm below. */
+ mp_size_t i;
+ mp_limb_t cy_limb;
+ mp_limb_t v_limb;
+
+ if (vsize == 0)
+ return 0;
+
+ /* Multiply by the first limb in V separately, as the result can be
+ stored (not added) to PROD. We also avoid a loop for zeroing. */
+ v_limb = vp[0];
+ if (v_limb <= 1)
+ {
+ if (v_limb == 1)
+ MPN_COPY (prodp, up, usize);
+ else
+ MPN_ZERO (prodp, usize);
+ cy_limb = 0;
+ }
+ else
+ cy_limb = mpn_mul_1 (prodp, up, usize, v_limb);
+
+ prodp[usize] = cy_limb;
+ prodp++;
+
+ /* For each iteration in the outer loop, multiply one limb from
+ U with one limb from V, and add it to PROD. */
+ for (i = 1; i < vsize; i++)
+ {
+ v_limb = vp[i];
+ if (v_limb <= 1)
+ {
+ cy_limb = 0;
+ if (v_limb == 1)
+ cy_limb = mpn_add_n (prodp, prodp, up, usize);
+ }
+ else
+ cy_limb = mpn_addmul_1 (prodp, up, usize, v_limb);
+
+ prodp[usize] = cy_limb;
+ prodp++;
+ }
+ return cy_limb;
+ }
+
+ TMP_MARK (marker);
+
+ tspace = (mp_ptr) TMP_ALLOC (2 * vsize * BYTES_PER_MP_LIMB);
+ MPN_MUL_N_RECURSE (prodp, up, vp, vsize, tspace);
+
+ prodp += vsize;
+ up += vsize;
+ usize -= vsize;
+ if (usize >= vsize)
+ {
+ mp_ptr tp = (mp_ptr) TMP_ALLOC (2 * vsize * BYTES_PER_MP_LIMB);
+ do
+ {
+ MPN_MUL_N_RECURSE (tp, up, vp, vsize, tspace);
+ cy = mpn_add_n (prodp, prodp, tp, vsize);
+ mpn_add_1 (prodp + vsize, tp + vsize, vsize, cy);
+ prodp += vsize;
+ up += vsize;
+ usize -= vsize;
+ }
+ while (usize >= vsize);
+ }
+
+ /* True: usize < vsize. */
+
+ /* Make life simple: Recurse. */
+
+ if (usize != 0)
+ {
+ mpn_mul (tspace, vp, vsize, up, usize);
+ cy = mpn_add_n (prodp, prodp, tspace, vsize);
+ mpn_add_1 (prodp + vsize, tspace + vsize, usize, cy);
+ }
+
+ TMP_FREE (marker);
+ return *prod_endp;
+}
--- /dev/null
+/* mpn_mul_1 -- Multiply a limb vector with a single limb and
+ store the product in a second limb vector.
+
+Copyright (C) 1991, 1992, 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+#include "longlong.h"
+
+mp_limb_t
+#if __STDC__
+mpn_mul_1 (register mp_ptr res_ptr, register mp_srcptr s1_ptr,
+	   mp_size_t s1_size, register mp_limb_t s2_limb)
+#else
+mpn_mul_1 (res_ptr, s1_ptr, s1_size, s2_limb)
+  register mp_ptr res_ptr;
+  register mp_srcptr s1_ptr;
+  mp_size_t s1_size;
+  register mp_limb_t s2_limb;
+#endif
+{
+ register mp_limb_t cy_limb;
+ register mp_size_t j;
+ register mp_limb_t prod_high, prod_low;
+
+  /* The loop counter and index J goes from -S1_SIZE up to -1.  This
+     makes the loop exit test cheap.  */
+ j = -s1_size;
+
+ /* Offset the base pointers to compensate for the negative indices. */
+ s1_ptr -= j;
+ res_ptr -= j;
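+
+  /* After the offsets above, s1_ptr[j] for j in [-s1_size, -1] refers
+     to the original s1_ptr[0 .. s1_size-1], so the loop below can test
+     its incremented counter directly against zero.  */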
+
+ cy_limb = 0;
+ do
+ {
+ umul_ppmm (prod_high, prod_low, s1_ptr[j], s2_limb);
+
+ prod_low += cy_limb;
+ cy_limb = (prod_low < cy_limb) + prod_high;
+
+ res_ptr[j] = prod_low;
+ }
+ while (++j != 0);
+
+ return cy_limb;
+}
--- /dev/null
+/* mpn_mul_n -- Multiply two natural numbers of length n.
+
+Copyright (C) 1991, 1992, 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+
+/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
+ both with SIZE limbs, and store the result at PRODP. 2 * SIZE limbs are
+ always stored. Return the most significant limb.
+
+ Argument constraints:
+ 1. PRODP != UP and PRODP != VP, i.e. the destination
+ must be distinct from the multiplier and the multiplicand. */
+
+/* If KARATSUBA_THRESHOLD is not already defined, define it to a
+ value which is good on most machines. */
+#ifndef KARATSUBA_THRESHOLD
+#define KARATSUBA_THRESHOLD 32
+#endif
+
+/* The code can't handle KARATSUBA_THRESHOLD smaller than 2. */
+#if KARATSUBA_THRESHOLD < 2
+#undef KARATSUBA_THRESHOLD
+#define KARATSUBA_THRESHOLD 2
+#endif
+
+/* Handle simple cases with traditional multiplication.
+
+ This is the most critical code of multiplication. All multiplies rely
+ on this, both small and huge. Small ones arrive here immediately. Huge
+ ones arrive here as this is the base case for Karatsuba's recursive
+ algorithm below. */
+
+void
+#if __STDC__
+impn_mul_n_basecase (mp_ptr prodp, mp_srcptr up, mp_srcptr vp, mp_size_t size)
+#else
+impn_mul_n_basecase (prodp, up, vp, size)
+ mp_ptr prodp;
+ mp_srcptr up;
+ mp_srcptr vp;
+ mp_size_t size;
+#endif
+{
+ mp_size_t i;
+ mp_limb_t cy_limb;
+ mp_limb_t v_limb;
+
+ /* Multiply by the first limb in V separately, as the result can be
+ stored (not added) to PROD. We also avoid a loop for zeroing. */
+ v_limb = vp[0];
+ if (v_limb <= 1)
+ {
+ if (v_limb == 1)
+ MPN_COPY (prodp, up, size);
+ else
+ MPN_ZERO (prodp, size);
+ cy_limb = 0;
+ }
+ else
+ cy_limb = mpn_mul_1 (prodp, up, size, v_limb);
+
+ prodp[size] = cy_limb;
+ prodp++;
+
+ /* For each iteration in the outer loop, multiply one limb from
+ U with one limb from V, and add it to PROD. */
+ for (i = 1; i < size; i++)
+ {
+ v_limb = vp[i];
+ if (v_limb <= 1)
+ {
+ cy_limb = 0;
+ if (v_limb == 1)
+ cy_limb = mpn_add_n (prodp, prodp, up, size);
+ }
+ else
+ cy_limb = mpn_addmul_1 (prodp, up, size, v_limb);
+
+ prodp[size] = cy_limb;
+ prodp++;
+ }
+}
+
+void
+#if __STDC__
+impn_mul_n (mp_ptr prodp,
+ mp_srcptr up, mp_srcptr vp, mp_size_t size, mp_ptr tspace)
+#else
+impn_mul_n (prodp, up, vp, size, tspace)
+ mp_ptr prodp;
+ mp_srcptr up;
+ mp_srcptr vp;
+ mp_size_t size;
+ mp_ptr tspace;
+#endif
+{
+ if ((size & 1) != 0)
+ {
+      /* The size is odd, and the code below doesn't handle that.
+	 Multiply the least significant (size - 1) limbs with a recursive
+	 call, and handle the most significant limb of U and V
+	 separately.  */
+ /* A slightly faster way to do this would be to make the Karatsuba
+ code below behave as if the size were even, and let it check for
+ odd size in the end. I.e., in essence move this code to the end.
+ Doing so would save us a recursive call, and potentially make the
+ stack grow a lot less. */
+
+ mp_size_t esize = size - 1; /* even size */
+ mp_limb_t cy_limb;
+
+ MPN_MUL_N_RECURSE (prodp, up, vp, esize, tspace);
+ cy_limb = mpn_addmul_1 (prodp + esize, up, esize, vp[esize]);
+ prodp[esize + esize] = cy_limb;
+ cy_limb = mpn_addmul_1 (prodp + esize, vp, size, up[esize]);
+
+ prodp[esize + size] = cy_limb;
+ }
+ else
+ {
+ /* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
+
+ Split U in two pieces, U1 and U0, such that
+ U = U0 + U1*(B**n),
+ and V in V1 and V0, such that
+ V = V0 + V1*(B**n).
+
+ UV is then computed recursively using the identity
+
+ 2n n n n
+ UV = (B + B )U V + B (U -U )(V -V ) + (B + 1)U V
+ 1 1 1 0 0 1 0 0
+
+ Where B = 2**BITS_PER_MP_LIMB. */
+
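+      /* A decimal illustration (B = 10, n = 1): U = 23, V = 14 gives
+	 U1 = 2, U0 = 3, V1 = 1, V0 = 4, and
+	 (100+10)*2*1 + 10*(2-3)*(4-1) + (10+1)*3*4 = 220 - 30 + 132 = 322
+	 = 23*14.  */
+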
+ mp_size_t hsize = size >> 1;
+ mp_limb_t cy;
+ int negflg;
+
+ /*** Product H. ________________ ________________
+ |_____U1 x V1____||____U0 x V0_____| */
+ /* Put result in upper part of PROD and pass low part of TSPACE
+ as new TSPACE. */
+ MPN_MUL_N_RECURSE (prodp + size, up + hsize, vp + hsize, hsize, tspace);
+
+ /*** Product M. ________________
+ |_(U1-U0)(V0-V1)_| */
+ if (mpn_cmp (up + hsize, up, hsize) >= 0)
+ {
+ mpn_sub_n (prodp, up + hsize, up, hsize);
+ negflg = 0;
+ }
+ else
+ {
+ mpn_sub_n (prodp, up, up + hsize, hsize);
+ negflg = 1;
+ }
+ if (mpn_cmp (vp + hsize, vp, hsize) >= 0)
+ {
+ mpn_sub_n (prodp + hsize, vp + hsize, vp, hsize);
+ negflg ^= 1;
+ }
+ else
+ {
+ mpn_sub_n (prodp + hsize, vp, vp + hsize, hsize);
+ /* No change of NEGFLG. */
+ }
+ /* Read temporary operands from low part of PROD.
+ Put result in low part of TSPACE using upper part of TSPACE
+ as new TSPACE. */
+ MPN_MUL_N_RECURSE (tspace, prodp, prodp + hsize, hsize, tspace + size);
+
+ /*** Add/copy product H. */
+ MPN_COPY (prodp + hsize, prodp + size, hsize);
+ cy = mpn_add_n (prodp + size, prodp + size, prodp + size + hsize, hsize);
+
+ /*** Add product M (if NEGFLG M is a negative number). */
+ if (negflg)
+ cy -= mpn_sub_n (prodp + hsize, prodp + hsize, tspace, size);
+ else
+ cy += mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
+
+ /*** Product L. ________________ ________________
+ |________________||____U0 x V0_____| */
+ /* Read temporary operands from low part of PROD.
+ Put result in low part of TSPACE using upper part of TSPACE
+ as new TSPACE. */
+ MPN_MUL_N_RECURSE (tspace, up, vp, hsize, tspace + size);
+
+ /*** Add/copy Product L (twice). */
+
+ cy += mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
+ if (cy)
+ mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
+
+ MPN_COPY (prodp, tspace, hsize);
+ cy = mpn_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
+ if (cy)
+ mpn_add_1 (prodp + size, prodp + size, size, 1);
+ }
+}
+
+void
+#if __STDC__
+impn_sqr_n_basecase (mp_ptr prodp, mp_srcptr up, mp_size_t size)
+#else
+impn_sqr_n_basecase (prodp, up, size)
+ mp_ptr prodp;
+ mp_srcptr up;
+ mp_size_t size;
+#endif
+{
+ mp_size_t i;
+ mp_limb_t cy_limb;
+ mp_limb_t v_limb;
+
+ /* Multiply by the first limb in V separately, as the result can be
+ stored (not added) to PROD. We also avoid a loop for zeroing. */
+ v_limb = up[0];
+ if (v_limb <= 1)
+ {
+ if (v_limb == 1)
+ MPN_COPY (prodp, up, size);
+ else
+ MPN_ZERO (prodp, size);
+ cy_limb = 0;
+ }
+ else
+ cy_limb = mpn_mul_1 (prodp, up, size, v_limb);
+
+ prodp[size] = cy_limb;
+ prodp++;
+
+ /* For each iteration in the outer loop, multiply one limb from
+ U with one limb from V, and add it to PROD. */
+ for (i = 1; i < size; i++)
+ {
+ v_limb = up[i];
+ if (v_limb <= 1)
+ {
+ cy_limb = 0;
+ if (v_limb == 1)
+ cy_limb = mpn_add_n (prodp, prodp, up, size);
+ }
+ else
+ cy_limb = mpn_addmul_1 (prodp, up, size, v_limb);
+
+ prodp[size] = cy_limb;
+ prodp++;
+ }
+}
+
+void
+#if __STDC__
+impn_sqr_n (mp_ptr prodp,
+ mp_srcptr up, mp_size_t size, mp_ptr tspace)
+#else
+impn_sqr_n (prodp, up, size, tspace)
+ mp_ptr prodp;
+ mp_srcptr up;
+ mp_size_t size;
+ mp_ptr tspace;
+#endif
+{
+ if ((size & 1) != 0)
+ {
+      /* The size is odd, and the code below doesn't handle that.
+	 Multiply the least significant (size - 1) limbs with a recursive
+	 call, and handle the most significant limb of U separately.  */
+ /* A slightly faster way to do this would be to make the Karatsuba
+ code below behave as if the size were even, and let it check for
+ odd size in the end. I.e., in essence move this code to the end.
+ Doing so would save us a recursive call, and potentially make the
+ stack grow a lot less. */
+
+ mp_size_t esize = size - 1; /* even size */
+ mp_limb_t cy_limb;
+
+ MPN_SQR_N_RECURSE (prodp, up, esize, tspace);
+ cy_limb = mpn_addmul_1 (prodp + esize, up, esize, up[esize]);
+ prodp[esize + esize] = cy_limb;
+ cy_limb = mpn_addmul_1 (prodp + esize, up, size, up[esize]);
+
+ prodp[esize + size] = cy_limb;
+ }
+ else
+ {
+ mp_size_t hsize = size >> 1;
+ mp_limb_t cy;
+
+ /*** Product H. ________________ ________________
+ |_____U1 x U1____||____U0 x U0_____| */
+ /* Put result in upper part of PROD and pass low part of TSPACE
+ as new TSPACE. */
+ MPN_SQR_N_RECURSE (prodp + size, up + hsize, hsize, tspace);
+
+ /*** Product M. ________________
+ |_(U1-U0)(U0-U1)_| */
+ if (mpn_cmp (up + hsize, up, hsize) >= 0)
+ {
+ mpn_sub_n (prodp, up + hsize, up, hsize);
+ }
+ else
+ {
+ mpn_sub_n (prodp, up, up + hsize, hsize);
+ }
+
+ /* Read temporary operands from low part of PROD.
+ Put result in low part of TSPACE using upper part of TSPACE
+ as new TSPACE. */
+ MPN_SQR_N_RECURSE (tspace, prodp, hsize, tspace + size);
+
+ /*** Add/copy product H. */
+ MPN_COPY (prodp + hsize, prodp + size, hsize);
+ cy = mpn_add_n (prodp + size, prodp + size, prodp + size + hsize, hsize);
+
+ /*** Add product M (if NEGFLG M is a negative number). */
+ cy -= mpn_sub_n (prodp + hsize, prodp + hsize, tspace, size);
+
+ /*** Product L. ________________ ________________
+ |________________||____U0 x U0_____| */
+ /* Read temporary operands from low part of PROD.
+ Put result in low part of TSPACE using upper part of TSPACE
+ as new TSPACE. */
+ MPN_SQR_N_RECURSE (tspace, up, hsize, tspace + size);
+
+ /*** Add/copy Product L (twice). */
+
+ cy += mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
+ if (cy)
+ mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
+
+ MPN_COPY (prodp, tspace, hsize);
+ cy = mpn_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
+ if (cy)
+ mpn_add_1 (prodp + size, prodp + size, size, 1);
+ }
+}
+
+/* This should be made into an inline function in gmp.h. */
+inline void
+#if __STDC__
+mpn_mul_n (mp_ptr prodp, mp_srcptr up, mp_srcptr vp, mp_size_t size)
+#else
+mpn_mul_n (prodp, up, vp, size)
+ mp_ptr prodp;
+ mp_srcptr up;
+ mp_srcptr vp;
+ mp_size_t size;
+#endif
+{
+ TMP_DECL (marker);
+ TMP_MARK (marker);
+ if (up == vp)
+ {
+ if (size < KARATSUBA_THRESHOLD)
+ {
+ impn_sqr_n_basecase (prodp, up, size);
+ }
+ else
+ {
+ mp_ptr tspace;
+ tspace = (mp_ptr) TMP_ALLOC (2 * size * BYTES_PER_MP_LIMB);
+ impn_sqr_n (prodp, up, size, tspace);
+ }
+ }
+ else
+ {
+ if (size < KARATSUBA_THRESHOLD)
+ {
+ impn_mul_n_basecase (prodp, up, vp, size);
+ }
+ else
+ {
+ mp_ptr tspace;
+ tspace = (mp_ptr) TMP_ALLOC (2 * size * BYTES_PER_MP_LIMB);
+ impn_mul_n (prodp, up, vp, size, tspace);
+ }
+ }
+ TMP_FREE (marker);
+}
--- /dev/null
+/* mpn_perfect_square_p(u,usize) -- Return non-zero if U is a perfect square,
+ zero otherwise.
+
+Copyright (C) 1991, 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+#include "longlong.h"
+
+#ifndef UMUL_TIME
+#define UMUL_TIME 1
+#endif
+
+#ifndef UDIV_TIME
+#define UDIV_TIME UMUL_TIME
+#endif
+
+#if BITS_PER_MP_LIMB == 32
+#define PP 0xC0CFD797L /* 3 x 5 x 7 x 11 x 13 x ... x 29 */
+#define PP_INVERTED 0x53E5645CL
+#endif
+
+#if BITS_PER_MP_LIMB == 64
+#define PP 0xE221F97C30E94E1DL /* 3 x 5 x 7 x 11 x 13 x ... x 53 */
+#define PP_INVERTED 0x21CFE6CFC938B36BL
+#endif
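+
+/* For reference: the 32-bit PP is 3*5*7*11*13*17*19*23*29 = 3234846615
+   = 0xC0CFD797, and PP_INVERTED is the corresponding pre-computed
+   inverse in the sense used by udiv_qrnnd_preinv, as computed in
+   mpn_mod_1.  */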
+
+/* sq_res_0x100[x mod 0x100] == 1 iff x mod 0x100 is a quadratic residue
+ modulo 0x100. */
+static unsigned char const sq_res_0x100[0x100] =
+{
+ 1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+ 0,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+ 1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+ 0,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+ 0,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+ 0,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+ 0,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+ 0,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+};
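+
+/* For example, sq_res_0x100[4] and sq_res_0x100[9] are 1 (from 2^2 and
+   3^2), while sq_res_0x100[5] is 0: no square is congruent to 5 mod
+   256.  */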
+
+int
+#if __STDC__
+mpn_perfect_square_p (mp_srcptr up, mp_size_t usize)
+#else
+mpn_perfect_square_p (up, usize)
+ mp_srcptr up;
+ mp_size_t usize;
+#endif
+{
+ mp_limb_t rem;
+ mp_ptr root_ptr;
+ int res;
+ TMP_DECL (marker);
+
+ /* The first test excludes 55/64 (85.9%) of the perfect square candidates
+ in O(1) time. */
+ if ((sq_res_0x100[(unsigned int) up[0] % 0x100] & 1) == 0)
+ return 0;
+
+#if defined (PP)
+ /* The second test excludes 30652543/30808063 (99.5%) of the remaining
+ perfect square candidates in O(n) time. */
+
+ /* Firstly, compute REM = A mod PP. */
+ if (UDIV_TIME > (2 * UMUL_TIME + 6))
+ rem = mpn_preinv_mod_1 (up, usize, (mp_limb_t) PP, (mp_limb_t) PP_INVERTED);
+ else
+ rem = mpn_mod_1 (up, usize, (mp_limb_t) PP);
+
+ /* Now decide if REM is a quadratic residue modulo the factors in PP. */
+
+  /* If A is just a few limbs, computing the square root does not take
+     long, so things might run faster if we limit this loop according
+     to the size of A.  */
+
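+  /* Each mask below has bit i set iff i is a quadratic residue modulo
+     the prime being tested; e.g. the squares mod 5 are 0, 1 and 4, so
+     the mod-5 mask is binary 10011 = 0x13.  */
+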
+#if BITS_PER_MP_LIMB == 64
+ if (((0x12DD703303AED3L >> rem % 53) & 1) == 0)
+ return 0;
+ if (((0x4351B2753DFL >> rem % 47) & 1) == 0)
+ return 0;
+ if (((0x35883A3EE53L >> rem % 43) & 1) == 0)
+ return 0;
+ if (((0x1B382B50737L >> rem % 41) & 1) == 0)
+ return 0;
+ if (((0x165E211E9BL >> rem % 37) & 1) == 0)
+ return 0;
+ if (((0x121D47B7L >> rem % 31) & 1) == 0)
+ return 0;
+#endif
+ if (((0x13D122F3L >> rem % 29) & 1) == 0)
+ return 0;
+ if (((0x5335FL >> rem % 23) & 1) == 0)
+ return 0;
+ if (((0x30AF3L >> rem % 19) & 1) == 0)
+ return 0;
+ if (((0x1A317L >> rem % 17) & 1) == 0)
+ return 0;
+ if (((0x161BL >> rem % 13) & 1) == 0)
+ return 0;
+ if (((0x23BL >> rem % 11) & 1) == 0)
+ return 0;
+ if (((0x017L >> rem % 7) & 1) == 0)
+ return 0;
+ if (((0x13L >> rem % 5) & 1) == 0)
+ return 0;
+ if (((0x3L >> rem % 3) & 1) == 0)
+ return 0;
+#endif
+
+ TMP_MARK (marker);
+
+ /* For the third and last test, we finally compute the square root,
+ to make sure we've really got a perfect square. */
+ root_ptr = (mp_ptr) TMP_ALLOC ((usize + 1) / 2 * BYTES_PER_MP_LIMB);
+
+ /* Iff mpn_sqrtrem returns zero, the square is perfect. */
+ res = ! mpn_sqrtrem (root_ptr, NULL, up, usize);
+ TMP_FREE (marker);
+ return res;
+}
--- /dev/null
+/* popcount.c
+
+Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+
+#if defined __GNUC__
+#if defined __sparc_v9__ && BITS_PER_MP_LIMB == 64
+#define popc_limb(a) \
+ ({ \
+ DItype __res; \
+ asm ("popc %1,%0" : "=r" (__res) : "rI" (a)); \
+ __res; \
+ })
+#endif
+#endif
+
+#ifndef popc_limb
+
+/* Cool population count of a mp_limb_t.
+ You have to figure out how this works, I won't tell you! */
+
+static inline unsigned int
+popc_limb (x)
+ mp_limb_t x;
+{
+#if BITS_PER_MP_LIMB == 64
+  /* We have to go to some trouble to define these constants.
+     (For mp_limb_t being `long long'.)  */
+ mp_limb_t cnst;
+ cnst = 0x55555555L | ((mp_limb_t) 0x55555555L << BITS_PER_MP_LIMB/2);
+ x = ((x & ~cnst) >> 1) + (x & cnst);
+ cnst = 0x33333333L | ((mp_limb_t) 0x33333333L << BITS_PER_MP_LIMB/2);
+ x = ((x & ~cnst) >> 2) + (x & cnst);
+ cnst = 0x0f0f0f0fL | ((mp_limb_t) 0x0f0f0f0fL << BITS_PER_MP_LIMB/2);
+ x = ((x >> 4) + x) & cnst;
+ x = ((x >> 8) + x);
+ x = ((x >> 16) + x);
+ x = ((x >> 32) + x) & 0xff;
+#endif
+#if BITS_PER_MP_LIMB == 32
+ x = ((x >> 1) & 0x55555555L) + (x & 0x55555555L);
+ x = ((x >> 2) & 0x33333333L) + (x & 0x33333333L);
+ x = ((x >> 4) + x) & 0x0f0f0f0fL;
+ x = ((x >> 8) + x);
+ x = ((x >> 16) + x) & 0xff;
+#endif
+ return x;
+}
+#endif
+
+unsigned long int
+#if __STDC__
+mpn_popcount (register mp_srcptr p, register mp_size_t size)
+#else
+mpn_popcount (p, size)
+ register mp_srcptr p;
+ register mp_size_t size;
+#endif
+{
+ unsigned long int popcnt;
+ mp_size_t i;
+
+ popcnt = 0;
+ for (i = 0; i < size; i++)
+ popcnt += popc_limb (p[i]);
+
+ return popcnt;
+}
--- /dev/null
+/* mpn_preinv_mod_1 (dividend_ptr, dividend_size, divisor_limb,
+ divisor_limb_inverted) --
+ Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by the normalized DIVISOR_LIMB.
+   DIVISOR_LIMB_INVERTED should be
+   2^(2*BITS_PER_MP_LIMB) / DIVISOR_LIMB - 2^BITS_PER_MP_LIMB.
+ Return the single-limb remainder.
+
+Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+#include "longlong.h"
+
+#ifndef UMUL_TIME
+#define UMUL_TIME 1
+#endif
+
+#ifndef UDIV_TIME
+#define UDIV_TIME UMUL_TIME
+#endif
+
+mp_limb_t
+#if __STDC__
+mpn_preinv_mod_1 (mp_srcptr dividend_ptr, mp_size_t dividend_size,
+ mp_limb_t divisor_limb, mp_limb_t divisor_limb_inverted)
+#else
+mpn_preinv_mod_1 (dividend_ptr, dividend_size, divisor_limb, divisor_limb_inverted)
+ mp_srcptr dividend_ptr;
+ mp_size_t dividend_size;
+ mp_limb_t divisor_limb;
+ mp_limb_t divisor_limb_inverted;
+#endif
+{
+ mp_size_t i;
+ mp_limb_t n0, r;
+ int dummy;
+
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ i--;
+
+ for (; i >= 0; i--)
+ {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd_preinv (dummy, r, r, n0, divisor_limb, divisor_limb_inverted);
+ }
+ return r;
+}
--- /dev/null
+/* mpn_rshift -- Shift right a low-level natural-number integer.
+
+Copyright (C) 1991, 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+
+/* Shift U (pointed to by UP and USIZE limbs long) CNT bits to the right
+ and store the USIZE least significant limbs of the result at WP.
+ The bits shifted out to the right are returned.
+
+ Argument constraints:
+ 1. 0 < CNT < BITS_PER_MP_LIMB
+ 2. If the result is to be written over the input, WP must be <= UP.
+*/
+
+mp_limb_t
+#if __STDC__
+mpn_rshift (register mp_ptr wp,
+ register mp_srcptr up, mp_size_t usize,
+ register unsigned int cnt)
+#else
+mpn_rshift (wp, up, usize, cnt)
+ register mp_ptr wp;
+ register mp_srcptr up;
+ mp_size_t usize;
+ register unsigned int cnt;
+#endif
+{
+ register mp_limb_t high_limb, low_limb;
+ register unsigned sh_1, sh_2;
+ register mp_size_t i;
+ mp_limb_t retval;
+
+#ifdef DEBUG
+ if (usize == 0 || cnt == 0)
+ abort ();
+#endif
+
+ sh_1 = cnt;
+
+#if 0
+ if (sh_1 == 0)
+ {
+ if (wp != up)
+ {
+ /* Copy from low end to high end, to allow specified input/output
+ overlapping. */
+ for (i = 0; i < usize; i++)
+ wp[i] = up[i];
+ }
+      return 0;
+ }
+#endif
+
+ wp -= 1;
+ sh_2 = BITS_PER_MP_LIMB - sh_1;
+ high_limb = up[0];
+ retval = high_limb << sh_2;
+ low_limb = high_limb;
+
+ for (i = 1; i < usize; i++)
+ {
+ high_limb = up[i];
+ wp[i] = (low_limb >> sh_1) | (high_limb << sh_2);
+ low_limb = high_limb;
+ }
+ wp[i] = low_limb >> sh_1;
+
+ return retval;
+}
--- /dev/null
+/* mpn_scan0 -- Scan from a given bit position for the next clear bit.
+
+Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+#include "longlong.h"
+
+/* Design issues:
+ 1. What if starting_bit is not within U? Caller's problem?
+ 2. Bit index should be 'unsigned'?
+
+ Argument constraints:
+   1. U must sooner or later have a limb with a clear bit.
+ */
+
+unsigned long int
+#if __STDC__
+mpn_scan0 (register mp_srcptr up,
+ register unsigned long int starting_bit)
+#else
+mpn_scan0 (up, starting_bit)
+ register mp_srcptr up;
+ register unsigned long int starting_bit;
+#endif
+{
+ mp_size_t starting_word;
+ mp_limb_t alimb;
+ int cnt;
+ mp_srcptr p;
+
+ /* Start at the word implied by STARTING_BIT. */
+ starting_word = starting_bit / BITS_PER_MP_LIMB;
+ p = up + starting_word;
+ alimb = ~*p++;
+
+ /* Mask off any bits before STARTING_BIT in the first limb. */
+ alimb &= - (mp_limb_t) 1 << (starting_bit % BITS_PER_MP_LIMB);
+
+ while (alimb == 0)
+ alimb = ~*p++;
+
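+  /* ALIMB & -ALIMB isolates its least significant set bit (here a clear
+     bit of the original limb); e.g. if that is bit 2 of a 32-bit limb,
+     count_leading_zeros gives 29 and 32 - 1 - 29 = 2, its index, to
+     which the preceding whole limbs are then added.  */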
+ count_leading_zeros (cnt, alimb & -alimb);
+ return (p - up) * BITS_PER_MP_LIMB - 1 - cnt;
+}
--- /dev/null
+/* mpn_scan1 -- Scan from a given bit position for the next set bit.
+
+Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+#include "longlong.h"
+
+/* Design issues:
+ 1. What if starting_bit is not within U? Caller's problem?
+ 2. Bit index should be 'unsigned'?
+
+ Argument constraints:
+   1. U must sooner or later have a limb != 0.
+ */
+
+unsigned long int
+#if __STDC__
+mpn_scan1 (register mp_srcptr up,
+ register unsigned long int starting_bit)
+#else
+mpn_scan1 (up, starting_bit)
+ register mp_srcptr up;
+ register unsigned long int starting_bit;
+#endif
+{
+ mp_size_t starting_word;
+ mp_limb_t alimb;
+ int cnt;
+ mp_srcptr p;
+
+ /* Start at the word implied by STARTING_BIT. */
+ starting_word = starting_bit / BITS_PER_MP_LIMB;
+ p = up + starting_word;
+ alimb = *p++;
+
+ /* Mask off any bits before STARTING_BIT in the first limb. */
+ alimb &= - (mp_limb_t) 1 << (starting_bit % BITS_PER_MP_LIMB);
+
+ while (alimb == 0)
+ alimb = *p++;
+
+ count_leading_zeros (cnt, alimb & -alimb);
+ return (p - up) * BITS_PER_MP_LIMB - 1 - cnt;
+}
--- /dev/null
+/* mpn_set_str (mp_ptr res_ptr, const char *str, size_t str_len, int base)
+ -- Convert a STR_LEN long base BASE byte string pointed to by STR to a
+ limb vector pointed to by RES_PTR. Return the number of limbs in
+ RES_PTR.
+
+Copyright (C) 1991, 1992, 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+
+mp_size_t
+#if __STDC__
+mpn_set_str (mp_ptr xp, const unsigned char *str, size_t str_len, int base)
+#else
+mpn_set_str (xp, str, str_len, base)
+  mp_ptr xp;
+  const unsigned char *str;
+  size_t str_len;
+  int base;
+#endif
+{
+ mp_size_t size;
+ mp_limb_t big_base;
+ int indigits_per_limb;
+ mp_limb_t res_digit;
+
+ big_base = __mp_bases[base].big_base;
+ indigits_per_limb = __mp_bases[base].chars_per_limb;
+
+/* size = str_len / indigits_per_limb + 1; */
+
+ size = 0;
+
+ if ((base & (base - 1)) == 0)
+ {
+ /* The base is a power of 2. Read the input string from
+ least to most significant character/digit. */
+
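+      /* For illustration with BASE = 16: the digit string {0xA, 0xB, 0xC}
+	 (most significant digit first) is packed four bits at a time from
+	 the right, producing the limb value 0xABC.  */
+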
+ const unsigned char *s;
+ int next_bitpos;
+ int bits_per_indigit = big_base;
+
+ res_digit = 0;
+ next_bitpos = 0;
+
+ for (s = str + str_len - 1; s >= str; s--)
+ {
+ int inp_digit = *s;
+
+ res_digit |= (mp_limb_t) inp_digit << next_bitpos;
+ next_bitpos += bits_per_indigit;
+ if (next_bitpos >= BITS_PER_MP_LIMB)
+ {
+ xp[size++] = res_digit;
+ next_bitpos -= BITS_PER_MP_LIMB;
+ res_digit = inp_digit >> (bits_per_indigit - next_bitpos);
+ }
+ }
+
+ if (res_digit != 0)
+ xp[size++] = res_digit;
+ }
+ else
+ {
+ /* General case. The base is not a power of 2. */
+
+ size_t i;
+ int j;
+ mp_limb_t cy_limb;
+
+ for (i = indigits_per_limb; i < str_len; i += indigits_per_limb)
+ {
+ res_digit = *str++;
+ if (base == 10)
+ { /* This is a common case.
+ Help the compiler to avoid multiplication. */
+ for (j = 1; j < indigits_per_limb; j++)
+ res_digit = res_digit * 10 + *str++;
+ }
+ else
+ {
+ for (j = 1; j < indigits_per_limb; j++)
+ res_digit = res_digit * base + *str++;
+ }
+
+ if (size == 0)
+ {
+ if (res_digit != 0)
+ {
+ xp[0] = res_digit;
+ size = 1;
+ }
+ }
+ else
+ {
+ cy_limb = mpn_mul_1 (xp, xp, size, big_base);
+ cy_limb += mpn_add_1 (xp, xp, size, res_digit);
+ if (cy_limb != 0)
+ xp[size++] = cy_limb;
+ }
+ }
+
+ big_base = base;
+ res_digit = *str++;
+ if (base == 10)
+ { /* This is a common case.
+ Help the compiler to avoid multiplication. */
+ for (j = 1; j < str_len - (i - indigits_per_limb); j++)
+ {
+ res_digit = res_digit * 10 + *str++;
+ big_base *= 10;
+ }
+ }
+ else
+ {
+ for (j = 1; j < str_len - (i - indigits_per_limb); j++)
+ {
+ res_digit = res_digit * base + *str++;
+ big_base *= base;
+ }
+ }
+
+ if (size == 0)
+ {
+ if (res_digit != 0)
+ {
+ xp[0] = res_digit;
+ size = 1;
+ }
+ }
+ else
+ {
+ cy_limb = mpn_mul_1 (xp, xp, size, big_base);
+ cy_limb += mpn_add_1 (xp, xp, size, res_digit);
+ if (cy_limb != 0)
+ xp[size++] = cy_limb;
+ }
+ }
+
+ return size;
+}
--- /dev/null
+/* mpn_sqrtrem (root_ptr, rem_ptr, op_ptr, op_size)
+
+ Write the square root of {OP_PTR, OP_SIZE} at ROOT_PTR.
+ Write the remainder at REM_PTR, if REM_PTR != NULL.
+ Return the size of the remainder.
+ (The size of the root is always half of the size of the operand.)
+
+ OP_PTR and ROOT_PTR may not point to the same object.
+ OP_PTR and REM_PTR may point to the same object.
+
+ If REM_PTR is NULL, only the root is computed and the return value of
+ the function is 0 if OP is a perfect square, and *any* non-zero number
+ otherwise.
+
+Copyright (C) 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+/* This code is only correct if "unsigned char" has at least 8 bits.  It
+   doesn't help to use CHAR_BIT from limits.h, as the real problem is
+   the static arrays.  */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+#include "longlong.h"
+
+/* Square root algorithm:
+
+   1. Shift OP (the input) to the left an even number of bits such that
+      there are an even number of words and either (or both) of the most
+      significant bits are set.  This way, sqrt(OP) has exactly half as
+      many words as OP, and has its most significant bit set.
+
+ 2. Get a 9-bit approximation to sqrt(OP) using the pre-computed tables.
+ This approximation is used for the first single-precision
+ iterations of Newton's method, yielding a full-word approximation
+ to sqrt(OP).
+
+ 3. Perform multiple-precision Newton iteration until we have the
+ exact result. Only about half of the input operand is used in
+ this calculation, as the square root is perfectly determinable
+ from just the higher half of a number. */
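+
+/* For illustration, the scalar Newton iteration this models is
+   x' = (x + a/x) / 2; e.g. for a = 10, starting from x0 = 3, it gives
+   x1 = 3.16667 and x2 = 3.16228, converging quadratically to sqrt(10).  */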
+\f
+/* Define this macro for IEEE P854 machines with a fast sqrt instruction. */
+#if defined __GNUC__ && ! defined __SOFT_FLOAT
+
+#if defined __sparc__
+#define SQRT(a) \
+ ({ \
+ double __sqrt_res; \
+ asm ("fsqrtd %1,%0" : "=f" (__sqrt_res) : "f" (a)); \
+ __sqrt_res; \
+ })
+#endif
+
+#if defined __HAVE_68881__
+#define SQRT(a) \
+ ({ \
+ double __sqrt_res; \
+ asm ("fsqrtx %1,%0" : "=f" (__sqrt_res) : "f" (a)); \
+ __sqrt_res; \
+ })
+#endif
+
+#if defined __hppa
+#define SQRT(a) \
+ ({ \
+ double __sqrt_res; \
+ asm ("fsqrt,dbl %1,%0" : "=fx" (__sqrt_res) : "fx" (a)); \
+ __sqrt_res; \
+ })
+#endif
+
+#if defined _ARCH_PWR2
+#define SQRT(a) \
+ ({ \
+ double __sqrt_res; \
+ asm ("fsqrt %0,%1" : "=f" (__sqrt_res) : "f" (a)); \
+ __sqrt_res; \
+ })
+#endif
+
+#endif
+
+#ifndef SQRT
+
+/* Tables for initial approximation of the square root. These are
+ indexed with bits 1-8 of the operand for which the square root is
+ calculated, where bit 0 is the most significant non-zero bit. I.e.
+   the most significant one-bit is not used, since it is by definition
+   one.  Likewise, the tables don't return the highest bit of the
+   result; that bit must be inserted by OR-ing the returned value with
+   0x100.  This way, we get a 9-bit approximation from 8-bit tables!  */
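+
+/* For example, an operand with an even total number of bits lies in
+   [2**(2n-1), 2**2n), so its normalized root lies in [sqrt(2), 2); and
+   indeed even_approx_tab[0] = 0x6a gives 0x16a = 362 ~= 256 * sqrt(2)
+   once the implicit 0x100 bit is inserted.  */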
+
+/* Table to be used for operands with an even total number of bits.
+ (Exactly as in the decimal system there are similarities between the
+ square root of numbers with the same initial digits and an even
+ difference in the total number of digits. Consider the square root
+ of 1, 10, 100, 1000, ...) */
+static unsigned char even_approx_tab[256] =
+{
+ 0x6a, 0x6a, 0x6b, 0x6c, 0x6c, 0x6d, 0x6e, 0x6e,
+ 0x6f, 0x70, 0x71, 0x71, 0x72, 0x73, 0x73, 0x74,
+ 0x75, 0x75, 0x76, 0x77, 0x77, 0x78, 0x79, 0x79,
+ 0x7a, 0x7b, 0x7b, 0x7c, 0x7d, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x80, 0x81, 0x81, 0x82, 0x83, 0x83, 0x84,
+ 0x85, 0x85, 0x86, 0x87, 0x87, 0x88, 0x89, 0x89,
+ 0x8a, 0x8b, 0x8b, 0x8c, 0x8d, 0x8d, 0x8e, 0x8f,
+ 0x8f, 0x90, 0x90, 0x91, 0x92, 0x92, 0x93, 0x94,
+ 0x94, 0x95, 0x96, 0x96, 0x97, 0x97, 0x98, 0x99,
+ 0x99, 0x9a, 0x9b, 0x9b, 0x9c, 0x9c, 0x9d, 0x9e,
+ 0x9e, 0x9f, 0xa0, 0xa0, 0xa1, 0xa1, 0xa2, 0xa3,
+ 0xa3, 0xa4, 0xa4, 0xa5, 0xa6, 0xa6, 0xa7, 0xa7,
+ 0xa8, 0xa9, 0xa9, 0xaa, 0xaa, 0xab, 0xac, 0xac,
+ 0xad, 0xad, 0xae, 0xaf, 0xaf, 0xb0, 0xb0, 0xb1,
+ 0xb2, 0xb2, 0xb3, 0xb3, 0xb4, 0xb5, 0xb5, 0xb6,
+ 0xb6, 0xb7, 0xb7, 0xb8, 0xb9, 0xb9, 0xba, 0xba,
+ 0xbb, 0xbb, 0xbc, 0xbd, 0xbd, 0xbe, 0xbe, 0xbf,
+ 0xc0, 0xc0, 0xc1, 0xc1, 0xc2, 0xc2, 0xc3, 0xc3,
+ 0xc4, 0xc5, 0xc5, 0xc6, 0xc6, 0xc7, 0xc7, 0xc8,
+ 0xc9, 0xc9, 0xca, 0xca, 0xcb, 0xcb, 0xcc, 0xcc,
+ 0xcd, 0xce, 0xce, 0xcf, 0xcf, 0xd0, 0xd0, 0xd1,
+ 0xd1, 0xd2, 0xd3, 0xd3, 0xd4, 0xd4, 0xd5, 0xd5,
+ 0xd6, 0xd6, 0xd7, 0xd7, 0xd8, 0xd9, 0xd9, 0xda,
+ 0xda, 0xdb, 0xdb, 0xdc, 0xdc, 0xdd, 0xdd, 0xde,
+ 0xde, 0xdf, 0xe0, 0xe0, 0xe1, 0xe1, 0xe2, 0xe2,
+ 0xe3, 0xe3, 0xe4, 0xe4, 0xe5, 0xe5, 0xe6, 0xe6,
+ 0xe7, 0xe7, 0xe8, 0xe8, 0xe9, 0xea, 0xea, 0xeb,
+ 0xeb, 0xec, 0xec, 0xed, 0xed, 0xee, 0xee, 0xef,
+ 0xef, 0xf0, 0xf0, 0xf1, 0xf1, 0xf2, 0xf2, 0xf3,
+ 0xf3, 0xf4, 0xf4, 0xf5, 0xf5, 0xf6, 0xf6, 0xf7,
+ 0xf7, 0xf8, 0xf8, 0xf9, 0xf9, 0xfa, 0xfa, 0xfb,
+ 0xfb, 0xfc, 0xfc, 0xfd, 0xfd, 0xfe, 0xfe, 0xff,
+};
+
+/* Table to be used for operands with an odd total number of bits.
+ (Further comments before previous table.) */
+static unsigned char odd_approx_tab[256] =
+{
+ 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03,
+ 0x03, 0x04, 0x04, 0x05, 0x05, 0x06, 0x06, 0x07,
+ 0x07, 0x08, 0x08, 0x09, 0x09, 0x0a, 0x0a, 0x0b,
+ 0x0b, 0x0c, 0x0c, 0x0d, 0x0d, 0x0e, 0x0e, 0x0f,
+ 0x0f, 0x10, 0x10, 0x10, 0x11, 0x11, 0x12, 0x12,
+ 0x13, 0x13, 0x14, 0x14, 0x15, 0x15, 0x16, 0x16,
+ 0x16, 0x17, 0x17, 0x18, 0x18, 0x19, 0x19, 0x1a,
+ 0x1a, 0x1b, 0x1b, 0x1b, 0x1c, 0x1c, 0x1d, 0x1d,
+ 0x1e, 0x1e, 0x1f, 0x1f, 0x20, 0x20, 0x20, 0x21,
+ 0x21, 0x22, 0x22, 0x23, 0x23, 0x23, 0x24, 0x24,
+ 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x27, 0x28,
+ 0x28, 0x29, 0x29, 0x2a, 0x2a, 0x2a, 0x2b, 0x2b,
+ 0x2c, 0x2c, 0x2d, 0x2d, 0x2d, 0x2e, 0x2e, 0x2f,
+ 0x2f, 0x30, 0x30, 0x30, 0x31, 0x31, 0x32, 0x32,
+ 0x32, 0x33, 0x33, 0x34, 0x34, 0x35, 0x35, 0x35,
+ 0x36, 0x36, 0x37, 0x37, 0x37, 0x38, 0x38, 0x39,
+ 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c,
+ 0x3c, 0x3d, 0x3d, 0x3d, 0x3e, 0x3e, 0x3f, 0x3f,
+ 0x40, 0x40, 0x40, 0x41, 0x41, 0x41, 0x42, 0x42,
+ 0x43, 0x43, 0x43, 0x44, 0x44, 0x45, 0x45, 0x45,
+ 0x46, 0x46, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49,
+ 0x49, 0x49, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c,
+ 0x4c, 0x4c, 0x4d, 0x4d, 0x4e, 0x4e, 0x4e, 0x4f,
+ 0x4f, 0x50, 0x50, 0x50, 0x51, 0x51, 0x51, 0x52,
+ 0x52, 0x53, 0x53, 0x53, 0x54, 0x54, 0x54, 0x55,
+ 0x55, 0x56, 0x56, 0x56, 0x57, 0x57, 0x57, 0x58,
+ 0x58, 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b,
+ 0x5b, 0x5b, 0x5c, 0x5c, 0x5d, 0x5d, 0x5d, 0x5e,
+ 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61,
+ 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x63,
+ 0x64, 0x64, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66,
+ 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69,
+};
+#endif
+
+\f
+mp_size_t
+#if __STDC__
+mpn_sqrtrem (mp_ptr root_ptr, mp_ptr rem_ptr, mp_srcptr op_ptr, mp_size_t op_size)
+#else
+mpn_sqrtrem (root_ptr, rem_ptr, op_ptr, op_size)
+ mp_ptr root_ptr;
+ mp_ptr rem_ptr;
+ mp_srcptr op_ptr;
+ mp_size_t op_size;
+#endif
+{
+ /* R (root result) */
+ mp_ptr rp; /* Pointer to least significant word */
+ mp_size_t rsize; /* The size in words */
+
+ /* T (OP shifted to the left a.k.a. normalized) */
+ mp_ptr tp; /* Pointer to least significant word */
+ mp_size_t tsize; /* The size in words */
+ mp_ptr t_end_ptr; /* Pointer right beyond most sign. word */
+ mp_limb_t t_high0, t_high1; /* The two most significant words */
+
+ /* TT (temporary for numerator/remainder) */
+ mp_ptr ttp; /* Pointer to least significant word */
+
+ /* X (temporary for quotient in main loop) */
+ mp_ptr xp; /* Pointer to least significant word */
+ mp_size_t xsize; /* The size in words */
+
+ unsigned cnt;
+ mp_limb_t initial_approx; /* Initial approximation */
+ mp_size_t tsizes[BITS_PER_MP_LIMB]; /* Successive calculation precisions */
+ mp_size_t tmp;
+ mp_size_t i;
+
+ mp_limb_t cy_limb;
+ TMP_DECL (marker);
+
+ /* If OP is zero, both results are zero. */
+ if (op_size == 0)
+ return 0;
+
+ count_leading_zeros (cnt, op_ptr[op_size - 1]);
+ tsize = op_size;
+ if ((tsize & 1) != 0)
+ {
+ cnt += BITS_PER_MP_LIMB;
+ tsize++;
+ }
+
+ rsize = tsize / 2;
+ rp = root_ptr;
+
+ TMP_MARK (marker);
+
+ /* Shift OP an even number of bits into T, such that either the most or
+ the second most significant bit is set, and such that the number of
+ words in T becomes even. This way, the number of words in
+ R = sqrt(OP) is exactly half the number in T, and the most
+ significant bit of R is set.
+
+ Also, the initial approximation is simplified by this up-shifted OP.
+
+ Finally, the Newtonian iteration that is the main part of this
+ function performs division by R. The fast division routine expects
+ the divisor to be "normalized" in exactly the sense of having the
+ most significant bit set. */
+
+ tp = (mp_ptr) TMP_ALLOC (tsize * BYTES_PER_MP_LIMB);
+
+ if ((cnt & ~1) % BITS_PER_MP_LIMB != 0)
+ t_high0 = mpn_lshift (tp + cnt / BITS_PER_MP_LIMB, op_ptr, op_size,
+ (cnt & ~1) % BITS_PER_MP_LIMB);
+ else
+ MPN_COPY (tp + cnt / BITS_PER_MP_LIMB, op_ptr, op_size);
+
+ if (cnt >= BITS_PER_MP_LIMB)
+ tp[0] = 0;
+
+ t_high0 = tp[tsize - 1];
+ t_high1 = tp[tsize - 2]; /* Never reads out of bounds; TSIZE is >= 2. */
+
+/* Is there a fast sqrt instruction defined for this machine? */
+#ifdef SQRT
+ {
+ initial_approx = SQRT (t_high0 * 2.0
+ * ((mp_limb_t) 1 << (BITS_PER_MP_LIMB - 1))
+ + t_high1);
+ /* If t_high0,,t_high1 is big, the result in INITIAL_APPROX might have
+ become incorrect due to overflow in the conversion from double to
+ mp_limb_t above. It will typically be zero in that case, but might be
+ a small number on some machines. The most significant bit of
+ INITIAL_APPROX should be set, so that bit is a good overflow
+ indication. */
+ if ((mp_limb_signed_t) initial_approx >= 0)
+ initial_approx = ~(mp_limb_t)0;
+ }
+#else
+ /* Get a 9-bit approximation from the tables. The tables expect to
+ be indexed with the 8 bits right below the highest set bit.
+ Also, the highest result bit is not returned by the tables, and
+ must be ORed into the result. The scheme gives 9 bits of starting
+ approximation from just two 256-entry 8-bit tables. */
+
+ if ((cnt & 1) == 0)
+ {
+ /* The most sign bit of t_high0 is set. */
+ initial_approx = t_high0 >> (BITS_PER_MP_LIMB - 8 - 1);
+ initial_approx &= 0xff;
+ initial_approx = even_approx_tab[initial_approx];
+ }
+ else
+ {
+ /* The most significant bit of T_HIGH0 is unset,
+ the second most significant is set. */
+ initial_approx = t_high0 >> (BITS_PER_MP_LIMB - 8 - 2);
+ initial_approx &= 0xff;
+ initial_approx = odd_approx_tab[initial_approx];
+ }
+ initial_approx |= 0x100;
+ initial_approx <<= BITS_PER_MP_LIMB - 8 - 1;
+
+ /* Perform small precision Newtonian iterations to get a full word
+ approximation. For small operands, these iterations do the
+ entire job. */
+ if (t_high0 == ~(mp_limb_t)0)
+ initial_approx = t_high0;
+ else
+ {
+ mp_limb_t quot;
+
+ if (t_high0 >= initial_approx)
+ initial_approx = t_high0 + 1;
+
+ /* First get about 18 bits with pure C arithmetic. Dividing the high
+ word by the upper half of the approximation, then shifting the
+ quotient back up, approximates the two-word quotient
+ t_high0 * 2**BITS_PER_MP_LIMB / initial_approx with a single
+ one-word division. */
+ quot = t_high0 / (initial_approx >> BITS_PER_MP_LIMB/2) << BITS_PER_MP_LIMB/2;
+ initial_approx = (initial_approx + quot) / 2;
+ initial_approx |= (mp_limb_t) 1 << (BITS_PER_MP_LIMB - 1);
+
+ /* Now get a full word by one (or, on machines with limbs wider than
+ 36 bits, several) further iterations. */
+ for (i = 16; i < BITS_PER_MP_LIMB; i <<= 1)
+ {
+ mp_limb_t ignored_remainder;
+
+ udiv_qrnnd (quot, ignored_remainder,
+ t_high0, t_high1, initial_approx);
+ initial_approx = (initial_approx + quot) / 2;
+ initial_approx |= (mp_limb_t) 1 << (BITS_PER_MP_LIMB - 1);
+ }
+ }
+#endif
+
+ rp[0] = initial_approx;
+ rsize = 1;
+
+#ifdef DEBUG
+ printf ("\n\nT = ");
+ mpn_dump (tp, tsize);
+#endif
+
+ if (tsize > 2)
+ {
+ /* Determine the successive precisions to use in the iteration. We
+ minimize the precisions, beginning with the highest (i.e. the last
+ iteration) and working down to the lowest (i.e. the first
+ iteration). */
+
+ xp = (mp_ptr) TMP_ALLOC (tsize * BYTES_PER_MP_LIMB);
+ ttp = (mp_ptr) TMP_ALLOC (tsize * BYTES_PER_MP_LIMB);
+
+ t_end_ptr = tp + tsize;
+
+ tmp = tsize / 2;
+ for (i = 0;; i++)
+ {
+ tsize = (tmp + 1) / 2;
+ if (tmp == tsize)
+ break;
+ tsizes[i] = tsize + tmp;
+ tmp = tsize;
+ }
+
+ /* Main Newton iteration loop. For big arguments, most of the
+ time is spent here. */
+
+ /* It is possible to do a great optimization here. The successive
+ divisors in the mpn_divmod call below have more and more leading
+ words in common with their predecessors. Therefore the beginning
+ of each division repeats work done by the previous division. If
+ we could guarantee that the leading words of two consecutive
+ divisors are the same (i.e. that a later divisor merely has more
+ digits at the end), reusing the old remainder of the last division
+ in the subsequent division would realize this optimization. It
+ would surely make a difference even for small arguments. */
+
+ /* Loop invariants:
+
+ R <= shiftdown_to_same_size(floor(sqrt(OP))) < R + 1.
+ X - 1 < shiftdown_to_same_size(floor(sqrt(OP))) <= X.
+ R <= shiftdown_to_same_size(X). */
+
+ while (--i >= 0)
+ {
+ mp_limb_t cy;
+#ifdef DEBUG
+ mp_limb_t old_least_sign_r = rp[0];
+ mp_size_t old_rsize = rsize;
+
+ printf ("R = ");
+ mpn_dump (rp, rsize);
+#endif
+ tsize = tsizes[i];
+
+ /* Need to copy the numerator into temporary space, as
+ mpn_divmod overwrites its numerator argument with the
+ remainder (which we currently ignore). */
+ MPN_COPY (ttp, t_end_ptr - tsize, tsize);
+ cy = mpn_divmod (xp, ttp, tsize, rp, rsize);
+ xsize = tsize - rsize;
+
+#ifdef DEBUG
+ printf ("X =%d ", cy);
+ mpn_dump (xp, xsize);
+#endif
+
+ /* Add X and R with the most significant limbs aligned,
+ temporarily ignoring at least one limb at the low end of X. */
+ tmp = xsize - rsize;
+ cy += mpn_add_n (xp + tmp, rp, xp + tmp, rsize);
+
+ /* If T begins with more than 2 x BITS_PER_MP_LIMB of ones, we get
+ intermediate roots that'd need an extra bit. We don't want to
+ handle that since it would make the subsequent divisor
+ non-normalized, so round such roots down to be only ones in the
+ current precision. */
+ if (cy == 2)
+ {
+ mp_size_t j;
+ for (j = xsize; j >= 0; j--)
+ xp[j] = ~(mp_limb_t)0;
+ }
+
+ /* Divide X by 2 and put the result in R. This is the new
+ approximation. Shift in the carry from the addition. */
+ mpn_rshift (rp, xp, xsize, 1);
+ rp[xsize - 1] |= ((mp_limb_t) 1 << (BITS_PER_MP_LIMB - 1));
+ rsize = xsize;
+#ifdef DEBUG
+ if (old_least_sign_r != rp[rsize - old_rsize])
+ printf (">>>>>>>> %d: %0*lX, %0*lX <<<<<<<<\n",
+ i, 2 * BYTES_PER_MP_LIMB, old_least_sign_r,
+ 2 * BYTES_PER_MP_LIMB, rp[rsize - old_rsize]);
+#endif
+ }
+ }
+
+#ifdef DEBUG
+ printf ("(final) R = ");
+ mpn_dump (rp, rsize);
+#endif
+
+ /* We computed the square root of OP * 2**(2*floor(cnt/2)).
+ This made R a factor of 2**floor(cnt/2) too large.
+ Shift it down here to fix that. */
+ if (cnt / 2 != 0)
+ {
+ mpn_rshift (rp, rp, rsize, cnt/2);
+ rsize -= rp[rsize - 1] == 0;
+ }
+
+ /* Calculate the remainder. */
+ mpn_mul_n (tp, rp, rp, rsize);
+ tsize = rsize + rsize;
+ tsize -= tp[tsize - 1] == 0;
+ if (op_size < tsize
+ || (op_size == tsize && mpn_cmp (op_ptr, tp, op_size) < 0))
+ {
+ /* R is too large. Decrement it. */
+
+ /* These operations can't overflow. */
+ cy_limb = mpn_sub_n (tp, tp, rp, rsize);
+ cy_limb += mpn_sub_n (tp, tp, rp, rsize);
+ mpn_sub_1 (tp + rsize, tp + rsize, tsize - rsize, cy_limb);
+ mpn_add_1 (tp, tp, tsize, (mp_limb_t) 1);
+
+ mpn_sub_1 (rp, rp, rsize, (mp_limb_t) 1);
+
+#ifdef DEBUG
+ printf ("(adjusted) R = ");
+ mpn_dump (rp, rsize);
+#endif
+ }
+
+ if (rem_ptr != NULL)
+ {
+ cy_limb = mpn_sub (rem_ptr, op_ptr, op_size, tp, tsize);
+ MPN_NORMALIZE (rem_ptr, op_size);
+ TMP_FREE (marker);
+ return op_size;
+ }
+ else
+ {
+ int res;
+ res = op_size != tsize || mpn_cmp (op_ptr, tp, op_size);
+ TMP_FREE (marker);
+ return res;
+ }
+}
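+
+/* Example use (an editorial sketch, not part of the library): per the
+ conventions above, ROOT_PTR needs ceil(OP_SIZE/2) limbs, REM_PTR needs
+ OP_SIZE limbs, and the return value is the number of significant limbs
+ in the remainder. */
+#if 0
+static void
+example_sqrtrem (void)
+{
+ mp_limb_t op[1] = {5};
+ mp_limb_t root[1], rem[1];
+ mp_size_t rem_size;
+
+ rem_size = mpn_sqrtrem (root, rem, op, 1);
+ /* Since 5 = 2*2 + 1, root[0] is now 2, rem[0] is 1, and rem_size
+ is 1. */
+}
+#endif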
--- /dev/null
+/* mpn_sub_n -- Subtract two limb vectors of equal, non-zero length.
+
+Copyright (C) 1992, 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+
+mp_limb_t
+#if __STDC__
+mpn_sub_n (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_srcptr s2_ptr, mp_size_t size)
+#else
+mpn_sub_n (res_ptr, s1_ptr, s2_ptr, size)
+ register mp_ptr res_ptr;
+ register mp_srcptr s1_ptr;
+ register mp_srcptr s2_ptr;
+ mp_size_t size;
+#endif
+{
+ register mp_limb_t x, y, cy;
+ register mp_size_t j;
+
+ /* The loop counter and index J go from -SIZE to -1. This way the
+ loop-exit test is a simple test against zero, which is faster. */
+ j = -size;
+
+ /* Offset the base pointers to compensate for the negative indices. */
+ s1_ptr -= j;
+ s2_ptr -= j;
+ res_ptr -= j;
+
+ cy = 0;
+ do
+ {
+ y = s2_ptr[j];
+ x = s1_ptr[j];
+ y += cy; /* add previous carry to subtrahend */
+ cy = (y < cy); /* get out carry from that addition */
+ y = x - y; /* main subtract */
+ cy = (y > x) + cy; /* get out carry from the subtract, combine */
+ res_ptr[j] = y;
+ }
+ while (++j != 0);
+
+ return cy;
+}
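+
+/* The borrow propagation above, in isolation (an editorial sketch, not
+ part of the library): subtract one limb pair with an incoming borrow,
+ producing the outgoing borrow without any wider intermediate type. */
+#if 0
+static mp_limb_t
+example_sub_step (mp_limb_t x, mp_limb_t y, mp_limb_t cy, mp_limb_t *res)
+{
+ y += cy; /* fold the incoming borrow into the subtrahend */
+ cy = (y < cy); /* did that addition wrap around? */
+ y = x - y; /* the actual subtraction */
+ cy += (y > x); /* it borrowed iff the result exceeds x */
+ *res = y;
+ return cy; /* 0 or 1: borrow out of this limb */
+}
+#endif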
--- /dev/null
+/* mpn_submul_1 -- multiply the S1_SIZE long limb vector pointed to by S1_PTR
+ by S2_LIMB, subtract the S1_SIZE least significant limbs of the product
+ from the limb vector pointed to by RES_PTR. Return the most significant
+ limb of the product, adjusted for carry-out from the subtraction.
+
+Copyright (C) 1992, 1993, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+#include "longlong.h"
+
+mp_limb_t
+mpn_submul_1 (res_ptr, s1_ptr, s1_size, s2_limb)
+ register mp_ptr res_ptr;
+ register mp_srcptr s1_ptr;
+ mp_size_t s1_size;
+ register mp_limb_t s2_limb;
+{
+ register mp_limb_t cy_limb;
+ register mp_size_t j;
+ register mp_limb_t prod_high, prod_low;
+ register mp_limb_t x;
+
+ /* The loop counter and index J go from -SIZE to -1. This way the
+ loop-exit test is a simple test against zero, which is faster. */
+ j = -s1_size;
+
+ /* Offset the base pointers to compensate for the negative indices. */
+ res_ptr -= j;
+ s1_ptr -= j;
+
+ cy_limb = 0;
+ do
+ {
+ umul_ppmm (prod_high, prod_low, s1_ptr[j], s2_limb);
+
+ prod_low += cy_limb; /* add carry-in from the previous limb */
+ cy_limb = (prod_low < cy_limb) + prod_high; /* carry out, plus high product limb */
+
+ x = res_ptr[j];
+ prod_low = x - prod_low; /* main subtract */
+ cy_limb += (prod_low > x); /* borrow out of the subtract */
+ res_ptr[j] = prod_low;
+ }
+ while (++j != 0);
+
+ return cy_limb;
+}
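+
+/* Example (editorial note): with res = {10}, s1 = {3} and s2_limb = 4,
+ the product is 12, so res[0] wraps to 2**BITS_PER_MP_LIMB - 2 and the
+ function returns 1: the high product limb (0) plus the borrow out of
+ the subtraction (1). */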
--- /dev/null
+/* mpn_udiv_w_sdiv -- implement udiv_qrnnd on machines with only signed
+ division.
+
+ Contributed by Peter L. Montgomery.
+
+Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB. If not, write to
+the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+MA 02111-1307, USA. */
+
+#include "gmp.h"
+#include "gmp-impl.h"
+#include "longlong.h"
+
+mp_limb_t
+mpn_udiv_w_sdiv (rp, a1, a0, d)
+ mp_limb_t *rp, a1, a0, d;
+{
+ mp_limb_t q, r;
+ mp_limb_t c0, c1, b1;
+
+ /* The 2^32 and 2^31 figures in the comments below assume 32-bit limbs
+ for concreteness; the code itself is written in terms of
+ BITS_PER_MP_LIMB and works for any limb width. */
+
+ if ((mp_limb_signed_t) d >= 0)
+ {
+ if (a1 < d - a1 - (a0 >> (BITS_PER_MP_LIMB - 1)))
+ {
+ /* dividend, divisor, and quotient are nonnegative */
+ sdiv_qrnnd (q, r, a1, a0, d);
+ }
+ else
+ {
+ /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
+ sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (BITS_PER_MP_LIMB - 1));
+ /* Divide (c1*2^32 + c0) by d */
+ sdiv_qrnnd (q, r, c1, c0, d);
+ /* Add 2^31 to quotient */
+ q += (mp_limb_t) 1 << (BITS_PER_MP_LIMB - 1);
+ }
+ }
+ else
+ {
+ b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
+ c1 = a1 >> 1; /* A/2 */
+ c0 = (a1 << (BITS_PER_MP_LIMB - 1)) + (a0 >> 1);
+
+ if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
+ {
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
+ {
+ c1 = (b1 - 1) - c1;
+ c0 = ~c0; /* logical NOT */
+
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ q = ~q; /* (A/2)/b1 */
+ r = (b1 - 1) - r;
+
+ r = 2*r + (a0 & 1); /* A/(2*b1) */
+
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else /* Implies c1 = b1 */
+ { /* Hence a1 = d - 1 = 2*b1 - 1 */
+ if (a0 >= -d)
+ {
+ q = -1;
+ r = a0 + d;
+ }
+ else
+ {
+ q = -2;
+ r = a0 + 2*d;
+ }
+ }
+ }
+
+ *rp = r;
+ return q;
+}
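+
+/* Intended use (an editorial note): this routine has the udiv_qrnnd
+ contract -- it divides the two-limb value a1*2^b + a0, where b is
+ BITS_PER_MP_LIMB, by d, returning the quotient and storing the
+ remainder through RP. As with udiv_qrnnd, a1 < d must hold so that
+ the quotient fits in a single limb. On a machine whose longlong.h
+ provides only sdiv_qrnnd, udiv_qrnnd can be wired up along these
+ lines (a sketch, not necessarily the exact macro GMP uses):
+
+ #define udiv_qrnnd(q, r, n1, n0, d) \
+ ((q) = mpn_udiv_w_sdiv (&(r), (n1), (n0), (d)))
+ */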
--- /dev/null
+This directory contains mpn functions for various HP PA-RISC chips. Code
+that runs faster on the PA7100 and later implementations is in the pa7100
+directory.
+
+RELEVANT OPTIMIZATION ISSUES
+
+ Load and Store timing
+
+On the PA7000, no memory instruction can issue during the two cycles after
+a store. On the PA7100, this penalty is reduced to one cycle.
+
+The PA7100 has a lockup-free (non-blocking) cache, so it helps to schedule
+loads and their dependent instructions far apart.
+
+STATUS
+
+1. mpn_mul_1 could be improved to 6.5 cycles/limb on the PA7100, using the
+ instructions below (but some software pipelining is needed to avoid the
+ xmpyu-fstds delay):
+
+ fldds s1_ptr
+
+ xmpyu
+ fstds N(%r30)
+ xmpyu
+ fstds N(%r30)
+
+ ldws N(%r30)
+ ldws N(%r30)
+ ldws N(%r30)
+ ldws N(%r30)
+
+ addc
+ stws res_ptr
+ addc
+ stws res_ptr
+
+ addib Loop
+
+2. mpn_addmul_1 could be improved from the current 10 to 7.5 cycles/limb
+ (asymptotically) on the PA7100, using the instructions below. With proper
+ software pipelining and the unrolling level below, the speed becomes 8
+ cycles/limb.
+
+ fldds s1_ptr
+ fldds s1_ptr
+
+ xmpyu
+ fstds N(%r30)
+ xmpyu
+ fstds N(%r30)
+ xmpyu
+ fstds N(%r30)
+ xmpyu
+ fstds N(%r30)
+
+ ldws N(%r30)
+ ldws N(%r30)
+ ldws N(%r30)
+ ldws N(%r30)
+ ldws N(%r30)
+ ldws N(%r30)
+ ldws N(%r30)
+ ldws N(%r30)
+ addc
+ addc
+ addc
+ addc
+ addc %r0,%r0,cy-limb
+
+ ldws res_ptr
+ ldws res_ptr
+ ldws res_ptr
+ ldws res_ptr
+ add
+ stws res_ptr
+ addc
+ stws res_ptr
+ addc
+ stws res_ptr
+ addc
+ stws res_ptr
+
+ addib