dnl SPARC v9 32-bit mpn_mul_1 -- Multiply a limb vector with a limb and
dnl store the result in a second limb vector.

dnl Copyright (C) 1998, 2000 Free Software Foundation, Inc.

dnl This file is part of the GNU MP Library.

dnl The GNU MP Library is free software; you can redistribute it and/or modify
dnl it under the terms of the GNU Lesser General Public License as published
dnl by the Free Software Foundation; either version 2.1 of the License, or (at
dnl your option) any later version.

dnl The GNU MP Library is distributed in the hope that it will be useful, but
dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
dnl License for more details.

dnl You should have received a copy of the GNU Lesser General Public License
dnl along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
dnl the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
dnl MA 02111-1307, USA.
include(`../config.m4')
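
dnl For reference, the operation this file implements, as a minimal C
dnl sketch (the limb type and argument names here are illustrative for
dnl this 32-bit target; GMP's real declarations live in gmp.h):
dnl
dnl	typedef unsigned int mp_limb_t;	/* 32-bit limb assumed */
dnl
dnl	mp_limb_t
dnl	mpn_mul_1 (mp_limb_t *res_ptr, const mp_limb_t *s1_ptr,
dnl	           long n, mp_limb_t v)
dnl	{
dnl	  mp_limb_t cy = 0;	/* running carry between limbs */
dnl	  long i;
dnl	  for (i = 0; i < n; i++)
dnl	    {
dnl	      unsigned long long p = (unsigned long long) s1_ptr[i] * v + cy;
dnl	      res_ptr[i] = (mp_limb_t) p;	/* store low 32 bits */
dnl	      cy = (mp_limb_t) (p >> 32);	/* high bits carry on */
dnl	    }
dnl	  return cy;	/* final carry is the return value */
dnl	}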
	ld	[%o7+L(noll)-L(pc)],%f10',
`	sethi	%hi(L(noll)),%g1
	ld	[%g1+%lo(L(noll))],%f10')

	sethi	%hi(0xffff0000),%o0
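
dnl The multiplies themselves are done in floating point: the 0xffff0000
dnl mask built above splits the limb v into 16-bit halves, each half times
dnl a 32-bit source limb is at most 48 bits and hence exact in an IEEE
dnl double, and fdtox brings the products back as integers through the
dnl stack slots [%fp-16]/[%fp-24] and [%fp-32]/[%fp-40] (two slot pairs,
dnl used alternately by the software pipeline below).  In illustrative C,
dnl one limb product is formed as:
dnl
dnl	unsigned long long p0  = (unsigned long long) u * (v & 0xffff);
dnl	unsigned long long p16 = (unsigned long long) u * (v >> 16);
dnl	unsigned long long p   = (p16 << 16) + p0;	/* == u * v exactly */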
	add	%i1,4,%i1	C s1_ptr++
	add	%i1,4,%i1	C s1_ptr++
	add	%i1,4,%i1	C s1_ptr++
	add	%i1,4,%i1	C s1_ptr++
	ldx	[%fp-24],%g2	C p16
	sllx	%g2,16,%g2	C align p16
	add	%g2,%g1,%g1	C add p16 to p0 (ADD1)
	add	%i0,4,%i0	C res_ptr++
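
dnl Each step below reassembles one product from its stacked halves and
dnl folds in the running carry; in illustrative C (cy lives in %g3, the
dnl assembled p in %g1/%g4):
dnl
dnl	p = (p16 << 16) + p0;	/* "align p16", ADD1 */
dnl	p += cy;		/* "p += cy" */
dnl	res_ptr[i] = (mp_limb_t) p;	/* low 32 bits to memory */
dnl	cy = (mp_limb_t) (p >> 32);	/* rest feeds the next limb */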
	add	%i1,4,%i1	C s1_ptr++
	add	%g3,%g1,%g4	C p += cy
	ldx	[%fp-24],%g2	C p16
	ldx	[%fp-16],%g1	C p0
	sllx	%g2,16,%g2	C align p16
	add	%g2,%g1,%g1	C add p16 to p0 (ADD1)
	add	%i0,4,%i0	C res_ptr++
	add	%i1,4,%i1	C s1_ptr++
	add	%g3,%g1,%g4	C p += cy
	ldx	[%fp-40],%g2	C p16
	ldx	[%fp-32],%g1	C p0
	sllx	%g2,16,%g2	C align p16
	add	%g2,%g1,%g1	C add p16 to p0 (ADD1)
	add	%i0,4,%i0	C res_ptr++
	add	%g3,%g1,%g4	C p += cy
	ldx	[%fp-24],%g2	C p16
	ldx	[%fp-16],%g1	C p0
	sllx	%g2,16,%g2	C align p16
	add	%g3,%g1,%g4	C p += cy
	ldx	[%fp-40],%g2	C p16
	ldx	[%fp-32],%g1	C p0
	sllx	%g2,16,%g2	C align p16
	add	%g2,%g1,%g1	C add p16 to p0 (ADD1)
	add	%i0,4,%i0	C res_ptr++
	add	%g3,%g1,%g4	C p += cy
	ldx	[%fp-24],%g2	C p16
	ldx	[%fp-16],%g1	C p0
	sllx	%g2,16,%g2	C align p16
	ldx	[%fp-24],%g2	C p16
	ldx	[%fp-16],%g1	C p0
	sllx	%g2,16,%g2	C align p16
L(xxx):	fdtox	%f16,%f14	C convert product to 64-bit int
	add	%g2,%g1,%g1	C add p16 to p0 (ADD1)
	add	%i0,4,%i0	C res_ptr++
	add	%g3,%g1,%g4	C p += cy
	ldx	[%fp-40],%g2	C p16
	ldx	[%fp-32],%g1	C p0
	sllx	%g2,16,%g2	C align p16
	add	%g2,%g1,%g1	C add p16 to p0 (ADD1)
	add	%i0,4,%i0	C res_ptr++
	add	%g3,%g1,%g4	C p += cy
	ldx	[%fp-24],%g2	C p16
	ldx	[%fp-16],%g1	C p0
	sllx	%g2,16,%g2	C align p16
	add	%g2,%g1,%g1	C add p16 to p0 (ADD1)
	add	%i0,4,%i0	C res_ptr++
	ldx	[%fp-24],%g2	C p16
	ldx	[%fp-16],%g1	C p0
	sllx	%g2,16,%g2	C align p16
L(yyy):	add	%g2,%g1,%g1	C add p16 to p0 (ADD1)
	add	%i0,4,%i0	C res_ptr++
	add	%g3,%g1,%g4	C p += cy
	ldx	[%fp-40],%g2	C p16
	ldx	[%fp-32],%g1	C p0
	sllx	%g2,16,%g2	C align p16
	add	%g2,%g1,%g1	C add p16 to p0 (ADD1)
	add	%i0,4,%i0	C res_ptr++
	ldx	[%fp-24],%g2	C p16
	ldx	[%fp-16],%g1	C p0
	sllx	%g2,16,%g2	C align p16
	add	%g2,%g1,%g1	C add p16 to p0 (ADD1)
	add	%i0,4,%i0	C res_ptr++
L(ret):	add	%g3,%g1,%g4	C p += cy
	restore	%g0,%g3,%o0	C sideeffect: put cy in retreg
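
dnl The restore above exploits RESTORE's add side effect: %g0+%g3 (the
dnl final carry) is written to the caller's %o0, the return register.
dnl A caller sees it as, in illustrative C:
dnl
dnl	mp_limb_t carry = mpn_mul_1 (rp, up, n, v);
dnl	/* rp[0..n-1] = low limbs of up[] * v; carry is limb n */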