[project @ 1998-06-05 14:43:47 by simonm]
author: simonm <unknown>
    Fri, 5 Jun 1998 14:43:58 +0000 (14:43 +0000)
committer: simonm <unknown>
    Fri, 5 Jun 1998 14:43:58 +0000 (14:43 +0000)
Import GMP 2.0.2

13 files changed:
ghc/rts/gmp/mpn/clipper/add_n.s [new file with mode: 0644]
ghc/rts/gmp/mpn/clipper/mul_1.s [new file with mode: 0644]
ghc/rts/gmp/mpn/clipper/sub_n.s [new file with mode: 0644]
ghc/rts/gmp/mpn/hppa/add_n.s [new file with mode: 0644]
ghc/rts/gmp/mpn/hppa/hppa1_1/addmul_1.s [new file with mode: 0644]
ghc/rts/gmp/mpn/hppa/hppa1_1/mul_1.s [new file with mode: 0644]
ghc/rts/gmp/mpn/hppa/hppa1_1/pa7100/add_n.s [new file with mode: 0644]
ghc/rts/gmp/mpn/hppa/hppa1_1/pa7100/lshift.s [new file with mode: 0644]
ghc/rts/gmp/mpn/hppa/hppa1_1/submul_1.s [new file with mode: 0644]
ghc/rts/gmp/mpn/hppa/lshift.s [new file with mode: 0644]
ghc/rts/gmp/mpn/hppa/rshift.s [new file with mode: 0644]
ghc/rts/gmp/mpn/hppa/sub_n.s [new file with mode: 0644]
ghc/rts/gmp/mpn/hppa/udiv_qrnnd.s [new file with mode: 0644]

diff --git a/ghc/rts/gmp/mpn/clipper/add_n.s b/ghc/rts/gmp/mpn/clipper/add_n.s
new file mode 100644 (file)
index 0000000..8d9b986
--- /dev/null
@@ -0,0 +1,48 @@
+; Clipper __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+; sum in a third limb vector.
+
+; Copyright (C) 1995 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+.text
+       .align 16
+.globl ___mpn_add_n
+___mpn_add_n:
+       subq    $8,sp           ; reserve 8 bytes of stack
+       storw   r6,(sp)         ; save callee-saved r6
+       loadw   12(sp),r2       ; r2 = s2_ptr (stack argument)
+       loadw   16(sp),r3       ; r3 = size (stack argument)
+       loadq   $0,r6           ; clear carry-save register
+
+.Loop: loadw   (r1),r4         ; r4 = *s1_ptr
+       loadw   (r2),r5         ; r5 = *s2_ptr
+       addwc   r6,r6           ; restore carry from r6
+       addwc   r5,r4           ; r4 = r4 + r5 + cy
+       storw   r4,(r0)         ; *res_ptr = sum limb
+       subwc   r6,r6           ; save carry in r6 (0 or -1)
+       addq    $4,r0           ; res_ptr++
+       addq    $4,r1           ; s1_ptr++
+       addq    $4,r2           ; s2_ptr++
+       subq    $1,r3           ; size--
+       brne    .Loop
+
+       negw    r6,r0           ; return carry: -1 -> 1, 0 -> 0
+       loadw   (sp),r6         ; restore r6
+       addq    $8,sp           ; deallocate stack
+       ret     sp
diff --git a/ghc/rts/gmp/mpn/clipper/mul_1.s b/ghc/rts/gmp/mpn/clipper/mul_1.s
new file mode 100644 (file)
index 0000000..44d92c3
--- /dev/null
@@ -0,0 +1,47 @@
+; Clipper __mpn_mul_1 -- Multiply a limb vector with a limb and store
+; the result in a second limb vector.
+
+; Copyright (C) 1995 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+.text
+       .align  16
+.globl ___mpn_mul_1
+___mpn_mul_1:
+       subq    $8,sp           ; reserve 8 bytes of stack
+       storw   r6,(sp)         ; save callee-saved r6
+       loadw   12(sp),r2       ; r2 = size (stack argument)
+       loadw   16(sp),r3       ; r3 = s2_limb (stack argument)
+       loadq   $0,r6           ; clear carry limb
+
+.Loop: loadw   (r1),r4         ; r4 = *s1_ptr
+       mulwux  r3,r4           ; unsigned 32x32->64; low in r4, high in r5
+       addw    r6,r4           ; add old carry limb into low product limb
+       loadq   $0,r6
+       addwc   r5,r6           ; propagate cy into high product limb
+       storw   r4,(r0)         ; *res_ptr = low product limb
+       addq    $4,r0           ; res_ptr++
+       addq    $4,r1           ; s1_ptr++
+       subq    $1,r2           ; size--
+       brne    .Loop
+
+       movw    r6,r0           ; return most significant product limb
+       loadw   0(sp),r6        ; restore r6
+       addq    $8,sp           ; deallocate stack
+       ret     sp
diff --git a/ghc/rts/gmp/mpn/clipper/sub_n.s b/ghc/rts/gmp/mpn/clipper/sub_n.s
new file mode 100644 (file)
index 0000000..882c991
--- /dev/null
@@ -0,0 +1,48 @@
+; Clipper __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+; store difference in a third limb vector.
+
+; Copyright (C) 1995 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+.text
+       .align 16
+.globl ___mpn_sub_n
+___mpn_sub_n:
+       subq    $8,sp           ; reserve 8 bytes of stack
+       storw   r6,(sp)         ; save callee-saved r6
+       loadw   12(sp),r2       ; r2 = s2_ptr (stack argument)
+       loadw   16(sp),r3       ; r3 = size (stack argument)
+       loadq   $0,r6           ; clear carry-save register
+
+.Loop: loadw   (r1),r4         ; r4 = *s1_ptr
+       loadw   (r2),r5         ; r5 = *s2_ptr
+       addwc   r6,r6           ; restore carry from r6
+       subwc   r5,r4           ; r4 = r4 - r5 - borrow
+       storw   r4,(r0)         ; *res_ptr = difference limb
+       subwc   r6,r6           ; save carry in r6 (0 or -1)
+       addq    $4,r0           ; res_ptr++
+       addq    $4,r1           ; s1_ptr++
+       addq    $4,r2           ; s2_ptr++
+       subq    $1,r3           ; size--
+       brne    .Loop
+
+       negw    r6,r0           ; return borrow: -1 -> 1, 0 -> 0
+       loadw   (sp),r6         ; restore r6
+       addq    $8,sp           ; deallocate stack
+       ret     sp
diff --git a/ghc/rts/gmp/mpn/hppa/add_n.s b/ghc/rts/gmp/mpn/hppa/add_n.s
new file mode 100644 (file)
index 0000000..b4a1428
--- /dev/null
@@ -0,0 +1,58 @@
+; HP-PA  __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+; sum in a third limb vector.
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr      gr26
+; s1_ptr       gr25
+; s2_ptr       gr24
+; size         gr23
+
+; One might want to unroll this as for other processors, but it turns
+; out that the data cache contention after a store makes such
+; unrolling useless.  We can't come under 5 cycles/limb anyway.
+
+       .code
+       .export         __mpn_add_n
+__mpn_add_n
+       .proc
+       .callinfo       frame=0,no_calls
+       .entry
+
+       ldws,ma         4(0,%r25),%r20  ; r20 = *s1_ptr++
+       ldws,ma         4(0,%r24),%r19  ; r19 = *s2_ptr++
+
+       addib,=         -1,%r23,L$end   ; check for (SIZE == 1)
+        add            %r20,%r19,%r28  ; add first limbs ignoring cy
+
+L$loop ldws,ma         4(0,%r25),%r20  ; load next s1 limb
+       ldws,ma         4(0,%r24),%r19  ; load next s2 limb
+       stws,ma         %r28,4(0,%r26)  ; store previous sum limb
+       addib,<>        -1,%r23,L$loop  ; loop while --size != 0
+        addc           %r20,%r19,%r28  ; delay slot: add with carry
+
+L$end  stws            %r28,0(0,%r26)  ; store final sum limb
+       bv              0(%r2)          ; return
+        addc           %r0,%r0,%r28    ; delay slot: return carry in r28
+
+       .exit
+       .procend
diff --git a/ghc/rts/gmp/mpn/hppa/hppa1_1/addmul_1.s b/ghc/rts/gmp/mpn/hppa/hppa1_1/addmul_1.s
new file mode 100644 (file)
index 0000000..0fdcb3c
--- /dev/null
@@ -0,0 +1,102 @@
+; HP-PA-1.1 __mpn_addmul_1 -- Multiply a limb vector with a limb and
+; add the result to a second limb vector.
+
+; Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr      r26
+; s1_ptr       r25
+; size         r24
+; s2_limb      r23
+
+; This runs at 11 cycles/limb on a PA7000.  With the used instructions, it
+; can not become faster due to data cache contention after a store.  On the
+; PA7100 it runs at 10 cycles/limb, and that can not be improved either,
+; since only the xmpyu does not need the integer pipeline, so the only
+; dual-issue we will get are addc+xmpyu.  Unrolling could gain a cycle/limb
+; on the PA7100.
+
+; There are some ideas described in mul_1.s that applies to this code too.
+
+       .code
+       .export         __mpn_addmul_1
+__mpn_addmul_1
+       .proc
+       .callinfo       frame=64,no_calls
+       .entry
+
+       ldo             64(%r30),%r30           ; allocate 64-byte stack frame
+       fldws,ma        4(%r25),%fr5            ; fr5 = first s1 limb, s1_ptr++
+       stw             %r23,-16(%r30)          ; move s2_limb ...
+       addib,=         -1,%r24,L$just_one_limb ; single-limb case
+        fldws          -16(%r30),%fr4          ; ... into fr4
+       add             %r0,%r0,%r0             ; clear carry
+       xmpyu           %fr4,%fr5,%fr6          ; fr6 = 64-bit product
+       fldws,ma        4(%r25),%fr7            ; fr7 = next s1 limb, s1_ptr++
+       fstds           %fr6,-16(%r30)          ; spill product to stack
+       xmpyu           %fr4,%fr7,%fr8          ; start second product
+       ldw             -12(%r30),%r19          ; least significant limb in product
+       ldw             -16(%r30),%r28          ; most significant limb in product
+
+       fstds           %fr8,-16(%r30)          ; spill second product
+       addib,=         -1,%r24,L$end           ; two-limb case: skip loop
+        ldw            -12(%r30),%r1           ; low limb of second product
+
+; Main loop
+L$loop ldws            0(%r26),%r29            ; r29 = *res_ptr
+       fldws,ma        4(%r25),%fr5            ; load next s1 limb
+       add             %r29,%r19,%r19          ; add res limb into low product limb
+       stws,ma         %r19,4(%r26)            ; store, res_ptr++
+       addc            %r28,%r1,%r19           ; next low limb + high limb + cy
+       xmpyu           %fr4,%fr5,%fr6          ; start next product
+       ldw             -16(%r30),%r28          ; high limb of spilled product
+       fstds           %fr6,-16(%r30)          ; spill new product
+       addc            %r0,%r28,%r28           ; fold carry into high limb
+       addib,<>        -1,%r24,L$loop          ; loop while --size != 0
+        ldw            -12(%r30),%r1           ; delay slot: low limb of new product
+
+L$end  ldw             0(%r26),%r29            ; wind down software pipeline:
+       add             %r29,%r19,%r19          ;  second-to-last limb
+       stws,ma         %r19,4(%r26)            ; store, res_ptr++
+       addc            %r28,%r1,%r19           ; form last limb
+       ldw             -16(%r30),%r28          ; final high limb
+       ldws            0(%r26),%r29            ; last res limb
+       addc            %r0,%r28,%r28           ; fold carry into high limb
+       add             %r29,%r19,%r19          ; add res limb into last limb
+       stws,ma         %r19,4(%r26)            ; store last limb
+       addc            %r0,%r28,%r28           ; r28 = return value (carry limb)
+       bv              0(%r2)                  ; return
+        ldo            -64(%r30),%r30          ; delay slot: deallocate frame
+
+L$just_one_limb
+       xmpyu           %fr4,%fr5,%fr6          ; single 64-bit product
+       ldw             0(%r26),%r29            ; r29 = *res_ptr
+       fstds           %fr6,-16(%r30)          ; spill product
+       ldw             -12(%r30),%r1           ; low product limb
+       ldw             -16(%r30),%r28          ; high product limb
+       add             %r29,%r1,%r19           ; add low limb into res limb
+       stw             %r19,0(%r26)            ; store result limb
+       addc            %r0,%r28,%r28           ; return high limb + carry
+       bv              0(%r2)                  ; return
+        ldo            -64(%r30),%r30          ; delay slot: deallocate frame
+
+       .exit
+       .procend
diff --git a/ghc/rts/gmp/mpn/hppa/hppa1_1/mul_1.s b/ghc/rts/gmp/mpn/hppa/hppa1_1/mul_1.s
new file mode 100644 (file)
index 0000000..cdd0c1d
--- /dev/null
@@ -0,0 +1,98 @@
+; HP-PA-1.1 __mpn_mul_1 -- Multiply a limb vector with a limb and store
+; the result in a second limb vector.
+
+; Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr      r26
+; s1_ptr       r25
+; size         r24
+; s2_limb      r23
+
+; This runs at 9 cycles/limb on a PA7000.  With the used instructions, it can
+; not become faster due to data cache contention after a store.  On the
+; PA7100 it runs at 7 cycles/limb, and that can not be improved either, since
+; only the xmpyu does not need the integer pipeline, so the only dual-issue
+; we will get are addc+xmpyu.  Unrolling would not help either CPU.
+
+; We could use fldds to read two limbs at a time from the S1 array, and that
+; could bring down the times to 8.5 and 6.5 cycles/limb for the PA7000 and
+; PA7100, respectively.  We don't do that since it does not seem worth the
+; (alignment) troubles...
+
+; At least the PA7100 is rumored to be able to deal with cache-misses
+; without stalling instruction issue.  If this is true, and the cache is
+; actually also lockup-free, we should use a deeper software pipeline, and
+; load from S1 very early!  (The loads and stores to -12(sp) will surely be
+; in the cache.)
+
+       .code
+       .export         __mpn_mul_1
+__mpn_mul_1
+       .proc
+       .callinfo       frame=64,no_calls
+       .entry
+
+       ldo             64(%r30),%r30           ; allocate 64-byte stack frame
+       fldws,ma        4(%r25),%fr5            ; fr5 = first s1 limb, s1_ptr++
+       stw             %r23,-16(%r30)          ; move s2_limb ...
+       addib,=         -1,%r24,L$just_one_limb ; single-limb case
+        fldws          -16(%r30),%fr4          ; ... into fr4
+       add             %r0,%r0,%r0             ; clear carry
+       xmpyu           %fr4,%fr5,%fr6          ; fr6 = 64-bit product
+       fldws,ma        4(%r25),%fr7            ; fr7 = next s1 limb, s1_ptr++
+       fstds           %fr6,-16(%r30)          ; spill product to stack
+       xmpyu           %fr4,%fr7,%fr8          ; start second product
+       ldw             -12(%r30),%r19          ; least significant limb in product
+       ldw             -16(%r30),%r28          ; most significant limb in product
+
+       fstds           %fr8,-16(%r30)          ; spill second product
+       addib,=         -1,%r24,L$end           ; two-limb case: skip loop
+        ldw            -12(%r30),%r1           ; low limb of second product
+
+; Main loop
+L$loop fldws,ma        4(%r25),%fr5            ; load next s1 limb
+       stws,ma         %r19,4(%r26)            ; store low limb, res_ptr++
+       addc            %r28,%r1,%r19           ; next low limb + high limb + cy
+       xmpyu           %fr4,%fr5,%fr6          ; start next product
+       ldw             -16(%r30),%r28          ; high limb of spilled product
+       fstds           %fr6,-16(%r30)          ; spill new product
+       addib,<>        -1,%r24,L$loop          ; loop while --size != 0
+        ldw            -12(%r30),%r1           ; delay slot: low limb of new product
+
+L$end  stws,ma         %r19,4(%r26)            ; store second-to-last limb
+       addc            %r28,%r1,%r19           ; form last limb
+       ldw             -16(%r30),%r28          ; final high limb
+       stws,ma         %r19,4(%r26)            ; store last limb
+       addc            %r0,%r28,%r28           ; r28 = return value (carry limb)
+       bv              0(%r2)                  ; return
+        ldo            -64(%r30),%r30          ; delay slot: deallocate frame
+
+L$just_one_limb
+       xmpyu           %fr4,%fr5,%fr6          ; single 64-bit product
+       fstds           %fr6,-16(%r30)          ; spill product
+       ldw             -16(%r30),%r28          ; r28 = high limb (return value)
+       ldo             -64(%r30),%r30          ; deallocate frame
+       bv              0(%r2)                  ; return
+        fstws          %fr6R,0(%r26)           ; delay slot: store low limb
+
+       .exit
+       .procend
diff --git a/ghc/rts/gmp/mpn/hppa/hppa1_1/pa7100/add_n.s b/ghc/rts/gmp/mpn/hppa/hppa1_1/pa7100/add_n.s
new file mode 100644 (file)
index 0000000..21fe161
--- /dev/null
@@ -0,0 +1,75 @@
+; HP-PA  __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+; sum in a third limb vector.
+; This is optimized for the PA7100, where it runs at 4.25 cycles/limb
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr      gr26
+; s1_ptr       gr25
+; s2_ptr       gr24
+; size         gr23
+
+       .code
+       .export         __mpn_add_n
+__mpn_add_n
+       .proc
+       .callinfo       frame=0,no_calls
+       .entry
+
+       ldws,ma         4(0,%r25),%r20  ; r20 = *s1_ptr++
+       ldws,ma         4(0,%r24),%r19  ; r19 = *s2_ptr++
+
+       addib,<=        -5,%r23,L$rest  ; too few limbs for unrolled loop?
+        add            %r20,%r19,%r28  ; add first limbs ignoring cy
+
+L$loop ldws,ma         4(0,%r25),%r20  ; 4-way unrolled: load s1 limb
+       ldws,ma         4(0,%r24),%r19  ; load s2 limb
+       stws,ma         %r28,4(0,%r26)  ; store previous sum limb
+       addc            %r20,%r19,%r28  ; add with carry
+       ldws,ma         4(0,%r25),%r20
+       ldws,ma         4(0,%r24),%r19
+       stws,ma         %r28,4(0,%r26)
+       addc            %r20,%r19,%r28
+       ldws,ma         4(0,%r25),%r20
+       ldws,ma         4(0,%r24),%r19
+       stws,ma         %r28,4(0,%r26)
+       addc            %r20,%r19,%r28
+       ldws,ma         4(0,%r25),%r20
+       ldws,ma         4(0,%r24),%r19
+       stws,ma         %r28,4(0,%r26)
+       addib,>         -4,%r23,L$loop  ; loop while > 4 limbs remain
+       addc            %r20,%r19,%r28  ; delay slot: add with carry
+
+L$rest addib,=         4,%r23,L$end    ; no leftover limbs?
+       nop
+L$eloop        ldws,ma         4(0,%r25),%r20  ; cleanup loop: one limb per pass
+       ldws,ma         4(0,%r24),%r19
+       stws,ma         %r28,4(0,%r26)  ; store previous sum limb
+       addib,>         -1,%r23,L$eloop ; loop while limbs remain
+       addc            %r20,%r19,%r28  ; delay slot: add with carry
+
+L$end  stws            %r28,0(0,%r26)  ; store final sum limb
+       bv              0(%r2)          ; return
+        addc           %r0,%r0,%r28    ; delay slot: return carry in r28
+
+       .exit
+       .procend
diff --git a/ghc/rts/gmp/mpn/hppa/hppa1_1/pa7100/lshift.s b/ghc/rts/gmp/mpn/hppa/hppa1_1/pa7100/lshift.s
new file mode 100644 (file)
index 0000000..4c74a50
--- /dev/null
@@ -0,0 +1,83 @@
+; HP-PA  __mpn_lshift --
+; This is optimized for the PA7100, where it runs at 3.25 cycles/limb
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr      gr26
+; s_ptr                gr25
+; size         gr24
+; cnt          gr23
+
+       .code
+       .export         __mpn_lshift
+__mpn_lshift
+       .proc
+       .callinfo       frame=64,no_calls
+       .entry
+
+       sh2add          %r24,%r25,%r25          ; s_ptr += 4*size (point past end)
+       sh2add          %r24,%r26,%r26          ; res_ptr += 4*size
+       ldws,mb         -4(0,%r25),%r22         ; r22 = most significant limb
+       subi            32,%r23,%r1             ; r1 = 32 - cnt
+       mtsar           %r1                     ; sar = 32 - cnt
+       addib,=         -1,%r24,L$0004          ; single-limb case
+       vshd            %r0,%r22,%r28           ; compute carry out limb
+       ldws,mb         -4(0,%r25),%r29         ; r29 = next lower limb
+       addib,<=        -5,%r24,L$rest          ; too few limbs for unrolled loop?
+       vshd            %r22,%r29,%r20          ; first result limb
+
+L$loop ldws,mb         -4(0,%r25),%r22         ; 4-way unrolled shift loop
+       stws,mb         %r20,-4(0,%r26)         ; store previous result limb
+       vshd            %r29,%r22,%r20          ; shift limb pair through sar
+       ldws,mb         -4(0,%r25),%r29
+       stws,mb         %r20,-4(0,%r26)
+       vshd            %r22,%r29,%r20
+       ldws,mb         -4(0,%r25),%r22
+       stws,mb         %r20,-4(0,%r26)
+       vshd            %r29,%r22,%r20
+       ldws,mb         -4(0,%r25),%r29
+       stws,mb         %r20,-4(0,%r26)
+       addib,>         -4,%r24,L$loop          ; loop while > 4 limbs remain
+       vshd            %r22,%r29,%r20          ; delay slot: next result limb
+
+L$rest addib,=         4,%r24,L$end1           ; no leftover limbs?
+       nop
+L$eloop        ldws,mb         -4(0,%r25),%r22         ; cleanup: one limb per pass
+       stws,mb         %r20,-4(0,%r26)
+       addib,<=        -1,%r24,L$end2          ; last limb came from r22?
+       vshd            %r29,%r22,%r20
+       ldws,mb         -4(0,%r25),%r29
+       stws,mb         %r20,-4(0,%r26)
+       addib,>         -1,%r24,L$eloop         ; loop while limbs remain
+       vshd            %r22,%r29,%r20
+
+L$end1 stws,mb         %r20,-4(0,%r26)         ; store result limb
+       vshd            %r29,%r0,%r20           ; least significant result limb
+       bv              0(%r2)                  ; return (r28 = carry out limb)
+       stw             %r20,-4(0,%r26)         ; delay slot: store it
+L$end2 stws,mb         %r20,-4(0,%r26)         ; store result limb
+L$0004 vshd            %r22,%r0,%r20           ; least significant result limb
+       bv              0(%r2)                  ; return (r28 = carry out limb)
+       stw             %r20,-4(0,%r26)         ; delay slot: store it
+
+       .exit
+       .procend
diff --git a/ghc/rts/gmp/mpn/hppa/hppa1_1/submul_1.s b/ghc/rts/gmp/mpn/hppa/hppa1_1/submul_1.s
new file mode 100644 (file)
index 0000000..a4a3854
--- /dev/null
@@ -0,0 +1,111 @@
+; HP-PA-1.1 __mpn_submul_1 -- Multiply a limb vector with a limb and
+; subtract the result from a second limb vector.
+
+; Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr      r26
+; s1_ptr       r25
+; size         r24
+; s2_limb      r23
+
+; This runs at 12 cycles/limb on a PA7000.  With the used instructions, it
+; can not become faster due to data cache contention after a store.  On the
+; PA7100 it runs at 11 cycles/limb, and that can not be improved either,
+; since only the xmpyu does not need the integer pipeline, so the only
+; dual-issue we will get are addc+xmpyu.  Unrolling could gain a cycle/limb
+; on the PA7100.
+
+; There are some ideas described in mul_1.s that applies to this code too.
+
+; It seems possible to make this run as fast as __mpn_addmul_1, if we use
+;      sub,>>= %r29,%r19,%r22
+;      addi    1,%r28,%r28
+; but that requires reworking the hairy software pipeline...
+
+       .code
+       .export         __mpn_submul_1
+__mpn_submul_1
+       .proc
+       .callinfo       frame=64,no_calls
+       .entry
+
+       ldo             64(%r30),%r30           ; allocate 64-byte stack frame
+       fldws,ma        4(%r25),%fr5            ; fr5 = first s1 limb, s1_ptr++
+       stw             %r23,-16(%r30)          ; move s2_limb ...
+       addib,=         -1,%r24,L$just_one_limb ; single-limb case
+        fldws          -16(%r30),%fr4          ; ... into fr4
+       add             %r0,%r0,%r0             ; clear carry
+       xmpyu           %fr4,%fr5,%fr6          ; fr6 = 64-bit product
+       fldws,ma        4(%r25),%fr7            ; fr7 = next s1 limb, s1_ptr++
+       fstds           %fr6,-16(%r30)          ; spill product to stack
+       xmpyu           %fr4,%fr7,%fr8          ; start second product
+       ldw             -12(%r30),%r19          ; least significant limb in product
+       ldw             -16(%r30),%r28          ; most significant limb in product
+
+       fstds           %fr8,-16(%r30)          ; spill second product
+       addib,=         -1,%r24,L$end           ; two-limb case: skip loop
+        ldw            -12(%r30),%r1           ; low limb of second product
+
+; Main loop
+L$loop ldws            0(%r26),%r29            ; r29 = *res_ptr
+       fldws,ma        4(%r25),%fr5            ; load next s1 limb
+       sub             %r29,%r19,%r22          ; r22 = res limb - low product limb
+       add             %r22,%r19,%r0           ; regenerate cy from the subtract
+       stws,ma         %r22,4(%r26)            ; store, res_ptr++
+       addc            %r28,%r1,%r19           ; next low limb + high limb + cy
+       xmpyu           %fr4,%fr5,%fr6          ; start next product
+       ldw             -16(%r30),%r28          ; high limb of spilled product
+       fstds           %fr6,-16(%r30)          ; spill new product
+       addc            %r0,%r28,%r28           ; fold carry into high limb
+       addib,<>        -1,%r24,L$loop          ; loop while --size != 0
+        ldw            -12(%r30),%r1           ; delay slot: low limb of new product
+
+L$end  ldw             0(%r26),%r29            ; wind down software pipeline:
+       sub             %r29,%r19,%r22          ;  second-to-last limb
+       add             %r22,%r19,%r0           ; regenerate cy from the subtract
+       stws,ma         %r22,4(%r26)            ; store, res_ptr++
+       addc            %r28,%r1,%r19           ; form last limb
+       ldw             -16(%r30),%r28          ; final high limb
+       ldws            0(%r26),%r29            ; last res limb
+       addc            %r0,%r28,%r28           ; fold carry into high limb
+       sub             %r29,%r19,%r22          ; subtract last limb
+       add             %r22,%r19,%r0           ; regenerate cy from the subtract
+       stws,ma         %r22,4(%r26)            ; store last limb
+       addc            %r0,%r28,%r28           ; r28 = return value (borrow limb)
+       bv              0(%r2)                  ; return
+        ldo            -64(%r30),%r30          ; delay slot: deallocate frame
+
+L$just_one_limb
+       xmpyu           %fr4,%fr5,%fr6          ; single 64-bit product
+       ldw             0(%r26),%r29            ; r29 = *res_ptr
+       fstds           %fr6,-16(%r30)          ; spill product
+       ldw             -12(%r30),%r1           ; low product limb
+       ldw             -16(%r30),%r28          ; high product limb
+       sub             %r29,%r1,%r22           ; res limb - low product limb
+       add             %r22,%r1,%r0            ; regenerate cy from the subtract
+       stw             %r22,0(%r26)            ; store result limb
+       addc            %r0,%r28,%r28           ; return high limb + carry
+       bv              0(%r2)                  ; return
+        ldo            -64(%r30),%r30          ; delay slot: deallocate frame
+
+       .exit
+       .procend
diff --git a/ghc/rts/gmp/mpn/hppa/lshift.s b/ghc/rts/gmp/mpn/hppa/lshift.s
new file mode 100644 (file)
index 0000000..abac6ec
--- /dev/null
@@ -0,0 +1,66 @@
+; HP-PA  __mpn_lshift --
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr      gr26
+; s_ptr                gr25
+; size         gr24
+; cnt          gr23
+
+       .code
+       .export         __mpn_lshift
+__mpn_lshift
+       .proc
+       .callinfo       frame=64,no_calls
+       .entry
+
+       sh2add          %r24,%r25,%r25          ; s_ptr += 4*size (point past end)
+       sh2add          %r24,%r26,%r26          ; res_ptr += 4*size
+       ldws,mb         -4(0,%r25),%r22         ; r22 = most significant limb
+       subi            32,%r23,%r1             ; r1 = 32 - cnt
+       mtsar           %r1                     ; sar = 32 - cnt
+       addib,=         -1,%r24,L$0004          ; single-limb case
+       vshd            %r0,%r22,%r28           ; compute carry out limb
+       ldws,mb         -4(0,%r25),%r29         ; r29 = next lower limb
+       addib,=         -1,%r24,L$0002          ; two-limb case
+       vshd            %r22,%r29,%r20          ; first result limb
+
+L$loop ldws,mb         -4(0,%r25),%r22         ; load next lower limb
+       stws,mb         %r20,-4(0,%r26)         ; store previous result limb
+       addib,=         -1,%r24,L$0003          ; last limb came from r22?
+       vshd            %r29,%r22,%r20          ; shift limb pair through sar
+       ldws,mb         -4(0,%r25),%r29
+       stws,mb         %r20,-4(0,%r26)
+       addib,<>        -1,%r24,L$loop          ; loop while --size != 0
+       vshd            %r22,%r29,%r20
+
+L$0002 stws,mb         %r20,-4(0,%r26)         ; store result limb
+       vshd            %r29,%r0,%r20           ; least significant result limb
+       bv              0(%r2)                  ; return (r28 = carry out limb)
+       stw             %r20,-4(0,%r26)         ; delay slot: store it
+L$0003 stws,mb         %r20,-4(0,%r26)         ; store result limb
+L$0004 vshd            %r22,%r0,%r20           ; least significant result limb
+       bv              0(%r2)                  ; return (r28 = carry out limb)
+       stw             %r20,-4(0,%r26)         ; delay slot: store it
+
+       .exit
+       .procend
diff --git a/ghc/rts/gmp/mpn/hppa/rshift.s b/ghc/rts/gmp/mpn/hppa/rshift.s
new file mode 100644 (file)
index 0000000..c1480e5
--- /dev/null
@@ -0,0 +1,63 @@
+; HP-PA  __mpn_rshift -- 
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr      gr26
+; s_ptr                gr25
+; size         gr24
+; cnt          gr23
+
+       .code
+       .export         __mpn_rshift
+__mpn_rshift
+       .proc
+       .callinfo       frame=64,no_calls
+       .entry
+
+       ldws,ma         4(0,%r25),%r22          ; r22 = least significant limb
+       mtsar           %r23                    ; sar = cnt
+       addib,=         -1,%r24,L$0004          ; single-limb case
+       vshd            %r22,%r0,%r28           ; compute carry out limb
+       ldws,ma         4(0,%r25),%r29          ; r29 = next higher limb
+       addib,=         -1,%r24,L$0002          ; two-limb case
+       vshd            %r29,%r22,%r20          ; first result limb
+
+L$loop ldws,ma         4(0,%r25),%r22          ; load next higher limb
+       stws,ma         %r20,4(0,%r26)          ; store previous result limb
+       addib,=         -1,%r24,L$0003          ; last limb came from r22?
+       vshd            %r22,%r29,%r20          ; shift limb pair through sar
+       ldws,ma         4(0,%r25),%r29
+       stws,ma         %r20,4(0,%r26)
+       addib,<>        -1,%r24,L$loop          ; loop while --size != 0
+       vshd            %r29,%r22,%r20
+
+L$0002 stws,ma         %r20,4(0,%r26)          ; store result limb
+       vshd            %r0,%r29,%r20           ; most significant result limb
+       bv              0(%r2)                  ; return (r28 = carry out limb)
+       stw             %r20,0(0,%r26)          ; delay slot: store it
+L$0003 stws,ma         %r20,4(0,%r26)          ; store result limb
+L$0004 vshd            %r0,%r22,%r20           ; most significant result limb
+       bv              0(%r2)                  ; return (r28 = carry out limb)
+       stw             %r20,0(0,%r26)          ; delay slot: store it
+
+       .exit
+       .procend
diff --git a/ghc/rts/gmp/mpn/hppa/sub_n.s b/ghc/rts/gmp/mpn/hppa/sub_n.s
new file mode 100644 (file)
index 0000000..04fa3e1
--- /dev/null
@@ -0,0 +1,59 @@
+; HP-PA  __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+; store difference in a third limb vector.
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+
+; INPUT PARAMETERS                     ; __mpn_sub_n: res = s1 - s2, limbwise with borrow
+; res_ptr      gr26
+; s1_ptr       gr25
+; s2_ptr       gr24
+; size         gr23
+
+; One might want to unroll this as for other processors, but it turns
+; out that the data cache contention after a store makes such
+; unrolling useless.  We can't come under 5 cycles/limb anyway.
+
+       .code
+       .export         __mpn_sub_n
+__mpn_sub_n
+       .proc
+       .callinfo       frame=0,no_calls        ; leaf routine, no stack frame
+       .entry
+
+       ldws,ma         4(0,%r25),%r20          ; load limb from s1, s1_ptr++
+       ldws,ma         4(0,%r24),%r19          ; load limb from s2, s2_ptr++
+
+       addib,=         -1,%r23,L$end   ; check for (SIZE == 1)
+        sub            %r20,%r19,%r28  ; subtract first limbs ignoring cy
+
+L$loop ldws,ma         4(0,%r25),%r20          ; load next s1 limb
+       ldws,ma         4(0,%r24),%r19          ; load next s2 limb
+       stws,ma         %r28,4(0,%r26)          ; store previous difference, res_ptr++
+       addib,<>        -1,%r23,L$loop          ; --size; loop while nonzero
+        subb           %r20,%r19,%r28          ; (delay slot) subtract with borrow
+
+L$end  stws            %r28,0(0,%r26)          ; store final difference limb
+       addc            %r0,%r0,%r28            ; r28 = carry bit (PA: 1 means no borrow)
+       bv              0(%r2)                  ; return
+        subi           1,%r28,%r28             ; (delay slot) return borrow = 1 - cy
+
+       .exit
+       .procend
diff --git a/ghc/rts/gmp/mpn/hppa/udiv_qrnnd.s b/ghc/rts/gmp/mpn/hppa/udiv_qrnnd.s
new file mode 100644 (file)
index 0000000..9b45eb4
--- /dev/null
@@ -0,0 +1,286 @@
+; HP-PA  __udiv_qrnnd division support, used from longlong.h.
+; This version runs fast on pre-PA7000 CPUs.
+
+; Copyright (C) 1993, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+; MA 02111-1307, USA.
+
+
+; INPUT PARAMETERS                     ; divide (n1,,n0) by d; return quotient, store remainder
+; rem_ptr      gr26
+; n1           gr25
+; n0           gr24
+; d            gr23
+
+; The code size is a bit excessive.  We could merge the last two ds;addc
+; sequences by simply moving the "bb,< Odd" instruction down.  The only
+; trouble is the FFFFFFFF code that would need some hacking.
+
+       .code
+       .export         __udiv_qrnnd
+__udiv_qrnnd
+       .proc
+       .callinfo       frame=0,no_calls        ; leaf routine, no stack frame
+       .entry
+
+       comb,<          %r23,0,L$largedivisor   ; d has sign bit set (d >= 2^31)?
+        sub            %r0,%r23,%r1            ; r1 = -d; clear cy as side-effect
+       ds              %r0,%r1,%r0             ; prime the ds (divide step) sequence
+       addc            %r24,%r24,%r24          ; shift quotient bit into n0
+       ds              %r25,%r23,%r25          ; one of 32 divide steps
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r23,%r25
+       addc            %r24,%r24,%r28          ; collect quotient bits in r28
+       ds              %r25,%r23,%r25          ; last divide step
+       comclr,>=       %r25,%r0,%r0            ; skip add-back when remainder >= 0
+       addl            %r25,%r23,%r25          ; negative remainder: add back d
+       stws            %r25,0(0,%r26)          ; store remainder at *rem_ptr
+       bv              0(%r2)                  ; return
+        addc           %r28,%r28,%r28          ; (delay slot) final quotient bit; q -> r28
+
+L$largedivisor
+       extru           %r24,31,1,%r19          ; r19 = n0 & 1
+       bb,<            %r23,31,L$odd           ; low bit of d set -> odd-divisor path
+        extru          %r23,30,31,%r22         ; r22 = d >> 1
+       shd             %r25,%r24,1,%r24        ; r24 = new n0
+       extru           %r25,30,31,%r25         ; r25 = new n1
+       sub             %r0,%r22,%r21           ; r21 = -(d >> 1)
+       ds              %r0,%r21,%r0            ; prime the divide-step sequence
+       addc            %r24,%r24,%r24          ; shift quotient bit into n0
+       ds              %r25,%r22,%r25          ; one of 32 divide steps by d >> 1
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25          ; last divide step
+       comclr,>=       %r25,%r0,%r0            ; skip add-back when remainder >= 0
+       addl            %r25,%r22,%r25          ; negative remainder: add back d >> 1
+       sh1addl         %r25,%r19,%r25          ; remainder = 2*r + (n0 & 1)
+       stws            %r25,0(0,%r26)          ; store remainder at *rem_ptr
+       bv              0(%r2)                  ; return
+        addc           %r24,%r24,%r28          ; (delay slot) final quotient bit; q -> r28
+
+L$odd  addib,sv,n      1,%r22,L$FF..           ; r22 = (d / 2 + 1)
+       shd             %r25,%r24,1,%r24        ; r24 = new n0
+       extru           %r25,30,31,%r25         ; r25 = new n1
+       sub             %r0,%r22,%r21           ; r21 = -(d/2 + 1)
+       ds              %r0,%r21,%r0            ; prime the divide-step sequence
+       addc            %r24,%r24,%r24          ; shift quotient bit into n0
+       ds              %r25,%r22,%r25          ; one of 32 divide steps by d/2 + 1
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25
+       addc            %r24,%r24,%r24
+       ds              %r25,%r22,%r25          ; last divide step
+       addc            %r24,%r24,%r28          ; final quotient bit; q' -> r28
+       comclr,>=       %r25,%r0,%r0            ; skip add-back when remainder >= 0
+       addl            %r25,%r22,%r25          ; negative remainder: add back d/2 + 1
+       sh1addl         %r25,%r19,%r25          ; r' = 2*r + (n0 & 1)
+; We have computed (n1,,n0) / (d + 1), q' = r28, r' = r25
+       add,nuv         %r28,%r25,%r25          ; r25 = q' + r'; skip next unless carry
+       addl            %r25,%r1,%r25           ; carried: r25 -= d (r1 = -d from entry)
+       addc            %r0,%r28,%r28           ; propagate carry into quotient
+       sub,<<          %r25,%r23,%r0           ; skip next if r25 < d (unsigned)
+       addl            %r25,%r1,%r25           ; r25 >= d: r25 -= d (r1 = -d)
+       stws            %r25,0(0,%r26)          ; store remainder at *rem_ptr
+       bv              0(%r2)                  ; return
+        addc           %r0,%r28,%r28           ; (delay slot) final quotient -> r28
+
+; This is just a special case of the code above.
+; We come here when d == 0xFFFFFFFF
+L$FF.. add,uv          %r25,%r24,%r24          ; r24 = n1 + n0, conditionally nullifying
+       sub,<<          %r24,%r23,%r0           ; compare sum against d
+       ldo             1(%r24),%r24            ; adjust remainder candidate
+       stws            %r24,0(0,%r26)          ; store remainder at *rem_ptr
+       bv              0(%r2)                  ; return
+        addc           %r0,%r25,%r28           ; (delay slot) q = n1 + cy
+
+       .exit
+       .procend