dnl  SPARC v8 mpn_mul_1 -- Multiply a limb vector with a single limb and
dnl  store the product in a second limb vector.

dnl  Copyright 1992, 1994, 1995, 2000 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.

dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of the GNU Lesser General Public License as published
dnl  by the Free Software Foundation; either version 3 of the License, or (at
dnl  your option) any later version.

dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
dnl  License for more details.

dnl  You should have received a copy of the GNU Lesser General Public License
dnl  along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.


include(`../config.m4')

C INPUT PARAMETERS
C res_ptr	o0
C s1_ptr	o1
C size		o2
C s2_limb	o3
C
C Computes {res_ptr, size} = {s1_ptr, size} * s2_limb and returns the
C most significant (carry-out) limb in %o0.
C
C The loop is unrolled 4x.  Entry dispatches on size mod 4 through a
C computed jump: the dispatch stubs at L(L00)..L(L11) are laid out 16
C bytes (4 instructions) apart from L(1), so the jump offset is simply
C (size mod 4) * 16.  SPARC v8 umul leaves the low 32 bits of the
C product in its destination register and the high 32 bits in the %y
C register, read back afterwards with "rd %y".

ASM_START()
PROLOGUE(mpn_mul_1)
	sll	%o2,4,%g1		C %g1 = size * 16
	and	%g1,(4-1)<<4,%g1	C %g1 = (size mod 4) * 16 = stub offset
ifdef(`PIC',
`	mov	%o7,%g4		C Save return address register
0:	call	1f			C PIC: get current PC into %o7
	add	%o7,L(1)-0b,%g3	C delay slot: %g3 = address of L(1)
1:	mov	%g4,%o7		C Restore return address register
',
`	sethi	%hi(L(1)),%g3
	or	%g3,%lo(L(1)),%g3	C %g3 = address of L(1)
')
	jmp	%g3+%g1			C dispatch on size mod 4
	ld	[%o1+0],%o4	C 1	C delay slot: preload first source limb
L(1):
C Each stub biases %o0/%o1 so the loop body's fixed +0/+4/+8/+12 offsets
C land on the right limbs for its entry point, then clears the carry
C limb %g2 and the carry flag in one go with orcc.
L(L00):	add	%o0,-4,%o0
	add	%o1,-4,%o1
	b	L(loop00)	C 4, 8, 12, ...
	orcc	%g0,%g0,%g2
L(L01):	b	L(loop01)	C 1, 5, 9, ...
	orcc	%g0,%g0,%g2
	nop
	nop
L(L10):	add	%o0,-12,%o0	C 2, 6, 10, ...
	add	%o1,4,%o1
	b	L(loop10)
	orcc	%g0,%g0,%g2
	nop				C pad: a size==3 (mod 4) dispatch lands here
L(L11):	add	%o0,-8,%o0	C 3, 7, 11, ...
	add	%o1,-8,%o1
	b	L(loop11)
	orcc	%g0,%g0,%g2

C Main loop, four limbs per iteration.  At the top of L(loop), %o4 holds
C the current source limb, %g3 the low word of the previous product, and
C %g2 the previous product's high word; the carry flag carries the
C addition chain between limbs.  The "C n" tags mark which of the four
C limbs (1-4) each instruction works on.
L(loop):
	addcc	%g3,%g2,%g3	C 1	C low product + previous high word
	ld	[%o1+4],%o4	C 2	C load next source limb
	st	%g3,[%o0+0]	C 1	C store result limb
	rd	%y,%g2		C 1	C fetch high 32 bits of product
L(loop00):
	umul	%o4,%o3,%g3	C 2
	addxcc	%g3,%g2,%g3	C 2	C add with carry from previous limb
	ld	[%o1+8],%o4	C 3
	st	%g3,[%o0+4]	C 2
	rd	%y,%g2		C 2
L(loop11):
	umul	%o4,%o3,%g3	C 3
	addxcc	%g3,%g2,%g3	C 3
	ld	[%o1+12],%o4	C 4
	add	%o1,16,%o1		C advance source pointer for next round
	st	%g3,[%o0+8]	C 3
	rd	%y,%g2		C 3
L(loop10):
	umul	%o4,%o3,%g3	C 4
	addxcc	%g3,%g2,%g3	C 4
	ld	[%o1+0],%o4	C 1	C preload first limb of next iteration
	st	%g3,[%o0+12]	C 4
	add	%o0,16,%o0		C advance result pointer
	rd	%y,%g2		C 4
	addx	%g0,%g2,%g2		C fold carry flag into the carry limb
L(loop01):
	addcc	%o2,-4,%o2		C four fewer limbs remain
	bg	L(loop)
	umul	%o4,%o3,%g3	C 1	C delay slot: start next product early

	addcc	%g3,%g2,%g3	C 4	C finish the last limb outside the loop
	st	%g3,[%o0+0]	C 4
	rd	%y,%g2		C 4

	retl
	addx	%g0,%g2,%o0		C delay slot: return carry-out limb in %o0
EPILOGUE(mpn_mul_1)