/* SPARC v8 __mpn_mul_1 -- Multiply a limb vector with a single limb and
 * store the product in a second limb vector.
 *
 * Copyright (C) 1992, 1994, 1995, 1998,
 *               2001, 2002 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 */


! INPUT PARAMETERS
! res_ptr	o0	! destination limb vector
! s1_ptr	o1	! source limb vector
! size		o2	! number of limbs (assumed > 0 by this routine)
! s2_limb	o3	! the single multiplier limb
!
! Returns the most significant (carry-out) limb of the product in %o0.
!
! Register roles inside the loop:
!   %o4 = current source limb        %g3 = low 32 bits of current product
!   %g2 = high 32 bits of previous product (pending add into next limb)
!   carry is threaded through the integer condition codes (addxcc/addx).
! SPARC v8 `umul` leaves the low 32 product bits in the destination
! register and the high 32 bits in the %y register (read back via `rd %y`).

#include "sysdep.h"

.text
	.align 8
	.global	C_SYMBOL_NAME(_gcry_mpih_mul_1)
C_SYMBOL_NAME(_gcry_mpih_mul_1):
	sll	%o2,4,%g1		! g1 = size * 16 (each dispatch slot below is 16 bytes)
	and	%g1,(4-1)<<4,%g1	! g1 = (size mod 4) * 16 — byte offset of entry slot
#if PIC
	mov	%o7,%g4			! Save return address register
	call	1f			! `call` deposits its own PC in %o7 ...
	add	%o7,LL-1f,%g3		! ... delay slot: g3 = &LL, PC-relative (PIC-safe)
1:	mov	%g4,%o7			! Restore return address register
#else
	sethi	%hi(LL),%g3
	or	%g3,%lo(LL),%g3		! g3 = &LL (absolute address; non-PIC build)
#endif
	jmp	%g3+%g1			! computed jump: dispatch on size mod 4
	ld	[%o1+0],%o4	! 1	! delay slot: preload first source limb
LL:
! Dispatch table: four 16-byte slots, one per (size mod 4) residue.  Each
! entry biases res_ptr/s1_ptr so the fixed 0/4/8/12 offsets of the 4-way
! unrolled loop line up for the first, partial iteration, then clears %g2
! and the carry flag with "orcc %g0,%g0,%g2" in the branch delay slot.
! NB: slot 3 (size mod 4 == 3) starts at the lone `nop` after LL10 and
! falls through into LL11 — the labels themselves are NOT all 16 bytes
! apart, but the jump targets are.
LL00:	add	%o0,-4,%o0
	add	%o1,-4,%o1
	b	Loop00		/* 4, 8, 12, ... */
	orcc	%g0,%g0,%g2
LL01:	b	Loop01		/* 1, 5, 9, ... */
	orcc	%g0,%g0,%g2
	nop
	nop
LL10:	add	%o0,-12,%o0	/* 2, 6, 10, ... */
	add	%o1,4,%o1
	b	Loop10
	orcc	%g0,%g0,%g2
	nop
LL11:	add	%o0,-8,%o0	/* 3, 7, 11, ... */
	add	%o1,-8,%o1
	b	Loop11
	orcc	%g0,%g0,%g2

! Main loop, unrolled 4x; the "! n" tags mark which of the four limbs an
! instruction belongs to.  Per limb: umul produces the low product word in
! %g3; the previous limb's high word (%g2) plus the pending carry is folded
! in with addxcc; `rd %y` then fetches this limb's high word for the next
! round.  Loads/stores are interleaved to hide latency.
Loop:	addcc	%g3,%g2,%g3	! 1
	ld	[%o1+4],%o4	! 2
	st	%g3,[%o0+0]	! 1
	rd	%y,%g2		! 1	! high 32 bits of previous umul
Loop00:	umul	%o4,%o3,%g3	! 2
	addxcc	%g3,%g2,%g3	! 2
	ld	[%o1+8],%o4	! 3
	st	%g3,[%o0+4]	! 2
	rd	%y,%g2		! 2
Loop11:	umul	%o4,%o3,%g3	! 3
	addxcc	%g3,%g2,%g3	! 3
	ld	[%o1+12],%o4	! 4
	add	%o1,16,%o1		! advance source pointer by 4 limbs
	st	%g3,[%o0+8]	! 3
	rd	%y,%g2		! 3
Loop10:	umul	%o4,%o3,%g3	! 4
	addxcc	%g3,%g2,%g3	! 4
	ld	[%o1+0],%o4	! 1	! preload first limb of next group
	st	%g3,[%o0+12]	! 4
	add	%o0,16,%o0		! advance result pointer by 4 limbs
	rd	%y,%g2		! 4
	addx	%g0,%g2,%g2		! fold carry into the pending high word
Loop01:	addcc	%o2,-4,%o2		! four more limbs done; sets flags for bg
	bg	Loop
	umul	%o4,%o3,%g3	! 1	! delay slot: start next group's product

	addcc	%g3,%g2,%g3	! 4	! last limb: low word + pending high word
	st	%g3,[%o0+0]	! 4
	rd	%y,%g2		! 4

	retl
	addx	%g0,%g2,%o0		! delay slot: return carry-out limb in %o0