/* hppa1.1 submul_1 -- Multiply a limb vector with a limb and subtract
 * the result from a second limb vector.
 *
 * Copyright (C) 1992, 1993, 1994, 1998,
 *               2001, 2002 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 */


/*******************
 * mpi_limb_t
 * _gcry_mpih_submul_1( mpi_ptr_t res_ptr,	(r26)
 *		     mpi_ptr_t s1_ptr,		(r25)
 *		     mpi_size_t s1_size,	(r24)
 *		     mpi_limb_t s2_limb)	(r23)
 *
 *
 * This runs at 12 cycles/limb on a PA7000.  With the used instructions, it
 * can not become faster due to data cache contention after a store.  On the
 * PA7100 it runs at 11 cycles/limb, and that can not be improved either,
 * since only the xmpyu does not need the integer pipeline, so the only
 * dual-issue we will get are addc+xmpyu.  Unrolling could gain a cycle/limb
 * on the PA7100.
 *
 * There are some ideas described in mul1.S that applies to this code too.
 *
 * It seems possible to make this run as fast as addmul_1, if we use
 *	sub,>>= %r29,%r19,%r22
 *	addi	1,%r28,%r28
 * but that requires reworking the hairy software pipeline...
 */

	.level		1.1

	.code
	.export		_gcry_mpih_submul_1
	.label		_gcry_mpih_submul_1
	.proc
	.callinfo	frame=64,no_calls
	.entry

; Arguments (see header comment): r26 = res_ptr, r25 = s1_ptr,
; r24 = s1_size, r23 = s2_limb.  Returns the carry-out limb in r28.
;
; s2_limb is bounced through the stack into fr4 so that xmpyu — an
; FPU-only 32x32->64 unsigned multiply — can use it.  Each 64-bit
; product is spilled with fstds to -16(%r30) and read back as two
; 32-bit integer halves.
;
; Software-pipeline state across the loop:
;   r19 = limb to subtract from *res_ptr this iteration
;   r1  = low half of the product one iteration ahead
;   r28 = high half of the previous product / final return value

	ldo		64(%r30),%r30		; allocate 64-byte frame
	fldws,ma	4(%r25),%fr5		; fr5 = *s1_ptr++
	stw		%r23,-16(%r30)		; move s2_limb ...
	addib,=		-1,%r24,L$just_one_limb	; s1_size == 1: skip pipeline
	fldws		-16(%r30),%fr4		; ... into fr4 (branch delay slot)
	add		%r0,%r0,%r0		; clear carry
	xmpyu		%fr4,%fr5,%fr6		; fr6 = s2_limb * s1[0] (64 bits)
	fldws,ma	4(%r25),%fr7		; fr7 = *s1_ptr++
	fstds		%fr6,-16(%r30)		; spill first product
	xmpyu		%fr4,%fr7,%fr8		; start second product
	ldw		-12(%r30),%r19		; least significant limb in product
	ldw		-16(%r30),%r28		; high half of first product

	fstds		%fr8,-16(%r30)		; spill second product
	addib,=		-1,%r24,L$end		; only two limbs: drain pipeline
	ldw		-12(%r30),%r1		; its low half (branch delay slot)

; Main loop: retires one limb of res_ptr per iteration while the
; multiply for the following limb is still in flight.
	.label	L$loop
	ldws		0(%r26),%r29		; r29 = *res_ptr
	fldws,ma	4(%r25),%fr5		; fetch next s1 limb
	sub		%r29,%r19,%r22		; r22 = *res_ptr - r19
	add		%r22,%r19,%r0		; result discarded: carry := borrow
	stws,ma		%r22,4(%r26)		; *res_ptr++ = difference
	addc		%r28,%r1,%r19		; next subtrahend = high + low + borrow
	xmpyu		%fr4,%fr5,%fr6		; start next product
	ldw		-16(%r30),%r28		; high half of pending product
	fstds		%fr6,-16(%r30)		; spill new product
	addc		%r0,%r28,%r28		; fold carry into high half
	addib,<>	-1,%r24,L$loop		; loop while limbs remain
	ldw		-12(%r30),%r1		; next low half (branch delay slot)

; Drain the two products still in the software pipeline.
	.label	L$end
	ldw		0(%r26),%r29		; r29 = *res_ptr
	sub		%r29,%r19,%r22		; subtract pending limb
	add		%r22,%r19,%r0		; carry := borrow (result discarded)
	stws,ma		%r22,4(%r26)		; *res_ptr++ = difference
	addc		%r28,%r1,%r19		; last subtrahend = high + low + borrow
	ldw		-16(%r30),%r28		; high half of final product
	ldws		0(%r26),%r29		; r29 = *res_ptr
	addc		%r0,%r28,%r28		; fold carry into high half
	sub		%r29,%r19,%r22		; final subtraction
	add		%r22,%r19,%r0		; carry := borrow
	stws,ma		%r22,4(%r26)		; store last difference
	addc		%r0,%r28,%r28		; r28 = carry-out limb (return value)
	bv		0(%r2)			; return
	ldo		-64(%r30),%r30		; pop frame (branch delay slot)

; s1_size == 1: one multiply, one subtraction, no pipelining needed.
	.label	L$just_one_limb
	xmpyu		%fr4,%fr5,%fr6		; fr6 = s2_limb * s1[0]
	ldw		0(%r26),%r29		; r29 = *res_ptr
	fstds		%fr6,-16(%r30)		; spill product
	ldw		-12(%r30),%r1		; low half
	ldw		-16(%r30),%r28		; high half
	sub		%r29,%r1,%r22		; r22 = *res_ptr - low
	add		%r22,%r1,%r0		; carry := borrow (result discarded)
	stw		%r22,0(%r26)		; *res_ptr = difference
	addc		%r0,%r28,%r28		; r28 = high + borrow (return value)
	bv		0(%r2)			; return
	ldo		-64(%r30),%r30		; pop frame (branch delay slot)

	.exit
	.procend