/* SPARC v8 __mpn_addmul_1 -- Multiply a limb vector with a limb and
 *                            add the result to a second limb vector.
 *
 *      Copyright (C) 1992, 1993, 1994, 1995, 1998,
 *                    2001, 2002 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 */



! INPUT PARAMETERS
! res_ptr	o0
! s1_ptr	o1
! size		o2
! s2_limb	o3

#include "sysdep.h"

! mpi_limb_t _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
!                                 mpi_size_t size, mpi_limb_t s2_limb )
!
! For each of the SIZE limbs: res_ptr[i] += s1_ptr[i] * s2_limb, with
! carry propagation; the final carry limb is returned in %o0.
!
! The main loop is unrolled four limbs per pass.  The computed jump
! through the 16-byte stubs at LL peels the (size mod 4) leftover
! iterations by entering the loop at one of four interior points
! (Loop00/Loop01/Loop10/Loop11), with %o0/%o1 pre-biased so the fixed
! in-loop offsets line up for the partial first pass.
!
! Register roles: %o4 = current s1 limb, %g3 = low product word,
! %g2 = running carry limb, %g1 = scratch (jump offset / res limb).
.text
	.align 4
	.global C_SYMBOL_NAME(_gcry_mpih_addmul_1)
C_SYMBOL_NAME(_gcry_mpih_addmul_1):
	orcc	%g0,%g0,%g2	! carry limb = 0; also clears the PSR carry
				! bit, which the addxcc entry paths rely on
	ld	[%o1+0],%o4	! 1	! prefetch first s1 limb

	sll	%o2,4,%g1		! %g1 = size * 16
	and	%g1,(4-1)<<4,%g1	! %g1 = (size mod 4) * 16 = stub offset
#if PIC
	mov	%o7,%g4 		! Save return address register
	call	1f
	add	%o7,LL-1f,%g3		! (delay slot) %g3 = &LL, PC-relative
1:	mov	%g4,%o7 		! Restore return address register
#else
	sethi	%hi(LL),%g3
	or	%g3,%lo(LL),%g3		! %g3 = &LL (absolute address, non-PIC)
#endif
	jmp	%g3+%g1			! dispatch on size mod 4; each LL stub
	nop				! below is exactly 4 insns = 16 bytes
LL:
LL00:	add	%o0,-4,%o0		! size mod 4 == 0
	b	Loop00		/* 4, 8, 12, ... */
	add	%o1,-4,%o1		! (delay slot) bias pointers by -1 limb
	nop
LL01:	b	Loop01		/* 1, 5, 9, ... */
	nop
	nop
	nop
LL10:	add	%o0,-12,%o0	/* 2, 6, 10, ... */
	b	Loop10
	add	%o1,4,%o1		! (delay slot)
	nop
LL11:	add	%o0,-8,%o0	/* 3, 7, 11, ... */
	b	Loop11
	add	%o1,-8,%o1		! (delay slot)
	nop

! Main loop, four limbs per pass.  The "! n" tags mark which of the four
! unrolled limbs (1-4) an instruction belongs to.  umul leaves the low
! product word in its destination and the high word in %y; the carry out
! of each res-limb addcc feeds the next limb's addxcc.
1:	addcc	%g3,%g2,%g3	! 1	! low product + carry limb
	ld	[%o1+4],%o4	! 2	! fetch next s1 limb
	rd	%y,%g2		! 1	! high product word -> new carry limb
	addx	%g0,%g2,%g2		! fold carry-out into carry limb
	ld	[%o0+0],%g1	! 2	! fetch res limb
	addcc	%g1,%g3,%g3		! res limb += product low
	st	%g3,[%o0+0]	! 1	! store updated res limb
Loop00: umul	%o4,%o3,%g3	! 2
	ld	[%o0+4],%g1	! 2
	addxcc	%g3,%g2,%g3	! 2	! + carry limb + carry from limb 1 add
	ld	[%o1+8],%o4	! 3
	rd	%y,%g2		! 2
	addx	%g0,%g2,%g2
	nop
	addcc	%g1,%g3,%g3
	st	%g3,[%o0+4]	! 2
Loop11: umul	%o4,%o3,%g3	! 3
	addxcc	%g3,%g2,%g3	! 3
	ld	[%o1+12],%o4	! 4
	rd	%y,%g2		! 3
	add	%o1,16,%o1		! advance s1_ptr by 4 limbs
	addx	%g0,%g2,%g2
	ld	[%o0+8],%g1	! 2
	addcc	%g1,%g3,%g3
	st	%g3,[%o0+8]	! 3
Loop10: umul	%o4,%o3,%g3	! 4
	addxcc	%g3,%g2,%g3	! 4
	ld	[%o1+0],%o4	! 1	! fetch first limb of next pass
	rd	%y,%g2		! 4
	addx	%g0,%g2,%g2
	ld	[%o0+12],%g1	! 2
	addcc	%g1,%g3,%g3
	st	%g3,[%o0+12]	! 4
	add	%o0,16,%o0		! advance res_ptr by 4 limbs
	addx	%g0,%g2,%g2
Loop01: addcc	%o2,-4,%o2		! size -= 4; flag clobber is harmless
					! since the carry limb lives in %g2
	bg	1b			! more limbs to process?
	umul	%o4,%o3,%g3	! 1	! (delay slot) product for next limb

! Wind-down: finish the limb whose product was formed in the delay slot.
	addcc	%g3,%g2,%g3	! 4
	rd	%y,%g2		! 4
	addx	%g0,%g2,%g2
	ld	[%o0+0],%g1	! 2
	addcc	%g1,%g3,%g3
	st	%g3,[%o0+0]	! 4
	addx	%g0,%g2,%o0		! return value: final carry limb

	retl
	 nop


!	umul, ld, addxcc, rd, st

!	umul, ld, addxcc, rd, ld, addcc, st, addx
