armv4-gf2m.pl revision 305152
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# May 2011
#
# The module implements bn_GF2m_mul_2x2 polynomial multiplication
# used in bn_gf2m.c. For the time being it's a fairly mechanical
# port from C... except that it has two code paths: pure integer
# code suitable for any ARMv4 and later CPU, and NEON code suitable
# for ARMv7. The pure integer 1x1 multiplication subroutine runs in
# ~45 cycles on a dual-issue core such as Cortex-A8, which is ~50%
# faster than compiler-generated code. For ECDH and ECDSA verify (but
# not for ECDSA sign) this means a 25%-45% improvement depending on
# key length, more for longer keys. Even though NEON 1x1 multiplication
# runs in even fewer cycles, ~30, the improvement is measurable only
# on longer keys. One has to optimize code elsewhere to get the full
# NEON benefit...
#
# April 2014
#
# Double bn_GF2m_mul_2x2 performance by using the algorithm from the
# paper referred to below, which improves ECDH and ECDSA verify
# benchmarks by 18-40%.
#
# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
# Polynomial Multiplication on ARM Processors using the NEON Engine.
#
# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf

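# For reference, a 1x1 multiplication here means carry-less
# multiplication of binary polynomials over GF(2): partial products
# are combined with XOR and no carries propagate between bit
# positions. A minimal 32x32->64 reference model, illustrative only
# (gf2_mul_1x1_ref is not part of OpenSSL) and assuming a perl with
# 64-bit integers:
#
#	sub gf2_mul_1x1_ref {
#		my ($a, $b) = @_;
#		my $r = 0;
#		for my $i (0 .. 31) {
#			$r ^= $a << $i if ($b >> $i) & 1;
#		}
#		return $r;
#	}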
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";

$code=<<___;
#include "arm_arch.h"

.text
.code	32
___
################
# private interface to mul_1x1_ialu
#
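# mul_1x1_ialu multiplies the 32-bit polynomials in $a and $b using
# plain integer instructions. It first builds tab[0..7] on the stack,
# the products of a1 = a & 0x3fffffff with every 3-bit polynomial
# (a1 is masked so that a1<<2 still fits in 32 bits), then scans b
# three bits at a time, accumulating
#
#	hi:lo ^= tab[(b >> 3*k) & 7] << 3*k
#
# over eleven chunks of b (the last chunk is only 2 bits wide). The
# two top bits of a that were masked out of the table are folded back
# in with the conditional "eorne" instructions near the end. $mask
# holds 7<<2 so that the "and" instructions produce byte offsets into
# tab[] directly.
#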
$a="r1";
$b="r0";

($a0,$a1,$a2,$a12,$a4,$a14)=
($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12);

$mask="r12";

$code.=<<___;
.type	mul_1x1_ialu,%function
.align	5
mul_1x1_ialu:
	mov	$a0,#0
	bic	$a1,$a,#3<<30		@ a1=a&0x3fffffff
	str	$a0,[sp,#0]		@ tab[0]=0
	add	$a2,$a1,$a1		@ a2=a1<<1
	str	$a1,[sp,#4]		@ tab[1]=a1
	eor	$a12,$a1,$a2		@ a1^a2
	str	$a2,[sp,#8]		@ tab[2]=a2
	mov	$a4,$a1,lsl#2		@ a4=a1<<2
	str	$a12,[sp,#12]		@ tab[3]=a1^a2
	eor	$a14,$a1,$a4		@ a1^a4
	str	$a4,[sp,#16]		@ tab[4]=a4
	eor	$a0,$a2,$a4		@ a2^a4
	str	$a14,[sp,#20]		@ tab[5]=a1^a4
	eor	$a12,$a12,$a4		@ a1^a2^a4
	str	$a0,[sp,#24]		@ tab[6]=a2^a4
	and	$i0,$mask,$b,lsl#2
	str	$a12,[sp,#28]		@ tab[7]=a1^a2^a4

	and	$i1,$mask,$b,lsr#1
	ldr	$lo,[sp,$i0]		@ tab[b       & 0x7]
	and	$i0,$mask,$b,lsr#4
	ldr	$t1,[sp,$i1]		@ tab[b >>  3 & 0x7]
	and	$i1,$mask,$b,lsr#7
	ldr	$t0,[sp,$i0]		@ tab[b >>  6 & 0x7]
	eor	$lo,$lo,$t1,lsl#3	@ stall
	mov	$hi,$t1,lsr#29
	ldr	$t1,[sp,$i1]		@ tab[b >>  9 & 0x7]

	and	$i0,$mask,$b,lsr#10
	eor	$lo,$lo,$t0,lsl#6
	eor	$hi,$hi,$t0,lsr#26
	ldr	$t0,[sp,$i0]		@ tab[b >> 12 & 0x7]

	and	$i1,$mask,$b,lsr#13
	eor	$lo,$lo,$t1,lsl#9
	eor	$hi,$hi,$t1,lsr#23
	ldr	$t1,[sp,$i1]		@ tab[b >> 15 & 0x7]

	and	$i0,$mask,$b,lsr#16
	eor	$lo,$lo,$t0,lsl#12
	eor	$hi,$hi,$t0,lsr#20
	ldr	$t0,[sp,$i0]		@ tab[b >> 18 & 0x7]

	and	$i1,$mask,$b,lsr#19
	eor	$lo,$lo,$t1,lsl#15
	eor	$hi,$hi,$t1,lsr#17
	ldr	$t1,[sp,$i1]		@ tab[b >> 21 & 0x7]

	and	$i0,$mask,$b,lsr#22
	eor	$lo,$lo,$t0,lsl#18
	eor	$hi,$hi,$t0,lsr#14
	ldr	$t0,[sp,$i0]		@ tab[b >> 24 & 0x7]

	and	$i1,$mask,$b,lsr#25
	eor	$lo,$lo,$t1,lsl#21
	eor	$hi,$hi,$t1,lsr#11
	ldr	$t1,[sp,$i1]		@ tab[b >> 27 & 0x7]

	tst	$a,#1<<30
	and	$i0,$mask,$b,lsr#28
	eor	$lo,$lo,$t0,lsl#24
	eor	$hi,$hi,$t0,lsr#8
	ldr	$t0,[sp,$i0]		@ tab[b >> 30      ]

	eorne	$lo,$lo,$b,lsl#30
	eorne	$hi,$hi,$b,lsr#2
	tst	$a,#1<<31
	eor	$lo,$lo,$t1,lsl#27
	eor	$hi,$hi,$t1,lsr#5
	eorne	$lo,$lo,$b,lsl#31
	eorne	$hi,$hi,$b,lsr#1
	eor	$lo,$lo,$t0,lsl#30
	eor	$hi,$hi,$t0,lsr#2

	mov	pc,lr
.size	mul_1x1_ialu,.-mul_1x1_ialu
___
################
# void	bn_GF2m_mul_2x2(BN_ULONG *r,
#	BN_ULONG a1,BN_ULONG a0,
#	BN_ULONG b1,BN_ULONG b0);	# r[3..0]=a1a0·b1b0
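#
# The integer path uses one level of Karatsuba: with P0 = a0·b0,
# P1 = a1·b1 and M = (a1^a0)·(b1^b0), all computed by mul_1x1_ialu,
# the 128-bit result is assembled as
#
#	r[3]:r[0] = P1·x^64 ^ (M ^ P0 ^ P1)·x^32 ^ P0
#
# i.e. three 1x1 multiplications instead of four; the eor ladder after
# the third mul_1x1_ialu call performs this recombination.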
{
$code.=<<___;
.global	bn_GF2m_mul_2x2
.type	bn_GF2m_mul_2x2,%function
.align	5
bn_GF2m_mul_2x2:
#if __ARM_MAX_ARCH__>=7
	ldr	r12,.LOPENSSL_armcap
.Lpic:	ldr	r12,[pc,r12]
	tst	r12,#1
	bne	.LNEON
#endif
___
$ret="r10";	# reassigned 1st argument
$code.=<<___;
	stmdb	sp!,{r4-r10,lr}
	mov	$ret,r0			@ reassign 1st argument
	mov	$b,r3			@ $b=b1
	ldr	r3,[sp,#32]		@ load b0
	mov	$mask,#7<<2
	sub	sp,sp,#32		@ allocate tab[8]

	bl	mul_1x1_ialu		@ a1·b1
	str	$lo,[$ret,#8]
	str	$hi,[$ret,#12]

	eor	$b,$b,r3		@ flip b0 and b1
	 eor	$a,$a,r2		@ flip a0 and a1
	eor	r3,r3,$b
	 eor	r2,r2,$a
	eor	$b,$b,r3
	 eor	$a,$a,r2
	bl	mul_1x1_ialu		@ a0·b0
	str	$lo,[$ret]
	str	$hi,[$ret,#4]

	eor	$a,$a,r2
	eor	$b,$b,r3
	bl	mul_1x1_ialu		@ (a1+a0)·(b1+b0)
___
@r=map("r$_",(6..9));
$code.=<<___;
	ldmia	$ret,{@r[0]-@r[3]}
	eor	$lo,$lo,$hi
	eor	$hi,$hi,@r[1]
	eor	$lo,$lo,@r[0]
	eor	$hi,$hi,@r[2]
	eor	$lo,$lo,@r[3]
	eor	$hi,$hi,@r[3]
	str	$hi,[$ret,#8]
	eor	$lo,$lo,$hi
	add	sp,sp,#32		@ destroy tab[8]
	str	$lo,[$ret,#4]

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r10,pc}
#else
	ldmia	sp!,{r4-r10,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
___
}
{
my ($r,$t0,$t1,$t2,$t3)=map("q$_",(0..3,8..12));
my ($a,$b,$k48,$k32,$k16)=map("d$_",(26..31));

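# The NEON path computes the full 64x64 carry-less product directly,
# following the Câmara-Gouvêa-López-Dahab scheme referenced above:
# each vmull.p8 produces eight 8x8 polynomial products, and byte-rotated
# copies of the operands (vext.8) supply the remaining partial products,
# which are masked with $k48/$k32/$k16, shifted into position and XORed
# into the 128-bit accumulator $r.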
$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.align	5
.LNEON:
	ldr		r12, [sp]		@ 5th argument
	vmov		$a, r2, r1
	vmov		$b, r12, r3
	vmov.i64	$k48, #0x0000ffffffffffff
	vmov.i64	$k32, #0x00000000ffffffff
	vmov.i64	$k16, #0x000000000000ffff

	vext.8		$t0#lo, $a, $a, #1	@ A1
	vmull.p8	$t0, $t0#lo, $b		@ F = A1*B
	vext.8		$r#lo, $b, $b, #1	@ B1
	vmull.p8	$r, $a, $r#lo		@ E = A*B1
	vext.8		$t1#lo, $a, $a, #2	@ A2
	vmull.p8	$t1, $t1#lo, $b		@ H = A2*B
	vext.8		$t3#lo, $b, $b, #2	@ B2
	vmull.p8	$t3, $a, $t3#lo		@ G = A*B2
	vext.8		$t2#lo, $a, $a, #3	@ A3
	veor		$t0, $t0, $r		@ L = E + F
	vmull.p8	$t2, $t2#lo, $b		@ J = A3*B
	vext.8		$r#lo, $b, $b, #3	@ B3
	veor		$t1, $t1, $t3		@ M = G + H
	vmull.p8	$r, $a, $r#lo		@ I = A*B3
	veor		$t0#lo, $t0#lo, $t0#hi	@ t0 = (L) (P0 + P1) << 8
	vand		$t0#hi, $t0#hi, $k48
	vext.8		$t3#lo, $b, $b, #4	@ B4
	veor		$t1#lo, $t1#lo, $t1#hi	@ t1 = (M) (P2 + P3) << 16
	vand		$t1#hi, $t1#hi, $k32
	vmull.p8	$t3, $a, $t3#lo		@ K = A*B4
	veor		$t2, $t2, $r		@ N = I + J
	veor		$t0#lo, $t0#lo, $t0#hi
	veor		$t1#lo, $t1#lo, $t1#hi
	veor		$t2#lo, $t2#lo, $t2#hi	@ t2 = (N) (P4 + P5) << 24
	vand		$t2#hi, $t2#hi, $k16
	vext.8		$t0, $t0, $t0, #15
	veor		$t3#lo, $t3#lo, $t3#hi	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	$t3#hi, #0
	vext.8		$t1, $t1, $t1, #14
	veor		$t2#lo, $t2#lo, $t2#hi
	vmull.p8	$r, $a, $b		@ D = A*B
	vext.8		$t3, $t3, $t3, #12
	vext.8		$t2, $t2, $t2, #13
	veor		$t0, $t0, $t1
	veor		$t2, $t2, $t3
	veor		$r, $r, $t0
	veor		$r, $r, $t2

	vst1.32		{$r}, [r0]
	ret		@ bx lr
#endif
___
}
$code.=<<___;
.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
#if __ARM_MAX_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-(.Lpic+8)
#endif
.asciz	"GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	5

#if __ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
#endif
___

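# Post-process the generated code: expand the synthetic "qN#lo"/"qN#hi"
# notation into the corresponding d registers (d2N and d2N+1), turn
# "ret" into "bx lr", and emit any remaining "bx lr" as a raw opcode
# word so that the ARMv4-compatible path still assembles with
# -march=armv4. Because the substitutions are chained with "or", at
# most one of them applies to any given line.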
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/\bret\b/bx	lr/go		or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;    # make it possible to compile with -march=armv4

	print $_,"\n";
}
close STDOUT;   # enforce flush