/* $FreeBSD: stable/11/secure/lib/libcrypto/arm/ghashv8-armx.S 305153 2016-08-31 20:33:59Z jkim $ */
/* Do not modify. This file is auto-generated from ghashv8-armx.pl. */
#include "arm_arch.h"

.text
.fpu	neon
.code	32
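@ This file provides the GHASH primitives consumed by OpenSSL's
@ crypto/modes/gcm128.c, which declares them (using its internal
@ u64/u128/u8 types) roughly as:
@   void gcm_init_v8(u128 Htable[16], const u64 H[2]);
@   void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]);
@   void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16], const u8 *inp, size_t len);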
.global	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
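@ gcm_init_v8(Htable, H): r0 -> Htable to fill, r1 -> raw hash key H.
@ Stores the "twisted" (shifted and reduced) H in Htable[0], the squared
@ key H^2 in Htable[2], and their packed Karatsuba pre-processed halves
@ in Htable[1], so the multiply routines below need only three 64x64
@ pmull/pmull2 operations per 128x128 product.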
gcm_init_v8:
	vld1.64		{q9},[r1]		@ load input H
	vmov.i8		q11,#0xe1
	vshl.i64	q11,q11,#57		@ 0xc2.0
	vext.8		q3,q9,q9,#8
	vshr.u64	q10,q11,#63
	vdup.32	q9,d18[1]
	vext.8		q8,q10,q11,#8		@ t0=0xc2....01
	vshr.u64	q10,q3,#63
	vshr.s32	q9,q9,#31		@ broadcast carry bit
	vand		q10,q10,q8
	vshl.i64	q3,q3,#1
	vext.8		q10,q10,q10,#8
	vand		q8,q8,q9
	vorr		q3,q3,q10		@ H<<<=1
	veor		q12,q3,q8		@ twisted H
	vst1.64		{q12},[r0]!		@ store Htable[0]

	@ calculate H^2
	vext.8		q8,q12,q12,#8		@ Karatsuba pre-processing
	.byte	0xa8,0x0e,0xa8,0xf2	@ pmull q0,q12,q12
	veor		q8,q8,q12
	.byte	0xa9,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q12
	.byte	0xa0,0x2e,0xa0,0xf2	@ pmull q1,q8,q8

	vext.8		q9,q0,q2,#8		@ Karatsuba post-processing
	veor		q10,q0,q2
	veor		q1,q1,q9
	veor		q1,q1,q10
	.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase

	vmov		d4,d3		@ Xh|Xm - 256-bit result
	vmov		d3,d0		@ Xm is rotated Xl
	veor		q0,q1,q10

	vext.8		q10,q0,q0,#8		@ 2nd phase
	.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor		q10,q10,q2
	veor		q14,q0,q10

	vext.8		q9,q14,q14,#8		@ Karatsuba pre-processing
	veor		q9,q9,q14
	vext.8		q13,q8,q9,#8		@ pack Karatsuba pre-processed
	vst1.64		{q13-q14},[r0]		@ store Htable[1..2]

	bx	lr
.size	gcm_init_v8,.-gcm_init_v8
.global	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
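@ gcm_gmult_v8(Xi, Htable): multiplies the hash value Xi (at r0) by the
@ twisted H from Htable (at r1) in GF(2^128) and writes it back to Xi.
@ The 128x128 product is assembled Karatsuba-style from three pmull/pmull2
@ results (H.lo·Xi.lo, H.hi·Xi.hi, (H.lo+H.hi)·(Xi.lo+Xi.hi)) and then
@ folded back to 128 bits by the two-phase reduction with the 0xc2.. constant.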
gcm_gmult_v8:
	vld1.64		{q9},[r0]		@ load Xi
	vmov.i8		q11,#0xe1
	vld1.64		{q12-q13},[r1]	@ load twisted H, ...
	vshl.u64	q11,q11,#57
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8		q3,q9,q9,#8

	.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor		q9,q9,q3		@ Karatsuba pre-processing
	.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
	.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		q9,q0,q2,#8		@ Karatsuba post-processing
	veor		q10,q0,q2
	veor		q1,q1,q9
	veor		q1,q1,q10
	.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov		d4,d3		@ Xh|Xm - 256-bit result
	vmov		d3,d0		@ Xm is rotated Xl
	veor		q0,q1,q10

	vext.8		q10,q0,q0,#8		@ 2nd phase of reduction
	.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor		q10,q10,q2
	veor		q0,q0,q10

#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8		q0,q0,q0,#8
	vst1.64		{q0},[r0]		@ write out Xi

	bx	lr
.size	gcm_gmult_v8,.-gcm_gmult_v8
.global	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
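@ gcm_ghash_v8(Xi, Htable, inp, len): r0 -> Xi accumulator, r1 -> Htable,
@ r2 -> input, r3 = length in bytes (whole 16-byte blocks).  The
@ modulo-scheduled main loop hashes two blocks per iteration, multiplying
@ the even block (xored with Xi) by H^2 and the odd block by H so that
@ both products share one reduction; a single leftover block is handled
@ at .Lodd_tail_v8.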
gcm_ghash_v8:
	vstmdb		sp!,{d8-d15}		@ 32-bit ABI says so
	vld1.64		{q0},[r0]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ loaded value would have
						@ to be rotated in order to
						@ make it appear as in
						@ the algorithm specification
	subs		r3,r3,#32		@ see if r3 is 32 or larger
	mov		r12,#16		@ r12 is used as post-
						@ increment for input pointer;
						@ as loop is modulo-scheduled
						@ r12 is zeroed just in time
						@ to preclude overstepping
						@ inp[len], which means that
						@ last block[s] are actually
						@ loaded twice, but last
						@ copy is not processed
	vld1.64		{q12-q13},[r1]!	@ load twisted H, ..., H^2
	vmov.i8		q11,#0xe1
	vld1.64		{q14},[r1]
	moveq	r12,#0			@ is it time to zero r12?
	vext.8		q0,q0,q0,#8		@ rotate Xi
	vld1.64		{q8},[r2]!	@ load [rotated] I[0]
	vshl.u64	q11,q11,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	q8,q8
	vrev64.8	q0,q0
#endif
	vext.8		q3,q8,q8,#8		@ rotate I[0]
	blo		.Lodd_tail_v8		@ r3 was less than 32
	vld1.64		{q9},[r2],r12	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8		q7,q9,q9,#8
	veor		q3,q3,q0		@ I[i]^=Xi
	.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor		q9,q9,q7		@ Karatsuba pre-processing
	.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	b		.Loop_mod2x_v8

.align	4
.Loop_mod2x_v8:
	vext.8		q10,q3,q3,#8
	subs		r3,r3,#32		@ is there more data?
	.byte	0x86,0x0e,0xac,0xf2	@ pmull q0,q14,q3		@ H^2.lo·Xi.lo
	movlo	r12,#0			@ is it time to zero r12?

	 .byte	0xa2,0xae,0xaa,0xf2	@ pmull q5,q13,q9
	veor		q10,q10,q3		@ Karatsuba pre-processing
	.byte	0x87,0x4e,0xad,0xf2	@ pmull2 q2,q14,q3		@ H^2.hi·Xi.hi
	veor		q0,q0,q4		@ accumulate
	.byte	0xa5,0x2e,0xab,0xf2	@ pmull2 q1,q13,q10		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	 vld1.64	{q8},[r2],r12	@ load [rotated] I[i+2]

	veor		q2,q2,q6
	 moveq	r12,#0			@ is it time to zero r12?
	veor		q1,q1,q5

	vext.8		q9,q0,q2,#8		@ Karatsuba post-processing
	veor		q10,q0,q2
	veor		q1,q1,q9
	 vld1.64	{q9},[r2],r12	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	 vrev64.8	q8,q8
#endif
	veor		q1,q1,q10
	.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

#ifndef __ARMEB__
	 vrev64.8	q9,q9
#endif
	vmov		d4,d3		@ Xh|Xm - 256-bit result
	vmov		d3,d0		@ Xm is rotated Xl
	 vext.8		q7,q9,q9,#8
	 vext.8		q3,q8,q8,#8
	veor		q0,q1,q10
	 .byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor		q3,q3,q2		@ accumulate q3 early

	vext.8		q10,q0,q0,#8		@ 2nd phase of reduction
	.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor		q3,q3,q10
	 veor		q9,q9,q7		@ Karatsuba pre-processing
	veor		q3,q3,q0
	 .byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	bhs		.Loop_mod2x_v8		@ there were at least 32 more bytes

	veor		q2,q2,q10
	vext.8		q3,q8,q8,#8		@ re-construct q3
	adds		r3,r3,#32		@ re-construct r3
	veor		q0,q0,q2		@ re-construct q0
	beq		.Ldone_v8		@ is r3 zero?
.Lodd_tail_v8:
	vext.8		q10,q0,q0,#8
	veor		q3,q3,q0		@ inp^=Xi
	veor		q9,q8,q10		@ q9 is rotated inp^Xi

	.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor		q9,q9,q3		@ Karatsuba pre-processing
	.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
	.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		q9,q0,q2,#8		@ Karatsuba post-processing
	veor		q10,q0,q2
	veor		q1,q1,q9
	veor		q1,q1,q10
	.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov		d4,d3		@ Xh|Xm - 256-bit result
	vmov		d3,d0		@ Xm is rotated Xl
	veor		q0,q1,q10

	vext.8		q10,q0,q0,#8		@ 2nd phase of reduction
	.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor		q10,q10,q2
	veor		q0,q0,q10

.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8		q0,q0,q0,#8
	vst1.64		{q0},[r0]		@ write out Xi

	vldmia		sp!,{d8-d15}		@ 32-bit ABI says so
	bx	lr
.size	gcm_ghash_v8,.-gcm_ghash_v8
.asciz  "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.align  2