1#! /usr/bin/env perl
2# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the OpenSSL license (the "License").  You may not use
5# this file except in compliance with the License.  You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
9#
10# ====================================================================
11# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12# project. The module is, however, dual licensed under OpenSSL and
13# CRYPTOGAMS licenses depending on where you obtain it. For further
14# details see http://www.openssl.org/~appro/cryptogams/.
15# ====================================================================
16#
17# November 2014
18#
19# ChaCha20 for x86_64.
20#
21# December 2016
22#
23# Add AVX512F code path.
24#
25# Performance in cycles per byte out of large buffer.
26#
27#		IALU/gcc 4.8(i)	1xSSSE3/SSE2	4xSSSE3	    NxAVX(v)
28#
29# P4		9.48/+99%	-/22.7(ii)	-
30# Core2		7.83/+55%	7.90/8.08	4.35
31# Westmere	7.19/+50%	5.60/6.70	3.00
32# Sandy Bridge	8.31/+42%	5.45/6.76	2.72
33# Ivy Bridge	6.71/+46%	5.40/6.49	2.41
34# Haswell	5.92/+43%	5.20/6.45	2.42	    1.23
35# Skylake[-X]	5.87/+39%	4.70/-		2.31	    1.19[0.57]
36# Silvermont	12.0/+33%	7.75/7.40	7.03(iii)
37# Knights L	11.7/-		-		9.60(iii)   0.80
38# Goldmont	10.6/+17%	5.10/-		3.28
39# Sledgehammer	7.28/+52%	-/14.2(ii)	-
40# Bulldozer	9.66/+28%	9.85/11.1	3.06(iv)
41# Ryzen		5.96/+50%	5.19/-		2.40        2.09
42# VIA Nano	10.5/+46%	6.72/8.60	6.05
43#
# (i)	compared to older gcc 3.x, one can observe a >2x improvement on
#	most platforms;
# (ii)	as can be seen, SSE2 performance is too low on legacy
#	processors; NxSSE2 results are naturally better, but not
#	impressively better than IALU ones, which is why you won't
#	find SSE2 code below;
# (iii)	this is not an optimal result for Atom because of MSROM
#	limitations; SSE2 can do better, but the gain is considered too
#	low to justify the [maintenance] effort;
# (iv)	Bulldozer actually executes the 4xXOP code path, which delivers 2.20;
#
55# Modified from upstream OpenSSL to remove the XOP code.
56
57$flavour = shift;
58$output  = shift;
59if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
60
61$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
62
63$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
64( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
65( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
66die "can't locate x86_64-xlate.pl";
67
68$avx = 2;
69
70open OUT,"| \"$^X\" $xlate $flavour $output";
71*STDOUT=*OUT;
72
73# input parameter block
74($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
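# The five arguments correspond to a C prototype along the lines of
#	void ChaCha20_ctr32(uint8_t *out, const uint8_t *inp, size_t len,
#			    const uint32_t key[8], const uint32_t counter[4]);
# passed in the System V AMD64 argument registers %rdi, %rsi, %rdx, %rcx, %r8.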
75
76$code.=<<___;
77.text
78
79.extern OPENSSL_ia32cap_P
80
81.align	64
82.Lzero:
83.long	0,0,0,0
84.Lone:
85.long	1,0,0,0
86.Linc:
87.long	0,1,2,3
88.Lfour:
89.long	4,4,4,4
90.Lincy:
91.long	0,2,4,6,1,3,5,7
92.Leight:
93.long	8,8,8,8,8,8,8,8
94.Lrot16:
95.byte	0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
96.Lrot24:
97.byte	0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
98.Lsigma:
99.asciz	"expand 32-byte k"
100.align	64
101.Lzeroz:
102.long	0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
103.Lfourz:
104.long	4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
105.Lincz:
106.long	0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
107.Lsixteen:
108.long	16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
109.asciz	"ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
110___
111
112sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
113{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
114  my $arg = pop;
115    $arg = "\$$arg" if ($arg*1 eq $arg);
116    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
117}
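# AUTOLOAD lets the round bodies be written as 32-bit-style perlasm calls;
# e.g. &rol(@x[3],16) emits "rol	$16,%edx": the last argument becomes the
# (possibly immediate) AT&T-syntax source operand and the remaining
# arguments follow in reverse order.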
118
119@x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
120    "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
121@t=("%esi","%edi");
122
123sub ROUND {			# critical path is 24 cycles per round
124my ($a0,$b0,$c0,$d0)=@_;
125my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
126my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
127my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
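	# Each map above rotates an index within its aligned group of four,
	# e.g. (0,4,8,12) -> (1,5,9,13) -> (2,6,10,14) -> (3,7,11,15) for the
	# even round and (0,5,10,15) -> (1,6,11,12) -> (2,7,8,13) -> (3,4,9,14)
	# for the odd one, so all four quarter-rounds are derived from the
	# single index set passed in.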
128my ($xc,$xc_)=map("\"$_\"",@t);
129my @x=map("\"$_\"",@x);
130
131	# Consider order in which variables are addressed by their
132	# index:
133	#
134	#	a   b   c   d
135	#
136	#	0   4   8  12 < even round
137	#	1   5   9  13
138	#	2   6  10  14
139	#	3   7  11  15
140	#	0   5  10  15 < odd round
141	#	1   6  11  12
142	#	2   7   8  13
143	#	3   4   9  14
144	#
	# 'a', 'b' and 'd's are permanently allocated in registers,
	# @x[0..7,12..15], while the 'c's are maintained in memory. If
	# you observe the 'c' column, you'll notice that a pair of 'c's
	# is invariant between rounds. This means that we have to reload
	# them only once per round, in the middle. This is why you'll see
	# a bunch of 'c' stores and loads in the middle, but none at
	# the beginning or end.

	# Normally instructions would be interleaved to favour in-order
	# execution. Out-of-order cores generally manage it gracefully,
	# but not this time, for some reason. As in-order execution
	# cores are a dying breed and old Atom is the only one around,
	# the instructions are left uninterleaved. Besides, Atom is better
	# off executing the 1xSSSE3 code anyway...
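	# For reference, each column in the tables above is one ChaCha20
	# quarter-round on 32-bit words:
	#	a += b; d ^= a; d <<<= 16;
	#	c += d; b ^= c; b <<<= 12;
	#	a += b; d ^= a; d <<<=  8;
	#	c += d; b ^= c; b <<<=  7;
	# which is exactly the add/xor/rol pattern generated below.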
159
160	(
161	"&add	(@x[$a0],@x[$b0])",	# Q1
162	"&xor	(@x[$d0],@x[$a0])",
163	"&rol	(@x[$d0],16)",
164	 "&add	(@x[$a1],@x[$b1])",	# Q2
165	 "&xor	(@x[$d1],@x[$a1])",
166	 "&rol	(@x[$d1],16)",
167
168	"&add	($xc,@x[$d0])",
169	"&xor	(@x[$b0],$xc)",
170	"&rol	(@x[$b0],12)",
171	 "&add	($xc_,@x[$d1])",
172	 "&xor	(@x[$b1],$xc_)",
173	 "&rol	(@x[$b1],12)",
174
175	"&add	(@x[$a0],@x[$b0])",
176	"&xor	(@x[$d0],@x[$a0])",
177	"&rol	(@x[$d0],8)",
178	 "&add	(@x[$a1],@x[$b1])",
179	 "&xor	(@x[$d1],@x[$a1])",
180	 "&rol	(@x[$d1],8)",
181
182	"&add	($xc,@x[$d0])",
183	"&xor	(@x[$b0],$xc)",
184	"&rol	(@x[$b0],7)",
185	 "&add	($xc_,@x[$d1])",
186	 "&xor	(@x[$b1],$xc_)",
187	 "&rol	(@x[$b1],7)",
188
189	"&mov	(\"4*$c0(%rsp)\",$xc)",	# reload pair of 'c's
190	 "&mov	(\"4*$c1(%rsp)\",$xc_)",
191	"&mov	($xc,\"4*$c2(%rsp)\")",
192	 "&mov	($xc_,\"4*$c3(%rsp)\")",
193
194	"&add	(@x[$a2],@x[$b2])",	# Q3
195	"&xor	(@x[$d2],@x[$a2])",
196	"&rol	(@x[$d2],16)",
197	 "&add	(@x[$a3],@x[$b3])",	# Q4
198	 "&xor	(@x[$d3],@x[$a3])",
199	 "&rol	(@x[$d3],16)",
200
201	"&add	($xc,@x[$d2])",
202	"&xor	(@x[$b2],$xc)",
203	"&rol	(@x[$b2],12)",
204	 "&add	($xc_,@x[$d3])",
205	 "&xor	(@x[$b3],$xc_)",
206	 "&rol	(@x[$b3],12)",
207
208	"&add	(@x[$a2],@x[$b2])",
209	"&xor	(@x[$d2],@x[$a2])",
210	"&rol	(@x[$d2],8)",
211	 "&add	(@x[$a3],@x[$b3])",
212	 "&xor	(@x[$d3],@x[$a3])",
213	 "&rol	(@x[$d3],8)",
214
215	"&add	($xc,@x[$d2])",
216	"&xor	(@x[$b2],$xc)",
217	"&rol	(@x[$b2],7)",
218	 "&add	($xc_,@x[$d3])",
219	 "&xor	(@x[$b3],$xc_)",
220	 "&rol	(@x[$b3],7)"
221	);
222}
223
224########################################################################
225# Generic code path that handles all lengths on pre-SSSE3 processors.
226$code.=<<___;
227.globl	ChaCha20_ctr32
228.type	ChaCha20_ctr32,\@function,5
229.align	64
230ChaCha20_ctr32:
231	cmp	\$0,$len
232	je	.Lno_data
233	mov	OPENSSL_ia32cap_P+4(%rip),%r10
234___
235$code.=<<___	if ($avx>2);
236	bt	\$48,%r10		# check for AVX512F
237	jc	.LChaCha20_avx512
238___
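# Bit 41 of OPENSSL_ia32cap_P (bit 9 of the leaf-1 ECX word loaded into
# %r10 above) is the SSSE3 capability flag; SSSE3-capable processors
# dispatch through .LChaCha20_ssse3 and only pre-SSSE3 ones fall through
# to the generic integer path.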
239$code.=<<___;
240	test	\$`1<<(41-32)`,%r10d
241	jnz	.LChaCha20_ssse3
242
243	push	%rbx
244	push	%rbp
245	push	%r12
246	push	%r13
247	push	%r14
248	push	%r15
249	sub	\$64+24,%rsp
250.Lctr32_body:
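	# Stack layout for this generic path: 0-63(%rsp) is a 16-word state
	# scratch area (key words at 16-47, counter/nonce at 48-63; the sigma
	# constants are reloaded as immediates), while 64+0, 64+8 and
	# 64+16(%rsp) hold the saved len, inp and out during the inner loop.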
251
252	#movdqa	.Lsigma(%rip),%xmm0
253	movdqu	($key),%xmm1
254	movdqu	16($key),%xmm2
255	movdqu	($counter),%xmm3
256	movdqa	.Lone(%rip),%xmm4
257
258	#movdqa	%xmm0,4*0(%rsp)		# key[0]
259	movdqa	%xmm1,4*4(%rsp)		# key[1]
260	movdqa	%xmm2,4*8(%rsp)		# key[2]
261	movdqa	%xmm3,4*12(%rsp)	# key[3]
262	mov	$len,%rbp		# reassign $len
263	jmp	.Loop_outer
264
265.align	32
266.Loop_outer:
267	mov	\$0x61707865,@x[0]      # 'expa'
268	mov	\$0x3320646e,@x[1]      # 'nd 3'
269	mov	\$0x79622d32,@x[2]      # '2-by'
270	mov	\$0x6b206574,@x[3]      # 'te k'
271	mov	4*4(%rsp),@x[4]
272	mov	4*5(%rsp),@x[5]
273	mov	4*6(%rsp),@x[6]
274	mov	4*7(%rsp),@x[7]
275	movd	%xmm3,@x[12]
276	mov	4*13(%rsp),@x[13]
277	mov	4*14(%rsp),@x[14]
278	mov	4*15(%rsp),@x[15]
279
280	mov	%rbp,64+0(%rsp)		# save len
281	mov	\$10,%ebp
282	mov	$inp,64+8(%rsp)		# save inp
283	movq	%xmm2,%rsi		# "@x[8]"
284	mov	$out,64+16(%rsp)	# save out
285	mov	%rsi,%rdi
286	shr	\$32,%rdi		# "@x[9]"
287	jmp	.Loop
288
289.align	32
290.Loop:
291___
292	foreach (&ROUND (0, 4, 8,12)) { eval; }
293	foreach (&ROUND	(0, 5,10,15)) { eval; }
294	&dec	("%ebp");
295	&jnz	(".Loop");
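# Ten iterations of the (even, odd) double-round above give the standard
# 20 ChaCha20 rounds per 64-byte block.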
296
297$code.=<<___;
298	mov	@t[1],4*9(%rsp)		# modulo-scheduled
299	mov	@t[0],4*8(%rsp)
300	mov	64(%rsp),%rbp		# load len
301	movdqa	%xmm2,%xmm1
302	mov	64+8(%rsp),$inp		# load inp
303	paddd	%xmm4,%xmm3		# increment counter
304	mov	64+16(%rsp),$out	# load out
305
306	add	\$0x61707865,@x[0]      # 'expa'
307	add	\$0x3320646e,@x[1]      # 'nd 3'
308	add	\$0x79622d32,@x[2]      # '2-by'
309	add	\$0x6b206574,@x[3]      # 'te k'
310	add	4*4(%rsp),@x[4]
311	add	4*5(%rsp),@x[5]
312	add	4*6(%rsp),@x[6]
313	add	4*7(%rsp),@x[7]
314	add	4*12(%rsp),@x[12]
315	add	4*13(%rsp),@x[13]
316	add	4*14(%rsp),@x[14]
317	add	4*15(%rsp),@x[15]
318	paddd	4*8(%rsp),%xmm1
319
320	cmp	\$64,%rbp
321	jb	.Ltail
322
323	xor	4*0($inp),@x[0]		# xor with input
324	xor	4*1($inp),@x[1]
325	xor	4*2($inp),@x[2]
326	xor	4*3($inp),@x[3]
327	xor	4*4($inp),@x[4]
328	xor	4*5($inp),@x[5]
329	xor	4*6($inp),@x[6]
330	xor	4*7($inp),@x[7]
331	movdqu	4*8($inp),%xmm0
332	xor	4*12($inp),@x[12]
333	xor	4*13($inp),@x[13]
334	xor	4*14($inp),@x[14]
335	xor	4*15($inp),@x[15]
336	lea	4*16($inp),$inp		# inp+=64
337	pxor	%xmm1,%xmm0
338
339	movdqa	%xmm2,4*8(%rsp)
340	movd	%xmm3,4*12(%rsp)
341
342	mov	@x[0],4*0($out)		# write output
343	mov	@x[1],4*1($out)
344	mov	@x[2],4*2($out)
345	mov	@x[3],4*3($out)
346	mov	@x[4],4*4($out)
347	mov	@x[5],4*5($out)
348	mov	@x[6],4*6($out)
349	mov	@x[7],4*7($out)
350	movdqu	%xmm0,4*8($out)
351	mov	@x[12],4*12($out)
352	mov	@x[13],4*13($out)
353	mov	@x[14],4*14($out)
354	mov	@x[15],4*15($out)
355	lea	4*16($out),$out		# out+=64
356
357	sub	\$64,%rbp
358	jnz	.Loop_outer
359
360	jmp	.Ldone
361
362.align	16
363.Ltail:
364	mov	@x[0],4*0(%rsp)
365	mov	@x[1],4*1(%rsp)
366	xor	%rbx,%rbx
367	mov	@x[2],4*2(%rsp)
368	mov	@x[3],4*3(%rsp)
369	mov	@x[4],4*4(%rsp)
370	mov	@x[5],4*5(%rsp)
371	mov	@x[6],4*6(%rsp)
372	mov	@x[7],4*7(%rsp)
373	movdqa	%xmm1,4*8(%rsp)
374	mov	@x[12],4*12(%rsp)
375	mov	@x[13],4*13(%rsp)
376	mov	@x[14],4*14(%rsp)
377	mov	@x[15],4*15(%rsp)
378
379.Loop_tail:
380	movzb	($inp,%rbx),%eax
381	movzb	(%rsp,%rbx),%edx
382	lea	1(%rbx),%rbx
383	xor	%edx,%eax
384	mov	%al,-1($out,%rbx)
385	dec	%rbp
386	jnz	.Loop_tail
387
388.Ldone:
389	lea	64+24+48(%rsp),%rsi
390	mov	-48(%rsi),%r15
391	mov	-40(%rsi),%r14
392	mov	-32(%rsi),%r13
393	mov	-24(%rsi),%r12
394	mov	-16(%rsi),%rbp
395	mov	-8(%rsi),%rbx
396	lea	(%rsi),%rsp
397.Lno_data:
398	ret
399.size	ChaCha20_ctr32,.-ChaCha20_ctr32
400___
401
402########################################################################
403# SSSE3 code path that handles shorter lengths
404{
405my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
406
407sub SSSE3ROUND {	# critical path is 20 "SIMD ticks" per round
408	&paddd	($a,$b);
409	&pxor	($d,$a);
410	&pshufb	($d,$rot16);
411
412	&paddd	($c,$d);
413	&pxor	($b,$c);
414	&movdqa	($t,$b);
415	&psrld	($b,20);
416	&pslld	($t,12);
417	&por	($b,$t);
418
419	&paddd	($a,$b);
420	&pxor	($d,$a);
421	&pshufb	($d,$rot24);
422
423	&paddd	($c,$d);
424	&pxor	($b,$c);
425	&movdqa	($t,$b);
426	&psrld	($b,25);
427	&pslld	($t,7);
428	&por	($b,$t);
429}
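# The 16- and 24-bit rotates are done with pshufb byte shuffles against
# .Lrot16/.Lrot24, while the 12- and 7-bit rotates fall back to the
# pslld/psrld/por sequence, since SSSE3 has no vector rotate instruction.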
430
431my $xframe = $win64 ? 32+8 : 8;
432
433$code.=<<___;
434.type	ChaCha20_ssse3,\@function,5
435.align	32
436ChaCha20_ssse3:
437.LChaCha20_ssse3:
438	mov	%rsp,%r9		# frame pointer
439___
440$code.=<<___;
441	cmp	\$128,$len		# we might throw away some data,
442	ja	.LChaCha20_4x		# but overall it won't be slower
443
444.Ldo_sse3_after_all:
445	sub	\$64+$xframe,%rsp
446___
447$code.=<<___	if ($win64);
448	movaps	%xmm6,-0x28(%r9)
449	movaps	%xmm7,-0x18(%r9)
450.Lssse3_body:
451___
452$code.=<<___;
453	movdqa	.Lsigma(%rip),$a
454	movdqu	($key),$b
455	movdqu	16($key),$c
456	movdqu	($counter),$d
457	movdqa	.Lrot16(%rip),$rot16
458	movdqa	.Lrot24(%rip),$rot24
459
460	movdqa	$a,0x00(%rsp)
461	movdqa	$b,0x10(%rsp)
462	movdqa	$c,0x20(%rsp)
463	movdqa	$d,0x30(%rsp)
464	mov	\$10,$counter		# reuse $counter
465	jmp	.Loop_ssse3
466
467.align	32
468.Loop_outer_ssse3:
469	movdqa	.Lone(%rip),$d
470	movdqa	0x00(%rsp),$a
471	movdqa	0x10(%rsp),$b
472	movdqa	0x20(%rsp),$c
473	paddd	0x30(%rsp),$d
474	mov	\$10,$counter
475	movdqa	$d,0x30(%rsp)
476	jmp	.Loop_ssse3
477
478.align	32
479.Loop_ssse3:
480___
481	&SSSE3ROUND();
482	&pshufd	($c,$c,0b01001110);
483	&pshufd	($b,$b,0b00111001);
484	&pshufd	($d,$d,0b10010011);
485	&nop	();
486
487	&SSSE3ROUND();
488	&pshufd	($c,$c,0b01001110);
489	&pshufd	($b,$b,0b10010011);
490	&pshufd	($d,$d,0b00111001);
491
492	&dec	($counter);
493	&jnz	(".Loop_ssse3");
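# The pshufd's emitted above rotate the b, c and d rows within their xmm
# registers (0x39, 0x4e and 0x93 rotate by one, two and three dword
# positions) so the "diagonal" quarter-rounds of the odd round line up
# column-wise; the second set of shuffles puts the rows back in place.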
494
495$code.=<<___;
496	paddd	0x00(%rsp),$a
497	paddd	0x10(%rsp),$b
498	paddd	0x20(%rsp),$c
499	paddd	0x30(%rsp),$d
500
501	cmp	\$64,$len
502	jb	.Ltail_ssse3
503
504	movdqu	0x00($inp),$t
505	movdqu	0x10($inp),$t1
506	pxor	$t,$a			# xor with input
507	movdqu	0x20($inp),$t
508	pxor	$t1,$b
509	movdqu	0x30($inp),$t1
510	lea	0x40($inp),$inp		# inp+=64
511	pxor	$t,$c
512	pxor	$t1,$d
513
514	movdqu	$a,0x00($out)		# write output
515	movdqu	$b,0x10($out)
516	movdqu	$c,0x20($out)
517	movdqu	$d,0x30($out)
518	lea	0x40($out),$out		# out+=64
519
520	sub	\$64,$len
521	jnz	.Loop_outer_ssse3
522
523	jmp	.Ldone_ssse3
524
525.align	16
526.Ltail_ssse3:
527	movdqa	$a,0x00(%rsp)
528	movdqa	$b,0x10(%rsp)
529	movdqa	$c,0x20(%rsp)
530	movdqa	$d,0x30(%rsp)
531	xor	$counter,$counter
532
533.Loop_tail_ssse3:
534	movzb	($inp,$counter),%eax
535	movzb	(%rsp,$counter),%ecx
536	lea	1($counter),$counter
537	xor	%ecx,%eax
538	mov	%al,-1($out,$counter)
539	dec	$len
540	jnz	.Loop_tail_ssse3
541
542.Ldone_ssse3:
543___
544$code.=<<___	if ($win64);
545	movaps	-0x28(%r9),%xmm6
546	movaps	-0x18(%r9),%xmm7
547___
548$code.=<<___;
549	lea	(%r9),%rsp
550.Lssse3_epilogue:
551	ret
552.size	ChaCha20_ssse3,.-ChaCha20_ssse3
553___
554}
555
556########################################################################
557# SSSE3 code path that handles longer messages.
558{
559# assign variables to favor Atom front-end
560my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
561    $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
562my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
563	"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
564
565sub SSSE3_lane_ROUND {
566my ($a0,$b0,$c0,$d0)=@_;
567my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
568my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
569my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
570my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
571my @x=map("\"$_\"",@xx);
572
573	# Consider order in which variables are addressed by their
574	# index:
575	#
576	#	a   b   c   d
577	#
578	#	0   4   8  12 < even round
579	#	1   5   9  13
580	#	2   6  10  14
581	#	3   7  11  15
582	#	0   5  10  15 < odd round
583	#	1   6  11  12
584	#	2   7   8  13
585	#	3   4   9  14
586	#
	# 'a', 'b' and 'd's are permanently allocated in registers,
	# @x[0..7,12..15], while the 'c's are maintained in memory. If
	# you observe the 'c' column, you'll notice that a pair of 'c's
	# is invariant between rounds. This means that we have to reload
	# them only once per round, in the middle. This is why you'll see
	# a bunch of 'c' stores and loads in the middle, but none at
	# the beginning or end.
594
595	(
596	"&paddd		(@x[$a0],@x[$b0])",	# Q1
597	 "&paddd	(@x[$a1],@x[$b1])",	# Q2
598	"&pxor		(@x[$d0],@x[$a0])",
599	 "&pxor		(@x[$d1],@x[$a1])",
600	"&pshufb	(@x[$d0],$t1)",
601	 "&pshufb	(@x[$d1],$t1)",
602
603	"&paddd		($xc,@x[$d0])",
604	 "&paddd	($xc_,@x[$d1])",
605	"&pxor		(@x[$b0],$xc)",
606	 "&pxor		(@x[$b1],$xc_)",
607	"&movdqa	($t0,@x[$b0])",
608	"&pslld		(@x[$b0],12)",
609	"&psrld		($t0,20)",
610	 "&movdqa	($t1,@x[$b1])",
611	 "&pslld	(@x[$b1],12)",
612	"&por		(@x[$b0],$t0)",
613	 "&psrld	($t1,20)",
614	"&movdqa	($t0,'(%r11)')",	# .Lrot24(%rip)
615	 "&por		(@x[$b1],$t1)",
616
617	"&paddd		(@x[$a0],@x[$b0])",
618	 "&paddd	(@x[$a1],@x[$b1])",
619	"&pxor		(@x[$d0],@x[$a0])",
620	 "&pxor		(@x[$d1],@x[$a1])",
621	"&pshufb	(@x[$d0],$t0)",
622	 "&pshufb	(@x[$d1],$t0)",
623
624	"&paddd		($xc,@x[$d0])",
625	 "&paddd	($xc_,@x[$d1])",
626	"&pxor		(@x[$b0],$xc)",
627	 "&pxor		(@x[$b1],$xc_)",
628	"&movdqa	($t1,@x[$b0])",
629	"&pslld		(@x[$b0],7)",
630	"&psrld		($t1,25)",
631	 "&movdqa	($t0,@x[$b1])",
632	 "&pslld	(@x[$b1],7)",
633	"&por		(@x[$b0],$t1)",
634	 "&psrld	($t0,25)",
635	"&movdqa	($t1,'(%r10)')",	# .Lrot16(%rip)
636	 "&por		(@x[$b1],$t0)",
637
638	"&movdqa	(\"`16*($c0-8)`(%rsp)\",$xc)",	# reload pair of 'c's
639	 "&movdqa	(\"`16*($c1-8)`(%rsp)\",$xc_)",
640	"&movdqa	($xc,\"`16*($c2-8)`(%rsp)\")",
641	 "&movdqa	($xc_,\"`16*($c3-8)`(%rsp)\")",
642
643	"&paddd		(@x[$a2],@x[$b2])",	# Q3
644	 "&paddd	(@x[$a3],@x[$b3])",	# Q4
645	"&pxor		(@x[$d2],@x[$a2])",
646	 "&pxor		(@x[$d3],@x[$a3])",
647	"&pshufb	(@x[$d2],$t1)",
648	 "&pshufb	(@x[$d3],$t1)",
649
650	"&paddd		($xc,@x[$d2])",
651	 "&paddd	($xc_,@x[$d3])",
652	"&pxor		(@x[$b2],$xc)",
653	 "&pxor		(@x[$b3],$xc_)",
654	"&movdqa	($t0,@x[$b2])",
655	"&pslld		(@x[$b2],12)",
656	"&psrld		($t0,20)",
657	 "&movdqa	($t1,@x[$b3])",
658	 "&pslld	(@x[$b3],12)",
659	"&por		(@x[$b2],$t0)",
660	 "&psrld	($t1,20)",
661	"&movdqa	($t0,'(%r11)')",	# .Lrot24(%rip)
662	 "&por		(@x[$b3],$t1)",
663
664	"&paddd		(@x[$a2],@x[$b2])",
665	 "&paddd	(@x[$a3],@x[$b3])",
666	"&pxor		(@x[$d2],@x[$a2])",
667	 "&pxor		(@x[$d3],@x[$a3])",
668	"&pshufb	(@x[$d2],$t0)",
669	 "&pshufb	(@x[$d3],$t0)",
670
671	"&paddd		($xc,@x[$d2])",
672	 "&paddd	($xc_,@x[$d3])",
673	"&pxor		(@x[$b2],$xc)",
674	 "&pxor		(@x[$b3],$xc_)",
675	"&movdqa	($t1,@x[$b2])",
676	"&pslld		(@x[$b2],7)",
677	"&psrld		($t1,25)",
678	 "&movdqa	($t0,@x[$b3])",
679	 "&pslld	(@x[$b3],7)",
680	"&por		(@x[$b2],$t1)",
681	 "&psrld	($t0,25)",
682	"&movdqa	($t1,'(%r10)')",	# .Lrot16(%rip)
683	 "&por		(@x[$b3],$t0)"
684	);
685}
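# Note that all sixteen xmm registers are in use here, so the .Lrot16 and
# .Lrot24 shuffle masks cannot stay resident; they are re-fetched through
# %r10 and %r11 (set up to point at the constants) whenever a temporary
# frees up, as the movdqa '(%r10)'/'(%r11)' loads above show.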
686
687my $xframe = $win64 ? 0xa8 : 8;
688
689$code.=<<___;
690.type	ChaCha20_4x,\@function,5
691.align	32
692ChaCha20_4x:
693.LChaCha20_4x:
694	mov		%rsp,%r9		# frame pointer
695	mov		%r10,%r11
696___
697$code.=<<___	if ($avx>1);
698	shr		\$32,%r10		# OPENSSL_ia32cap_P+8
699	test		\$`1<<5`,%r10		# test AVX2
700	jnz		.LChaCha20_8x
701___
702$code.=<<___;
703	cmp		\$192,$len
704	ja		.Lproceed4x
705
706	and		\$`1<<26|1<<22`,%r11	# isolate XSAVE+MOVBE
707	cmp		\$`1<<22`,%r11		# check for MOVBE without XSAVE
708	je		.Ldo_sse3_after_all	# to detect Atom
709
710.Lproceed4x:
711	sub		\$0x140+$xframe,%rsp
712___
713	################ stack layout
714	# +0x00		SIMD equivalent of @x[8-12]
715	# ...
716	# +0x40		constant copy of key[0-2] smashed by lanes
717	# ...
718	# +0x100	SIMD counters (with nonce smashed by lanes)
719	# ...
720	# +0x140
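	# "Smashed by lanes" means each state word is replicated across all
	# four 32-bit lanes of an xmm register (pshufd 0x00/0x55/0xaa/0xff),
	# with .Linc added to the counter word, so lane i of every register
	# belongs to block i and four blocks are processed in parallel.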
721$code.=<<___	if ($win64);
722	movaps		%xmm6,-0xa8(%r9)
723	movaps		%xmm7,-0x98(%r9)
724	movaps		%xmm8,-0x88(%r9)
725	movaps		%xmm9,-0x78(%r9)
726	movaps		%xmm10,-0x68(%r9)
727	movaps		%xmm11,-0x58(%r9)
728	movaps		%xmm12,-0x48(%r9)
729	movaps		%xmm13,-0x38(%r9)
730	movaps		%xmm14,-0x28(%r9)
731	movaps		%xmm15,-0x18(%r9)
732.L4x_body:
733___
734$code.=<<___;
735	movdqa		.Lsigma(%rip),$xa3	# key[0]
736	movdqu		($key),$xb3		# key[1]
737	movdqu		16($key),$xt3		# key[2]
738	movdqu		($counter),$xd3		# key[3]
739	lea		0x100(%rsp),%rcx	# size optimization
740	lea		.Lrot16(%rip),%r10
741	lea		.Lrot24(%rip),%r11
742
743	pshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
744	pshufd		\$0x55,$xa3,$xa1
745	movdqa		$xa0,0x40(%rsp)		# ... and offload
746	pshufd		\$0xaa,$xa3,$xa2
747	movdqa		$xa1,0x50(%rsp)
748	pshufd		\$0xff,$xa3,$xa3
749	movdqa		$xa2,0x60(%rsp)
750	movdqa		$xa3,0x70(%rsp)
751
752	pshufd		\$0x00,$xb3,$xb0
753	pshufd		\$0x55,$xb3,$xb1
754	movdqa		$xb0,0x80-0x100(%rcx)
755	pshufd		\$0xaa,$xb3,$xb2
756	movdqa		$xb1,0x90-0x100(%rcx)
757	pshufd		\$0xff,$xb3,$xb3
758	movdqa		$xb2,0xa0-0x100(%rcx)
759	movdqa		$xb3,0xb0-0x100(%rcx)
760
761	pshufd		\$0x00,$xt3,$xt0	# "$xc0"
762	pshufd		\$0x55,$xt3,$xt1	# "$xc1"
763	movdqa		$xt0,0xc0-0x100(%rcx)
764	pshufd		\$0xaa,$xt3,$xt2	# "$xc2"
765	movdqa		$xt1,0xd0-0x100(%rcx)
766	pshufd		\$0xff,$xt3,$xt3	# "$xc3"
767	movdqa		$xt2,0xe0-0x100(%rcx)
768	movdqa		$xt3,0xf0-0x100(%rcx)
769
770	pshufd		\$0x00,$xd3,$xd0
771	pshufd		\$0x55,$xd3,$xd1
772	paddd		.Linc(%rip),$xd0	# don't save counters yet
773	pshufd		\$0xaa,$xd3,$xd2
774	movdqa		$xd1,0x110-0x100(%rcx)
775	pshufd		\$0xff,$xd3,$xd3
776	movdqa		$xd2,0x120-0x100(%rcx)
777	movdqa		$xd3,0x130-0x100(%rcx)
778
779	jmp		.Loop_enter4x
780
781.align	32
782.Loop_outer4x:
783	movdqa		0x40(%rsp),$xa0		# re-load smashed key
784	movdqa		0x50(%rsp),$xa1
785	movdqa		0x60(%rsp),$xa2
786	movdqa		0x70(%rsp),$xa3
787	movdqa		0x80-0x100(%rcx),$xb0
788	movdqa		0x90-0x100(%rcx),$xb1
789	movdqa		0xa0-0x100(%rcx),$xb2
790	movdqa		0xb0-0x100(%rcx),$xb3
791	movdqa		0xc0-0x100(%rcx),$xt0	# "$xc0"
792	movdqa		0xd0-0x100(%rcx),$xt1	# "$xc1"
793	movdqa		0xe0-0x100(%rcx),$xt2	# "$xc2"
794	movdqa		0xf0-0x100(%rcx),$xt3	# "$xc3"
795	movdqa		0x100-0x100(%rcx),$xd0
796	movdqa		0x110-0x100(%rcx),$xd1
797	movdqa		0x120-0x100(%rcx),$xd2
798	movdqa		0x130-0x100(%rcx),$xd3
799	paddd		.Lfour(%rip),$xd0	# next SIMD counters
800
801.Loop_enter4x:
802	movdqa		$xt2,0x20(%rsp)		# SIMD equivalent of "@x[10]"
803	movdqa		$xt3,0x30(%rsp)		# SIMD equivalent of "@x[11]"
804	movdqa		(%r10),$xt3		# .Lrot16(%rip)
805	mov		\$10,%eax
806	movdqa		$xd0,0x100-0x100(%rcx)	# save SIMD counters
807	jmp		.Loop4x
808
809.align	32
810.Loop4x:
811___
812	foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
813	foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
814$code.=<<___;
815	dec		%eax
816	jnz		.Loop4x
817
818	paddd		0x40(%rsp),$xa0		# accumulate key material
819	paddd		0x50(%rsp),$xa1
820	paddd		0x60(%rsp),$xa2
821	paddd		0x70(%rsp),$xa3
822
823	movdqa		$xa0,$xt2		# "de-interlace" data
824	punpckldq	$xa1,$xa0
825	movdqa		$xa2,$xt3
826	punpckldq	$xa3,$xa2
827	punpckhdq	$xa1,$xt2
828	punpckhdq	$xa3,$xt3
829	movdqa		$xa0,$xa1
830	punpcklqdq	$xa2,$xa0		# "a0"
831	movdqa		$xt2,$xa3
832	punpcklqdq	$xt3,$xt2		# "a2"
833	punpckhqdq	$xa2,$xa1		# "a1"
834	punpckhqdq	$xt3,$xa3		# "a3"
835___
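# The punpck{l,h}dq / punpck{l,h}qdq sequence above is a 4x4 32-bit
# transpose: on entry each register holds one state word across blocks
# 0-3 (one block per lane), on exit "a0".."a3" hold words 0-3 of blocks
# 0, 1, 2 and 3 respectively, i.e. the first 16 output bytes of each block.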
836	($xa2,$xt2)=($xt2,$xa2);
837$code.=<<___;
838	paddd		0x80-0x100(%rcx),$xb0
839	paddd		0x90-0x100(%rcx),$xb1
840	paddd		0xa0-0x100(%rcx),$xb2
841	paddd		0xb0-0x100(%rcx),$xb3
842
843	movdqa		$xa0,0x00(%rsp)		# offload $xaN
844	movdqa		$xa1,0x10(%rsp)
845	movdqa		0x20(%rsp),$xa0		# "xc2"
846	movdqa		0x30(%rsp),$xa1		# "xc3"
847
848	movdqa		$xb0,$xt2
849	punpckldq	$xb1,$xb0
850	movdqa		$xb2,$xt3
851	punpckldq	$xb3,$xb2
852	punpckhdq	$xb1,$xt2
853	punpckhdq	$xb3,$xt3
854	movdqa		$xb0,$xb1
855	punpcklqdq	$xb2,$xb0		# "b0"
856	movdqa		$xt2,$xb3
857	punpcklqdq	$xt3,$xt2		# "b2"
858	punpckhqdq	$xb2,$xb1		# "b1"
859	punpckhqdq	$xt3,$xb3		# "b3"
860___
861	($xb2,$xt2)=($xt2,$xb2);
862	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
863$code.=<<___;
864	paddd		0xc0-0x100(%rcx),$xc0
865	paddd		0xd0-0x100(%rcx),$xc1
866	paddd		0xe0-0x100(%rcx),$xc2
867	paddd		0xf0-0x100(%rcx),$xc3
868
869	movdqa		$xa2,0x20(%rsp)		# keep offloading $xaN
870	movdqa		$xa3,0x30(%rsp)
871
872	movdqa		$xc0,$xt2
873	punpckldq	$xc1,$xc0
874	movdqa		$xc2,$xt3
875	punpckldq	$xc3,$xc2
876	punpckhdq	$xc1,$xt2
877	punpckhdq	$xc3,$xt3
878	movdqa		$xc0,$xc1
879	punpcklqdq	$xc2,$xc0		# "c0"
880	movdqa		$xt2,$xc3
881	punpcklqdq	$xt3,$xt2		# "c2"
882	punpckhqdq	$xc2,$xc1		# "c1"
883	punpckhqdq	$xt3,$xc3		# "c3"
884___
885	($xc2,$xt2)=($xt2,$xc2);
886	($xt0,$xt1)=($xa2,$xa3);		# use $xaN as temporary
887$code.=<<___;
888	paddd		0x100-0x100(%rcx),$xd0
889	paddd		0x110-0x100(%rcx),$xd1
890	paddd		0x120-0x100(%rcx),$xd2
891	paddd		0x130-0x100(%rcx),$xd3
892
893	movdqa		$xd0,$xt2
894	punpckldq	$xd1,$xd0
895	movdqa		$xd2,$xt3
896	punpckldq	$xd3,$xd2
897	punpckhdq	$xd1,$xt2
898	punpckhdq	$xd3,$xt3
899	movdqa		$xd0,$xd1
900	punpcklqdq	$xd2,$xd0		# "d0"
901	movdqa		$xt2,$xd3
902	punpcklqdq	$xt3,$xt2		# "d2"
903	punpckhqdq	$xd2,$xd1		# "d1"
904	punpckhqdq	$xt3,$xd3		# "d3"
905___
906	($xd2,$xt2)=($xt2,$xd2);
907$code.=<<___;
908	cmp		\$64*4,$len
909	jb		.Ltail4x
910
911	movdqu		0x00($inp),$xt0		# xor with input
912	movdqu		0x10($inp),$xt1
913	movdqu		0x20($inp),$xt2
914	movdqu		0x30($inp),$xt3
915	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
916	pxor		$xb0,$xt1
917	pxor		$xc0,$xt2
918	pxor		$xd0,$xt3
919
920	 movdqu		$xt0,0x00($out)
921	movdqu		0x40($inp),$xt0
922	 movdqu		$xt1,0x10($out)
923	movdqu		0x50($inp),$xt1
924	 movdqu		$xt2,0x20($out)
925	movdqu		0x60($inp),$xt2
926	 movdqu		$xt3,0x30($out)
927	movdqu		0x70($inp),$xt3
928	lea		0x80($inp),$inp		# size optimization
929	pxor		0x10(%rsp),$xt0
930	pxor		$xb1,$xt1
931	pxor		$xc1,$xt2
932	pxor		$xd1,$xt3
933
934	 movdqu		$xt0,0x40($out)
935	movdqu		0x00($inp),$xt0
936	 movdqu		$xt1,0x50($out)
937	movdqu		0x10($inp),$xt1
938	 movdqu		$xt2,0x60($out)
939	movdqu		0x20($inp),$xt2
940	 movdqu		$xt3,0x70($out)
941	 lea		0x80($out),$out		# size optimization
942	movdqu		0x30($inp),$xt3
943	pxor		0x20(%rsp),$xt0
944	pxor		$xb2,$xt1
945	pxor		$xc2,$xt2
946	pxor		$xd2,$xt3
947
948	 movdqu		$xt0,0x00($out)
949	movdqu		0x40($inp),$xt0
950	 movdqu		$xt1,0x10($out)
951	movdqu		0x50($inp),$xt1
952	 movdqu		$xt2,0x20($out)
953	movdqu		0x60($inp),$xt2
954	 movdqu		$xt3,0x30($out)
955	movdqu		0x70($inp),$xt3
956	lea		0x80($inp),$inp		# inp+=64*4
957	pxor		0x30(%rsp),$xt0
958	pxor		$xb3,$xt1
959	pxor		$xc3,$xt2
960	pxor		$xd3,$xt3
961	movdqu		$xt0,0x40($out)
962	movdqu		$xt1,0x50($out)
963	movdqu		$xt2,0x60($out)
964	movdqu		$xt3,0x70($out)
965	lea		0x80($out),$out		# out+=64*4
966
967	sub		\$64*4,$len
968	jnz		.Loop_outer4x
969
970	jmp		.Ldone4x
971
972.Ltail4x:
973	cmp		\$192,$len
974	jae		.L192_or_more4x
975	cmp		\$128,$len
976	jae		.L128_or_more4x
977	cmp		\$64,$len
978	jae		.L64_or_more4x
979
980	#movdqa		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
981	xor		%r10,%r10
982	#movdqa		$xt0,0x00(%rsp)
983	movdqa		$xb0,0x10(%rsp)
984	movdqa		$xc0,0x20(%rsp)
985	movdqa		$xd0,0x30(%rsp)
986	jmp		.Loop_tail4x
987
988.align	32
989.L64_or_more4x:
990	movdqu		0x00($inp),$xt0		# xor with input
991	movdqu		0x10($inp),$xt1
992	movdqu		0x20($inp),$xt2
993	movdqu		0x30($inp),$xt3
	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
995	pxor		$xb0,$xt1
996	pxor		$xc0,$xt2
997	pxor		$xd0,$xt3
998	movdqu		$xt0,0x00($out)
999	movdqu		$xt1,0x10($out)
1000	movdqu		$xt2,0x20($out)
1001	movdqu		$xt3,0x30($out)
1002	je		.Ldone4x
1003
1004	movdqa		0x10(%rsp),$xt0		# $xaN is offloaded, remember?
1005	lea		0x40($inp),$inp		# inp+=64*1
1006	xor		%r10,%r10
1007	movdqa		$xt0,0x00(%rsp)
1008	movdqa		$xb1,0x10(%rsp)
1009	lea		0x40($out),$out		# out+=64*1
1010	movdqa		$xc1,0x20(%rsp)
1011	sub		\$64,$len		# len-=64*1
1012	movdqa		$xd1,0x30(%rsp)
1013	jmp		.Loop_tail4x
1014
1015.align	32
1016.L128_or_more4x:
1017	movdqu		0x00($inp),$xt0		# xor with input
1018	movdqu		0x10($inp),$xt1
1019	movdqu		0x20($inp),$xt2
1020	movdqu		0x30($inp),$xt3
1021	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
1022	pxor		$xb0,$xt1
1023	pxor		$xc0,$xt2
1024	pxor		$xd0,$xt3
1025
1026	 movdqu		$xt0,0x00($out)
1027	movdqu		0x40($inp),$xt0
1028	 movdqu		$xt1,0x10($out)
1029	movdqu		0x50($inp),$xt1
1030	 movdqu		$xt2,0x20($out)
1031	movdqu		0x60($inp),$xt2
1032	 movdqu		$xt3,0x30($out)
1033	movdqu		0x70($inp),$xt3
1034	pxor		0x10(%rsp),$xt0
1035	pxor		$xb1,$xt1
1036	pxor		$xc1,$xt2
1037	pxor		$xd1,$xt3
1038	movdqu		$xt0,0x40($out)
1039	movdqu		$xt1,0x50($out)
1040	movdqu		$xt2,0x60($out)
1041	movdqu		$xt3,0x70($out)
1042	je		.Ldone4x
1043
1044	movdqa		0x20(%rsp),$xt0		# $xaN is offloaded, remember?
1045	lea		0x80($inp),$inp		# inp+=64*2
1046	xor		%r10,%r10
1047	movdqa		$xt0,0x00(%rsp)
1048	movdqa		$xb2,0x10(%rsp)
1049	lea		0x80($out),$out		# out+=64*2
1050	movdqa		$xc2,0x20(%rsp)
1051	sub		\$128,$len		# len-=64*2
1052	movdqa		$xd2,0x30(%rsp)
1053	jmp		.Loop_tail4x
1054
1055.align	32
1056.L192_or_more4x:
1057	movdqu		0x00($inp),$xt0		# xor with input
1058	movdqu		0x10($inp),$xt1
1059	movdqu		0x20($inp),$xt2
1060	movdqu		0x30($inp),$xt3
1061	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
1062	pxor		$xb0,$xt1
1063	pxor		$xc0,$xt2
1064	pxor		$xd0,$xt3
1065
1066	 movdqu		$xt0,0x00($out)
1067	movdqu		0x40($inp),$xt0
1068	 movdqu		$xt1,0x10($out)
1069	movdqu		0x50($inp),$xt1
1070	 movdqu		$xt2,0x20($out)
1071	movdqu		0x60($inp),$xt2
1072	 movdqu		$xt3,0x30($out)
1073	movdqu		0x70($inp),$xt3
1074	lea		0x80($inp),$inp		# size optimization
1075	pxor		0x10(%rsp),$xt0
1076	pxor		$xb1,$xt1
1077	pxor		$xc1,$xt2
1078	pxor		$xd1,$xt3
1079
1080	 movdqu		$xt0,0x40($out)
1081	movdqu		0x00($inp),$xt0
1082	 movdqu		$xt1,0x50($out)
1083	movdqu		0x10($inp),$xt1
1084	 movdqu		$xt2,0x60($out)
1085	movdqu		0x20($inp),$xt2
1086	 movdqu		$xt3,0x70($out)
1087	 lea		0x80($out),$out		# size optimization
1088	movdqu		0x30($inp),$xt3
1089	pxor		0x20(%rsp),$xt0
1090	pxor		$xb2,$xt1
1091	pxor		$xc2,$xt2
1092	pxor		$xd2,$xt3
1093	movdqu		$xt0,0x00($out)
1094	movdqu		$xt1,0x10($out)
1095	movdqu		$xt2,0x20($out)
1096	movdqu		$xt3,0x30($out)
1097	je		.Ldone4x
1098
1099	movdqa		0x30(%rsp),$xt0		# $xaN is offloaded, remember?
1100	lea		0x40($inp),$inp		# inp+=64*3
1101	xor		%r10,%r10
1102	movdqa		$xt0,0x00(%rsp)
1103	movdqa		$xb3,0x10(%rsp)
1104	lea		0x40($out),$out		# out+=64*3
1105	movdqa		$xc3,0x20(%rsp)
1106	sub		\$192,$len		# len-=64*3
1107	movdqa		$xd3,0x30(%rsp)
1108
1109.Loop_tail4x:
1110	movzb		($inp,%r10),%eax
1111	movzb		(%rsp,%r10),%ecx
1112	lea		1(%r10),%r10
1113	xor		%ecx,%eax
1114	mov		%al,-1($out,%r10)
1115	dec		$len
1116	jnz		.Loop_tail4x
1117
1118.Ldone4x:
1119___
1120$code.=<<___	if ($win64);
1121	movaps		-0xa8(%r9),%xmm6
1122	movaps		-0x98(%r9),%xmm7
1123	movaps		-0x88(%r9),%xmm8
1124	movaps		-0x78(%r9),%xmm9
1125	movaps		-0x68(%r9),%xmm10
1126	movaps		-0x58(%r9),%xmm11
1127	movaps		-0x48(%r9),%xmm12
1128	movaps		-0x38(%r9),%xmm13
1129	movaps		-0x28(%r9),%xmm14
1130	movaps		-0x18(%r9),%xmm15
1131___
1132$code.=<<___;
1133	lea		(%r9),%rsp
1134.L4x_epilogue:
1135	ret
1136.size	ChaCha20_4x,.-ChaCha20_4x
1137___
1138}
1139
1140########################################################################
1141# AVX2 code path
1142if ($avx>1) {
1143my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1144    $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
1145my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1146	"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
1147
1148sub AVX2_lane_ROUND {
1149my ($a0,$b0,$c0,$d0)=@_;
1150my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1151my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1152my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1153my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
1154my @x=map("\"$_\"",@xx);
1155
1156	# Consider order in which variables are addressed by their
1157	# index:
1158	#
1159	#	a   b   c   d
1160	#
1161	#	0   4   8  12 < even round
1162	#	1   5   9  13
1163	#	2   6  10  14
1164	#	3   7  11  15
1165	#	0   5  10  15 < odd round
1166	#	1   6  11  12
1167	#	2   7   8  13
1168	#	3   4   9  14
1169	#
	# 'a', 'b' and 'd's are permanently allocated in registers,
	# @x[0..7,12..15], while the 'c's are maintained in memory. If
	# you observe the 'c' column, you'll notice that a pair of 'c's
	# is invariant between rounds. This means that we have to reload
	# them only once per round, in the middle. This is why you'll see
	# a bunch of 'c' stores and loads in the middle, but none at
	# the beginning or end.
1177
1178	(
1179	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
1180	"&vpxor		(@x[$d0],@x[$a0],@x[$d0])",
1181	"&vpshufb	(@x[$d0],@x[$d0],$t1)",
1182	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
1183	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
1184	 "&vpshufb	(@x[$d1],@x[$d1],$t1)",
1185
1186	"&vpaddd	($xc,$xc,@x[$d0])",
1187	"&vpxor		(@x[$b0],$xc,@x[$b0])",
1188	"&vpslld	($t0,@x[$b0],12)",
1189	"&vpsrld	(@x[$b0],@x[$b0],20)",
1190	"&vpor		(@x[$b0],$t0,@x[$b0])",
1191	"&vbroadcasti128($t0,'(%r11)')",		# .Lrot24(%rip)
1192	 "&vpaddd	($xc_,$xc_,@x[$d1])",
1193	 "&vpxor	(@x[$b1],$xc_,@x[$b1])",
1194	 "&vpslld	($t1,@x[$b1],12)",
1195	 "&vpsrld	(@x[$b1],@x[$b1],20)",
1196	 "&vpor		(@x[$b1],$t1,@x[$b1])",
1197
1198	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",
1199	"&vpxor		(@x[$d0],@x[$a0],@x[$d0])",
1200	"&vpshufb	(@x[$d0],@x[$d0],$t0)",
1201	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",
1202	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
1203	 "&vpshufb	(@x[$d1],@x[$d1],$t0)",
1204
1205	"&vpaddd	($xc,$xc,@x[$d0])",
1206	"&vpxor		(@x[$b0],$xc,@x[$b0])",
1207	"&vpslld	($t1,@x[$b0],7)",
1208	"&vpsrld	(@x[$b0],@x[$b0],25)",
1209	"&vpor		(@x[$b0],$t1,@x[$b0])",
1210	"&vbroadcasti128($t1,'(%r10)')",		# .Lrot16(%rip)
1211	 "&vpaddd	($xc_,$xc_,@x[$d1])",
1212	 "&vpxor	(@x[$b1],$xc_,@x[$b1])",
1213	 "&vpslld	($t0,@x[$b1],7)",
1214	 "&vpsrld	(@x[$b1],@x[$b1],25)",
1215	 "&vpor		(@x[$b1],$t0,@x[$b1])",
1216
1217	"&vmovdqa	(\"`32*($c0-8)`(%rsp)\",$xc)",	# reload pair of 'c's
1218	 "&vmovdqa	(\"`32*($c1-8)`(%rsp)\",$xc_)",
1219	"&vmovdqa	($xc,\"`32*($c2-8)`(%rsp)\")",
1220	 "&vmovdqa	($xc_,\"`32*($c3-8)`(%rsp)\")",
1221
1222	"&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
1223	"&vpxor		(@x[$d2],@x[$a2],@x[$d2])",
1224	"&vpshufb	(@x[$d2],@x[$d2],$t1)",
1225	 "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
1226	 "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
1227	 "&vpshufb	(@x[$d3],@x[$d3],$t1)",
1228
1229	"&vpaddd	($xc,$xc,@x[$d2])",
1230	"&vpxor		(@x[$b2],$xc,@x[$b2])",
1231	"&vpslld	($t0,@x[$b2],12)",
1232	"&vpsrld	(@x[$b2],@x[$b2],20)",
1233	"&vpor		(@x[$b2],$t0,@x[$b2])",
1234	"&vbroadcasti128($t0,'(%r11)')",		# .Lrot24(%rip)
1235	 "&vpaddd	($xc_,$xc_,@x[$d3])",
1236	 "&vpxor	(@x[$b3],$xc_,@x[$b3])",
1237	 "&vpslld	($t1,@x[$b3],12)",
1238	 "&vpsrld	(@x[$b3],@x[$b3],20)",
1239	 "&vpor		(@x[$b3],$t1,@x[$b3])",
1240
1241	"&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",
1242	"&vpxor		(@x[$d2],@x[$a2],@x[$d2])",
1243	"&vpshufb	(@x[$d2],@x[$d2],$t0)",
1244	 "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",
1245	 "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
1246	 "&vpshufb	(@x[$d3],@x[$d3],$t0)",
1247
1248	"&vpaddd	($xc,$xc,@x[$d2])",
1249	"&vpxor		(@x[$b2],$xc,@x[$b2])",
1250	"&vpslld	($t1,@x[$b2],7)",
1251	"&vpsrld	(@x[$b2],@x[$b2],25)",
1252	"&vpor		(@x[$b2],$t1,@x[$b2])",
1253	"&vbroadcasti128($t1,'(%r10)')",		# .Lrot16(%rip)
1254	 "&vpaddd	($xc_,$xc_,@x[$d3])",
1255	 "&vpxor	(@x[$b3],$xc_,@x[$b3])",
1256	 "&vpslld	($t0,@x[$b3],7)",
1257	 "&vpsrld	(@x[$b3],@x[$b3],25)",
1258	 "&vpor		(@x[$b3],$t0,@x[$b3])"
1259	);
1260}
1261
1262my $xframe = $win64 ? 0xa8 : 8;
1263
1264$code.=<<___;
1265.type	ChaCha20_8x,\@function,5
1266.align	32
1267ChaCha20_8x:
1268.LChaCha20_8x:
1269	mov		%rsp,%r9		# frame register
1270	sub		\$0x280+$xframe,%rsp
1271	and		\$-32,%rsp
1272___
1273$code.=<<___	if ($win64);
1274	movaps		%xmm6,-0xa8(%r9)
1275	movaps		%xmm7,-0x98(%r9)
1276	movaps		%xmm8,-0x88(%r9)
1277	movaps		%xmm9,-0x78(%r9)
1278	movaps		%xmm10,-0x68(%r9)
1279	movaps		%xmm11,-0x58(%r9)
1280	movaps		%xmm12,-0x48(%r9)
1281	movaps		%xmm13,-0x38(%r9)
1282	movaps		%xmm14,-0x28(%r9)
1283	movaps		%xmm15,-0x18(%r9)
1284.L8x_body:
1285___
1286$code.=<<___;
1287	vzeroupper
1288
1289	################ stack layout
1290	# +0x00		SIMD equivalent of @x[8-12]
1291	# ...
1292	# +0x80		constant copy of key[0-2] smashed by lanes
1293	# ...
1294	# +0x200	SIMD counters (with nonce smashed by lanes)
1295	# ...
1296	# +0x280
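	# Same scheme as ChaCha20_4x, but with 256-bit registers: each ymm
	# holds one state word replicated across eight lanes, so eight
	# 64-byte blocks (512 bytes) are produced per outer-loop iteration.
	# .Lincy supplies the eight per-lane counter offsets, in the lane
	# order the later vperm2i128 de-interleaving expects.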
1297
1298	vbroadcasti128	.Lsigma(%rip),$xa3	# key[0]
1299	vbroadcasti128	($key),$xb3		# key[1]
1300	vbroadcasti128	16($key),$xt3		# key[2]
1301	vbroadcasti128	($counter),$xd3		# key[3]
1302	lea		0x100(%rsp),%rcx	# size optimization
1303	lea		0x200(%rsp),%rax	# size optimization
1304	lea		.Lrot16(%rip),%r10
1305	lea		.Lrot24(%rip),%r11
1306
1307	vpshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
1308	vpshufd		\$0x55,$xa3,$xa1
1309	vmovdqa		$xa0,0x80-0x100(%rcx)	# ... and offload
1310	vpshufd		\$0xaa,$xa3,$xa2
1311	vmovdqa		$xa1,0xa0-0x100(%rcx)
1312	vpshufd		\$0xff,$xa3,$xa3
1313	vmovdqa		$xa2,0xc0-0x100(%rcx)
1314	vmovdqa		$xa3,0xe0-0x100(%rcx)
1315
1316	vpshufd		\$0x00,$xb3,$xb0
1317	vpshufd		\$0x55,$xb3,$xb1
1318	vmovdqa		$xb0,0x100-0x100(%rcx)
1319	vpshufd		\$0xaa,$xb3,$xb2
1320	vmovdqa		$xb1,0x120-0x100(%rcx)
1321	vpshufd		\$0xff,$xb3,$xb3
1322	vmovdqa		$xb2,0x140-0x100(%rcx)
1323	vmovdqa		$xb3,0x160-0x100(%rcx)
1324
1325	vpshufd		\$0x00,$xt3,$xt0	# "xc0"
1326	vpshufd		\$0x55,$xt3,$xt1	# "xc1"
1327	vmovdqa		$xt0,0x180-0x200(%rax)
1328	vpshufd		\$0xaa,$xt3,$xt2	# "xc2"
1329	vmovdqa		$xt1,0x1a0-0x200(%rax)
1330	vpshufd		\$0xff,$xt3,$xt3	# "xc3"
1331	vmovdqa		$xt2,0x1c0-0x200(%rax)
1332	vmovdqa		$xt3,0x1e0-0x200(%rax)
1333
1334	vpshufd		\$0x00,$xd3,$xd0
1335	vpshufd		\$0x55,$xd3,$xd1
1336	vpaddd		.Lincy(%rip),$xd0,$xd0	# don't save counters yet
1337	vpshufd		\$0xaa,$xd3,$xd2
1338	vmovdqa		$xd1,0x220-0x200(%rax)
1339	vpshufd		\$0xff,$xd3,$xd3
1340	vmovdqa		$xd2,0x240-0x200(%rax)
1341	vmovdqa		$xd3,0x260-0x200(%rax)
1342
1343	jmp		.Loop_enter8x
1344
1345.align	32
1346.Loop_outer8x:
1347	vmovdqa		0x80-0x100(%rcx),$xa0	# re-load smashed key
1348	vmovdqa		0xa0-0x100(%rcx),$xa1
1349	vmovdqa		0xc0-0x100(%rcx),$xa2
1350	vmovdqa		0xe0-0x100(%rcx),$xa3
1351	vmovdqa		0x100-0x100(%rcx),$xb0
1352	vmovdqa		0x120-0x100(%rcx),$xb1
1353	vmovdqa		0x140-0x100(%rcx),$xb2
1354	vmovdqa		0x160-0x100(%rcx),$xb3
1355	vmovdqa		0x180-0x200(%rax),$xt0	# "xc0"
1356	vmovdqa		0x1a0-0x200(%rax),$xt1	# "xc1"
1357	vmovdqa		0x1c0-0x200(%rax),$xt2	# "xc2"
1358	vmovdqa		0x1e0-0x200(%rax),$xt3	# "xc3"
1359	vmovdqa		0x200-0x200(%rax),$xd0
1360	vmovdqa		0x220-0x200(%rax),$xd1
1361	vmovdqa		0x240-0x200(%rax),$xd2
1362	vmovdqa		0x260-0x200(%rax),$xd3
1363	vpaddd		.Leight(%rip),$xd0,$xd0	# next SIMD counters
1364
1365.Loop_enter8x:
1366	vmovdqa		$xt2,0x40(%rsp)		# SIMD equivalent of "@x[10]"
1367	vmovdqa		$xt3,0x60(%rsp)		# SIMD equivalent of "@x[11]"
1368	vbroadcasti128	(%r10),$xt3
1369	vmovdqa		$xd0,0x200-0x200(%rax)	# save SIMD counters
1370	mov		\$10,%eax
1371	jmp		.Loop8x
1372
1373.align	32
1374.Loop8x:
1375___
1376	foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
1377	foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
1378$code.=<<___;
1379	dec		%eax
1380	jnz		.Loop8x
1381
1382	lea		0x200(%rsp),%rax	# size optimization
1383	vpaddd		0x80-0x100(%rcx),$xa0,$xa0	# accumulate key
1384	vpaddd		0xa0-0x100(%rcx),$xa1,$xa1
1385	vpaddd		0xc0-0x100(%rcx),$xa2,$xa2
1386	vpaddd		0xe0-0x100(%rcx),$xa3,$xa3
1387
1388	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
1389	vpunpckldq	$xa3,$xa2,$xt3
1390	vpunpckhdq	$xa1,$xa0,$xa0
1391	vpunpckhdq	$xa3,$xa2,$xa2
1392	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
1393	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
1394	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
1395	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"
1396___
1397	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1398$code.=<<___;
1399	vpaddd		0x100-0x100(%rcx),$xb0,$xb0
1400	vpaddd		0x120-0x100(%rcx),$xb1,$xb1
1401	vpaddd		0x140-0x100(%rcx),$xb2,$xb2
1402	vpaddd		0x160-0x100(%rcx),$xb3,$xb3
1403
1404	vpunpckldq	$xb1,$xb0,$xt2
1405	vpunpckldq	$xb3,$xb2,$xt3
1406	vpunpckhdq	$xb1,$xb0,$xb0
1407	vpunpckhdq	$xb3,$xb2,$xb2
1408	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
1409	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
1410	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
1411	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"
1412___
1413	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1414$code.=<<___;
1415	vperm2i128	\$0x20,$xb0,$xa0,$xt3	# "de-interlace" further
1416	vperm2i128	\$0x31,$xb0,$xa0,$xb0
1417	vperm2i128	\$0x20,$xb1,$xa1,$xa0
1418	vperm2i128	\$0x31,$xb1,$xa1,$xb1
1419	vperm2i128	\$0x20,$xb2,$xa2,$xa1
1420	vperm2i128	\$0x31,$xb2,$xa2,$xb2
1421	vperm2i128	\$0x20,$xb3,$xa3,$xa2
1422	vperm2i128	\$0x31,$xb3,$xa3,$xb3
1423___
1424	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
1425	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1426$code.=<<___;
1427	vmovdqa		$xa0,0x00(%rsp)		# offload $xaN
1428	vmovdqa		$xa1,0x20(%rsp)
1429	vmovdqa		0x40(%rsp),$xc2		# $xa0
1430	vmovdqa		0x60(%rsp),$xc3		# $xa1
1431
1432	vpaddd		0x180-0x200(%rax),$xc0,$xc0
1433	vpaddd		0x1a0-0x200(%rax),$xc1,$xc1
1434	vpaddd		0x1c0-0x200(%rax),$xc2,$xc2
1435	vpaddd		0x1e0-0x200(%rax),$xc3,$xc3
1436
1437	vpunpckldq	$xc1,$xc0,$xt2
1438	vpunpckldq	$xc3,$xc2,$xt3
1439	vpunpckhdq	$xc1,$xc0,$xc0
1440	vpunpckhdq	$xc3,$xc2,$xc2
1441	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
1442	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
1443	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
1444	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"
1445___
1446	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1447$code.=<<___;
1448	vpaddd		0x200-0x200(%rax),$xd0,$xd0
1449	vpaddd		0x220-0x200(%rax),$xd1,$xd1
1450	vpaddd		0x240-0x200(%rax),$xd2,$xd2
1451	vpaddd		0x260-0x200(%rax),$xd3,$xd3
1452
1453	vpunpckldq	$xd1,$xd0,$xt2
1454	vpunpckldq	$xd3,$xd2,$xt3
1455	vpunpckhdq	$xd1,$xd0,$xd0
1456	vpunpckhdq	$xd3,$xd2,$xd2
1457	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
1458	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
1459	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
1460	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"
1461___
1462	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1463$code.=<<___;
1464	vperm2i128	\$0x20,$xd0,$xc0,$xt3	# "de-interlace" further
1465	vperm2i128	\$0x31,$xd0,$xc0,$xd0
1466	vperm2i128	\$0x20,$xd1,$xc1,$xc0
1467	vperm2i128	\$0x31,$xd1,$xc1,$xd1
1468	vperm2i128	\$0x20,$xd2,$xc2,$xc1
1469	vperm2i128	\$0x31,$xd2,$xc2,$xd2
1470	vperm2i128	\$0x20,$xd3,$xc3,$xc2
1471	vperm2i128	\$0x31,$xd3,$xc3,$xd3
1472___
1473	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
1474	($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
1475	($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
1476	($xa0,$xa1)=($xt2,$xt3);
1477$code.=<<___;
1478	vmovdqa		0x00(%rsp),$xa0		# $xaN was offloaded, remember?
1479	vmovdqa		0x20(%rsp),$xa1
1480
1481	cmp		\$64*8,$len
1482	jb		.Ltail8x
1483
1484	vpxor		0x00($inp),$xa0,$xa0	# xor with input
1485	vpxor		0x20($inp),$xb0,$xb0
1486	vpxor		0x40($inp),$xc0,$xc0
1487	vpxor		0x60($inp),$xd0,$xd0
1488	lea		0x80($inp),$inp		# size optimization
1489	vmovdqu		$xa0,0x00($out)
1490	vmovdqu		$xb0,0x20($out)
1491	vmovdqu		$xc0,0x40($out)
1492	vmovdqu		$xd0,0x60($out)
1493	lea		0x80($out),$out		# size optimization
1494
1495	vpxor		0x00($inp),$xa1,$xa1
1496	vpxor		0x20($inp),$xb1,$xb1
1497	vpxor		0x40($inp),$xc1,$xc1
1498	vpxor		0x60($inp),$xd1,$xd1
1499	lea		0x80($inp),$inp		# size optimization
1500	vmovdqu		$xa1,0x00($out)
1501	vmovdqu		$xb1,0x20($out)
1502	vmovdqu		$xc1,0x40($out)
1503	vmovdqu		$xd1,0x60($out)
1504	lea		0x80($out),$out		# size optimization
1505
1506	vpxor		0x00($inp),$xa2,$xa2
1507	vpxor		0x20($inp),$xb2,$xb2
1508	vpxor		0x40($inp),$xc2,$xc2
1509	vpxor		0x60($inp),$xd2,$xd2
1510	lea		0x80($inp),$inp		# size optimization
1511	vmovdqu		$xa2,0x00($out)
1512	vmovdqu		$xb2,0x20($out)
1513	vmovdqu		$xc2,0x40($out)
1514	vmovdqu		$xd2,0x60($out)
1515	lea		0x80($out),$out		# size optimization
1516
1517	vpxor		0x00($inp),$xa3,$xa3
1518	vpxor		0x20($inp),$xb3,$xb3
1519	vpxor		0x40($inp),$xc3,$xc3
1520	vpxor		0x60($inp),$xd3,$xd3
1521	lea		0x80($inp),$inp		# size optimization
1522	vmovdqu		$xa3,0x00($out)
1523	vmovdqu		$xb3,0x20($out)
1524	vmovdqu		$xc3,0x40($out)
1525	vmovdqu		$xd3,0x60($out)
1526	lea		0x80($out),$out		# size optimization
1527
1528	sub		\$64*8,$len
1529	jnz		.Loop_outer8x
1530
1531	jmp		.Ldone8x
1532
1533.Ltail8x:
1534	cmp		\$448,$len
1535	jae		.L448_or_more8x
1536	cmp		\$384,$len
1537	jae		.L384_or_more8x
1538	cmp		\$320,$len
1539	jae		.L320_or_more8x
1540	cmp		\$256,$len
1541	jae		.L256_or_more8x
1542	cmp		\$192,$len
1543	jae		.L192_or_more8x
1544	cmp		\$128,$len
1545	jae		.L128_or_more8x
1546	cmp		\$64,$len
1547	jae		.L64_or_more8x
1548
1549	xor		%r10,%r10
1550	vmovdqa		$xa0,0x00(%rsp)
1551	vmovdqa		$xb0,0x20(%rsp)
1552	jmp		.Loop_tail8x
1553
1554.align	32
1555.L64_or_more8x:
1556	vpxor		0x00($inp),$xa0,$xa0	# xor with input
1557	vpxor		0x20($inp),$xb0,$xb0
1558	vmovdqu		$xa0,0x00($out)
1559	vmovdqu		$xb0,0x20($out)
1560	je		.Ldone8x
1561
1562	lea		0x40($inp),$inp		# inp+=64*1
1563	xor		%r10,%r10
1564	vmovdqa		$xc0,0x00(%rsp)
1565	lea		0x40($out),$out		# out+=64*1
1566	sub		\$64,$len		# len-=64*1
1567	vmovdqa		$xd0,0x20(%rsp)
1568	jmp		.Loop_tail8x
1569
1570.align	32
1571.L128_or_more8x:
1572	vpxor		0x00($inp),$xa0,$xa0	# xor with input
1573	vpxor		0x20($inp),$xb0,$xb0
1574	vpxor		0x40($inp),$xc0,$xc0
1575	vpxor		0x60($inp),$xd0,$xd0
1576	vmovdqu		$xa0,0x00($out)
1577	vmovdqu		$xb0,0x20($out)
1578	vmovdqu		$xc0,0x40($out)
1579	vmovdqu		$xd0,0x60($out)
1580	je		.Ldone8x
1581
1582	lea		0x80($inp),$inp		# inp+=64*2
1583	xor		%r10,%r10
1584	vmovdqa		$xa1,0x00(%rsp)
1585	lea		0x80($out),$out		# out+=64*2
1586	sub		\$128,$len		# len-=64*2
1587	vmovdqa		$xb1,0x20(%rsp)
1588	jmp		.Loop_tail8x
1589
1590.align	32
1591.L192_or_more8x:
1592	vpxor		0x00($inp),$xa0,$xa0	# xor with input
1593	vpxor		0x20($inp),$xb0,$xb0
1594	vpxor		0x40($inp),$xc0,$xc0
1595	vpxor		0x60($inp),$xd0,$xd0
1596	vpxor		0x80($inp),$xa1,$xa1
1597	vpxor		0xa0($inp),$xb1,$xb1
1598	vmovdqu		$xa0,0x00($out)
1599	vmovdqu		$xb0,0x20($out)
1600	vmovdqu		$xc0,0x40($out)
1601	vmovdqu		$xd0,0x60($out)
1602	vmovdqu		$xa1,0x80($out)
1603	vmovdqu		$xb1,0xa0($out)
1604	je		.Ldone8x
1605
1606	lea		0xc0($inp),$inp		# inp+=64*3
1607	xor		%r10,%r10
1608	vmovdqa		$xc1,0x00(%rsp)
1609	lea		0xc0($out),$out		# out+=64*3
1610	sub		\$192,$len		# len-=64*3
1611	vmovdqa		$xd1,0x20(%rsp)
1612	jmp		.Loop_tail8x
1613
1614.align	32
1615.L256_or_more8x:
1616	vpxor		0x00($inp),$xa0,$xa0	# xor with input
1617	vpxor		0x20($inp),$xb0,$xb0
1618	vpxor		0x40($inp),$xc0,$xc0
1619	vpxor		0x60($inp),$xd0,$xd0
1620	vpxor		0x80($inp),$xa1,$xa1
1621	vpxor		0xa0($inp),$xb1,$xb1
1622	vpxor		0xc0($inp),$xc1,$xc1
1623	vpxor		0xe0($inp),$xd1,$xd1
1624	vmovdqu		$xa0,0x00($out)
1625	vmovdqu		$xb0,0x20($out)
1626	vmovdqu		$xc0,0x40($out)
1627	vmovdqu		$xd0,0x60($out)
1628	vmovdqu		$xa1,0x80($out)
1629	vmovdqu		$xb1,0xa0($out)
1630	vmovdqu		$xc1,0xc0($out)
1631	vmovdqu		$xd1,0xe0($out)
1632	je		.Ldone8x
1633
1634	lea		0x100($inp),$inp	# inp+=64*4
1635	xor		%r10,%r10
1636	vmovdqa		$xa2,0x00(%rsp)
1637	lea		0x100($out),$out	# out+=64*4
1638	sub		\$256,$len		# len-=64*4
1639	vmovdqa		$xb2,0x20(%rsp)
1640	jmp		.Loop_tail8x
1641
1642.align	32
1643.L320_or_more8x:
1644	vpxor		0x00($inp),$xa0,$xa0	# xor with input
1645	vpxor		0x20($inp),$xb0,$xb0
1646	vpxor		0x40($inp),$xc0,$xc0
1647	vpxor		0x60($inp),$xd0,$xd0
1648	vpxor		0x80($inp),$xa1,$xa1
1649	vpxor		0xa0($inp),$xb1,$xb1
1650	vpxor		0xc0($inp),$xc1,$xc1
1651	vpxor		0xe0($inp),$xd1,$xd1
1652	vpxor		0x100($inp),$xa2,$xa2
1653	vpxor		0x120($inp),$xb2,$xb2
1654	vmovdqu		$xa0,0x00($out)
1655	vmovdqu		$xb0,0x20($out)
1656	vmovdqu		$xc0,0x40($out)
1657	vmovdqu		$xd0,0x60($out)
1658	vmovdqu		$xa1,0x80($out)
1659	vmovdqu		$xb1,0xa0($out)
1660	vmovdqu		$xc1,0xc0($out)
1661	vmovdqu		$xd1,0xe0($out)
1662	vmovdqu		$xa2,0x100($out)
1663	vmovdqu		$xb2,0x120($out)
1664	je		.Ldone8x
1665
1666	lea		0x140($inp),$inp	# inp+=64*5
1667	xor		%r10,%r10
1668	vmovdqa		$xc2,0x00(%rsp)
1669	lea		0x140($out),$out	# out+=64*5
1670	sub		\$320,$len		# len-=64*5
1671	vmovdqa		$xd2,0x20(%rsp)
1672	jmp		.Loop_tail8x
1673
1674.align	32
1675.L384_or_more8x:
1676	vpxor		0x00($inp),$xa0,$xa0	# xor with input
1677	vpxor		0x20($inp),$xb0,$xb0
1678	vpxor		0x40($inp),$xc0,$xc0
1679	vpxor		0x60($inp),$xd0,$xd0
1680	vpxor		0x80($inp),$xa1,$xa1
1681	vpxor		0xa0($inp),$xb1,$xb1
1682	vpxor		0xc0($inp),$xc1,$xc1
1683	vpxor		0xe0($inp),$xd1,$xd1
1684	vpxor		0x100($inp),$xa2,$xa2
1685	vpxor		0x120($inp),$xb2,$xb2
1686	vpxor		0x140($inp),$xc2,$xc2
1687	vpxor		0x160($inp),$xd2,$xd2
1688	vmovdqu		$xa0,0x00($out)
1689	vmovdqu		$xb0,0x20($out)
1690	vmovdqu		$xc0,0x40($out)
1691	vmovdqu		$xd0,0x60($out)
1692	vmovdqu		$xa1,0x80($out)
1693	vmovdqu		$xb1,0xa0($out)
1694	vmovdqu		$xc1,0xc0($out)
1695	vmovdqu		$xd1,0xe0($out)
1696	vmovdqu		$xa2,0x100($out)
1697	vmovdqu		$xb2,0x120($out)
1698	vmovdqu		$xc2,0x140($out)
1699	vmovdqu		$xd2,0x160($out)
1700	je		.Ldone8x
1701
1702	lea		0x180($inp),$inp	# inp+=64*6
1703	xor		%r10,%r10
1704	vmovdqa		$xa3,0x00(%rsp)
1705	lea		0x180($out),$out	# out+=64*6
1706	sub		\$384,$len		# len-=64*6
1707	vmovdqa		$xb3,0x20(%rsp)
1708	jmp		.Loop_tail8x
1709
1710.align	32
1711.L448_or_more8x:
1712	vpxor		0x00($inp),$xa0,$xa0	# xor with input
1713	vpxor		0x20($inp),$xb0,$xb0
1714	vpxor		0x40($inp),$xc0,$xc0
1715	vpxor		0x60($inp),$xd0,$xd0
1716	vpxor		0x80($inp),$xa1,$xa1
1717	vpxor		0xa0($inp),$xb1,$xb1
1718	vpxor		0xc0($inp),$xc1,$xc1
1719	vpxor		0xe0($inp),$xd1,$xd1
1720	vpxor		0x100($inp),$xa2,$xa2
1721	vpxor		0x120($inp),$xb2,$xb2
1722	vpxor		0x140($inp),$xc2,$xc2
1723	vpxor		0x160($inp),$xd2,$xd2
1724	vpxor		0x180($inp),$xa3,$xa3
1725	vpxor		0x1a0($inp),$xb3,$xb3
1726	vmovdqu		$xa0,0x00($out)
1727	vmovdqu		$xb0,0x20($out)
1728	vmovdqu		$xc0,0x40($out)
1729	vmovdqu		$xd0,0x60($out)
1730	vmovdqu		$xa1,0x80($out)
1731	vmovdqu		$xb1,0xa0($out)
1732	vmovdqu		$xc1,0xc0($out)
1733	vmovdqu		$xd1,0xe0($out)
1734	vmovdqu		$xa2,0x100($out)
1735	vmovdqu		$xb2,0x120($out)
1736	vmovdqu		$xc2,0x140($out)
1737	vmovdqu		$xd2,0x160($out)
1738	vmovdqu		$xa3,0x180($out)
1739	vmovdqu		$xb3,0x1a0($out)
1740	je		.Ldone8x
1741
1742	lea		0x1c0($inp),$inp	# inp+=64*7
1743	xor		%r10,%r10
1744	vmovdqa		$xc3,0x00(%rsp)
1745	lea		0x1c0($out),$out	# out+=64*7
1746	sub		\$448,$len		# len-=64*7
1747	vmovdqa		$xd3,0x20(%rsp)
1748
1749.Loop_tail8x:
1750	movzb		($inp,%r10),%eax
1751	movzb		(%rsp,%r10),%ecx
1752	lea		1(%r10),%r10
1753	xor		%ecx,%eax
1754	mov		%al,-1($out,%r10)
1755	dec		$len
1756	jnz		.Loop_tail8x
1757
1758.Ldone8x:
1759	vzeroall
1760___
1761$code.=<<___	if ($win64);
1762	movaps		-0xa8(%r9),%xmm6
1763	movaps		-0x98(%r9),%xmm7
1764	movaps		-0x88(%r9),%xmm8
1765	movaps		-0x78(%r9),%xmm9
1766	movaps		-0x68(%r9),%xmm10
1767	movaps		-0x58(%r9),%xmm11
1768	movaps		-0x48(%r9),%xmm12
1769	movaps		-0x38(%r9),%xmm13
1770	movaps		-0x28(%r9),%xmm14
1771	movaps		-0x18(%r9),%xmm15
1772___
1773$code.=<<___;
1774	lea		(%r9),%rsp
1775.L8x_epilogue:
1776	ret
1777.size	ChaCha20_8x,.-ChaCha20_8x
1778___
1779}
1780
1781########################################################################
1782# AVX512 code paths
1783if ($avx>2) {
1784# This one handles shorter inputs...
1785
1786my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
1787my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
1788
1789sub AVX512ROUND {	# critical path is 14 "SIMD ticks" per round
1790	&vpaddd	($a,$a,$b);
1791	&vpxord	($d,$d,$a);
1792	&vprold	($d,$d,16);
1793
1794	&vpaddd	($c,$c,$d);
1795	&vpxord	($b,$b,$c);
1796	&vprold	($b,$b,12);
1797
1798	&vpaddd	($a,$a,$b);
1799	&vpxord	($d,$d,$a);
1800	&vprold	($d,$d,8);
1801
1802	&vpaddd	($c,$c,$d);
1803	&vpxord	($b,$b,$c);
1804	&vprold	($b,$b,7);
1805}
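# AVX512 has a vector rotate (vprold), so each rotate is a single
# instruction instead of the pshufb or shift/shift/or sequences used in
# the SSSE3 and AVX2 paths, which is what shortens the critical path to
# the 14 "SIMD ticks" noted above.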
1806
1807my $xframe = $win64 ? 32+8 : 8;
1808
1809$code.=<<___;
1810.type	ChaCha20_avx512,\@function,5
1811.align	32
1812ChaCha20_avx512:
1813.LChaCha20_avx512:
1814	mov	%rsp,%r9		# frame pointer
1815	cmp	\$512,$len
1816	ja	.LChaCha20_16x
1817
1818	sub	\$64+$xframe,%rsp
1819___
1820$code.=<<___	if ($win64);
1821	movaps	%xmm6,-0x28(%r9)
1822	movaps	%xmm7,-0x18(%r9)
1823.Lavx512_body:
1824___
1825$code.=<<___;
1826	vbroadcasti32x4	.Lsigma(%rip),$a
1827	vbroadcasti32x4	($key),$b
1828	vbroadcasti32x4	16($key),$c
1829	vbroadcasti32x4	($counter),$d
1830
1831	vmovdqa32	$a,$a_
1832	vmovdqa32	$b,$b_
1833	vmovdqa32	$c,$c_
1834	vpaddd		.Lzeroz(%rip),$d,$d
1835	vmovdqa32	.Lfourz(%rip),$fourz
1836	mov		\$10,$counter	# reuse $counter
1837	vmovdqa32	$d,$d_
1838	jmp		.Loop_avx512
1839
1840.align	16
1841.Loop_outer_avx512:
1842	vmovdqa32	$a_,$a
1843	vmovdqa32	$b_,$b
1844	vmovdqa32	$c_,$c
1845	vpaddd		$fourz,$d_,$d
1846	mov		\$10,$counter
1847	vmovdqa32	$d,$d_
1848	jmp		.Loop_avx512
1849
1850.align	32
1851.Loop_avx512:
1852___
1853	&AVX512ROUND();
1854	&vpshufd	($c,$c,0b01001110);
1855	&vpshufd	($b,$b,0b00111001);
1856	&vpshufd	($d,$d,0b10010011);
1857
1858	&AVX512ROUND();
1859	&vpshufd	($c,$c,0b01001110);
1860	&vpshufd	($b,$b,0b10010011);
1861	&vpshufd	($d,$d,0b00111001);
1862
1863	&dec		($counter);
1864	&jnz		(".Loop_avx512");
1865
1866$code.=<<___;
1867	vpaddd		$a_,$a,$a
1868	vpaddd		$b_,$b,$b
1869	vpaddd		$c_,$c,$c
1870	vpaddd		$d_,$d,$d
1871
1872	sub		\$64,$len
1873	jb		.Ltail64_avx512
1874
1875	vpxor		0x00($inp),%x#$a,$t0	# xor with input
1876	vpxor		0x10($inp),%x#$b,$t1
1877	vpxor		0x20($inp),%x#$c,$t2
1878	vpxor		0x30($inp),%x#$d,$t3
1879	lea		0x40($inp),$inp		# inp+=64
1880
1881	vmovdqu		$t0,0x00($out)		# write output
1882	vmovdqu		$t1,0x10($out)
1883	vmovdqu		$t2,0x20($out)
1884	vmovdqu		$t3,0x30($out)
1885	lea		0x40($out),$out		# out+=64
1886
1887	jz		.Ldone_avx512
1888
1889	vextracti32x4	\$1,$a,$t0
1890	vextracti32x4	\$1,$b,$t1
1891	vextracti32x4	\$1,$c,$t2
1892	vextracti32x4	\$1,$d,$t3
1893
1894	sub		\$64,$len
1895	jb		.Ltail_avx512
1896
1897	vpxor		0x00($inp),$t0,$t0	# xor with input
1898	vpxor		0x10($inp),$t1,$t1
1899	vpxor		0x20($inp),$t2,$t2
1900	vpxor		0x30($inp),$t3,$t3
1901	lea		0x40($inp),$inp		# inp+=64
1902
1903	vmovdqu		$t0,0x00($out)		# write output
1904	vmovdqu		$t1,0x10($out)
1905	vmovdqu		$t2,0x20($out)
1906	vmovdqu		$t3,0x30($out)
1907	lea		0x40($out),$out		# out+=64
1908
1909	jz		.Ldone_avx512
1910
1911	vextracti32x4	\$2,$a,$t0
1912	vextracti32x4	\$2,$b,$t1
1913	vextracti32x4	\$2,$c,$t2
1914	vextracti32x4	\$2,$d,$t3
1915
1916	sub		\$64,$len
1917	jb		.Ltail_avx512
1918
1919	vpxor		0x00($inp),$t0,$t0	# xor with input
1920	vpxor		0x10($inp),$t1,$t1
1921	vpxor		0x20($inp),$t2,$t2
1922	vpxor		0x30($inp),$t3,$t3
1923	lea		0x40($inp),$inp		# inp+=64
1924
1925	vmovdqu		$t0,0x00($out)		# write output
1926	vmovdqu		$t1,0x10($out)
1927	vmovdqu		$t2,0x20($out)
1928	vmovdqu		$t3,0x30($out)
1929	lea		0x40($out),$out		# out+=64
1930
1931	jz		.Ldone_avx512
1932
1933	vextracti32x4	\$3,$a,$t0
1934	vextracti32x4	\$3,$b,$t1
1935	vextracti32x4	\$3,$c,$t2
1936	vextracti32x4	\$3,$d,$t3
1937
1938	sub		\$64,$len
1939	jb		.Ltail_avx512
1940
1941	vpxor		0x00($inp),$t0,$t0	# xor with input
1942	vpxor		0x10($inp),$t1,$t1
1943	vpxor		0x20($inp),$t2,$t2
1944	vpxor		0x30($inp),$t3,$t3
1945	lea		0x40($inp),$inp		# inp+=64
1946
1947	vmovdqu		$t0,0x00($out)		# write output
1948	vmovdqu		$t1,0x10($out)
1949	vmovdqu		$t2,0x20($out)
1950	vmovdqu		$t3,0x30($out)
1951	lea		0x40($out),$out		# out+=64
1952
1953	jnz		.Loop_outer_avx512
1954
1955	jmp		.Ldone_avx512
1956
1957.align	16
1958.Ltail64_avx512:
1959	vmovdqa		%x#$a,0x00(%rsp)
1960	vmovdqa		%x#$b,0x10(%rsp)
1961	vmovdqa		%x#$c,0x20(%rsp)
1962	vmovdqa		%x#$d,0x30(%rsp)
1963	add		\$64,$len
1964	jmp		.Loop_tail_avx512
1965
1966.align	16
1967.Ltail_avx512:
1968	vmovdqa		$t0,0x00(%rsp)
1969	vmovdqa		$t1,0x10(%rsp)
1970	vmovdqa		$t2,0x20(%rsp)
1971	vmovdqa		$t3,0x30(%rsp)
1972	add		\$64,$len
1973
1974.Loop_tail_avx512:
1975	movzb		($inp,$counter),%eax
1976	movzb		(%rsp,$counter),%ecx
1977	lea		1($counter),$counter
1978	xor		%ecx,%eax
1979	mov		%al,-1($out,$counter)
1980	dec		$len
1981	jnz		.Loop_tail_avx512
1982
1983	vmovdqa32	$a_,0x00(%rsp)
1984
1985.Ldone_avx512:
1986	vzeroall
1987___
1988$code.=<<___	if ($win64);
1989	movaps	-0x28(%r9),%xmm6
1990	movaps	-0x18(%r9),%xmm7
1991___
1992$code.=<<___;
1993	lea	(%r9),%rsp
1994.Lavx512_epilogue:
1995	ret
1996.size	ChaCha20_avx512,.-ChaCha20_avx512
1997___
1998}
1999if ($avx>2) {
2000# This one handles longer inputs...
2001
2002my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2003    $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
2004my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2005	 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
2006my @key=map("%zmm$_",(16..31));
2007my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
2008
sub AVX512_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my @x=map("\"$_\"",@xx);

	(
	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
	  "&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
	   "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
	"&vpxord	(@x[$d0],@x[$d0],@x[$a0])",
	 "&vpxord	(@x[$d1],@x[$d1],@x[$a1])",
	  "&vpxord	(@x[$d2],@x[$d2],@x[$a2])",
	   "&vpxord	(@x[$d3],@x[$d3],@x[$a3])",
	"&vprold	(@x[$d0],@x[$d0],16)",
	 "&vprold	(@x[$d1],@x[$d1],16)",
	  "&vprold	(@x[$d2],@x[$d2],16)",
	   "&vprold	(@x[$d3],@x[$d3],16)",

	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
	 "&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
	  "&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
	   "&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
	"&vpxord	(@x[$b0],@x[$b0],@x[$c0])",
	 "&vpxord	(@x[$b1],@x[$b1],@x[$c1])",
	  "&vpxord	(@x[$b2],@x[$b2],@x[$c2])",
	   "&vpxord	(@x[$b3],@x[$b3],@x[$c3])",
	"&vprold	(@x[$b0],@x[$b0],12)",
	 "&vprold	(@x[$b1],@x[$b1],12)",
	  "&vprold	(@x[$b2],@x[$b2],12)",
	   "&vprold	(@x[$b3],@x[$b3],12)",

	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",
	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",
	  "&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",
	   "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",
	"&vpxord	(@x[$d0],@x[$d0],@x[$a0])",
	 "&vpxord	(@x[$d1],@x[$d1],@x[$a1])",
	  "&vpxord	(@x[$d2],@x[$d2],@x[$a2])",
	   "&vpxord	(@x[$d3],@x[$d3],@x[$a3])",
	"&vprold	(@x[$d0],@x[$d0],8)",
	 "&vprold	(@x[$d1],@x[$d1],8)",
	  "&vprold	(@x[$d2],@x[$d2],8)",
	   "&vprold	(@x[$d3],@x[$d3],8)",

	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
	 "&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
	  "&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
	   "&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
	"&vpxord	(@x[$b0],@x[$b0],@x[$c0])",
	 "&vpxord	(@x[$b1],@x[$b1],@x[$c1])",
	  "&vpxord	(@x[$b2],@x[$b2],@x[$c2])",
	   "&vpxord	(@x[$b3],@x[$b3],@x[$c3])",
	"&vprold	(@x[$b0],@x[$b0],7)",
	 "&vprold	(@x[$b1],@x[$b1],7)",
	  "&vprold	(@x[$b2],@x[$b2],7)",
	   "&vprold	(@x[$b3],@x[$b3],7)"
	);
}

my $xframe = $win64 ? 0xa8 : 8;

$code.=<<___;
.type	ChaCha20_16x,\@function,5
.align	32
ChaCha20_16x:
.LChaCha20_16x:
	mov		%rsp,%r9		# frame register
	sub		\$64+$xframe,%rsp
	and		\$-64,%rsp
___
$code.=<<___	if ($win64);
	movaps		%xmm6,-0xa8(%r9)
	movaps		%xmm7,-0x98(%r9)
	movaps		%xmm8,-0x88(%r9)
	movaps		%xmm9,-0x78(%r9)
	movaps		%xmm10,-0x68(%r9)
	movaps		%xmm11,-0x58(%r9)
	movaps		%xmm12,-0x48(%r9)
	movaps		%xmm13,-0x38(%r9)
	movaps		%xmm14,-0x28(%r9)
	movaps		%xmm15,-0x18(%r9)
.L16x_body:
___
$code.=<<___;
	vzeroupper

	lea		.Lsigma(%rip),%r10
	vbroadcasti32x4	(%r10),$xa3		# key[0]
	vbroadcasti32x4	($key),$xb3		# key[1]
	vbroadcasti32x4	16($key),$xc3		# key[2]
	vbroadcasti32x4	($counter),$xd3		# key[3]

	vpshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
	vpshufd		\$0x55,$xa3,$xa1
	vpshufd		\$0xaa,$xa3,$xa2
	vpshufd		\$0xff,$xa3,$xa3
	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]

	vpshufd		\$0x00,$xb3,$xb0
	vpshufd		\$0x55,$xb3,$xb1
	vpshufd		\$0xaa,$xb3,$xb2
	vpshufd		\$0xff,$xb3,$xb3
	vmovdqa64	$xb0,@key[4]
	vmovdqa64	$xb1,@key[5]
	vmovdqa64	$xb2,@key[6]
	vmovdqa64	$xb3,@key[7]

	vpshufd		\$0x00,$xc3,$xc0
	vpshufd		\$0x55,$xc3,$xc1
	vpshufd		\$0xaa,$xc3,$xc2
	vpshufd		\$0xff,$xc3,$xc3
	vmovdqa64	$xc0,@key[8]
	vmovdqa64	$xc1,@key[9]
	vmovdqa64	$xc2,@key[10]
	vmovdqa64	$xc3,@key[11]

	vpshufd		\$0x00,$xd3,$xd0
	vpshufd		\$0x55,$xd3,$xd1
	vpshufd		\$0xaa,$xd3,$xd2
	vpshufd		\$0xff,$xd3,$xd3
	vpaddd		.Lincz(%rip),$xd0,$xd0	# don't save counters yet
	vmovdqa64	$xd0,@key[12]
	vmovdqa64	$xd1,@key[13]
	vmovdqa64	$xd2,@key[14]
	vmovdqa64	$xd3,@key[15]

	mov		\$10,%eax
	jmp		.Loop16x

.align	32
.Loop_outer16x:
	vpbroadcastd	0(%r10),$xa0		# reload key
	vpbroadcastd	4(%r10),$xa1
	vpbroadcastd	8(%r10),$xa2
	vpbroadcastd	12(%r10),$xa3
	vpaddd		.Lsixteen(%rip),@key[12],@key[12]	# next SIMD counters
	vmovdqa64	@key[4],$xb0
	vmovdqa64	@key[5],$xb1
	vmovdqa64	@key[6],$xb2
	vmovdqa64	@key[7],$xb3
	vmovdqa64	@key[8],$xc0
	vmovdqa64	@key[9],$xc1
	vmovdqa64	@key[10],$xc2
	vmovdqa64	@key[11],$xc3
	vmovdqa64	@key[12],$xd0
	vmovdqa64	@key[13],$xd1
	vmovdqa64	@key[14],$xd2
	vmovdqa64	@key[15],$xd3

	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]

	mov		\$10,%eax
	jmp		.Loop16x

.align	32
.Loop16x:
___
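# Each pass through .Loop16x evaluates one column round (lanes 0,4,8,12)
# followed by one diagonal round (lanes 0,5,10,15); %eax counts ten such
# double rounds, i.e. the full 20 ChaCha rounds.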
	foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	dec		%eax
	jnz		.Loop16x

	vpaddd		@key[0],$xa0,$xa0	# accumulate key
	vpaddd		@key[1],$xa1,$xa1
	vpaddd		@key[2],$xa2,$xa2
	vpaddd		@key[3],$xa3,$xa3

	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
	vpunpckldq	$xa3,$xa2,$xt3
	vpunpckhdq	$xa1,$xa0,$xa0
	vpunpckhdq	$xa3,$xa2,$xa2
	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"
___
	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
$code.=<<___;
	vpaddd		@key[4],$xb0,$xb0
	vpaddd		@key[5],$xb1,$xb1
	vpaddd		@key[6],$xb2,$xb2
	vpaddd		@key[7],$xb3,$xb3

	vpunpckldq	$xb1,$xb0,$xt2
	vpunpckldq	$xb3,$xb2,$xt3
	vpunpckhdq	$xb1,$xb0,$xb0
	vpunpckhdq	$xb3,$xb2,$xb2
	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"
___
	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
$code.=<<___;
	vshufi32x4	\$0x44,$xb0,$xa0,$xt3	# "de-interlace" further
	vshufi32x4	\$0xee,$xb0,$xa0,$xb0
	vshufi32x4	\$0x44,$xb1,$xa1,$xa0
	vshufi32x4	\$0xee,$xb1,$xa1,$xb1
	vshufi32x4	\$0x44,$xb2,$xa2,$xa1
	vshufi32x4	\$0xee,$xb2,$xa2,$xb2
	vshufi32x4	\$0x44,$xb3,$xa3,$xa2
	vshufi32x4	\$0xee,$xb3,$xa3,$xb3
___
	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
$code.=<<___;
	vpaddd		@key[8],$xc0,$xc0
	vpaddd		@key[9],$xc1,$xc1
	vpaddd		@key[10],$xc2,$xc2
	vpaddd		@key[11],$xc3,$xc3

	vpunpckldq	$xc1,$xc0,$xt2
	vpunpckldq	$xc3,$xc2,$xt3
	vpunpckhdq	$xc1,$xc0,$xc0
	vpunpckhdq	$xc3,$xc2,$xc2
	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"
___
	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
$code.=<<___;
	vpaddd		@key[12],$xd0,$xd0
	vpaddd		@key[13],$xd1,$xd1
	vpaddd		@key[14],$xd2,$xd2
	vpaddd		@key[15],$xd3,$xd3

	vpunpckldq	$xd1,$xd0,$xt2
	vpunpckldq	$xd3,$xd2,$xt3
	vpunpckhdq	$xd1,$xd0,$xd0
	vpunpckhdq	$xd3,$xd2,$xd2
	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"
___
	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
$code.=<<___;
	vshufi32x4	\$0x44,$xd0,$xc0,$xt3	# "de-interlace" further
	vshufi32x4	\$0xee,$xd0,$xc0,$xd0
	vshufi32x4	\$0x44,$xd1,$xc1,$xc0
	vshufi32x4	\$0xee,$xd1,$xc1,$xd1
	vshufi32x4	\$0x44,$xd2,$xc2,$xc1
	vshufi32x4	\$0xee,$xd2,$xc2,$xd2
	vshufi32x4	\$0x44,$xd3,$xc3,$xc2
	vshufi32x4	\$0xee,$xd3,$xc3,$xd3
___
	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
$code.=<<___;
	vshufi32x4	\$0x88,$xc0,$xa0,$xt0	# "de-interlace" further
	vshufi32x4	\$0xdd,$xc0,$xa0,$xa0
	 vshufi32x4	\$0x88,$xd0,$xb0,$xc0
	 vshufi32x4	\$0xdd,$xd0,$xb0,$xd0
	vshufi32x4	\$0x88,$xc1,$xa1,$xt1
	vshufi32x4	\$0xdd,$xc1,$xa1,$xa1
	 vshufi32x4	\$0x88,$xd1,$xb1,$xc1
	 vshufi32x4	\$0xdd,$xd1,$xb1,$xd1
	vshufi32x4	\$0x88,$xc2,$xa2,$xt2
	vshufi32x4	\$0xdd,$xc2,$xa2,$xa2
	 vshufi32x4	\$0x88,$xd2,$xb2,$xc2
	 vshufi32x4	\$0xdd,$xd2,$xb2,$xd2
	vshufi32x4	\$0x88,$xc3,$xa3,$xt3
	vshufi32x4	\$0xdd,$xc3,$xa3,$xa3
	 vshufi32x4	\$0x88,$xd3,$xb3,$xc3
	 vshufi32x4	\$0xdd,$xd3,$xb3,$xd3
___
	($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
	($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);

	($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
	 $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
	($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
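# After the transposes above, $xa0..$xd3 hold the sixteen 64-byte key-stream
# blocks in output order, ready to be XOR-ed against the input 64 bytes at a
# time below.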
$code.=<<___;
	cmp		\$64*16,$len
	jb		.Ltail16x

	vpxord		0x00($inp),$xa0,$xa0	# xor with input
	vpxord		0x40($inp),$xb0,$xb0
	vpxord		0x80($inp),$xc0,$xc0
	vpxord		0xc0($inp),$xd0,$xd0
	vmovdqu32	$xa0,0x00($out)
	vmovdqu32	$xb0,0x40($out)
	vmovdqu32	$xc0,0x80($out)
	vmovdqu32	$xd0,0xc0($out)

	vpxord		0x100($inp),$xa1,$xa1
	vpxord		0x140($inp),$xb1,$xb1
	vpxord		0x180($inp),$xc1,$xc1
	vpxord		0x1c0($inp),$xd1,$xd1
	vmovdqu32	$xa1,0x100($out)
	vmovdqu32	$xb1,0x140($out)
	vmovdqu32	$xc1,0x180($out)
	vmovdqu32	$xd1,0x1c0($out)

	vpxord		0x200($inp),$xa2,$xa2
	vpxord		0x240($inp),$xb2,$xb2
	vpxord		0x280($inp),$xc2,$xc2
	vpxord		0x2c0($inp),$xd2,$xd2
	vmovdqu32	$xa2,0x200($out)
	vmovdqu32	$xb2,0x240($out)
	vmovdqu32	$xc2,0x280($out)
	vmovdqu32	$xd2,0x2c0($out)

	vpxord		0x300($inp),$xa3,$xa3
	vpxord		0x340($inp),$xb3,$xb3
	vpxord		0x380($inp),$xc3,$xc3
	vpxord		0x3c0($inp),$xd3,$xd3
	lea		0x400($inp),$inp
	vmovdqu32	$xa3,0x300($out)
	vmovdqu32	$xb3,0x340($out)
	vmovdqu32	$xc3,0x380($out)
	vmovdqu32	$xd3,0x3c0($out)
	lea		0x400($out),$out

	sub		\$64*16,$len
	jnz		.Loop_outer16x

	jmp		.Ldone16x

.align	32
.Ltail16x:
	xor		%r10,%r10
	sub		$inp,$out
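	# $out now holds the distance out-in, so the ($out,$inp) indexed
	# stores below address the correct output byte while only $inp is
	# advanced between the 64-byte chunks.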
	cmp		\$64*1,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa0,$xa0	# xor with input
	vmovdqu32	$xa0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb0,$xa0
	lea		64($inp),$inp

	cmp		\$64*2,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb0,$xb0
	vmovdqu32	$xb0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc0,$xa0
	lea		64($inp),$inp

	cmp		\$64*3,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc0,$xc0
	vmovdqu32	$xc0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd0,$xa0
	lea		64($inp),$inp

	cmp		\$64*4,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd0,$xd0
	vmovdqu32	$xd0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa1,$xa0
	lea		64($inp),$inp

	cmp		\$64*5,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa1,$xa1
	vmovdqu32	$xa1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb1,$xa0
	lea		64($inp),$inp

	cmp		\$64*6,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb1,$xb1
	vmovdqu32	$xb1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc1,$xa0
	lea		64($inp),$inp

	cmp		\$64*7,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc1,$xc1
	vmovdqu32	$xc1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd1,$xa0
	lea		64($inp),$inp

	cmp		\$64*8,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd1,$xd1
	vmovdqu32	$xd1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa2,$xa0
	lea		64($inp),$inp

	cmp		\$64*9,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa2,$xa2
	vmovdqu32	$xa2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb2,$xa0
	lea		64($inp),$inp

	cmp		\$64*10,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb2,$xb2
	vmovdqu32	$xb2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc2,$xa0
	lea		64($inp),$inp

	cmp		\$64*11,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc2,$xc2
	vmovdqu32	$xc2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd2,$xa0
	lea		64($inp),$inp

	cmp		\$64*12,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd2,$xd2
	vmovdqu32	$xd2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa3,$xa0
	lea		64($inp),$inp

	cmp		\$64*13,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa3,$xa3
	vmovdqu32	$xa3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb3,$xa0
	lea		64($inp),$inp

	cmp		\$64*14,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb3,$xb3
	vmovdqu32	$xb3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc3,$xa0
	lea		64($inp),$inp

	cmp		\$64*15,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc3,$xc3
	vmovdqu32	$xc3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd3,$xa0
	lea		64($inp),$inp

.Less_than_64_16x:
	vmovdqa32	$xa0,0x00(%rsp)
	lea		($out,$inp),$out
	and		\$63,$len

.Loop_tail16x:
	movzb		($inp,%r10),%eax
	movzb		(%rsp,%r10),%ecx
	lea		1(%r10),%r10
	xor		%ecx,%eax
	mov		%al,-1($out,%r10)
	dec		$len
	jnz		.Loop_tail16x

	vpxord		$xa0,$xa0,$xa0
	vmovdqa32	$xa0,0(%rsp)

.Ldone16x:
	vzeroall
___
$code.=<<___	if ($win64);
	movaps		-0xa8(%r9),%xmm6
	movaps		-0x98(%r9),%xmm7
	movaps		-0x88(%r9),%xmm8
	movaps		-0x78(%r9),%xmm9
	movaps		-0x68(%r9),%xmm10
	movaps		-0x58(%r9),%xmm11
	movaps		-0x48(%r9),%xmm12
	movaps		-0x38(%r9),%xmm13
	movaps		-0x28(%r9),%xmm14
	movaps		-0x18(%r9),%xmm15
___
$code.=<<___;
	lea		(%r9),%rsp
.L16x_epilogue:
	ret
.size	ChaCha20_16x,.-ChaCha20_16x
___
}

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
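# The handlers below compare the faulting RIP against the prologue/epilogue
# labels recorded in HandlerData, recover the saved non-volatile GPR/XMM
# state from the frame, and copy it back into *context before deferring to
# RtlVirtualUnwind (returning ExceptionContinueSearch).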
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	lea	.Lctr32_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lno_data(%rip),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	lea	64+24+48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.type	ssse3_handler,\@abi-omnipotent
.align	16
ssse3_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0x28(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$4,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	ssse3_handler,.-ssse3_handler

.type	full_handler,\@abi-omnipotent
.align	16
full_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0xa8(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	full_handler,.-full_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_ChaCha20_ctr32
	.rva	.LSEH_end_ChaCha20_ctr32
	.rva	.LSEH_info_ChaCha20_ctr32

	.rva	.LSEH_begin_ChaCha20_ssse3
	.rva	.LSEH_end_ChaCha20_ssse3
	.rva	.LSEH_info_ChaCha20_ssse3

	.rva	.LSEH_begin_ChaCha20_4x
	.rva	.LSEH_end_ChaCha20_4x
	.rva	.LSEH_info_ChaCha20_4x
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_ChaCha20_8x
	.rva	.LSEH_end_ChaCha20_8x
	.rva	.LSEH_info_ChaCha20_8x
___
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_ChaCha20_avx512
	.rva	.LSEH_end_ChaCha20_avx512
	.rva	.LSEH_info_ChaCha20_avx512

	.rva	.LSEH_begin_ChaCha20_16x
	.rva	.LSEH_end_ChaCha20_16x
	.rva	.LSEH_info_ChaCha20_16x
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_ChaCha20_ctr32:
	.byte	9,0,0,0
	.rva	se_handler

.LSEH_info_ChaCha20_ssse3:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lssse3_body,.Lssse3_epilogue

.LSEH_info_ChaCha20_4x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L4x_body,.L4x_epilogue
___
$code.=<<___ if ($avx>1);
.LSEH_info_ChaCha20_8x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L8x_body,.L8x_epilogue			# HandlerData[]
___
$code.=<<___ if ($avx>2);
.LSEH_info_ChaCha20_avx512:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lavx512_body,.Lavx512_epilogue		# HandlerData[]

.LSEH_info_ChaCha20_16x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L16x_body,.L16x_epilogue		# HandlerData[]
___
}

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/%x#%[yz]/%x/g;	# "down-shift"

	print $_,"\n";
}

close STDOUT;