x86_64-mont.pl revision 337982
1#!/usr/bin/env perl
2
3# ====================================================================
4# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9
10# October 2005.
11#
12# Montgomery multiplication routine for x86_64. While it gives a modest
13# 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more
14# than twice as fast. The most common rsa1024 sign is improved by a
15# respectable 50%. It remains to be seen if loop unrolling and a
16# dedicated squaring routine can provide further improvement...
17
18# July 2011.
19#
20# Add dedicated squaring procedure. Performance improvement varies
21# from platform to platform, but on average it's ~5%/15%/25%/33%
22# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
23
24# August 2011.
25#
26# Unroll and modulo-schedule inner loops in such a manner that they
27# are "fallen through" for input lengths of 8, which is critical for
28# 1024-bit RSA *sign*. Average performance improvement in comparison
29# to the *initial* version of this module from 2005 is ~0%/30%/40%/45%
30# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
31
32# June 2013.
33#
34# Optimize reduction in squaring procedure and improve 1024+-bit RSA
35# sign performance by 10-16% on Intel Sandy Bridge and later
36# (virtually the same on non-Intel processors).
37
38# August 2013.
39#
40# Add MULX/ADOX/ADCX code path.
41
42$flavour = shift;
43$output  = shift;
44if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
45
46$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
47
48$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
49( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
50( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
51die "can't locate x86_64-xlate.pl";
52
53open OUT,"| \"$^X\" $xlate $flavour $output";
54*STDOUT=*OUT;
55
56if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
57		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
58	$addx = ($1>=2.23);
59}
60
61if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
62	    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
63	$addx = ($1>=2.10);
64}
65
66if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
67	    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
68	$addx = ($1>=12);
69}
70
71if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
72	my $ver = $2 + $3/100.0;	# 3.1->3.01, 3.10->3.10
73	$addx = ($ver>=3.03);
74}
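
# If none of the probes above succeeds, $addx stays false and only the
# classic MUL-based code paths are emitted; when it is set, the assembler
# is known to understand ADCX/ADOX/MULX, and the bn_mulx4x_mont routine
# plus the bn_sqrx8x_internal dispatch in bn_sqr8x_mont further down are
# emitted as well.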
75
76# int bn_mul_mont(
77$rp="%rdi";	# BN_ULONG *rp,
78$ap="%rsi";	# const BN_ULONG *ap,
79$bp="%rdx";	# const BN_ULONG *bp,
80$np="%rcx";	# const BN_ULONG *np,
81$n0="%r8";	# const BN_ULONG *n0,
82$num="%r9";	# int num);
83$lo0="%r10";
84$hi0="%r11";
85$hi1="%r13";
86$i="%r14";
87$j="%r15";
88$m0="%rbx";
89$m1="%rbp";
90
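# For reference, the computation performed by bn_mul_mont and its 4x/8x
# variants below is word-by-word (CIOS-style) Montgomery multiplication,
# rp = ap*bp*2^(-64*num) mod np, with n0 = -np^(-1) mod 2^64. A rough,
# illustrative C sketch only (not OpenSSL code; assumes a 128-bit
# "dword_t" type and num+1 temporary words tp[0..num] initialized to
# zero):
#
#	for (i = 0; i < num; i++) {
#		BN_ULONG m0 = bp[i];
#		BN_ULONG m1 = (tp[0] + ap[0]*m0) * n0;	/* mod 2^64 */
#		dword_t ca = 0, cn = 0;			/* two carry chains */
#		for (j = 0; j < num; j++) {
#			ca += (dword_t)ap[j]*m0 + tp[j];
#			cn += (dword_t)np[j]*m1 + (BN_ULONG)ca;
#			if (j) tp[j-1] = (BN_ULONG)cn;	/* word 0 is zero */
#			ca >>= 64; cn >>= 64;
#		}
#		cn += ca + tp[num];		/* propagate upmost overflow */
#		tp[num-1] = (BN_ULONG)cn;
#		tp[num]   = (BN_ULONG)(cn >> 64);
#	}
#	/* followed by a constant-time subtraction of np, see .Lsub/.Lcopy */
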
91$code=<<___;
92.text
93
94.extern	OPENSSL_ia32cap_P
95
96.globl	bn_mul_mont
97.type	bn_mul_mont,\@function,6
98.align	16
99bn_mul_mont:
100	mov	${num}d,${num}d
101	mov	%rsp,%rax
102	test	\$3,${num}d
103	jnz	.Lmul_enter
104	cmp	\$8,${num}d
105	jb	.Lmul_enter
106___
107$code.=<<___ if ($addx);
108	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
109___
110$code.=<<___;
111	cmp	$ap,$bp
112	jne	.Lmul4x_enter
113	test	\$7,${num}d
114	jz	.Lsqr8x_enter
115	jmp	.Lmul4x_enter
116
117.align	16
118.Lmul_enter:
119	push	%rbx
120	push	%rbp
121	push	%r12
122	push	%r13
123	push	%r14
124	push	%r15
125
126	neg	$num
127	mov	%rsp,%r11
128	lea	-16(%rsp,$num,8),%r10	# future alloca(8*(num+2))
129	neg	$num			# restore $num
130	and	\$-1024,%r10		# minimize TLB usage
131
132	# Some OSes, *cough*-dows, insist on the stack being "wired" to
133	# physical memory in a strictly sequential manner, i.e. if a stack
134	# allocation spans two pages, then a reference to the farther one
135	# can be punished with SEGV. But page walking does good even on
136	# other OSes, because it guarantees that a villain thread hits
137	# the guard page before it can do damage to an innocent one...
138	sub	%r10,%r11
139	and	\$-4096,%r11
140	lea	(%r10,%r11),%rsp
141	mov	(%rsp),%r11
142	cmp	%r10,%rsp
143	ja	.Lmul_page_walk
144	jmp	.Lmul_page_walk_done
145
146.align	16
147.Lmul_page_walk:
148	lea	-4096(%rsp),%rsp
149	mov	(%rsp),%r11
150	cmp	%r10,%rsp
151	ja	.Lmul_page_walk
152.Lmul_page_walk_done:
153
154	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
155.Lmul_body:
156	mov	$bp,%r12		# reassign $bp
157___
158		$bp="%r12";
159$code.=<<___;
160	mov	($n0),$n0		# pull n0[0] value
161	mov	($bp),$m0		# m0=bp[0]
162	mov	($ap),%rax
163
164	xor	$i,$i			# i=0
165	xor	$j,$j			# j=0
166
167	mov	$n0,$m1
168	mulq	$m0			# ap[0]*bp[0]
169	mov	%rax,$lo0
170	mov	($np),%rax
171
172	imulq	$lo0,$m1		# "tp[0]"*n0
173	mov	%rdx,$hi0
174
175	mulq	$m1			# np[0]*m1
176	add	%rax,$lo0		# discarded
177	mov	8($ap),%rax
178	adc	\$0,%rdx
179	mov	%rdx,$hi1
180
181	lea	1($j),$j		# j++
182	jmp	.L1st_enter
183
184.align	16
185.L1st:
186	add	%rax,$hi1
187	mov	($ap,$j,8),%rax
188	adc	\$0,%rdx
189	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
190	mov	$lo0,$hi0
191	adc	\$0,%rdx
192	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
193	mov	%rdx,$hi1
194
195.L1st_enter:
196	mulq	$m0			# ap[j]*bp[0]
197	add	%rax,$hi0
198	mov	($np,$j,8),%rax
199	adc	\$0,%rdx
200	lea	1($j),$j		# j++
201	mov	%rdx,$lo0
202
203	mulq	$m1			# np[j]*m1
204	cmp	$num,$j
205	jne	.L1st
206
207	add	%rax,$hi1
208	mov	($ap),%rax		# ap[0]
209	adc	\$0,%rdx
210	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
211	adc	\$0,%rdx
212	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
213	mov	%rdx,$hi1
214	mov	$lo0,$hi0
215
216	xor	%rdx,%rdx
217	add	$hi0,$hi1
218	adc	\$0,%rdx
219	mov	$hi1,-8(%rsp,$num,8)
220	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
221
222	lea	1($i),$i		# i++
223	jmp	.Louter
224.align	16
225.Louter:
226	mov	($bp,$i,8),$m0		# m0=bp[i]
227	xor	$j,$j			# j=0
228	mov	$n0,$m1
229	mov	(%rsp),$lo0
230	mulq	$m0			# ap[0]*bp[i]
231	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
232	mov	($np),%rax
233	adc	\$0,%rdx
234
235	imulq	$lo0,$m1		# tp[0]*n0
236	mov	%rdx,$hi0
237
238	mulq	$m1			# np[0]*m1
239	add	%rax,$lo0		# discarded
240	mov	8($ap),%rax
241	adc	\$0,%rdx
242	mov	8(%rsp),$lo0		# tp[1]
243	mov	%rdx,$hi1
244
245	lea	1($j),$j		# j++
246	jmp	.Linner_enter
247
248.align	16
249.Linner:
250	add	%rax,$hi1
251	mov	($ap,$j,8),%rax
252	adc	\$0,%rdx
253	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
254	mov	(%rsp,$j,8),$lo0
255	adc	\$0,%rdx
256	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
257	mov	%rdx,$hi1
258
259.Linner_enter:
260	mulq	$m0			# ap[j]*bp[i]
261	add	%rax,$hi0
262	mov	($np,$j,8),%rax
263	adc	\$0,%rdx
264	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
265	mov	%rdx,$hi0
266	adc	\$0,$hi0
267	lea	1($j),$j		# j++
268
269	mulq	$m1			# np[j]*m1
270	cmp	$num,$j
271	jne	.Linner
272
273	add	%rax,$hi1
274	mov	($ap),%rax		# ap[0]
275	adc	\$0,%rdx
276	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
277	mov	(%rsp,$j,8),$lo0
278	adc	\$0,%rdx
279	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
280	mov	%rdx,$hi1
281
282	xor	%rdx,%rdx
283	add	$hi0,$hi1
284	adc	\$0,%rdx
285	add	$lo0,$hi1		# pull upmost overflow bit
286	adc	\$0,%rdx
287	mov	$hi1,-8(%rsp,$num,8)
288	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
289
290	lea	1($i),$i		# i++
291	cmp	$num,$i
292	jb	.Louter
293
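	# Final reduction, done in constant time: unconditionally compute
	# rp = tp - np (".Lsub" below), then use the borrow together with
	# the upmost overflow word to build an all-zeroes/all-ones mask and
	# select either the difference or tp itself (".Lcopy").  Roughly,
	# as an illustrative C sketch (not the actual code):
	#
	#	mask = tp[num] - borrow;    /* 0 -> keep rp, -1 -> keep tp */
	#	for (i = 0; i < num; i++)
	#		rp[i] = (rp[i] & ~mask) | (tp[i] & mask);
	#
	# while the temporary vector on the stack is overwritten as it is read.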
294	xor	$i,$i			# i=0 and clear CF!
295	mov	(%rsp),%rax		# tp[0]
296	mov	$num,$j			# j=num
297
298.align	16
299.Lsub:	sbb	($np,$i,8),%rax
300	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
301	mov	8(%rsp,$i,8),%rax	# tp[i+1]
302	lea	1($i),$i		# i++
303	dec	$j			# doesn't affect CF!
304	jnz	.Lsub
305
306	sbb	\$0,%rax		# handle upmost overflow bit
307	mov	\$-1,%rbx
308	xor	%rax,%rbx		# not %rax
309	xor	$i,$i
310	mov	$num,$j			# j=num
311
312.Lcopy:					# conditional copy
313	mov	($rp,$i,8),%rcx
314	mov	(%rsp,$i,8),%rdx
315	and	%rbx,%rcx
316	and	%rax,%rdx
317	mov	$num,(%rsp,$i,8)	# zap temporary vector
318	or	%rcx,%rdx
319	mov	%rdx,($rp,$i,8)		# rp[i]=tp[i]
320	lea	1($i),$i
321	sub	\$1,$j
322	jnz	.Lcopy
323
324	mov	8(%rsp,$num,8),%rsi	# restore %rsp
325	mov	\$1,%rax
326	mov	-48(%rsi),%r15
327	mov	-40(%rsi),%r14
328	mov	-32(%rsi),%r13
329	mov	-24(%rsi),%r12
330	mov	-16(%rsi),%rbp
331	mov	-8(%rsi),%rbx
332	lea	(%rsi),%rsp
333.Lmul_epilogue:
334	ret
335.size	bn_mul_mont,.-bn_mul_mont
336___
337{{{
338my @A=("%r10","%r11");
339my @N=("%r13","%rdi");
340$code.=<<___;
341.type	bn_mul4x_mont,\@function,6
342.align	16
343bn_mul4x_mont:
344	mov	${num}d,${num}d
345	mov	%rsp,%rax
346.Lmul4x_enter:
347___
348$code.=<<___ if ($addx);
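	# Bits 8 (BMI2) and 19 (ADX) of the third OPENSSL_ia32cap_P word
	# (CPUID leaf 7, output in EBX) make up the 0x80100 mask; the
	# MULX/ADCX/ADOX path is taken only if both features are present.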
349	and	\$0x80100,%r11d
350	cmp	\$0x80100,%r11d
351	je	.Lmulx4x_enter
352___
353$code.=<<___;
354	push	%rbx
355	push	%rbp
356	push	%r12
357	push	%r13
358	push	%r14
359	push	%r15
360
361	neg	$num
362	mov	%rsp,%r11
363	lea	-32(%rsp,$num,8),%r10	# future alloca(8*(num+4))
364	neg	$num			# restore
365	and	\$-1024,%r10		# minimize TLB usage
366
367	sub	%r10,%r11
368	and	\$-4096,%r11
369	lea	(%r10,%r11),%rsp
370	mov	(%rsp),%r11
371	cmp	%r10,%rsp
372	ja	.Lmul4x_page_walk
373	jmp	.Lmul4x_page_walk_done
374
375.Lmul4x_page_walk:
376	lea	-4096(%rsp),%rsp
377	mov	(%rsp),%r11
378	cmp	%r10,%rsp
379	ja	.Lmul4x_page_walk
380.Lmul4x_page_walk_done:
381
382	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
383.Lmul4x_body:
384	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
385	mov	%rdx,%r12		# reassign $bp
386___
387		$bp="%r12";
388$code.=<<___;
389	mov	($n0),$n0		# pull n0[0] value
390	mov	($bp),$m0		# m0=bp[0]
391	mov	($ap),%rax
392
393	xor	$i,$i			# i=0
394	xor	$j,$j			# j=0
395
396	mov	$n0,$m1
397	mulq	$m0			# ap[0]*bp[0]
398	mov	%rax,$A[0]
399	mov	($np),%rax
400
401	imulq	$A[0],$m1		# "tp[0]"*n0
402	mov	%rdx,$A[1]
403
404	mulq	$m1			# np[0]*m1
405	add	%rax,$A[0]		# discarded
406	mov	8($ap),%rax
407	adc	\$0,%rdx
408	mov	%rdx,$N[1]
409
410	mulq	$m0
411	add	%rax,$A[1]
412	mov	8($np),%rax
413	adc	\$0,%rdx
414	mov	%rdx,$A[0]
415
416	mulq	$m1
417	add	%rax,$N[1]
418	mov	16($ap),%rax
419	adc	\$0,%rdx
420	add	$A[1],$N[1]
421	lea	4($j),$j		# j++
422	adc	\$0,%rdx
423	mov	$N[1],(%rsp)
424	mov	%rdx,$N[0]
425	jmp	.L1st4x
426.align	16
427.L1st4x:
428	mulq	$m0			# ap[j]*bp[0]
429	add	%rax,$A[0]
430	mov	-16($np,$j,8),%rax
431	adc	\$0,%rdx
432	mov	%rdx,$A[1]
433
434	mulq	$m1			# np[j]*m1
435	add	%rax,$N[0]
436	mov	-8($ap,$j,8),%rax
437	adc	\$0,%rdx
438	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
439	adc	\$0,%rdx
440	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
441	mov	%rdx,$N[1]
442
443	mulq	$m0			# ap[j]*bp[0]
444	add	%rax,$A[1]
445	mov	-8($np,$j,8),%rax
446	adc	\$0,%rdx
447	mov	%rdx,$A[0]
448
449	mulq	$m1			# np[j]*m1
450	add	%rax,$N[1]
451	mov	($ap,$j,8),%rax
452	adc	\$0,%rdx
453	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
454	adc	\$0,%rdx
455	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
456	mov	%rdx,$N[0]
457
458	mulq	$m0			# ap[j]*bp[0]
459	add	%rax,$A[0]
460	mov	($np,$j,8),%rax
461	adc	\$0,%rdx
462	mov	%rdx,$A[1]
463
464	mulq	$m1			# np[j]*m1
465	add	%rax,$N[0]
466	mov	8($ap,$j,8),%rax
467	adc	\$0,%rdx
468	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
469	adc	\$0,%rdx
470	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
471	mov	%rdx,$N[1]
472
473	mulq	$m0			# ap[j]*bp[0]
474	add	%rax,$A[1]
475	mov	8($np,$j,8),%rax
476	adc	\$0,%rdx
477	lea	4($j),$j		# j++
478	mov	%rdx,$A[0]
479
480	mulq	$m1			# np[j]*m1
481	add	%rax,$N[1]
482	mov	-16($ap,$j,8),%rax
483	adc	\$0,%rdx
484	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
485	adc	\$0,%rdx
486	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
487	mov	%rdx,$N[0]
488	cmp	$num,$j
489	jb	.L1st4x
490
491	mulq	$m0			# ap[j]*bp[0]
492	add	%rax,$A[0]
493	mov	-16($np,$j,8),%rax
494	adc	\$0,%rdx
495	mov	%rdx,$A[1]
496
497	mulq	$m1			# np[j]*m1
498	add	%rax,$N[0]
499	mov	-8($ap,$j,8),%rax
500	adc	\$0,%rdx
501	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
502	adc	\$0,%rdx
503	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
504	mov	%rdx,$N[1]
505
506	mulq	$m0			# ap[j]*bp[0]
507	add	%rax,$A[1]
508	mov	-8($np,$j,8),%rax
509	adc	\$0,%rdx
510	mov	%rdx,$A[0]
511
512	mulq	$m1			# np[j]*m1
513	add	%rax,$N[1]
514	mov	($ap),%rax		# ap[0]
515	adc	\$0,%rdx
516	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
517	adc	\$0,%rdx
518	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
519	mov	%rdx,$N[0]
520
521	xor	$N[1],$N[1]
522	add	$A[0],$N[0]
523	adc	\$0,$N[1]
524	mov	$N[0],-8(%rsp,$j,8)
525	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit
526
527	lea	1($i),$i		# i++
528.align	4
529.Louter4x:
530	mov	($bp,$i,8),$m0		# m0=bp[i]
531	xor	$j,$j			# j=0
532	mov	(%rsp),$A[0]
533	mov	$n0,$m1
534	mulq	$m0			# ap[0]*bp[i]
535	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
536	mov	($np),%rax
537	adc	\$0,%rdx
538
539	imulq	$A[0],$m1		# tp[0]*n0
540	mov	%rdx,$A[1]
541
542	mulq	$m1			# np[0]*m1
543	add	%rax,$A[0]		# "$N[0]", discarded
544	mov	8($ap),%rax
545	adc	\$0,%rdx
546	mov	%rdx,$N[1]
547
548	mulq	$m0			# ap[j]*bp[i]
549	add	%rax,$A[1]
550	mov	8($np),%rax
551	adc	\$0,%rdx
552	add	8(%rsp),$A[1]		# +tp[1]
553	adc	\$0,%rdx
554	mov	%rdx,$A[0]
555
556	mulq	$m1			# np[j]*m1
557	add	%rax,$N[1]
558	mov	16($ap),%rax
559	adc	\$0,%rdx
560	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
561	lea	4($j),$j		# j+=4
562	adc	\$0,%rdx
563	mov	$N[1],(%rsp)		# tp[j-1]
564	mov	%rdx,$N[0]
565	jmp	.Linner4x
566.align	16
567.Linner4x:
568	mulq	$m0			# ap[j]*bp[i]
569	add	%rax,$A[0]
570	mov	-16($np,$j,8),%rax
571	adc	\$0,%rdx
572	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
573	adc	\$0,%rdx
574	mov	%rdx,$A[1]
575
576	mulq	$m1			# np[j]*m1
577	add	%rax,$N[0]
578	mov	-8($ap,$j,8),%rax
579	adc	\$0,%rdx
580	add	$A[0],$N[0]
581	adc	\$0,%rdx
582	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
583	mov	%rdx,$N[1]
584
585	mulq	$m0			# ap[j]*bp[i]
586	add	%rax,$A[1]
587	mov	-8($np,$j,8),%rax
588	adc	\$0,%rdx
589	add	-8(%rsp,$j,8),$A[1]
590	adc	\$0,%rdx
591	mov	%rdx,$A[0]
592
593	mulq	$m1			# np[j]*m1
594	add	%rax,$N[1]
595	mov	($ap,$j,8),%rax
596	adc	\$0,%rdx
597	add	$A[1],$N[1]
598	adc	\$0,%rdx
599	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
600	mov	%rdx,$N[0]
601
602	mulq	$m0			# ap[j]*bp[i]
603	add	%rax,$A[0]
604	mov	($np,$j,8),%rax
605	adc	\$0,%rdx
606	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
607	adc	\$0,%rdx
608	mov	%rdx,$A[1]
609
610	mulq	$m1			# np[j]*m1
611	add	%rax,$N[0]
612	mov	8($ap,$j,8),%rax
613	adc	\$0,%rdx
614	add	$A[0],$N[0]
615	adc	\$0,%rdx
616	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
617	mov	%rdx,$N[1]
618
619	mulq	$m0			# ap[j]*bp[i]
620	add	%rax,$A[1]
621	mov	8($np,$j,8),%rax
622	adc	\$0,%rdx
623	add	8(%rsp,$j,8),$A[1]
624	adc	\$0,%rdx
625	lea	4($j),$j		# j++
626	mov	%rdx,$A[0]
627
628	mulq	$m1			# np[j]*m1
629	add	%rax,$N[1]
630	mov	-16($ap,$j,8),%rax
631	adc	\$0,%rdx
632	add	$A[1],$N[1]
633	adc	\$0,%rdx
634	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
635	mov	%rdx,$N[0]
636	cmp	$num,$j
637	jb	.Linner4x
638
639	mulq	$m0			# ap[j]*bp[i]
640	add	%rax,$A[0]
641	mov	-16($np,$j,8),%rax
642	adc	\$0,%rdx
643	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
644	adc	\$0,%rdx
645	mov	%rdx,$A[1]
646
647	mulq	$m1			# np[j]*m1
648	add	%rax,$N[0]
649	mov	-8($ap,$j,8),%rax
650	adc	\$0,%rdx
651	add	$A[0],$N[0]
652	adc	\$0,%rdx
653	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
654	mov	%rdx,$N[1]
655
656	mulq	$m0			# ap[j]*bp[i]
657	add	%rax,$A[1]
658	mov	-8($np,$j,8),%rax
659	adc	\$0,%rdx
660	add	-8(%rsp,$j,8),$A[1]
661	adc	\$0,%rdx
662	lea	1($i),$i		# i++
663	mov	%rdx,$A[0]
664
665	mulq	$m1			# np[j]*m1
666	add	%rax,$N[1]
667	mov	($ap),%rax		# ap[0]
668	adc	\$0,%rdx
669	add	$A[1],$N[1]
670	adc	\$0,%rdx
671	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
672	mov	%rdx,$N[0]
673
674	xor	$N[1],$N[1]
675	add	$A[0],$N[0]
676	adc	\$0,$N[1]
677	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
678	adc	\$0,$N[1]
679	mov	$N[0],-8(%rsp,$j,8)
680	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit
681
682	cmp	$num,$i
683	jb	.Louter4x
684___
685{
686my @ri=("%rax","%rdx",$m0,$m1);
687$code.=<<___;
688	mov	16(%rsp,$num,8),$rp	# restore $rp
689	lea	-4($num),$j
690	mov	0(%rsp),@ri[0]		# tp[0]
691	mov	8(%rsp),@ri[1]		# tp[1]
692	shr	\$2,$j			# j=num/4-1
693	lea	(%rsp),$ap		# borrow ap for tp
694	xor	$i,$i			# i=0 and clear CF!
695
696	sub	0($np),@ri[0]
697	mov	16($ap),@ri[2]		# tp[2]
698	mov	24($ap),@ri[3]		# tp[3]
699	sbb	8($np),@ri[1]
700
701.Lsub4x:
702	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
703	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
704	sbb	16($np,$i,8),@ri[2]
705	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
706	mov	40($ap,$i,8),@ri[1]
707	sbb	24($np,$i,8),@ri[3]
708	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
709	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
710	sbb	32($np,$i,8),@ri[0]
711	mov	48($ap,$i,8),@ri[2]
712	mov	56($ap,$i,8),@ri[3]
713	sbb	40($np,$i,8),@ri[1]
714	lea	4($i),$i		# i++
715	dec	$j			# doesn't affect CF!
716	jnz	.Lsub4x
717
718	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
719	mov	32($ap,$i,8),@ri[0]	# load overflow bit
720	sbb	16($np,$i,8),@ri[2]
721	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
722	sbb	24($np,$i,8),@ri[3]
723	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
724
725	sbb	\$0,@ri[0]		# handle upmost overflow bit
726	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
727	pxor	%xmm0,%xmm0
728	movq	@ri[0],%xmm4
729	pcmpeqd	%xmm5,%xmm5
730	pshufd	\$0,%xmm4,%xmm4
731	mov	$num,$j
732	pxor	%xmm4,%xmm5
733	shr	\$2,$j			# j=num/4
734	xor	%eax,%eax		# i=0
735
736	jmp	.Lcopy4x
737.align	16
738.Lcopy4x:				# conditional copy
739	movdqa	(%rsp,%rax),%xmm1
740	movdqu	($rp,%rax),%xmm2
741	pand	%xmm4,%xmm1
742	pand	%xmm5,%xmm2
743	movdqa	16(%rsp,%rax),%xmm3
744	movdqa	%xmm0,(%rsp,%rax)
745	por	%xmm2,%xmm1
746	movdqu	16($rp,%rax),%xmm2
747	movdqu	%xmm1,($rp,%rax)
748	pand	%xmm4,%xmm3
749	pand	%xmm5,%xmm2
750	movdqa	%xmm0,16(%rsp,%rax)
751	por	%xmm2,%xmm3
752	movdqu	%xmm3,16($rp,%rax)
753	lea	32(%rax),%rax
754	dec	$j
755	jnz	.Lcopy4x
756___
757}
758$code.=<<___;
759	mov	8(%rsp,$num,8),%rsi	# restore %rsp
760	mov	\$1,%rax
761	mov	-48(%rsi),%r15
762	mov	-40(%rsi),%r14
763	mov	-32(%rsi),%r13
764	mov	-24(%rsi),%r12
765	mov	-16(%rsi),%rbp
766	mov	-8(%rsi),%rbx
767	lea	(%rsi),%rsp
768.Lmul4x_epilogue:
769	ret
770.size	bn_mul4x_mont,.-bn_mul4x_mont
771___
772}}}
773{{{
774######################################################################
775# void bn_sqr8x_mont(
776my $rptr="%rdi";	# const BN_ULONG *rptr,
777my $aptr="%rsi";	# const BN_ULONG *aptr,
778my $bptr="%rdx";	# not used
779my $nptr="%rcx";	# const BN_ULONG *nptr,
780my $n0  ="%r8";		# const BN_ULONG *n0);
781my $num ="%r9";		# int num, has to be divisible by 8
782
783my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
784my @A0=("%r10","%r11");
785my @A1=("%r12","%r13");
786my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
787
788$code.=<<___	if ($addx);
789.extern	bn_sqrx8x_internal		# see x86_64-mont5 module
790___
791$code.=<<___;
792.extern	bn_sqr8x_internal		# see x86_64-mont5 module
793
794.type	bn_sqr8x_mont,\@function,6
795.align	32
796bn_sqr8x_mont:
797	mov	%rsp,%rax
798.Lsqr8x_enter:
799	push	%rbx
800	push	%rbp
801	push	%r12
802	push	%r13
803	push	%r14
804	push	%r15
805.Lsqr8x_prologue:
806
807	mov	${num}d,%r10d
808	shl	\$3,${num}d		# convert $num to bytes
809	shl	\$3+2,%r10		# 4*$num
810	neg	$num
811
812	##############################################################
813	# ensure that the stack frame doesn't alias with $aptr modulo
814	# 4096. This is done to allow the memory disambiguation logic
815	# to do its job.
816	#
817	lea	-64(%rsp,$num,2),%r11
818	mov	%rsp,%rbp
819	mov	($n0),$n0		# *n0
820	sub	$aptr,%r11
821	and	\$4095,%r11
822	cmp	%r11,%r10
823	jb	.Lsqr8x_sp_alt
824	sub	%r11,%rbp		# align with $aptr
825	lea	-64(%rbp,$num,2),%rbp	# future alloca(frame+2*$num)
826	jmp	.Lsqr8x_sp_done
827
828.align	32
829.Lsqr8x_sp_alt:
830	lea	4096-64(,$num,2),%r10	# 4096-frame-2*$num
831	lea	-64(%rbp,$num,2),%rbp	# future alloca(frame+2*$num)
832	sub	%r10,%r11
833	mov	\$0,%r10
834	cmovc	%r10,%r11
835	sub	%r11,%rbp
836.Lsqr8x_sp_done:
837	and	\$-64,%rbp
838	mov	%rsp,%r11
839	sub	%rbp,%r11
840	and	\$-4096,%r11
841	lea	(%rbp,%r11),%rsp
842	mov	(%rsp),%r10
843	cmp	%rbp,%rsp
844	ja	.Lsqr8x_page_walk
845	jmp	.Lsqr8x_page_walk_done
846
847.align	16
848.Lsqr8x_page_walk:
849	lea	-4096(%rsp),%rsp
850	mov	(%rsp),%r10
851	cmp	%rbp,%rsp
852	ja	.Lsqr8x_page_walk
853.Lsqr8x_page_walk_done:
854
855	mov	$num,%r10
856	neg	$num
857
858	mov	$n0,  32(%rsp)
859	mov	%rax, 40(%rsp)		# save original %rsp
860.Lsqr8x_body:
861
862	movq	$nptr, %xmm2		# save pointer to modulus
863	pxor	%xmm0,%xmm0
864	movq	$rptr,%xmm1		# save $rptr
865	movq	%r10, %xmm3		# -$num
866___
867$code.=<<___ if ($addx);
868	mov	OPENSSL_ia32cap_P+8(%rip),%eax
869	and	\$0x80100,%eax
870	cmp	\$0x80100,%eax
871	jne	.Lsqr8x_nox
872
873	call	bn_sqrx8x_internal	# see x86_64-mont5 module
874					# %rax	top-most carry
875					# %rbp	nptr
876					# %rcx	-8*num
877					# %r8	end of tp[2*num]
878	lea	(%r8,%rcx),%rbx
879	mov	%rcx,$num
880	mov	%rcx,%rdx
881	movq	%xmm1,$rptr
882	sar	\$3+2,%rcx		# %cf=0
883	jmp	.Lsqr8x_sub
884
885.align	32
886.Lsqr8x_nox:
887___
888$code.=<<___;
889	call	bn_sqr8x_internal	# see x86_64-mont5 module
890					# %rax	top-most carry
891					# %rbp	nptr
892					# %r8	-8*num
893					# %rdi	end of tp[2*num]
894	lea	(%rdi,$num),%rbx
895	mov	$num,%rcx
896	mov	$num,%rdx
897	movq	%xmm1,$rptr
898	sar	\$3+2,%rcx		# %cf=0
899	jmp	.Lsqr8x_sub
900
901.align	32
902.Lsqr8x_sub:
903	mov	8*0(%rbx),%r12
904	mov	8*1(%rbx),%r13
905	mov	8*2(%rbx),%r14
906	mov	8*3(%rbx),%r15
907	lea	8*4(%rbx),%rbx
908	sbb	8*0(%rbp),%r12
909	sbb	8*1(%rbp),%r13
910	sbb	8*2(%rbp),%r14
911	sbb	8*3(%rbp),%r15
912	lea	8*4(%rbp),%rbp
913	mov	%r12,8*0($rptr)
914	mov	%r13,8*1($rptr)
915	mov	%r14,8*2($rptr)
916	mov	%r15,8*3($rptr)
917	lea	8*4($rptr),$rptr
918	inc	%rcx			# preserves %cf
919	jnz	.Lsqr8x_sub
920
921	sbb	\$0,%rax		# top-most carry
922	lea	(%rbx,$num),%rbx	# rewind
923	lea	($rptr,$num),$rptr	# rewind
924
925	movq	%rax,%xmm1
926	pxor	%xmm0,%xmm0
927	pshufd	\$0,%xmm1,%xmm1
928	mov	40(%rsp),%rsi		# restore %rsp
929	jmp	.Lsqr8x_cond_copy
930
931.align	32
932.Lsqr8x_cond_copy:
933	movdqa	16*0(%rbx),%xmm2
934	movdqa	16*1(%rbx),%xmm3
935	lea	16*2(%rbx),%rbx
936	movdqu	16*0($rptr),%xmm4
937	movdqu	16*1($rptr),%xmm5
938	lea	16*2($rptr),$rptr
939	movdqa	%xmm0,-16*2(%rbx)	# zero tp
940	movdqa	%xmm0,-16*1(%rbx)
941	movdqa	%xmm0,-16*2(%rbx,%rdx)
942	movdqa	%xmm0,-16*1(%rbx,%rdx)
943	pcmpeqd	%xmm1,%xmm0
944	pand	%xmm1,%xmm2
945	pand	%xmm1,%xmm3
946	pand	%xmm0,%xmm4
947	pand	%xmm0,%xmm5
948	pxor	%xmm0,%xmm0
949	por	%xmm2,%xmm4
950	por	%xmm3,%xmm5
951	movdqu	%xmm4,-16*2($rptr)
952	movdqu	%xmm5,-16*1($rptr)
953	add	\$32,$num
954	jnz	.Lsqr8x_cond_copy
955
956	mov	\$1,%rax
957	mov	-48(%rsi),%r15
958	mov	-40(%rsi),%r14
959	mov	-32(%rsi),%r13
960	mov	-24(%rsi),%r12
961	mov	-16(%rsi),%rbp
962	mov	-8(%rsi),%rbx
963	lea	(%rsi),%rsp
964.Lsqr8x_epilogue:
965	ret
966.size	bn_sqr8x_mont,.-bn_sqr8x_mont
967___
968}}}
969
970if ($addx) {{{
971my $bp="%rdx";	# original value
972
973$code.=<<___;
974.type	bn_mulx4x_mont,\@function,6
975.align	32
976bn_mulx4x_mont:
977	mov	%rsp,%rax
978.Lmulx4x_enter:
979	push	%rbx
980	push	%rbp
981	push	%r12
982	push	%r13
983	push	%r14
984	push	%r15
985.Lmulx4x_prologue:
986
987	shl	\$3,${num}d		# convert $num to bytes
988	xor	%r10,%r10
989	sub	$num,%r10		# -$num
990	mov	($n0),$n0		# *n0
991	lea	-72(%rsp,%r10),%rbp	# future alloca(frame+$num+8)
992	and	\$-128,%rbp
993	mov	%rsp,%r11
994	sub	%rbp,%r11
995	and	\$-4096,%r11
996	lea	(%rbp,%r11),%rsp
997	mov	(%rsp),%r10
998	cmp	%rbp,%rsp
999	ja	.Lmulx4x_page_walk
1000	jmp	.Lmulx4x_page_walk_done
1001
1002.align	16
1003.Lmulx4x_page_walk:
1004	lea	-4096(%rsp),%rsp
1005	mov	(%rsp),%r10
1006	cmp	%rbp,%rsp
1007	ja	.Lmulx4x_page_walk
1008.Lmulx4x_page_walk_done:
1009
1010	lea	($bp,$num),%r10
1011	##############################################################
1012	# Stack layout
1013	# +0	num
1014	# +8	off-loaded &b[i]
1015	# +16	end of b[num]
1016	# +24	saved n0
1017	# +32	saved rp
1018	# +40	saved %rsp
1019	# +48	inner counter
1020	# +56
1021	# +64	tmp[num+1]
1022	#
1023	mov	$num,0(%rsp)		# save $num
1024	shr	\$5,$num
1025	mov	%r10,16(%rsp)		# end of b[num]
1026	sub	\$1,$num
1027	mov	$n0, 24(%rsp)		# save *n0
1028	mov	$rp, 32(%rsp)		# save $rp
1029	mov	%rax,40(%rsp)		# save original %rsp
1030	mov	$num,48(%rsp)		# inner counter
1031	jmp	.Lmulx4x_body
1032
1033.align	32
1034.Lmulx4x_body:
1035___
1036my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
1037   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
1038my $rptr=$bptr;
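# The MULX path below maintains two independent carry chains: ADCX adds
# into CF only and ADOX into OF only, while MULX itself leaves the flags
# untouched.  This lets the a[j]*b[i] partial products and the n[j]*m1
# reduction products be interleaved without serializing on a single
# carry flag, which is the main source of this path's speed-up.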
1039$code.=<<___;
1040	lea	8($bp),$bptr
1041	mov	($bp),%rdx		# b[0], $bp==%rdx actually
1042	lea	64+32(%rsp),$tptr
1043	mov	%rdx,$bi
1044
1045	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
1046	mulx	1*8($aptr),%r11,%r14	# a[1]*b[0]
1047	add	%rax,%r11
1048	mov	$bptr,8(%rsp)		# off-load &b[i]
1049	mulx	2*8($aptr),%r12,%r13	# ...
1050	adc	%r14,%r12
1051	adc	\$0,%r13
1052
1053	mov	$mi,$bptr		# borrow $bptr
1054	imulq	24(%rsp),$mi		# "t[0]"*n0
1055	xor	$zero,$zero		# cf=0, of=0
1056
1057	mulx	3*8($aptr),%rax,%r14
1058	 mov	$mi,%rdx
1059	lea	4*8($aptr),$aptr
1060	adcx	%rax,%r13
1061	adcx	$zero,%r14		# cf=0
1062
1063	mulx	0*8($nptr),%rax,%r10
1064	adcx	%rax,$bptr		# discarded
1065	adox	%r11,%r10
1066	mulx	1*8($nptr),%rax,%r11
1067	adcx	%rax,%r10
1068	adox	%r12,%r11
1069	.byte	0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00	# mulx	2*8($nptr),%rax,%r12
1070	mov	48(%rsp),$bptr		# counter value
1071	mov	%r10,-4*8($tptr)
1072	adcx	%rax,%r11
1073	adox	%r13,%r12
1074	mulx	3*8($nptr),%rax,%r15
1075	 mov	$bi,%rdx
1076	mov	%r11,-3*8($tptr)
1077	adcx	%rax,%r12
1078	adox	$zero,%r15		# of=0
1079	lea	4*8($nptr),$nptr
1080	mov	%r12,-2*8($tptr)
1081
1082	jmp	.Lmulx4x_1st
1083
1084.align	32
1085.Lmulx4x_1st:
1086	adcx	$zero,%r15		# cf=0, modulo-scheduled
1087	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
1088	adcx	%r14,%r10
1089	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
1090	adcx	%rax,%r11
1091	mulx	2*8($aptr),%r12,%rax	# ...
1092	adcx	%r14,%r12
1093	mulx	3*8($aptr),%r13,%r14
1094	 .byte	0x67,0x67
1095	 mov	$mi,%rdx
1096	adcx	%rax,%r13
1097	adcx	$zero,%r14		# cf=0
1098	lea	4*8($aptr),$aptr
1099	lea	4*8($tptr),$tptr
1100
1101	adox	%r15,%r10
1102	mulx	0*8($nptr),%rax,%r15
1103	adcx	%rax,%r10
1104	adox	%r15,%r11
1105	mulx	1*8($nptr),%rax,%r15
1106	adcx	%rax,%r11
1107	adox	%r15,%r12
1108	mulx	2*8($nptr),%rax,%r15
1109	mov	%r10,-5*8($tptr)
1110	adcx	%rax,%r12
1111	mov	%r11,-4*8($tptr)
1112	adox	%r15,%r13
1113	mulx	3*8($nptr),%rax,%r15
1114	 mov	$bi,%rdx
1115	mov	%r12,-3*8($tptr)
1116	adcx	%rax,%r13
1117	adox	$zero,%r15
1118	lea	4*8($nptr),$nptr
1119	mov	%r13,-2*8($tptr)
1120
1121	dec	$bptr			# of=0, pass cf
1122	jnz	.Lmulx4x_1st
1123
1124	mov	0(%rsp),$num		# load num
1125	mov	8(%rsp),$bptr		# re-load &b[i]
1126	adc	$zero,%r15		# modulo-scheduled
1127	add	%r15,%r14
1128	sbb	%r15,%r15		# top-most carry
1129	mov	%r14,-1*8($tptr)
1130	jmp	.Lmulx4x_outer
1131
1132.align	32
1133.Lmulx4x_outer:
1134	mov	($bptr),%rdx		# b[i]
1135	lea	8($bptr),$bptr		# b++
1136	sub	$num,$aptr		# rewind $aptr
1137	mov	%r15,($tptr)		# save top-most carry
1138	lea	64+4*8(%rsp),$tptr
1139	sub	$num,$nptr		# rewind $nptr
1140
1141	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
1142	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0
1143	mov	%rdx,$bi
1144	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
1145	adox	-4*8($tptr),$mi
1146	adcx	%r14,%r11
1147	mulx	2*8($aptr),%r15,%r13	# ...
1148	adox	-3*8($tptr),%r11
1149	adcx	%r15,%r12
1150	adox	-2*8($tptr),%r12
1151	adcx	$zero,%r13
1152	adox	$zero,%r13
1153
1154	mov	$bptr,8(%rsp)		# off-load &b[i]
1155	mov	$mi,%r15
1156	imulq	24(%rsp),$mi		# "t[0]"*n0
1157	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0
1158
1159	mulx	3*8($aptr),%rax,%r14
1160	 mov	$mi,%rdx
1161	adcx	%rax,%r13
1162	adox	-1*8($tptr),%r13
1163	adcx	$zero,%r14
1164	lea	4*8($aptr),$aptr
1165	adox	$zero,%r14
1166
1167	mulx	0*8($nptr),%rax,%r10
1168	adcx	%rax,%r15		# discarded
1169	adox	%r11,%r10
1170	mulx	1*8($nptr),%rax,%r11
1171	adcx	%rax,%r10
1172	adox	%r12,%r11
1173	mulx	2*8($nptr),%rax,%r12
1174	mov	%r10,-4*8($tptr)
1175	adcx	%rax,%r11
1176	adox	%r13,%r12
1177	mulx	3*8($nptr),%rax,%r15
1178	 mov	$bi,%rdx
1179	mov	%r11,-3*8($tptr)
1180	lea	4*8($nptr),$nptr
1181	adcx	%rax,%r12
1182	adox	$zero,%r15		# of=0
1183	mov	48(%rsp),$bptr		# counter value
1184	mov	%r12,-2*8($tptr)
1185
1186	jmp	.Lmulx4x_inner
1187
1188.align	32
1189.Lmulx4x_inner:
1190	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
1191	adcx	$zero,%r15		# cf=0, modulo-scheduled
1192	adox	%r14,%r10
1193	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
1194	adcx	0*8($tptr),%r10
1195	adox	%rax,%r11
1196	mulx	2*8($aptr),%r12,%rax	# ...
1197	adcx	1*8($tptr),%r11
1198	adox	%r14,%r12
1199	mulx	3*8($aptr),%r13,%r14
1200	 mov	$mi,%rdx
1201	adcx	2*8($tptr),%r12
1202	adox	%rax,%r13
1203	adcx	3*8($tptr),%r13
1204	adox	$zero,%r14		# of=0
1205	lea	4*8($aptr),$aptr
1206	lea	4*8($tptr),$tptr
1207	adcx	$zero,%r14		# cf=0
1208
1209	adox	%r15,%r10
1210	mulx	0*8($nptr),%rax,%r15
1211	adcx	%rax,%r10
1212	adox	%r15,%r11
1213	mulx	1*8($nptr),%rax,%r15
1214	adcx	%rax,%r11
1215	adox	%r15,%r12
1216	mulx	2*8($nptr),%rax,%r15
1217	mov	%r10,-5*8($tptr)
1218	adcx	%rax,%r12
1219	adox	%r15,%r13
1220	mulx	3*8($nptr),%rax,%r15
1221	 mov	$bi,%rdx
1222	mov	%r11,-4*8($tptr)
1223	mov	%r12,-3*8($tptr)
1224	adcx	%rax,%r13
1225	adox	$zero,%r15
1226	lea	4*8($nptr),$nptr
1227	mov	%r13,-2*8($tptr)
1228
1229	dec	$bptr			# of=0, pass cf
1230	jnz	.Lmulx4x_inner
1231
1232	mov	0(%rsp),$num		# load num
1233	mov	8(%rsp),$bptr		# re-load &b[i]
1234	adc	$zero,%r15		# modulo-scheduled
1235	sub	0*8($tptr),$zero	# pull top-most carry
1236	adc	%r15,%r14
1237	sbb	%r15,%r15		# top-most carry
1238	mov	%r14,-1*8($tptr)
1239
1240	cmp	16(%rsp),$bptr
1241	jne	.Lmulx4x_outer
1242
1243	lea	64(%rsp),$tptr
1244	sub	$num,$nptr		# rewind $nptr
1245	neg	%r15
1246	mov	$num,%rdx
1247	shr	\$3+2,$num		# %cf=0
1248	mov	32(%rsp),$rptr		# restore rp
1249	jmp	.Lmulx4x_sub
1250
1251.align	32
1252.Lmulx4x_sub:
1253	mov	8*0($tptr),%r11
1254	mov	8*1($tptr),%r12
1255	mov	8*2($tptr),%r13
1256	mov	8*3($tptr),%r14
1257	lea	8*4($tptr),$tptr
1258	sbb	8*0($nptr),%r11
1259	sbb	8*1($nptr),%r12
1260	sbb	8*2($nptr),%r13
1261	sbb	8*3($nptr),%r14
1262	lea	8*4($nptr),$nptr
1263	mov	%r11,8*0($rptr)
1264	mov	%r12,8*1($rptr)
1265	mov	%r13,8*2($rptr)
1266	mov	%r14,8*3($rptr)
1267	lea	8*4($rptr),$rptr
1268	dec	$num			# preserves %cf
1269	jnz	.Lmulx4x_sub
1270
1271	sbb	\$0,%r15		# top-most carry
1272	lea	64(%rsp),$tptr
1273	sub	%rdx,$rptr		# rewind
1274
1275	movq	%r15,%xmm1
1276	pxor	%xmm0,%xmm0
1277	pshufd	\$0,%xmm1,%xmm1
1278	mov	40(%rsp),%rsi		# restore %rsp
1279	jmp	.Lmulx4x_cond_copy
1280
1281.align	32
1282.Lmulx4x_cond_copy:
1283	movdqa	16*0($tptr),%xmm2
1284	movdqa	16*1($tptr),%xmm3
1285	lea	16*2($tptr),$tptr
1286	movdqu	16*0($rptr),%xmm4
1287	movdqu	16*1($rptr),%xmm5
1288	lea	16*2($rptr),$rptr
1289	movdqa	%xmm0,-16*2($tptr)	# zero tp
1290	movdqa	%xmm0,-16*1($tptr)
1291	pcmpeqd	%xmm1,%xmm0
1292	pand	%xmm1,%xmm2
1293	pand	%xmm1,%xmm3
1294	pand	%xmm0,%xmm4
1295	pand	%xmm0,%xmm5
1296	pxor	%xmm0,%xmm0
1297	por	%xmm2,%xmm4
1298	por	%xmm3,%xmm5
1299	movdqu	%xmm4,-16*2($rptr)
1300	movdqu	%xmm5,-16*1($rptr)
1301	sub	\$32,%rdx
1302	jnz	.Lmulx4x_cond_copy
1303
1304	mov	%rdx,($tptr)
1305
1306	mov	\$1,%rax
1307	mov	-48(%rsi),%r15
1308	mov	-40(%rsi),%r14
1309	mov	-32(%rsi),%r13
1310	mov	-24(%rsi),%r12
1311	mov	-16(%rsi),%rbp
1312	mov	-8(%rsi),%rbx
1313	lea	(%rsi),%rsp
1314.Lmulx4x_epilogue:
1315	ret
1316.size	bn_mulx4x_mont,.-bn_mulx4x_mont
1317___
1318}}}
1319$code.=<<___;
1320.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
1321.align	16
1322___
1323
1324# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
1325#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
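#
# Both handlers recover the original stack pointer saved by the
# prologues: mul_handler reads it from the tp[num+1] slot (after pulling
# num from the saved R9 field of the CONTEXT), while sqr_handler reads it
# from the fixed 40(%rsp) slot used by the sqr8x/mulx4x frames.
# HandlerData[] holds the image-relative label addresses that the
# handlers compare context->Rip against.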
1326if ($win64) {
1327$rec="%rcx";
1328$frame="%rdx";
1329$context="%r8";
1330$disp="%r9";
1331
1332$code.=<<___;
1333.extern	__imp_RtlVirtualUnwind
1334.type	mul_handler,\@abi-omnipotent
1335.align	16
1336mul_handler:
1337	push	%rsi
1338	push	%rdi
1339	push	%rbx
1340	push	%rbp
1341	push	%r12
1342	push	%r13
1343	push	%r14
1344	push	%r15
1345	pushfq
1346	sub	\$64,%rsp
1347
1348	mov	120($context),%rax	# pull context->Rax
1349	mov	248($context),%rbx	# pull context->Rip
1350
1351	mov	8($disp),%rsi		# disp->ImageBase
1352	mov	56($disp),%r11		# disp->HandlerData
1353
1354	mov	0(%r11),%r10d		# HandlerData[0]
1355	lea	(%rsi,%r10),%r10	# end of prologue label
1356	cmp	%r10,%rbx		# context->Rip<end of prologue label
1357	jb	.Lcommon_seh_tail
1358
1359	mov	152($context),%rax	# pull context->Rsp
1360
1361	mov	4(%r11),%r10d		# HandlerData[1]
1362	lea	(%rsi,%r10),%r10	# epilogue label
1363	cmp	%r10,%rbx		# context->Rip>=epilogue label
1364	jae	.Lcommon_seh_tail
1365
1366	mov	192($context),%r10	# pull $num
1367	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
1368
1369	jmp	.Lcommon_pop_regs
1370.size	mul_handler,.-mul_handler
1371
1372.type	sqr_handler,\@abi-omnipotent
1373.align	16
1374sqr_handler:
1375	push	%rsi
1376	push	%rdi
1377	push	%rbx
1378	push	%rbp
1379	push	%r12
1380	push	%r13
1381	push	%r14
1382	push	%r15
1383	pushfq
1384	sub	\$64,%rsp
1385
1386	mov	120($context),%rax	# pull context->Rax
1387	mov	248($context),%rbx	# pull context->Rip
1388
1389	mov	8($disp),%rsi		# disp->ImageBase
1390	mov	56($disp),%r11		# disp->HandlerData
1391
1392	mov	0(%r11),%r10d		# HandlerData[0]
1393	lea	(%rsi,%r10),%r10	# end of prologue label
1394	cmp	%r10,%rbx		# context->Rip<prologue label
1395	jb	.Lcommon_seh_tail
1396
1397	mov	4(%r11),%r10d		# HandlerData[1]
1398	lea	(%rsi,%r10),%r10	# body label
1399	cmp	%r10,%rbx		# context->Rip<body label
1400	jb	.Lcommon_pop_regs
1401
1402	mov	152($context),%rax	# pull context->Rsp
1403
1404	mov	8(%r11),%r10d		# HandlerData[2]
1405	lea	(%rsi,%r10),%r10	# epilogue label
1406	cmp	%r10,%rbx		# context->Rip>=epilogue label
1407	jae	.Lcommon_seh_tail
1408
1409	mov	40(%rax),%rax		# pull saved stack pointer
1410
1411.Lcommon_pop_regs:
1412	mov	-8(%rax),%rbx
1413	mov	-16(%rax),%rbp
1414	mov	-24(%rax),%r12
1415	mov	-32(%rax),%r13
1416	mov	-40(%rax),%r14
1417	mov	-48(%rax),%r15
1418	mov	%rbx,144($context)	# restore context->Rbx
1419	mov	%rbp,160($context)	# restore context->Rbp
1420	mov	%r12,216($context)	# restore context->R12
1421	mov	%r13,224($context)	# restore context->R13
1422	mov	%r14,232($context)	# restore context->R14
1423	mov	%r15,240($context)	# restore context->R15
1424
1425.Lcommon_seh_tail:
1426	mov	8(%rax),%rdi
1427	mov	16(%rax),%rsi
1428	mov	%rax,152($context)	# restore context->Rsp
1429	mov	%rsi,168($context)	# restore context->Rsi
1430	mov	%rdi,176($context)	# restore context->Rdi
1431
1432	mov	40($disp),%rdi		# disp->ContextRecord
1433	mov	$context,%rsi		# context
1434	mov	\$154,%ecx		# sizeof(CONTEXT)/8, count in qwords
1435	.long	0xa548f3fc		# cld; rep movsq
1436
1437	mov	$disp,%rsi
1438	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
1439	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
1440	mov	0(%rsi),%r8		# arg3, disp->ControlPc
1441	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
1442	mov	40(%rsi),%r10		# disp->ContextRecord
1443	lea	56(%rsi),%r11		# &disp->HandlerData
1444	lea	24(%rsi),%r12		# &disp->EstablisherFrame
1445	mov	%r10,32(%rsp)		# arg5
1446	mov	%r11,40(%rsp)		# arg6
1447	mov	%r12,48(%rsp)		# arg7
1448	mov	%rcx,56(%rsp)		# arg8, (NULL)
1449	call	*__imp_RtlVirtualUnwind(%rip)
1450
1451	mov	\$1,%eax		# ExceptionContinueSearch
1452	add	\$64,%rsp
1453	popfq
1454	pop	%r15
1455	pop	%r14
1456	pop	%r13
1457	pop	%r12
1458	pop	%rbp
1459	pop	%rbx
1460	pop	%rdi
1461	pop	%rsi
1462	ret
1463.size	sqr_handler,.-sqr_handler
1464
1465.section	.pdata
1466.align	4
1467	.rva	.LSEH_begin_bn_mul_mont
1468	.rva	.LSEH_end_bn_mul_mont
1469	.rva	.LSEH_info_bn_mul_mont
1470
1471	.rva	.LSEH_begin_bn_mul4x_mont
1472	.rva	.LSEH_end_bn_mul4x_mont
1473	.rva	.LSEH_info_bn_mul4x_mont
1474
1475	.rva	.LSEH_begin_bn_sqr8x_mont
1476	.rva	.LSEH_end_bn_sqr8x_mont
1477	.rva	.LSEH_info_bn_sqr8x_mont
1478___
1479$code.=<<___ if ($addx);
1480	.rva	.LSEH_begin_bn_mulx4x_mont
1481	.rva	.LSEH_end_bn_mulx4x_mont
1482	.rva	.LSEH_info_bn_mulx4x_mont
1483___
1484$code.=<<___;
1485.section	.xdata
1486.align	8
1487.LSEH_info_bn_mul_mont:
1488	.byte	9,0,0,0
1489	.rva	mul_handler
1490	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
1491.LSEH_info_bn_mul4x_mont:
1492	.byte	9,0,0,0
1493	.rva	mul_handler
1494	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
1495.LSEH_info_bn_sqr8x_mont:
1496	.byte	9,0,0,0
1497	.rva	sqr_handler
1498	.rva	.Lsqr8x_prologue,.Lsqr8x_body,.Lsqr8x_epilogue		# HandlerData[]
1499.align	8
1500___
1501$code.=<<___ if ($addx);
1502.LSEH_info_bn_mulx4x_mont:
1503	.byte	9,0,0,0
1504	.rva	sqr_handler
1505	.rva	.Lmulx4x_prologue,.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]
1506.align	8
1507___
1508}
1509
1510print $code;
1511close STDOUT;
1512