#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2005.
#
# Montgomery multiplication routine for x86_64. While it gives a
# modest 9% improvement for rsa4096 sign on Opteron, rsa512 sign runs
# more than twice as fast. The most common case, rsa1024 sign, is
# improved by a respectable 50%. It remains to be seen whether loop
# unrolling and a dedicated squaring routine can provide further
# improvement...

# July 2011.
#
# Add dedicated squaring procedure. Performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# August 2011.
#
# Unroll and modulo-schedule inner loops in such a manner that they
# are "fallen through" for input lengths of 8, which is critical for
# 1024-bit RSA *sign*. Average performance improvement in comparison
# to the *initial* 2005 version of this module is ~0%/30%/40%/45%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# June 2013.
#
# Optimize reduction in squaring procedure and improve 1024+-bit RSA
# sign performance by 10-16% on Intel Sandy Bridge and later
# (virtually the same on non-Intel processors).

# August 2013.
#
# Add MULX/ADOX/ADCX code path.

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$addx = ($1>=2.23);
}

if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$addx = ($1>=2.10);
}

if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$addx = ($1>=12);
}
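
# The probes above enable the MULX/ADOX/ADCX code path only when the
# assembler is new enough to encode those instructions: per the checks,
# GNU as 2.23+, NASM 2.10+ or MASM (ml64) version 12+. If none match,
# $addx stays false and only the pre-ADX code is emitted.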

# int bn_mul_mont(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num);
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";
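
# A rough sketch of what the loops below compute (word-serial,
# CIOS-style Montgomery multiplication; tp[] is a num+2 limb scratch
# vector on the stack, and the names here are for orientation only):
#
#	for (i=0; i<num; i++) {
#		lo0 = (tp[0] + ap[0]*bp[i]) mod 2^64;
#		m1  = lo0*n0 mod 2^64;
#		tp  = (tp + ap*bp[i] + m1*np) >> 64;	# low limb cancels
#	}
#	# rp = tp - np if that doesn't borrow (counting the saved
#	# overflow bit), otherwise rp = tp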

$code=<<___;
.text

.extern	OPENSSL_ia32cap_P

.globl	bn_mul_mont
.type	bn_mul_mont,\@function,6
.align	16
bn_mul_mont:
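	# Dispatch (in short): num not a multiple of 4, or num<8, takes
	# the generic .Lmul loop below; bp==ap with num divisible by 8
	# takes the dedicated squaring path; everything else takes the
	# 4-way unrolled .Lmul4x path.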
	test	\$3,${num}d
	jnz	.Lmul_enter
	cmp	\$8,${num}d
	jb	.Lmul_enter
___
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
___
$code.=<<___;
	cmp	$ap,$bp
	jne	.Lmul4x_enter
	test	\$7,${num}d
	jz	.Lsqr8x_enter
	jmp	.Lmul4x_enter

.align	16
.Lmul_enter:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	2($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+2))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul_body:
	mov	$bp,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$lo0
	mov	($np),%rax

	imulq	$lo0,$m1		# "tp[0]"*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.L1st_enter

.align	16
.L1st:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$lo0,$hi0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.L1st_enter:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	lea	1($j),$j		# j++
	mov	%rdx,$lo0

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.L1st

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1
	mov	$lo0,$hi0

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	jmp	.Louter
.align	16
.Louter:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	$n0,$m1
	mov	(%rsp),$lo0
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$lo0,$m1		# tp[0]*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	8(%rsp),$lo0		# tp[1]
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.Linner_enter

.align	16
.Linner:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.Linner_enter:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	mov	%rdx,$hi0
	adc	\$0,$hi0
	lea	1($j),$j		# j++

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.Linner

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	add	$lo0,$hi1		# pull upmost overflow bit
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	cmp	$num,$i
	jb	.Louter

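	# At this point tp[0..num] holds T = (a*b + m*n)/2^(64*num) with
	# T < 2*n, so at most one final subtraction of n is needed; it is
	# done unconditionally into rp[] below, and the resulting borrow
	# selects branchlessly whether tp or rp is kept.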
	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	lea	(%rsp),$ap		# borrow ap for tp
	mov	$num,$j			# j=num
	jmp	.Lsub
.align	16
.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8($ap,$i,8),%rax	# tp[i+1]
	lea	1($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub

	sbb	\$0,%rax		# handle upmost overflow bit
	xor	$i,$i
	and	%rax,$ap
	not	%rax
	mov	$rp,$np
	and	%rax,$np
	mov	$num,$j			# j=num
	or	$np,$ap			# ap=borrow?tp:rp
.align	16
.Lcopy:					# copy or in-place refresh
	mov	($ap,$i,8),%rax
	mov	$i,(%rsp,$i,8)		# zap temporary vector
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]
	lea	1($i),$i
	sub	\$1,$j
	jnz	.Lcopy

	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul_epilogue:
	ret
.size	bn_mul_mont,.-bn_mul_mont
___
{{{
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
$code.=<<___;
.type	bn_mul4x_mont,\@function,6
.align	16
bn_mul4x_mont:
.Lmul4x_enter:
___
$code.=<<___ if ($addx);
	and	\$0x80100,%r11d
	cmp	\$0x80100,%r11d
	je	.Lmulx4x_enter
___
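# (0x80100 is the ADX and BMI2 feature bits in the third 32-bit word of
# OPENSSL_ia32cap_P, i.e. CPUID leaf 7 EBX bits 19 and 8; both must be
# set for the MULX/ADOX/ADCX path to be taken.)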
$code.=<<___;
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	4($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+4))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul4x_body:
	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
	mov	%rdx,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$A[0]
	mov	($np),%rax

	imulq	$A[0],$m1		# "tp[0]"*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	lea	4($j),$j		# j++
	adc	\$0,%rdx
	mov	$N[1],(%rsp)
	mov	%rdx,$N[0]
	jmp	.L1st4x
.align	16
.L1st4x:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	lea	4($j),$j		# j++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.L1st4x

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
.align	4
.Louter4x:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	(%rsp),$A[0]
	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$A[0],$m1		# tp[0]*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# "$N[0]", discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	add	8(%rsp),$A[1]		# +tp[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	lea	4($j),$j		# j++
	adc	\$0,%rdx
	mov	$N[1],(%rsp)		# tp[j-1]
	mov	%rdx,$N[0]
	jmp	.Linner4x
.align	16
.Linner4x:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	add	8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	4($j),$j		# j++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.Linner4x

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	1($i),$i		# i++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	cmp	$num,$i
	jb	.Louter4x
___
{
my @ri=("%rax","%rdx",$m0,$m1);
$code.=<<___;
	mov	16(%rsp,$num,8),$rp	# restore $rp
	mov	0(%rsp),@ri[0]		# tp[0]
	pxor	%xmm0,%xmm0
	mov	8(%rsp),@ri[1]		# tp[1]
	shr	\$2,$num		# num/=4
	lea	(%rsp),$ap		# borrow ap for tp
	xor	$i,$i			# i=0 and clear CF!

	sub	0($np),@ri[0]
	mov	16($ap),@ri[2]		# tp[2]
	mov	24($ap),@ri[3]		# tp[3]
	sbb	8($np),@ri[1]
	lea	-1($num),$j		# j=num/4-1
	jmp	.Lsub4x
.align	16
.Lsub4x:
	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($np,$i,8),@ri[2]
	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
	mov	40($ap,$i,8),@ri[1]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($np,$i,8),@ri[0]
	mov	48($ap,$i,8),@ri[2]
	mov	56($ap,$i,8),@ri[3]
	sbb	40($np,$i,8),@ri[1]
	lea	4($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub4x

	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($ap,$i,8),@ri[0]	# load overflow bit
	sbb	16($np,$i,8),@ri[2]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	xor	$i,$i			# i=0
	and	@ri[0],$ap
	not	@ri[0]
	mov	$rp,$np
	and	@ri[0],$np
	lea	-1($num),$j
	or	$np,$ap			# ap=borrow?tp:rp

	movdqu	($ap),%xmm1
	movdqa	%xmm0,(%rsp)
	movdqu	%xmm1,($rp)
	jmp	.Lcopy4x
.align	16
.Lcopy4x:					# copy or in-place refresh
	movdqu	16($ap,$i),%xmm2
	movdqu	32($ap,$i),%xmm1
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
	movdqa	%xmm0,32(%rsp,$i)
	movdqu	%xmm1,32($rp,$i)
	lea	32($i),$i
	dec	$j
	jnz	.Lcopy4x

	shl	\$2,$num
	movdqu	16($ap,$i),%xmm2
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
___
}
$code.=<<___;
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul4x_epilogue:
	ret
.size	bn_mul4x_mont,.-bn_mul4x_mont
___
}}}
{{{
######################################################################
# void bn_sqr8x_mont(
my $rptr="%rdi";	# BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# not used
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0,
my $num ="%r9";		# int num);	has to be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

$code.=<<___	if ($addx);
.extern	bn_sqrx8x_internal		# see x86_64-mont5 module
___
$code.=<<___;
.extern	bn_sqr8x_internal		# see x86_64-mont5 module

.type	bn_sqr8x_mont,\@function,6
.align	32
bn_sqr8x_mont:
.Lsqr8x_enter:
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,%r10d
	shl	\$3,${num}d		# convert $num to bytes
	shl	\$3+2,%r10		# 4*$num
	neg	$num

	##############################################################
	# Ensure that the stack frame doesn't alias with $aptr modulo
	# 4096. This is done to allow the memory disambiguation logic
	# to do its job.
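	# (If the frame and $aptr happened to share the same offset modulo
	# 4096, loads from $aptr and stores to the frame could look like
	# overlapping accesses to the disambiguation hardware and stall.)
	#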
	lea	-64(%rsp,$num,4),%r11
	mov	($n0),$n0		# *n0
	sub	$aptr,%r11
	and	\$4095,%r11
	cmp	%r11,%r10
	jb	.Lsqr8x_sp_alt
	sub	%r11,%rsp		# align with $aptr
	lea	-64(%rsp,$num,4),%rsp	# alloca(frame+4*$num)
	jmp	.Lsqr8x_sp_done

.align	32
.Lsqr8x_sp_alt:
	lea	4096-64(,$num,4),%r10	# 4096-frame-4*$num
	lea	-64(%rsp,$num,4),%rsp	# alloca(frame+4*$num)
	sub	%r10,%r11
	mov	\$0,%r10
	cmovc	%r10,%r11
	sub	%r11,%rsp
.Lsqr8x_sp_done:
	and	\$-64,%rsp
	mov	$num,%r10
	neg	$num

	lea	64(%rsp,$num,2),%r11	# copy of modulus
	mov	$n0,  32(%rsp)
	mov	%rax, 40(%rsp)		# save original %rsp
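	# Frame so far: n0 at 32(%rsp), original %rsp at 40(%rsp); t[]
	# starts at 64(%rsp) and occupies 2*num bytes, and %r11 points
	# just past it, where the modulus is copied below.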
.Lsqr8x_body:

	mov	$num,$i
	movq	%r11, %xmm2		# save pointer to modulus copy
	shr	\$3+2,$i
	mov	OPENSSL_ia32cap_P+8(%rip),%eax
	jmp	.Lsqr8x_copy_n

.align	32
.Lsqr8x_copy_n:
	movq	8*0($nptr),%xmm0
	movq	8*1($nptr),%xmm1
	movq	8*2($nptr),%xmm3
	movq	8*3($nptr),%xmm4
	lea	8*4($nptr),$nptr
	movdqa	%xmm0,16*0(%r11)
	movdqa	%xmm1,16*1(%r11)
	movdqa	%xmm3,16*2(%r11)
	movdqa	%xmm4,16*3(%r11)
	lea	16*4(%r11),%r11
	dec	$i
	jnz	.Lsqr8x_copy_n

	pxor	%xmm0,%xmm0
	movq	$rptr,%xmm1		# save $rptr
	movq	%r10, %xmm3		# -$num
___
$code.=<<___ if ($addx);
	and	\$0x80100,%eax
	cmp	\$0x80100,%eax
	jne	.Lsqr8x_nox

	call	bn_sqrx8x_internal	# see x86_64-mont5 module

	pxor	%xmm0,%xmm0
	lea	48(%rsp),%rax
	lea	64(%rsp,$num,2),%rdx
	shr	\$3+2,$num
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lsqr8x_zero

.align	32
.Lsqr8x_nox:
___
$code.=<<___;
	call	bn_sqr8x_internal	# see x86_64-mont5 module

	pxor	%xmm0,%xmm0
	lea	48(%rsp),%rax
	lea	64(%rsp,$num,2),%rdx
	shr	\$3+2,$num
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lsqr8x_zero

.align	32
.Lsqr8x_zero:
	movdqa	%xmm0,16*0(%rax)	# wipe t
	movdqa	%xmm0,16*1(%rax)
	movdqa	%xmm0,16*2(%rax)
	movdqa	%xmm0,16*3(%rax)
	lea	16*4(%rax),%rax
	movdqa	%xmm0,16*0(%rdx)	# wipe n
	movdqa	%xmm0,16*1(%rdx)
	movdqa	%xmm0,16*2(%rdx)
	movdqa	%xmm0,16*3(%rdx)
	lea	16*4(%rdx),%rdx
	dec	$num
	jnz	.Lsqr8x_zero

	mov	\$1,%rax
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lsqr8x_epilogue:
	ret
.size	bn_sqr8x_mont,.-bn_sqr8x_mont
___
}}}

if ($addx) {{{
my $bp="%rdx";	# original value

$code.=<<___;
.type	bn_mulx4x_mont,\@function,6
.align	32
bn_mulx4x_mont:
.Lmulx4x_enter:
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	shl	\$3,${num}d		# convert $num to bytes
	.byte	0x67
	xor	%r10,%r10
	sub	$num,%r10		# -$num
	mov	($n0),$n0		# *n0
	lea	-72(%rsp,%r10),%rsp	# alloca(frame+$num+8)
	lea	($bp,$num),%r10
	and	\$-128,%rsp
	##############################################################
	# Stack layout
	# +0	num
	# +8	off-loaded &b[i]
	# +16	end of b[num]
	# +24	saved n0
	# +32	saved rp
	# +40	saved %rsp
	# +48	inner counter
	# +56
	# +64	tmp[num+1]
	#
	mov	$num,0(%rsp)		# save $num
	shr	\$5,$num
	mov	%r10,16(%rsp)		# end of b[num]
	sub	\$1,$num
	mov	$n0, 24(%rsp)		# save *n0
	mov	$rp, 32(%rsp)		# save $rp
	mov	%rax,40(%rsp)		# save original %rsp
	mov	$num,48(%rsp)		# inner counter
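	# (num is in bytes at this point, so num>>5 is the limb count
	# divided by 4; the first 4-limb block is handled by the prologue
	# before .Lmulx4x_1st, hence the inner counter is one less.)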
	jmp	.Lmulx4x_body

.align	32
.Lmulx4x_body:
___
my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
my $rptr=$bptr;
$code.=<<___;
	lea	8($bp),$bptr
	mov	($bp),%rdx		# b[0], $bp==%rdx actually
	lea	64+32(%rsp),$tptr
	mov	%rdx,$bi

	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
	mulx	1*8($aptr),%r11,%r14	# a[1]*b[0]
	add	%rax,%r11
	mov	$bptr,8(%rsp)		# off-load &b[i]
	mulx	2*8($aptr),%r12,%r13	# ...
	adc	%r14,%r12
	adc	\$0,%r13

	mov	$mi,$bptr		# borrow $bptr
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	 mov	$mi,%rdx
	lea	4*8($aptr),$aptr
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,$bptr		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00	# mulx	2*8($nptr),%rax,%r12
	mov	48(%rsp),$bptr		# counter value
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	lea	4*8($nptr),$nptr
	mov	%r12,-2*8($tptr)

	jmp	.Lmulx4x_1st

.align	32
.Lmulx4x_1st:
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
	adcx	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
	adcx	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	 .byte	0x67,0x67
	 mov	$mi,%rdx
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	mov	%r11,-4*8($tptr)
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_1st

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	add	%r15,%r14
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)
	jmp	.Lmulx4x_outer

.align	32
.Lmulx4x_outer:
	mov	($bptr),%rdx		# b[i]
	lea	8($bptr),$bptr		# b++
	sub	$num,$aptr		# rewind $aptr
	mov	%r15,($tptr)		# save top-most carry
	lea	64+4*8(%rsp),$tptr
	sub	$num,$nptr		# rewind $nptr

	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0
	mov	%rdx,$bi
	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
	adox	-4*8($tptr),$mi
	adcx	%r14,%r11
	mulx	2*8($aptr),%r15,%r13	# ...
	adox	-3*8($tptr),%r11
	adcx	%r15,%r12
	adox	$zero,%r12
	adcx	$zero,%r13

	mov	$bptr,8(%rsp)		# off-load &b[i]
	.byte	0x67
	mov	$mi,%r15
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	 mov	$mi,%rdx
	adox	-2*8($tptr),%r12
	adcx	%rax,%r13
	adox	-1*8($tptr),%r13
	adcx	$zero,%r14
	lea	4*8($aptr),$aptr
	adox	$zero,%r14

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	mulx	2*8($nptr),%rax,%r12
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	lea	4*8($nptr),$nptr
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	mov	48(%rsp),$bptr		# counter value
	mov	%r12,-2*8($tptr)

	jmp	.Lmulx4x_inner

.align	32
.Lmulx4x_inner:
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	adox	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
	adcx	0*8($tptr),%r10
	adox	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	1*8($tptr),%r11
	adox	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	 mov	$mi,%rdx
	adcx	2*8($tptr),%r12
	adox	%rax,%r13
	adcx	3*8($tptr),%r13
	adox	$zero,%r14		# of=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adcx	$zero,%r14		# cf=0

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r11,-4*8($tptr)
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_inner

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	sub	0*8($tptr),$zero	# pull top-most carry
	adc	%r15,%r14
	mov	-8($nptr),$mi
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)

	cmp	16(%rsp),$bptr
	jne	.Lmulx4x_outer

	sub	%r14,$mi		# compare top-most words
	sbb	$mi,$mi
	or	$mi,%r15
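	# %r15 is now an all-ones mask iff one final subtraction of n is
	# needed; the loop below computes rp = tp + ((-n) & mask), i.e.
	# tp or tp-n branchlessly, while %xmm0 stores wipe tp behind it.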

	neg	$num
	xor	%rdx,%rdx
	mov	32(%rsp),$rptr		# restore rp
	lea	64(%rsp),$tptr

	pxor	%xmm0,%xmm0
	mov	0*8($nptr,$num),%r8
	mov	1*8($nptr,$num),%r9
	neg	%r8
	jmp	.Lmulx4x_sub_entry

.align	32
.Lmulx4x_sub:
	mov	0*8($nptr,$num),%r8
	mov	1*8($nptr,$num),%r9
	not	%r8
.Lmulx4x_sub_entry:
	mov	2*8($nptr,$num),%r10
	not	%r9
	and	%r15,%r8
	mov	3*8($nptr,$num),%r11
	not	%r10
	and	%r15,%r9
	not	%r11
	and	%r15,%r10
	and	%r15,%r11

	neg	%rdx			# mov %rdx,%cf
	adc	0*8($tptr),%r8
	adc	1*8($tptr),%r9
	movdqa	%xmm0,($tptr)
	adc	2*8($tptr),%r10
	adc	3*8($tptr),%r11
	movdqa	%xmm0,16($tptr)
	lea	4*8($tptr),$tptr
	sbb	%rdx,%rdx		# mov %cf,%rdx

	mov	%r8,0*8($rptr)
	mov	%r9,1*8($rptr)
	mov	%r10,2*8($rptr)
	mov	%r11,3*8($rptr)
	lea	4*8($rptr),$rptr

	add	\$32,$num
	jnz	.Lmulx4x_sub

	mov	40(%rsp),%rsi		# restore %rsp
	mov	\$1,%rax
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lmulx4x_epilogue:
	ret
.size	bn_mulx4x_mont,.-bn_mulx4x_mont
___
}}}
$code.=<<___;
.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

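# Both handlers below follow the same pattern: if context->Rip lies
# between the body and epilogue labels recorded in HandlerData[], they
# recover the original %rsp that the prologue stashed, restore the
# non-volatile registers relative to it, and then let RtlVirtualUnwind
# finish the job.
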
$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
.align	16
mul_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	mul_handler,.-mul_handler

.type	sqr_handler,\@abi-omnipotent
.align	16
sqr_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<body label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	40(%rax),%rax		# pull saved stack pointer

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	sqr_handler,.-sqr_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_bn_mul_mont
	.rva	.LSEH_end_bn_mul_mont
	.rva	.LSEH_info_bn_mul_mont

	.rva	.LSEH_begin_bn_mul4x_mont
	.rva	.LSEH_end_bn_mul4x_mont
	.rva	.LSEH_info_bn_mul4x_mont

	.rva	.LSEH_begin_bn_sqr8x_mont
	.rva	.LSEH_end_bn_sqr8x_mont
	.rva	.LSEH_info_bn_sqr8x_mont
___
$code.=<<___ if ($addx);
	.rva	.LSEH_begin_bn_mulx4x_mont
	.rva	.LSEH_end_bn_mulx4x_mont
	.rva	.LSEH_info_bn_mulx4x_mont
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_bn_mul_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
.LSEH_info_bn_mul4x_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqr8x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lsqr8x_body,.Lsqr8x_epilogue	# HandlerData[]
___
$code.=<<___ if ($addx);
.LSEH_info_bn_mulx4x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]
___
}

print $code;
close STDOUT;