#! /usr/bin/env perl
# Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2005.
#
# Montgomery multiplication routine for x86_64. While it gives a modest
# 9% improvement for rsa4096 sign on Opteron, rsa512 sign runs more
# than twice (>2x) as fast. The most common rsa1024 sign is improved by
# a respectable 50%. It remains to be seen if loop unrolling and a
# dedicated squaring routine can provide further improvement...

# July 2011.
#
# Add a dedicated squaring procedure. The performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# August 2011.
#
# Unroll and modulo-schedule the inner loops in such a manner that they
# are "fallen through" for an input length of 8, which is critical for
# 1024-bit RSA *sign*. The average performance improvement in comparison
# to the *initial* 2005 version of this module is ~0%/30%/40%/45%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# June 2013.
#
# Optimize reduction in the squaring procedure and improve 1024+-bit RSA
# sign performance by 10-16% on Intel Sandy Bridge and later
# (virtually the same on non-Intel processors).

# August 2013.
#
# Add MULX/ADOX/ADCX code path.

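# For orientation, a rough C-level sketch of the word-by-word Montgomery
# multiplication that all of the routines below implement. This is only an
# illustration and not part of the generated code; the helper name and the
# use of unsigned __int128 are assumptions of the sketch, not a BoringSSL
# API (it would need <stdint.h>, <string.h>):
#
#   // rp[] = ap[]*bp[]*R^-1 mod np[], R = 2^(64*num),
#   // n0 = -np[0]^-1 mod 2^64.
#   static void mont_mul_sketch(uint64_t *rp, const uint64_t *ap,
#                               const uint64_t *bp, const uint64_t *np,
#                               uint64_t n0, size_t num) {
#     uint64_t tp[num + 1];                       // the stack vector tp[] below
#     memset(tp, 0, sizeof(tp));
#     for (size_t i = 0; i < num; i++) {
#       uint64_t m = (tp[0] + ap[0] * bp[i]) * n0;  // makes word 0 vanish
#       unsigned __int128 ca = 0, cn = 0;           // two separate carry chains
#       for (size_t j = 0; j < num; j++) {
#         ca += (unsigned __int128)ap[j] * bp[i] + tp[j];
#         cn += (unsigned __int128)np[j] * m + (uint64_t)ca;
#         if (j) tp[j - 1] = (uint64_t)cn;          // word 0 is discarded
#         ca >>= 64;
#         cn >>= 64;
#       }
#       unsigned __int128 t = ca + cn + tp[num];    // fold the carries
#       tp[num - 1] = (uint64_t)t;
#       tp[num] = (uint64_t)(t >> 64);              // upmost overflow bit
#     }
#     // rp[] = tp[] - np[]; keep it only if it did not borrow. The assembly
#     // below does this copy-back branchlessly with a masked select.
#     unsigned __int128 b = 0;
#     for (size_t j = 0; j < num; j++) {
#       b = (unsigned __int128)tp[j] - np[j] - b;
#       rp[j] = (uint64_t)b;
#       b = (b >> 64) & 1;
#     }
#     if (tp[num] < b)                              // tp < np: undo subtraction
#       memcpy(rp, tp, num * sizeof(uint64_t));
#   }
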
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so the check isn't useful here.
$addx = 1;

# int bn_mul_mont_nohw(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
# TODO(davidben): The code below treats $num as an int, but C passes in a
# size_t.
$num="%r9";	# size_t num);
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";

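# Throughout bn_mul_mont_nohw the scratch vector lives on the stack: tp[0]
# through tp[num-1] hold the running Montgomery intermediate, tp[num] the
# upmost overflow word, and tp[num+1] the caller's original %rsp, which the
# epilogue (and the Win64 SEH handler) reads back when unwinding.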
$code=<<___;
.text

.globl	bn_mul_mont_nohw
.type	bn_mul_mont_nohw,\@function,6
.align	16
bn_mul_mont_nohw:
.cfi_startproc
	_CET_ENDBR
	mov	${num}d,${num}d
	mov	%rsp,%rax
.cfi_def_cfa_register	%rax
	push	%rbx
.cfi_push	%rbx
	push	%rbp
.cfi_push	%rbp
	push	%r12
.cfi_push	%r12
	push	%r13
.cfi_push	%r13
	push	%r14
.cfi_push	%r14
	push	%r15
.cfi_push	%r15

	neg	$num
	mov	%rsp,%r11
	lea	-16(%rsp,$num,8),%r10	# future alloca(8*(num+2))
	neg	$num			# restore $num
	and	\$-1024,%r10		# minimize TLB usage

	# An OS-agnostic version of __chkstk.
	#
	# Some OSes (Windows) insist on the stack being "wired" to
	# physical memory in a strictly sequential manner, i.e. if a stack
	# allocation spans two pages, then a reference to the farthest one
	# can be punished with SEGV. But page walking does good even on
	# other OSes, because it guarantees that a villain thread hits
	# the guard page before it can do damage to an innocent one...
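	#
	# Roughly (a sketch only, not generated code):
	#	probe(%rsp);
	#	while (%rsp > %r10) { %rsp -= 4096; probe(%rsp); }
	# so %rsp ends up at %r10, the new frame bottom, with every
	# intervening page touched in order.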
	sub	%r10,%r11
	and	\$-4096,%r11
	lea	(%r10,%r11),%rsp
	mov	(%rsp),%r11
	cmp	%r10,%rsp
	ja	.Lmul_page_walk
	jmp	.Lmul_page_walk_done

.align	16
.Lmul_page_walk:
	lea	-4096(%rsp),%rsp
	mov	(%rsp),%r11
	cmp	%r10,%rsp
	ja	.Lmul_page_walk
.Lmul_page_walk_done:

	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
.cfi_cfa_expression	%rsp+8,$num,8,mul,plus,deref,+8
.Lmul_body:
	mov	$bp,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$lo0
	mov	($np),%rax

	imulq	$lo0,$m1		# "tp[0]"*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.L1st_enter

.align	16
.L1st:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$lo0,$hi0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.L1st_enter:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	lea	1($j),$j		# j++
	mov	%rdx,$lo0

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.L1st

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1
	mov	$lo0,$hi0

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	jmp	.Louter
.align	16
.Louter:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	$n0,$m1
	mov	(%rsp),$lo0
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$lo0,$m1		# tp[0]*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	8(%rsp),$lo0		# tp[1]
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.Linner_enter

.align	16
.Linner:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.Linner_enter:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	mov	%rdx,$hi0
	adc	\$0,$hi0
	lea	1($j),$j		# j++

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.Linner

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	add	$lo0,$hi1		# pull upmost overflow bit
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	cmp	$num,$i
	jb	.Louter

	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	mov	$num,$j			# j=num

.align	16
.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8(%rsp,$i,8),%rax	# tp[i+1]
	lea	1($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub

	sbb	\$0,%rax		# handle upmost overflow bit
	mov	\$-1,%rbx
	xor	%rax,%rbx		# not %rax
	xor	$i,$i
	mov	$num,$j			# j=num

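	# At this point %rax is 0 if the subtraction above did not borrow
	# (tp >= np, so rp[] already holds the reduced value) and all ones
	# if it did; %rbx is its complement. The masked OR below therefore
	# selects between rp[] and tp[] without a branch.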
.Lcopy:					# conditional copy
	mov	($rp,$i,8),%rcx
	mov	(%rsp,$i,8),%rdx
	and	%rbx,%rcx
	and	%rax,%rdx
	mov	$num,(%rsp,$i,8)	# zap temporary vector
	or	%rcx,%rdx
	mov	%rdx,($rp,$i,8)		# rp[i]=tp[i]
	lea	1($i),$i
	sub	\$1,$j
	jnz	.Lcopy

	mov	8(%rsp,$num,8),%rsi	# restore %rsp
.cfi_def_cfa	%rsi,8
	mov	\$1,%rax
	mov	-48(%rsi),%r15
.cfi_restore	%r15
	mov	-40(%rsi),%r14
.cfi_restore	%r14
	mov	-32(%rsi),%r13
.cfi_restore	%r13
	mov	-24(%rsi),%r12
.cfi_restore	%r12
	mov	-16(%rsi),%rbp
.cfi_restore	%rbp
	mov	-8(%rsi),%rbx
.cfi_restore	%rbx
	lea	(%rsi),%rsp
.cfi_def_cfa_register	%rsp
.Lmul_epilogue:
	ret
.cfi_endproc
.size	bn_mul_mont_nohw,.-bn_mul_mont_nohw
___
{{{
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
$code.=<<___;
.globl	bn_mul4x_mont
.type	bn_mul4x_mont,\@function,6
.align	16
bn_mul4x_mont:
.cfi_startproc
	_CET_ENDBR
	mov	${num}d,${num}d
	mov	%rsp,%rax
.cfi_def_cfa_register	%rax
	push	%rbx
.cfi_push	%rbx
	push	%rbp
.cfi_push	%rbp
	push	%r12
.cfi_push	%r12
	push	%r13
.cfi_push	%r13
	push	%r14
.cfi_push	%r14
	push	%r15
.cfi_push	%r15

	neg	$num
	mov	%rsp,%r11
	lea	-32(%rsp,$num,8),%r10	# future alloca(8*(num+4))
	neg	$num			# restore
	and	\$-1024,%r10		# minimize TLB usage

	sub	%r10,%r11
	and	\$-4096,%r11
	lea	(%r10,%r11),%rsp
	mov	(%rsp),%r11
	cmp	%r10,%rsp
	ja	.Lmul4x_page_walk
	jmp	.Lmul4x_page_walk_done

.Lmul4x_page_walk:
	lea	-4096(%rsp),%rsp
	mov	(%rsp),%r11
	cmp	%r10,%rsp
	ja	.Lmul4x_page_walk
.Lmul4x_page_walk_done:

	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
.cfi_cfa_expression	%rsp+8,$num,8,mul,plus,deref,+8
.Lmul4x_body:
	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
	mov	%rdx,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$A[0]
	mov	($np),%rax

	imulq	$A[0],$m1		# "tp[0]"*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	lea	4($j),$j		# j++
	adc	\$0,%rdx
	mov	$N[1],(%rsp)
	mov	%rdx,$N[0]
	jmp	.L1st4x
.align	16
.L1st4x:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	lea	4($j),$j		# j++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.L1st4x

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
.align	4
.Louter4x:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	(%rsp),$A[0]
	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$A[0],$m1		# tp[0]*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# "$N[0]", discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	add	8(%rsp),$A[1]		# +tp[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	lea	4($j),$j		# j+=2
	adc	\$0,%rdx
	mov	$N[1],(%rsp)		# tp[j-1]
	mov	%rdx,$N[0]
	jmp	.Linner4x
.align	16
.Linner4x:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	add	8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	4($j),$j		# j++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.Linner4x

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	1($i),$i		# i++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	cmp	$num,$i
	jb	.Louter4x
___
{
my @ri=("%rax","%rdx",$m0,$m1);
$code.=<<___;
	mov	16(%rsp,$num,8),$rp	# restore $rp
	lea	-4($num),$j
	mov	0(%rsp),@ri[0]		# tp[0]
	mov	8(%rsp),@ri[1]		# tp[1]
	shr	\$2,$j			# j=num/4-1
	lea	(%rsp),$ap		# borrow ap for tp
	xor	$i,$i			# i=0 and clear CF!

	sub	0($np),@ri[0]
	mov	16($ap),@ri[2]		# tp[2]
	mov	24($ap),@ri[3]		# tp[3]
	sbb	8($np),@ri[1]

.Lsub4x:
	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($np,$i,8),@ri[2]
	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
	mov	40($ap,$i,8),@ri[1]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($np,$i,8),@ri[0]
	mov	48($ap,$i,8),@ri[2]
	mov	56($ap,$i,8),@ri[3]
	sbb	40($np,$i,8),@ri[1]
	lea	4($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub4x

	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($ap,$i,8),@ri[0]	# load overflow bit
	sbb	16($np,$i,8),@ri[2]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	pxor	%xmm0,%xmm0
	movq	@ri[0],%xmm4
	pcmpeqd	%xmm5,%xmm5
	pshufd	\$0,%xmm4,%xmm4
	mov	$num,$j
	pxor	%xmm4,%xmm5
	shr	\$2,$j			# j=num/4
	xor	%eax,%eax		# i=0

	jmp	.Lcopy4x
.align	16
.Lcopy4x:				# conditional copy
	movdqa	(%rsp,%rax),%xmm1
	movdqu	($rp,%rax),%xmm2
	pand	%xmm4,%xmm1
	pand	%xmm5,%xmm2
	movdqa	16(%rsp,%rax),%xmm3
	movdqa	%xmm0,(%rsp,%rax)
	por	%xmm2,%xmm1
	movdqu	16($rp,%rax),%xmm2
	movdqu	%xmm1,($rp,%rax)
	pand	%xmm4,%xmm3
	pand	%xmm5,%xmm2
	movdqa	%xmm0,16(%rsp,%rax)
	por	%xmm2,%xmm3
	movdqu	%xmm3,16($rp,%rax)
	lea	32(%rax),%rax
	dec	$j
	jnz	.Lcopy4x
___
}
$code.=<<___;
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
.cfi_def_cfa	%rsi, 8
	mov	\$1,%rax
	mov	-48(%rsi),%r15
.cfi_restore	%r15
	mov	-40(%rsi),%r14
.cfi_restore	%r14
	mov	-32(%rsi),%r13
.cfi_restore	%r13
	mov	-24(%rsi),%r12
.cfi_restore	%r12
	mov	-16(%rsi),%rbp
.cfi_restore	%rbp
	mov	-8(%rsi),%rbx
.cfi_restore	%rbx
	lea	(%rsi),%rsp
.cfi_def_cfa_register	%rsp
.Lmul4x_epilogue:
	ret
.cfi_endproc
.size	bn_mul4x_mont,.-bn_mul4x_mont
___
}}}
{{{
######################################################################
# int bn_sqr8x_mont(
my $rptr="%rdi";	# const BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $mulx_adx_capable="%rdx"; # Different from upstream!
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

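# The third argument here is a capability flag rather than a b[] pointer:
# a non-zero $mulx_adx_capable routes the squaring to bn_sqrx8x_internal
# (the MULX/ADCX/ADOX path below), while zero falls back to
# bn_sqr8x_internal.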
$code.=<<___	if ($addx);
.extern	bn_sqrx8x_internal		# see x86_64-mont5 module
___
$code.=<<___;
.extern	bn_sqr8x_internal		# see x86_64-mont5 module

.globl	bn_sqr8x_mont
.type	bn_sqr8x_mont,\@function,6
.align	32
bn_sqr8x_mont:
.cfi_startproc
	_CET_ENDBR
	mov	${num}d,${num}d
	mov	%rsp,%rax
.cfi_def_cfa_register	%rax
	push	%rbx
.cfi_push	%rbx
	push	%rbp
.cfi_push	%rbp
	push	%r12
.cfi_push	%r12
	push	%r13
.cfi_push	%r13
	push	%r14
.cfi_push	%r14
	push	%r15
.cfi_push	%r15
.Lsqr8x_prologue:

	mov	${num}d,%r10d
	shl	\$3,${num}d		# convert $num to bytes
	shl	\$3+2,%r10		# 4*$num
	neg	$num

	##############################################################
	# ensure that the stack frame doesn't alias with $aptr modulo
	# 4096. This is done to allow the memory disambiguation logic
	# to do its job.
	#
	lea	-64(%rsp,$num,2),%r11
	mov	%rsp,%rbp
	mov	($n0),$n0		# *n0
	sub	$aptr,%r11
	and	\$4095,%r11
	cmp	%r11,%r10
	jb	.Lsqr8x_sp_alt
	sub	%r11,%rbp		# align with $aptr
	lea	-64(%rbp,$num,2),%rbp	# future alloca(frame+2*$num)
	jmp	.Lsqr8x_sp_done

.align	32
.Lsqr8x_sp_alt:
	lea	4096-64(,$num,2),%r10	# 4096-frame-2*$num
	lea	-64(%rbp,$num,2),%rbp	# future alloca(frame+2*$num)
	sub	%r10,%r11
	mov	\$0,%r10
	cmovc	%r10,%r11
	sub	%r11,%rbp
.Lsqr8x_sp_done:
	and	\$-64,%rbp
	mov	%rsp,%r11
	sub	%rbp,%r11
	and	\$-4096,%r11
	lea	(%rbp,%r11),%rsp
	mov	(%rsp),%r10
	cmp	%rbp,%rsp
	ja	.Lsqr8x_page_walk
	jmp	.Lsqr8x_page_walk_done

.align	16
.Lsqr8x_page_walk:
	lea	-4096(%rsp),%rsp
	mov	(%rsp),%r10
	cmp	%rbp,%rsp
	ja	.Lsqr8x_page_walk
.Lsqr8x_page_walk_done:

	mov	$num,%r10
	neg	$num

	mov	$n0,  32(%rsp)
	mov	%rax, 40(%rsp)		# save original %rsp
.cfi_cfa_expression	%rsp+40,deref,+8
.Lsqr8x_body:

	movq	$nptr, %xmm2		# save pointer to modulus
	pxor	%xmm0,%xmm0
	movq	$rptr,%xmm1		# save $rptr
	movq	%r10, %xmm3		# -$num
___
$code.=<<___ if ($addx);
	test	$mulx_adx_capable,$mulx_adx_capable
	jz	.Lsqr8x_nox

	call	bn_sqrx8x_internal	# see x86_64-mont5 module
					# %rax	top-most carry
					# %rbp	nptr
					# %rcx	-8*num
					# %r8	end of tp[2*num]
	lea	(%r8,%rcx),%rbx
	mov	%rcx,$num
	mov	%rcx,%rdx
	movq	%xmm1,$rptr
	sar	\$3+2,%rcx		# %cf=0
	jmp	.Lsqr8x_sub

.align	32
.Lsqr8x_nox:
___
$code.=<<___;
	call	bn_sqr8x_internal	# see x86_64-mont5 module
					# %rax	top-most carry
					# %rbp	nptr
					# %r8	-8*num
					# %rdi	end of tp[2*num]
	lea	(%rdi,$num),%rbx
	mov	$num,%rcx
	mov	$num,%rdx
	movq	%xmm1,$rptr
	sar	\$3+2,%rcx		# %cf=0
	jmp	.Lsqr8x_sub

.align	32
.Lsqr8x_sub:
	mov	8*0(%rbx),%r12
	mov	8*1(%rbx),%r13
	mov	8*2(%rbx),%r14
	mov	8*3(%rbx),%r15
	lea	8*4(%rbx),%rbx
	sbb	8*0(%rbp),%r12
	sbb	8*1(%rbp),%r13
	sbb	8*2(%rbp),%r14
	sbb	8*3(%rbp),%r15
	lea	8*4(%rbp),%rbp
	mov	%r12,8*0($rptr)
	mov	%r13,8*1($rptr)
	mov	%r14,8*2($rptr)
	mov	%r15,8*3($rptr)
	lea	8*4($rptr),$rptr
	inc	%rcx			# preserves %cf
	jnz	.Lsqr8x_sub

	sbb	\$0,%rax		# top-most carry
	lea	(%rbx,$num),%rbx	# rewind
	lea	($rptr,$num),$rptr	# rewind

	movq	%rax,%xmm1
	pxor	%xmm0,%xmm0
	pshufd	\$0,%xmm1,%xmm1
	mov	40(%rsp),%rsi		# restore %rsp
.cfi_def_cfa	%rsi,8
	jmp	.Lsqr8x_cond_copy

.align	32
.Lsqr8x_cond_copy:
	movdqa	16*0(%rbx),%xmm2
	movdqa	16*1(%rbx),%xmm3
	lea	16*2(%rbx),%rbx
	movdqu	16*0($rptr),%xmm4
	movdqu	16*1($rptr),%xmm5
	lea	16*2($rptr),$rptr
	movdqa	%xmm0,-16*2(%rbx)	# zero tp
	movdqa	%xmm0,-16*1(%rbx)
	movdqa	%xmm0,-16*2(%rbx,%rdx)
	movdqa	%xmm0,-16*1(%rbx,%rdx)
	pcmpeqd	%xmm1,%xmm0
	pand	%xmm1,%xmm2
	pand	%xmm1,%xmm3
	pand	%xmm0,%xmm4
	pand	%xmm0,%xmm5
	pxor	%xmm0,%xmm0
	por	%xmm2,%xmm4
	por	%xmm3,%xmm5
	movdqu	%xmm4,-16*2($rptr)
	movdqu	%xmm5,-16*1($rptr)
	add	\$32,$num
	jnz	.Lsqr8x_cond_copy

	mov	\$1,%rax
	mov	-48(%rsi),%r15
.cfi_restore	%r15
	mov	-40(%rsi),%r14
.cfi_restore	%r14
	mov	-32(%rsi),%r13
.cfi_restore	%r13
	mov	-24(%rsi),%r12
.cfi_restore	%r12
	mov	-16(%rsi),%rbp
.cfi_restore	%rbp
	mov	-8(%rsi),%rbx
.cfi_restore	%rbx
	lea	(%rsi),%rsp
.cfi_def_cfa_register	%rsp
.Lsqr8x_epilogue:
	ret
.cfi_endproc
.size	bn_sqr8x_mont,.-bn_sqr8x_mont
___
}}}

if ($addx) {{{
my $bp="%rdx";	# original value

$code.=<<___;
.globl	bn_mulx4x_mont
.type	bn_mulx4x_mont,\@function,6
.align	32
bn_mulx4x_mont:
.cfi_startproc
	_CET_ENDBR
	mov	%rsp,%rax
.cfi_def_cfa_register	%rax
	push	%rbx
.cfi_push	%rbx
	push	%rbp
.cfi_push	%rbp
	push	%r12
.cfi_push	%r12
	push	%r13
.cfi_push	%r13
	push	%r14
.cfi_push	%r14
	push	%r15
.cfi_push	%r15
.Lmulx4x_prologue:

	shl	\$3,${num}d		# convert $num to bytes
	xor	%r10,%r10
	sub	$num,%r10		# -$num
	mov	($n0),$n0		# *n0
	lea	-72(%rsp,%r10),%rbp	# future alloca(frame+$num+8)
	and	\$-128,%rbp
	mov	%rsp,%r11
	sub	%rbp,%r11
	and	\$-4096,%r11
	lea	(%rbp,%r11),%rsp
	mov	(%rsp),%r10
	cmp	%rbp,%rsp
	ja	.Lmulx4x_page_walk
	jmp	.Lmulx4x_page_walk_done

.align	16
.Lmulx4x_page_walk:
	lea	-4096(%rsp),%rsp
	mov	(%rsp),%r10
	cmp	%rbp,%rsp
	ja	.Lmulx4x_page_walk
.Lmulx4x_page_walk_done:

	lea	($bp,$num),%r10
	##############################################################
	# Stack layout
	# +0	num
	# +8	off-loaded &b[i]
	# +16	end of b[num]
	# +24	saved n0
	# +32	saved rp
	# +40	saved %rsp
	# +48	inner counter
	# +56
	# +64	tmp[num+1]
	#
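	# These slots are filled in just below: +0 gets $num (in bytes), +48
	# the inner-loop trip count (one less than the number of 4-limb
	# groups), and +16 the address just past b[num-1], which the outer
	# loop compares against to know when all of b[] has been consumed.
	#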
	mov	$num,0(%rsp)		# save $num
	shr	\$5,$num
	mov	%r10,16(%rsp)		# end of b[num]
	sub	\$1,$num
	mov	$n0, 24(%rsp)		# save *n0
	mov	$rp, 32(%rsp)		# save $rp
	mov	%rax,40(%rsp)		# save original %rsp
.cfi_cfa_expression	%rsp+40,deref,+8
	mov	$num,48(%rsp)		# inner counter
	jmp	.Lmulx4x_body

.align	32
.Lmulx4x_body:
___
my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
my $rptr=$bptr;
$code.=<<___;
	lea	8($bp),$bptr
	mov	($bp),%rdx		# b[0], $bp==%rdx actually
	lea	64+32(%rsp),$tptr
	mov	%rdx,$bi

	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
	mulx	1*8($aptr),%r11,%r14	# a[1]*b[0]
	add	%rax,%r11
	mov	$bptr,8(%rsp)		# off-load &b[i]
	mulx	2*8($aptr),%r12,%r13	# ...
	adc	%r14,%r12
	adc	\$0,%r13

	mov	$mi,$bptr		# borrow $bptr
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	 mov	$mi,%rdx
	lea	4*8($aptr),$aptr
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,$bptr		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00	# mulx	2*8($nptr),%rax,%r12
	mov	48(%rsp),$bptr		# counter value
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	lea	4*8($nptr),$nptr
	mov	%r12,-2*8($tptr)

	jmp	.Lmulx4x_1st

.align	32
.Lmulx4x_1st:
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
	adcx	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
	adcx	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	 .byte	0x67,0x67
	 mov	$mi,%rdx
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	mov	%r11,-4*8($tptr)
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_1st

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	add	%r15,%r14
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)
	jmp	.Lmulx4x_outer

.align	32
.Lmulx4x_outer:
	mov	($bptr),%rdx		# b[i]
	lea	8($bptr),$bptr		# b++
	sub	$num,$aptr		# rewind $aptr
	mov	%r15,($tptr)		# save top-most carry
	lea	64+4*8(%rsp),$tptr
	sub	$num,$nptr		# rewind $nptr

	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0
	mov	%rdx,$bi
	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
	adox	-4*8($tptr),$mi
	adcx	%r14,%r11
	mulx	2*8($aptr),%r15,%r13	# ...
	adox	-3*8($tptr),%r11
	adcx	%r15,%r12
	adox	-2*8($tptr),%r12
	adcx	$zero,%r13
	adox	$zero,%r13

	mov	$bptr,8(%rsp)		# off-load &b[i]
	mov	$mi,%r15
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	 mov	$mi,%rdx
	adcx	%rax,%r13
	adox	-1*8($tptr),%r13
	adcx	$zero,%r14
	lea	4*8($aptr),$aptr
	adox	$zero,%r14

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	mulx	2*8($nptr),%rax,%r12
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	lea	4*8($nptr),$nptr
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	mov	48(%rsp),$bptr		# counter value
	mov	%r12,-2*8($tptr)

	jmp	.Lmulx4x_inner

.align	32
.Lmulx4x_inner:
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	adox	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
	adcx	0*8($tptr),%r10
	adox	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	1*8($tptr),%r11
	adox	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	 mov	$mi,%rdx
	adcx	2*8($tptr),%r12
	adox	%rax,%r13
	adcx	3*8($tptr),%r13
	adox	$zero,%r14		# of=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adcx	$zero,%r14		# cf=0

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r11,-4*8($tptr)
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_inner

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	sub	0*8($tptr),$zero	# pull top-most carry
	adc	%r15,%r14
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)

	cmp	16(%rsp),$bptr
	jne	.Lmulx4x_outer

	lea	64(%rsp),$tptr
	sub	$num,$nptr		# rewind $nptr
	neg	%r15
	mov	$num,%rdx
	shr	\$3+2,$num		# %cf=0
	mov	32(%rsp),$rptr		# restore rp
	jmp	.Lmulx4x_sub

.align	32
.Lmulx4x_sub:
	mov	8*0($tptr),%r11
	mov	8*1($tptr),%r12
	mov	8*2($tptr),%r13
	mov	8*3($tptr),%r14
	lea	8*4($tptr),$tptr
	sbb	8*0($nptr),%r11
	sbb	8*1($nptr),%r12
	sbb	8*2($nptr),%r13
	sbb	8*3($nptr),%r14
	lea	8*4($nptr),$nptr
	mov	%r11,8*0($rptr)
	mov	%r12,8*1($rptr)
	mov	%r13,8*2($rptr)
	mov	%r14,8*3($rptr)
	lea	8*4($rptr),$rptr
	dec	$num			# preserves %cf
	jnz	.Lmulx4x_sub

	sbb	\$0,%r15		# top-most carry
	lea	64(%rsp),$tptr
	sub	%rdx,$rptr		# rewind

	movq	%r15,%xmm1
	pxor	%xmm0,%xmm0
	pshufd	\$0,%xmm1,%xmm1
	mov	40(%rsp),%rsi		# restore %rsp
.cfi_def_cfa	%rsi,8
	jmp	.Lmulx4x_cond_copy

.align	32
.Lmulx4x_cond_copy:
	movdqa	16*0($tptr),%xmm2
	movdqa	16*1($tptr),%xmm3
	lea	16*2($tptr),$tptr
	movdqu	16*0($rptr),%xmm4
	movdqu	16*1($rptr),%xmm5
	lea	16*2($rptr),$rptr
	movdqa	%xmm0,-16*2($tptr)	# zero tp
	movdqa	%xmm0,-16*1($tptr)
	pcmpeqd	%xmm1,%xmm0
	pand	%xmm1,%xmm2
	pand	%xmm1,%xmm3
	pand	%xmm0,%xmm4
	pand	%xmm0,%xmm5
	pxor	%xmm0,%xmm0
	por	%xmm2,%xmm4
	por	%xmm3,%xmm5
	movdqu	%xmm4,-16*2($rptr)
	movdqu	%xmm5,-16*1($rptr)
	sub	\$32,%rdx
	jnz	.Lmulx4x_cond_copy

	mov	%rdx,($tptr)

	mov	\$1,%rax
	mov	-48(%rsi),%r15
.cfi_restore	%r15
	mov	-40(%rsi),%r14
.cfi_restore	%r14
	mov	-32(%rsi),%r13
.cfi_restore	%r13
	mov	-24(%rsi),%r12
.cfi_restore	%r12
	mov	-16(%rsi),%rbp
.cfi_restore	%rbp
	mov	-8(%rsi),%rbx
.cfi_restore	%rbx
	lea	(%rsi),%rsp
.cfi_def_cfa_register	%rsp
.Lmulx4x_epilogue:
	ret
.cfi_endproc
.size	bn_mulx4x_mont,.-bn_mulx4x_mont
___
}}}
$code.=<<___;
.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
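# The handlers below only need to recover the caller's stack pointer: if the
# fault landed between the prologue and epilogue labels listed in
# HandlerData[], they reload it either from tp[num+1] (mul_handler) or from
# the fixed 40(%rsp) slot (sqr_handler), restore the non-volatile registers
# the prologue pushed, and hand the rest to RtlVirtualUnwind.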
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
.align	16
mul_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer

	jmp	.Lcommon_pop_regs
.size	mul_handler,.-mul_handler

.type	sqr_handler,\@abi-omnipotent
.align	16
sqr_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<.Lsqr_prologue
	jb	.Lcommon_seh_tail

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# body label
	cmp	%r10,%rbx		# context->Rip<.Lsqr_body
	jb	.Lcommon_pop_regs

	mov	152($context),%rax	# pull context->Rsp

	mov	8(%r11),%r10d		# HandlerData[2]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lsqr_epilogue
	jae	.Lcommon_seh_tail

	mov	40(%rax),%rax		# pull saved stack pointer

.Lcommon_pop_regs:
	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	sqr_handler,.-sqr_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_bn_mul_mont_nohw
	.rva	.LSEH_end_bn_mul_mont_nohw
	.rva	.LSEH_info_bn_mul_mont_nohw

	.rva	.LSEH_begin_bn_mul4x_mont
	.rva	.LSEH_end_bn_mul4x_mont
	.rva	.LSEH_info_bn_mul4x_mont

	.rva	.LSEH_begin_bn_sqr8x_mont
	.rva	.LSEH_end_bn_sqr8x_mont
	.rva	.LSEH_info_bn_sqr8x_mont
___
$code.=<<___ if ($addx);
	.rva	.LSEH_begin_bn_mulx4x_mont
	.rva	.LSEH_end_bn_mulx4x_mont
	.rva	.LSEH_info_bn_mulx4x_mont
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_bn_mul_mont_nohw:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
.LSEH_info_bn_mul4x_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqr8x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lsqr8x_prologue,.Lsqr8x_body,.Lsqr8x_epilogue		# HandlerData[]
.align	8
___
$code.=<<___ if ($addx);
.LSEH_info_bn_mulx4x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lmulx4x_prologue,.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]
.align	8
___
}

print $code;
close STDOUT or die "error closing STDOUT: $!";
