1#! /usr/bin/env perl
2# Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the OpenSSL license (the "License").  You may not use
5# this file except in compliance with the License.  You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
9
10# ====================================================================
11# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12# project. The module is, however, dual licensed under OpenSSL and
13# CRYPTOGAMS licenses depending on where you obtain it. For further
14# details see http://www.openssl.org/~appro/cryptogams/.
15# ====================================================================
16
17# August 2011.
18#
# Companion to x86_64-mont.pl that optimizes cache-timing attack
# countermeasures. The subroutines are produced by replacing bp[i]
# references in their x86_64-mont.pl counterparts with cache-neutral
# references to the powers table computed in BN_mod_exp_mont_consttime.
# In addition, a subroutine that scatters elements of the powers table
# is implemented, so that scattering/gathering can be tuned without
# modifying bn_exp.c.
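#
# As a rough illustration (a sketch under the layout described for the
# gather5 entry points below, not code from this file): the powers table
# can be viewed as 2^5 interleaved columns, one per window value, so that
# word j of power idx lives at table[32*j + idx]. Scattering a power into
# that layout then looks like (function and variable names are illustrative):
#
#	void scatter5(BN_ULONG *table, const BN_ULONG *b, int num, int power) {
#		for (int j = 0; j < num; j++)
#			table[32 * j + power] = b[j];	// column "power", row j
#	}
#
# The matching gather is done without any idx-dependent addressing; a C
# sketch of that constant-time selection accompanies the mask code below.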
26
27# August 2013.
28#
# Add MULX/AD*X code paths and additional interfaces to optimize for
# the branch prediction unit. For input lengths that are multiples of 8,
# the np argument is not just the modulus value, but one interleaved
# with 0. This is to optimize the post-condition...
33
34$flavour = shift;
35$output  = shift;
36if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
37
38$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
39
40$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
41( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
42( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
43die "can't locate x86_64-xlate.pl";
44
45open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
46*STDOUT=*OUT;
47
48# In upstream, this is controlled by shelling out to the compiler to check
49# versions, but BoringSSL is intended to be used with pre-generated perlasm
50# output, so this isn't useful anyway.
51$addx = 1;
52
53# int bn_mul_mont_gather5_nohw(
54$rp="%rdi";	# BN_ULONG *rp,
55$ap="%rsi";	# const BN_ULONG *ap,
56$bp="%rdx";	# const BN_ULONG *bp,
57$np="%rcx";	# const BN_ULONG *np,
58$n0="%r8";	# const BN_ULONG *n0,
59$num="%r9";	# int num,
		# int idx);	# 0 to 2^5-1, "index" into $bp, which holds
				# pre-computed powers of a', interlaced
				# in such a manner that b[0] is $bp[idx],
				# b[1] is $bp[2^5+idx], etc.
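#
# Putting the comments above together, the prototype being implemented is,
# in effect (a sketch reconstructed from this file, not copied from a header):
#
#	int bn_mul_mont_gather5_nohw(BN_ULONG *rp, const BN_ULONG *ap,
#	                             const BN_ULONG *bp, const BN_ULONG *np,
#	                             const BN_ULONG *n0, int num, int idx);
#
# i.e. rp = ap * b / 2^(64*num) mod np, where b is the power selected from
# the table in bp by idx using the interlaced layout described above.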
64$lo0="%r10";
65$hi0="%r11";
66$hi1="%r13";
67$i="%r14";
68$j="%r15";
69$m0="%rbx";
70$m1="%rbp";
71
72$code=<<___;
73.text
74
75.globl	bn_mul_mont_gather5_nohw
76.type	bn_mul_mont_gather5_nohw,\@function,6
77.align	64
78bn_mul_mont_gather5_nohw:
79.cfi_startproc
80	_CET_ENDBR
81	# num is declared as an int, a 32-bit parameter, so the upper half is
82	# undefined. Zero the upper half to normalize it.
83	mov	${num}d,${num}d
84	mov	%rsp,%rax
85.cfi_def_cfa_register	%rax
86	movd	`($win64?56:8)`(%rsp),%xmm5	# load 7th argument
87	push	%rbx
88.cfi_push	%rbx
89	push	%rbp
90.cfi_push	%rbp
91	push	%r12
92.cfi_push	%r12
93	push	%r13
94.cfi_push	%r13
95	push	%r14
96.cfi_push	%r14
97	push	%r15
98.cfi_push	%r15
99
100	neg	$num
101	mov	%rsp,%r11
102	lea	-280(%rsp,$num,8),%r10	# future alloca(8*(num+2)+256+8)
103	neg	$num			# restore $num
104	and	\$-1024,%r10		# minimize TLB usage
105
106	# An OS-agnostic version of __chkstk.
107	#
	# Some OSes (Windows) insist on the stack being "wired" to
	# physical memory in a strictly sequential manner, i.e. if a stack
	# allocation spans two pages, then a reference to the farther one
	# can be punished with a SEGV. But page walking is also useful on
	# other OSes, because it guarantees that a misbehaving thread hits
	# the guard page before it can damage an innocent one...
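	#
	# Roughly, in C (an illustrative sketch only; old_sp/new_sp are
	# stand-in names, and the real code below works on the stack
	# pointer itself, using the loads as probes):
	#
	#	uintptr_t p = old_sp & ~(uintptr_t)4095;  // highest page below old_sp
	#	while (p > new_sp) {
	#		(void)*(volatile char *)p;	// touch one page at a time
	#		p -= 4096;
	#	}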
114	sub	%r10,%r11
115	and	\$-4096,%r11
116	lea	(%r10,%r11),%rsp
117	mov	(%rsp),%r11
118	cmp	%r10,%rsp
119	ja	.Lmul_page_walk
120	jmp	.Lmul_page_walk_done
121
122.Lmul_page_walk:
123	lea	-4096(%rsp),%rsp
124	mov	(%rsp),%r11
125	cmp	%r10,%rsp
126	ja	.Lmul_page_walk
127.Lmul_page_walk_done:
128
129	lea	.Linc(%rip),%r10
130	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
131.cfi_cfa_expression	%rsp+8,$num,8,mul,plus,deref,+8
132.Lmul_body:
133
134	lea	128($bp),%r12		# reassign $bp (+size optimization)
135___
136		$bp="%r12";
137		$STRIDE=2**5*8;		# 5 is "window size"
138		$N=$STRIDE/4;		# should match cache line size
139$code.=<<___;
140	movdqa	0(%r10),%xmm0		# 00000001000000010000000000000000
141	movdqa	16(%r10),%xmm1		# 00000002000000020000000200000002
142	lea	24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization)
143	and	\$-16,%r10
144
145	pshufd	\$0,%xmm5,%xmm5		# broadcast index
146	movdqa	%xmm1,%xmm4
147	movdqa	%xmm1,%xmm2
148___
149########################################################################
# Calculate masks by comparing 0..31 to the index (broadcast in %xmm5) and
# save the results to the stack.
#
# We compute sixteen 16-byte masks and store them on the stack. Mask i is
# stored at `16*i + 112`(%r10) and contains the comparisons for idx == 2*i and
# idx == 2*i + 1 in its lower and upper halves, respectively. Mask calculations
# are scheduled in groups of four.
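#
# A minimal C sketch of the same select-by-mask idea (assuming 64-bit
# BN_ULONG and the table layout described earlier; names are illustrative):
#
#	void gather5_consttime(BN_ULONG *b, const BN_ULONG *table,
#	                       int num, int idx) {
#		for (int j = 0; j < num; j++) {
#			BN_ULONG acc = 0;
#			for (int i = 0; i < 32; i++) {
#				// all-ones iff i == idx; every entry is read,
#				// so the access pattern never depends on idx
#				BN_ULONG mask = 0 - (BN_ULONG)(i == idx);
#				acc |= table[32 * j + i] & mask;
#			}
#			b[j] = acc;
#		}
#	}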
156$code.=<<___;
157	paddd	%xmm0,%xmm1
158	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
159	.byte	0x67
160	movdqa	%xmm4,%xmm3
161___
162for($k=0;$k<$STRIDE/16-4;$k+=4) {
163$code.=<<___;
164	paddd	%xmm1,%xmm2
165	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
166	movdqa	%xmm0,`16*($k+0)+112`(%r10)
167	movdqa	%xmm4,%xmm0
168
169	paddd	%xmm2,%xmm3
170	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
171	movdqa	%xmm1,`16*($k+1)+112`(%r10)
172	movdqa	%xmm4,%xmm1
173
174	paddd	%xmm3,%xmm0
175	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
176	movdqa	%xmm2,`16*($k+2)+112`(%r10)
177	movdqa	%xmm4,%xmm2
178
179	paddd	%xmm0,%xmm1
180	pcmpeqd	%xmm5,%xmm0
181	movdqa	%xmm3,`16*($k+3)+112`(%r10)
182	movdqa	%xmm4,%xmm3
183___
184}
185$code.=<<___;				# last iteration can be optimized
186	paddd	%xmm1,%xmm2
187	pcmpeqd	%xmm5,%xmm1
188	movdqa	%xmm0,`16*($k+0)+112`(%r10)
189
190	paddd	%xmm2,%xmm3
191	.byte	0x67
192	pcmpeqd	%xmm5,%xmm2
193	movdqa	%xmm1,`16*($k+1)+112`(%r10)
194
195	pcmpeqd	%xmm5,%xmm3
196	movdqa	%xmm2,`16*($k+2)+112`(%r10)
197	pand	`16*($k+0)-128`($bp),%xmm0	# while it's still in register
198
199	pand	`16*($k+1)-128`($bp),%xmm1
200	pand	`16*($k+2)-128`($bp),%xmm2
201	movdqa	%xmm3,`16*($k+3)+112`(%r10)
202	pand	`16*($k+3)-128`($bp),%xmm3
203	por	%xmm2,%xmm0
204	por	%xmm3,%xmm1
205___
206for($k=0;$k<$STRIDE/16-4;$k+=4) {
207$code.=<<___;
208	movdqa	`16*($k+0)-128`($bp),%xmm4
209	movdqa	`16*($k+1)-128`($bp),%xmm5
210	movdqa	`16*($k+2)-128`($bp),%xmm2
211	pand	`16*($k+0)+112`(%r10),%xmm4
212	movdqa	`16*($k+3)-128`($bp),%xmm3
213	pand	`16*($k+1)+112`(%r10),%xmm5
214	por	%xmm4,%xmm0
215	pand	`16*($k+2)+112`(%r10),%xmm2
216	por	%xmm5,%xmm1
217	pand	`16*($k+3)+112`(%r10),%xmm3
218	por	%xmm2,%xmm0
219	por	%xmm3,%xmm1
220___
221}
222$code.=<<___;
223	por	%xmm1,%xmm0
224	# Combine the upper and lower halves of %xmm0.
225	pshufd	\$0x4e,%xmm0,%xmm1	# Swap upper and lower halves.
226	por	%xmm1,%xmm0
227	lea	$STRIDE($bp),$bp
228	movq	%xmm0,$m0		# m0=bp[0]
229
230	mov	($n0),$n0		# pull n0[0] value
231	mov	($ap),%rax
232
233	xor	$i,$i			# i=0
234	xor	$j,$j			# j=0
235
236	mov	$n0,$m1
237	mulq	$m0			# ap[0]*bp[0]
238	mov	%rax,$lo0
239	mov	($np),%rax
240
241	imulq	$lo0,$m1		# "tp[0]"*n0
242	mov	%rdx,$hi0
243
244	mulq	$m1			# np[0]*m1
245	add	%rax,$lo0		# discarded
246	mov	8($ap),%rax
247	adc	\$0,%rdx
248	mov	%rdx,$hi1
249
250	lea	1($j),$j		# j++
251	jmp	.L1st_enter
252
253.align	16
254.L1st:
255	add	%rax,$hi1
256	mov	($ap,$j,8),%rax
257	adc	\$0,%rdx
258	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
259	mov	$lo0,$hi0
260	adc	\$0,%rdx
261	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
262	mov	%rdx,$hi1
263
264.L1st_enter:
265	mulq	$m0			# ap[j]*bp[0]
266	add	%rax,$hi0
267	mov	($np,$j,8),%rax
268	adc	\$0,%rdx
269	lea	1($j),$j		# j++
270	mov	%rdx,$lo0
271
272	mulq	$m1			# np[j]*m1
273	cmp	$num,$j
274	jne	.L1st			# note that upon exit $j==$num, so
275					# they can be used interchangeably
276
277	add	%rax,$hi1
278	adc	\$0,%rdx
279	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
280	adc	\$0,%rdx
281	mov	$hi1,-16(%rsp,$num,8)	# tp[num-1]
282	mov	%rdx,$hi1
283	mov	$lo0,$hi0
284
285	xor	%rdx,%rdx
286	add	$hi0,$hi1
287	adc	\$0,%rdx
288	mov	$hi1,-8(%rsp,$num,8)
289	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
290
291	lea	1($i),$i		# i++
292	jmp	.Louter
293.align	16
294.Louter:
295	lea	24+128(%rsp,$num,8),%rdx	# where 256-byte mask is (+size optimization)
296	and	\$-16,%rdx
297	pxor	%xmm4,%xmm4
298	pxor	%xmm5,%xmm5
299___
300for($k=0;$k<$STRIDE/16;$k+=4) {
301$code.=<<___;
302	movdqa	`16*($k+0)-128`($bp),%xmm0
303	movdqa	`16*($k+1)-128`($bp),%xmm1
304	movdqa	`16*($k+2)-128`($bp),%xmm2
305	movdqa	`16*($k+3)-128`($bp),%xmm3
306	pand	`16*($k+0)-128`(%rdx),%xmm0
307	pand	`16*($k+1)-128`(%rdx),%xmm1
308	por	%xmm0,%xmm4
309	pand	`16*($k+2)-128`(%rdx),%xmm2
310	por	%xmm1,%xmm5
311	pand	`16*($k+3)-128`(%rdx),%xmm3
312	por	%xmm2,%xmm4
313	por	%xmm3,%xmm5
314___
315}
316$code.=<<___;
317	por	%xmm5,%xmm4
318	# Combine the upper and lower halves of %xmm4 as %xmm0.
319	pshufd	\$0x4e,%xmm4,%xmm0	# Swap upper and lower halves.
320	por	%xmm4,%xmm0
321	lea	$STRIDE($bp),$bp
322
323	mov	($ap),%rax		# ap[0]
324	movq	%xmm0,$m0		# m0=bp[i]
325
326	xor	$j,$j			# j=0
327	mov	$n0,$m1
328	mov	(%rsp),$lo0
329
330	mulq	$m0			# ap[0]*bp[i]
331	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
332	mov	($np),%rax
333	adc	\$0,%rdx
334
335	imulq	$lo0,$m1		# tp[0]*n0
336	mov	%rdx,$hi0
337
338	mulq	$m1			# np[0]*m1
339	add	%rax,$lo0		# discarded
340	mov	8($ap),%rax
341	adc	\$0,%rdx
342	mov	8(%rsp),$lo0		# tp[1]
343	mov	%rdx,$hi1
344
345	lea	1($j),$j		# j++
346	jmp	.Linner_enter
347
348.align	16
349.Linner:
350	add	%rax,$hi1
351	mov	($ap,$j,8),%rax
352	adc	\$0,%rdx
353	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
354	mov	(%rsp,$j,8),$lo0
355	adc	\$0,%rdx
356	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
357	mov	%rdx,$hi1
358
359.Linner_enter:
360	mulq	$m0			# ap[j]*bp[i]
361	add	%rax,$hi0
362	mov	($np,$j,8),%rax
363	adc	\$0,%rdx
364	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
365	mov	%rdx,$hi0
366	adc	\$0,$hi0
367	lea	1($j),$j		# j++
368
369	mulq	$m1			# np[j]*m1
370	cmp	$num,$j
371	jne	.Linner			# note that upon exit $j==$num, so
372					# they can be used interchangeably
373	add	%rax,$hi1
374	adc	\$0,%rdx
375	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
376	mov	(%rsp,$num,8),$lo0
377	adc	\$0,%rdx
378	mov	$hi1,-16(%rsp,$num,8)	# tp[num-1]
379	mov	%rdx,$hi1
380
381	xor	%rdx,%rdx
382	add	$hi0,$hi1
383	adc	\$0,%rdx
384	add	$lo0,$hi1		# pull upmost overflow bit
385	adc	\$0,%rdx
386	mov	$hi1,-8(%rsp,$num,8)
387	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
388
389	lea	1($i),$i		# i++
390	cmp	$num,$i
391	jb	.Louter
392
393	xor	$i,$i			# i=0 and clear CF!
394	mov	(%rsp),%rax		# tp[0]
395	lea	(%rsp),$ap		# borrow ap for tp
396	mov	$num,$j			# j=num
397	jmp	.Lsub
398.align	16
399.Lsub:	sbb	($np,$i,8),%rax
400	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
401	mov	8($ap,$i,8),%rax	# tp[i+1]
402	lea	1($i),$i		# i++
403	dec	$j			# doesn't affect CF!
404	jnz	.Lsub
405
406	sbb	\$0,%rax		# handle upmost overflow bit
407	mov	\$-1,%rbx
408	xor	%rax,%rbx
409	xor	$i,$i
410	mov	$num,$j			# j=num
411
412.Lcopy:					# conditional copy
413	mov	($rp,$i,8),%rcx
414	mov	(%rsp,$i,8),%rdx
415	and	%rbx,%rcx
416	and	%rax,%rdx
417	mov	$i,(%rsp,$i,8)		# zap temporary vector
418	or	%rcx,%rdx
419	mov	%rdx,($rp,$i,8)		# rp[i]=tp[i]
420	lea	1($i),$i
421	sub	\$1,$j
422	jnz	.Lcopy
423
424	mov	8(%rsp,$num,8),%rsi	# restore %rsp
425.cfi_def_cfa	%rsi,8
426	mov	\$1,%rax
427
428	mov	-48(%rsi),%r15
429.cfi_restore	%r15
430	mov	-40(%rsi),%r14
431.cfi_restore	%r14
432	mov	-32(%rsi),%r13
433.cfi_restore	%r13
434	mov	-24(%rsi),%r12
435.cfi_restore	%r12
436	mov	-16(%rsi),%rbp
437.cfi_restore	%rbp
438	mov	-8(%rsi),%rbx
439.cfi_restore	%rbx
440	lea	(%rsi),%rsp
441.cfi_def_cfa_register	%rsp
442.Lmul_epilogue:
443	ret
444.cfi_endproc
445.size	bn_mul_mont_gather5_nohw,.-bn_mul_mont_gather5_nohw
446___
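# For reference, the loops above follow the textbook word-by-word Montgomery
# multiplication. A hedged C sketch of the computation (unsigned __int128 is
# assumed here only to spell out the 64x64->128 products that mulq performs;
# n0 is the single word n0[0] that the code loads from the n0 pointer):
#
#	void bn_mul_mont_ref(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
#	                     const BN_ULONG *np, BN_ULONG n0, int num) {
#		BN_ULONG tp[num + 1];			// tp[num] = overflow word
#		for (int i = 0; i <= num; i++) tp[i] = 0;
#		for (int i = 0; i < num; i++) {
#			// m1 makes the low word of tp + ap*bp[i] + np*m1 vanish,
#			// so the whole value can be shifted down by one word
#			BN_ULONG m1 = (tp[0] + ap[0] * bp[i]) * n0;
#			unsigned __int128 ca = 0, cn = 0;
#			for (int j = 0; j < num; j++) {
#				ca += (unsigned __int128)ap[j] * bp[i] + tp[j];
#				cn += (unsigned __int128)np[j] * m1 + (BN_ULONG)ca;
#				if (j > 0) tp[j - 1] = (BN_ULONG)cn;	// tp[-1]: discarded zero
#				ca >>= 64;
#				cn >>= 64;
#			}
#			cn += ca + tp[num];
#			tp[num - 1] = (BN_ULONG)cn;
#			tp[num] = (BN_ULONG)(cn >> 64);	// upmost overflow bit
#		}
#		// finally subtract np if the result is >= np, in constant time;
#		// a sketch of that step accompanies __bn_post4x_internal below
#	}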
447{{{
448my @A=("%r10","%r11");
449my @N=("%r13","%rdi");
450$code.=<<___;
451.globl	bn_mul4x_mont_gather5
452.type	bn_mul4x_mont_gather5,\@function,6
453.align	32
454bn_mul4x_mont_gather5:
455.cfi_startproc
456	_CET_ENDBR
457	.byte	0x67
458	mov	%rsp,%rax
459.cfi_def_cfa_register	%rax
460	push	%rbx
461.cfi_push	%rbx
462	push	%rbp
463.cfi_push	%rbp
464	push	%r12
465.cfi_push	%r12
466	push	%r13
467.cfi_push	%r13
468	push	%r14
469.cfi_push	%r14
470	push	%r15
471.cfi_push	%r15
472.Lmul4x_prologue:
473
474	.byte	0x67
475	# num is declared as an int, a 32-bit parameter, so the upper half is
476	# undefined. It is important that this write to ${num}, which zeros the
477	# upper half, predates the first access.
478	shl	\$3,${num}d		# convert $num to bytes
479	lea	($num,$num,2),%r10	# 3*$num in bytes
480	neg	$num			# -$num
481
	##############################################################
	# Ensure that the stack frame doesn't alias with $rp+3*$num
	# modulo 4096, which covers ret[num], am[num] and n[num]
	# (see bn_exp.c). This is done to let the CPU's memory
	# disambiguation logic do its magic. [An extra [num] is allocated
	# in order to align with bn_power5's frame, which is cleansed
	# after completing exponentiation. An extra 256 bytes is for the
	# power mask calculated from the 7th argument, the index.]
	#
491	lea	-320(%rsp,$num,2),%r11
492	mov	%rsp,%rbp
493	sub	$rp,%r11
494	and	\$4095,%r11
495	cmp	%r11,%r10
496	jb	.Lmul4xsp_alt
497	sub	%r11,%rbp		# align with $rp
498	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
499	jmp	.Lmul4xsp_done
500
501.align	32
502.Lmul4xsp_alt:
503	lea	4096-320(,$num,2),%r10
504	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
505	sub	%r10,%r11
506	mov	\$0,%r10
507	cmovc	%r10,%r11
508	sub	%r11,%rbp
509.Lmul4xsp_done:
510	and	\$-64,%rbp
511	mov	%rsp,%r11
512	sub	%rbp,%r11
513	and	\$-4096,%r11
514	lea	(%rbp,%r11),%rsp
515	mov	(%rsp),%r10
516	cmp	%rbp,%rsp
517	ja	.Lmul4x_page_walk
518	jmp	.Lmul4x_page_walk_done
519
520.Lmul4x_page_walk:
521	lea	-4096(%rsp),%rsp
522	mov	(%rsp),%r10
523	cmp	%rbp,%rsp
524	ja	.Lmul4x_page_walk
525.Lmul4x_page_walk_done:
526
527	neg	$num
528
529	mov	%rax,40(%rsp)
530.cfi_cfa_expression	%rsp+40,deref,+8
531.Lmul4x_body:
532
533	call	mul4x_internal
534
535	mov	40(%rsp),%rsi		# restore %rsp
536.cfi_def_cfa	%rsi,8
537	mov	\$1,%rax
538
539	mov	-48(%rsi),%r15
540.cfi_restore	%r15
541	mov	-40(%rsi),%r14
542.cfi_restore	%r14
543	mov	-32(%rsi),%r13
544.cfi_restore	%r13
545	mov	-24(%rsi),%r12
546.cfi_restore	%r12
547	mov	-16(%rsi),%rbp
548.cfi_restore	%rbp
549	mov	-8(%rsi),%rbx
550.cfi_restore	%rbx
551	lea	(%rsi),%rsp
552.cfi_def_cfa_register	%rsp
553.Lmul4x_epilogue:
554	ret
555.cfi_endproc
556.size	bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
557
558.type	mul4x_internal,\@abi-omnipotent
559.align	32
560mul4x_internal:
561.cfi_startproc
562	shl	\$5,$num		# $num was in bytes
563	movd	`($win64?56:8)`(%rax),%xmm5	# load 7th argument, index
564	lea	.Linc(%rip),%rax
565	lea	128(%rdx,$num),%r13	# end of powers table (+size optimization)
566	shr	\$5,$num		# restore $num
567___
568		$bp="%r12";
569		$STRIDE=2**5*8;		# 5 is "window size"
570		$tp=$i;
571$code.=<<___;
572	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
573	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
574	lea	88-112(%rsp,$num),%r10	# place the mask after tp[num+1] (+ICache optimization)
575	lea	128(%rdx),$bp		# size optimization
576
577	pshufd	\$0,%xmm5,%xmm5		# broadcast index
578	movdqa	%xmm1,%xmm4
579	.byte	0x67,0x67
580	movdqa	%xmm1,%xmm2
581___
582########################################################################
# Calculate masks by comparing 0..31 to the index (broadcast in %xmm5) and
# save the results to the stack.
#
# We compute sixteen 16-byte masks and store them on the stack. Mask i is
# stored at `16*i + 112`(%r10) and contains the comparisons for idx == 2*i and
# idx == 2*i + 1 in its lower and upper halves, respectively. Mask calculations
# are scheduled in groups of four.
589$code.=<<___;
590	paddd	%xmm0,%xmm1
591	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
592	.byte	0x67
593	movdqa	%xmm4,%xmm3
594___
595for($i=0;$i<$STRIDE/16-4;$i+=4) {
596$code.=<<___;
597	paddd	%xmm1,%xmm2
598	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
599	movdqa	%xmm0,`16*($i+0)+112`(%r10)
600	movdqa	%xmm4,%xmm0
601
602	paddd	%xmm2,%xmm3
603	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
604	movdqa	%xmm1,`16*($i+1)+112`(%r10)
605	movdqa	%xmm4,%xmm1
606
607	paddd	%xmm3,%xmm0
608	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
609	movdqa	%xmm2,`16*($i+2)+112`(%r10)
610	movdqa	%xmm4,%xmm2
611
612	paddd	%xmm0,%xmm1
613	pcmpeqd	%xmm5,%xmm0
614	movdqa	%xmm3,`16*($i+3)+112`(%r10)
615	movdqa	%xmm4,%xmm3
616___
617}
618$code.=<<___;				# last iteration can be optimized
619	paddd	%xmm1,%xmm2
620	pcmpeqd	%xmm5,%xmm1
621	movdqa	%xmm0,`16*($i+0)+112`(%r10)
622
623	paddd	%xmm2,%xmm3
624	.byte	0x67
625	pcmpeqd	%xmm5,%xmm2
626	movdqa	%xmm1,`16*($i+1)+112`(%r10)
627
628	pcmpeqd	%xmm5,%xmm3
629	movdqa	%xmm2,`16*($i+2)+112`(%r10)
630	pand	`16*($i+0)-128`($bp),%xmm0	# while it's still in register
631
632	pand	`16*($i+1)-128`($bp),%xmm1
633	pand	`16*($i+2)-128`($bp),%xmm2
634	movdqa	%xmm3,`16*($i+3)+112`(%r10)
635	pand	`16*($i+3)-128`($bp),%xmm3
636	por	%xmm2,%xmm0
637	por	%xmm3,%xmm1
638___
639for($i=0;$i<$STRIDE/16-4;$i+=4) {
640$code.=<<___;
641	movdqa	`16*($i+0)-128`($bp),%xmm4
642	movdqa	`16*($i+1)-128`($bp),%xmm5
643	movdqa	`16*($i+2)-128`($bp),%xmm2
644	pand	`16*($i+0)+112`(%r10),%xmm4
645	movdqa	`16*($i+3)-128`($bp),%xmm3
646	pand	`16*($i+1)+112`(%r10),%xmm5
647	por	%xmm4,%xmm0
648	pand	`16*($i+2)+112`(%r10),%xmm2
649	por	%xmm5,%xmm1
650	pand	`16*($i+3)+112`(%r10),%xmm3
651	por	%xmm2,%xmm0
652	por	%xmm3,%xmm1
653___
654}
655$code.=<<___;
656	por	%xmm1,%xmm0
657	# Combine the upper and lower halves of %xmm0.
658	pshufd	\$0x4e,%xmm0,%xmm1	# Swap upper and lower halves.
659	por	%xmm1,%xmm0
660	lea	$STRIDE($bp),$bp
661	movq	%xmm0,$m0		# m0=bp[0]
662
663	mov	%r13,16+8(%rsp)		# save end of b[num]
664	mov	$rp, 56+8(%rsp)		# save $rp
665
666	mov	($n0),$n0		# pull n0[0] value
667	mov	($ap),%rax
668	lea	($ap,$num),$ap		# end of a[num]
669	neg	$num
670
671	mov	$n0,$m1
672	mulq	$m0			# ap[0]*bp[0]
673	mov	%rax,$A[0]
674	mov	($np),%rax
675
676	imulq	$A[0],$m1		# "tp[0]"*n0
677	lea	64+8(%rsp),$tp
678	mov	%rdx,$A[1]
679
680	mulq	$m1			# np[0]*m1
681	add	%rax,$A[0]		# discarded
682	mov	8($ap,$num),%rax
683	adc	\$0,%rdx
684	mov	%rdx,$N[1]
685
686	mulq	$m0
687	add	%rax,$A[1]
688	mov	8*1($np),%rax
689	adc	\$0,%rdx
690	mov	%rdx,$A[0]
691
692	mulq	$m1
693	add	%rax,$N[1]
694	mov	16($ap,$num),%rax
695	adc	\$0,%rdx
696	add	$A[1],$N[1]
697	lea	4*8($num),$j		# j=4
698	lea	8*4($np),$np
699	adc	\$0,%rdx
700	mov	$N[1],($tp)
701	mov	%rdx,$N[0]
702	jmp	.L1st4x
703
704.align	32
705.L1st4x:
706	mulq	$m0			# ap[j]*bp[0]
707	add	%rax,$A[0]
708	mov	-8*2($np),%rax
709	lea	32($tp),$tp
710	adc	\$0,%rdx
711	mov	%rdx,$A[1]
712
713	mulq	$m1			# np[j]*m1
714	add	%rax,$N[0]
715	mov	-8($ap,$j),%rax
716	adc	\$0,%rdx
717	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
718	adc	\$0,%rdx
719	mov	$N[0],-24($tp)		# tp[j-1]
720	mov	%rdx,$N[1]
721
722	mulq	$m0			# ap[j]*bp[0]
723	add	%rax,$A[1]
724	mov	-8*1($np),%rax
725	adc	\$0,%rdx
726	mov	%rdx,$A[0]
727
728	mulq	$m1			# np[j]*m1
729	add	%rax,$N[1]
730	mov	($ap,$j),%rax
731	adc	\$0,%rdx
732	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
733	adc	\$0,%rdx
734	mov	$N[1],-16($tp)		# tp[j-1]
735	mov	%rdx,$N[0]
736
737	mulq	$m0			# ap[j]*bp[0]
738	add	%rax,$A[0]
739	mov	8*0($np),%rax
740	adc	\$0,%rdx
741	mov	%rdx,$A[1]
742
743	mulq	$m1			# np[j]*m1
744	add	%rax,$N[0]
745	mov	8($ap,$j),%rax
746	adc	\$0,%rdx
747	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
748	adc	\$0,%rdx
749	mov	$N[0],-8($tp)		# tp[j-1]
750	mov	%rdx,$N[1]
751
752	mulq	$m0			# ap[j]*bp[0]
753	add	%rax,$A[1]
754	mov	8*1($np),%rax
755	adc	\$0,%rdx
756	mov	%rdx,$A[0]
757
758	mulq	$m1			# np[j]*m1
759	add	%rax,$N[1]
760	mov	16($ap,$j),%rax
761	adc	\$0,%rdx
762	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
763	lea	8*4($np),$np
764	adc	\$0,%rdx
765	mov	$N[1],($tp)		# tp[j-1]
766	mov	%rdx,$N[0]
767
768	add	\$32,$j			# j+=4
769	jnz	.L1st4x
770
771	mulq	$m0			# ap[j]*bp[0]
772	add	%rax,$A[0]
773	mov	-8*2($np),%rax
774	lea	32($tp),$tp
775	adc	\$0,%rdx
776	mov	%rdx,$A[1]
777
778	mulq	$m1			# np[j]*m1
779	add	%rax,$N[0]
780	mov	-8($ap),%rax
781	adc	\$0,%rdx
782	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
783	adc	\$0,%rdx
784	mov	$N[0],-24($tp)		# tp[j-1]
785	mov	%rdx,$N[1]
786
787	mulq	$m0			# ap[j]*bp[0]
788	add	%rax,$A[1]
789	mov	-8*1($np),%rax
790	adc	\$0,%rdx
791	mov	%rdx,$A[0]
792
793	mulq	$m1			# np[j]*m1
794	add	%rax,$N[1]
795	mov	($ap,$num),%rax		# ap[0]
796	adc	\$0,%rdx
797	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
798	adc	\$0,%rdx
799	mov	$N[1],-16($tp)		# tp[j-1]
800	mov	%rdx,$N[0]
801
802	lea	($np,$num),$np		# rewind $np
803
804	xor	$N[1],$N[1]
805	add	$A[0],$N[0]
806	adc	\$0,$N[1]
807	mov	$N[0],-8($tp)
808
809	jmp	.Louter4x
810
811.align	32
812.Louter4x:
813	lea	16+128($tp),%rdx	# where 256-byte mask is (+size optimization)
814	pxor	%xmm4,%xmm4
815	pxor	%xmm5,%xmm5
816___
817for($i=0;$i<$STRIDE/16;$i+=4) {
818$code.=<<___;
819	movdqa	`16*($i+0)-128`($bp),%xmm0
820	movdqa	`16*($i+1)-128`($bp),%xmm1
821	movdqa	`16*($i+2)-128`($bp),%xmm2
822	movdqa	`16*($i+3)-128`($bp),%xmm3
823	pand	`16*($i+0)-128`(%rdx),%xmm0
824	pand	`16*($i+1)-128`(%rdx),%xmm1
825	por	%xmm0,%xmm4
826	pand	`16*($i+2)-128`(%rdx),%xmm2
827	por	%xmm1,%xmm5
828	pand	`16*($i+3)-128`(%rdx),%xmm3
829	por	%xmm2,%xmm4
830	por	%xmm3,%xmm5
831___
832}
833$code.=<<___;
834	por	%xmm5,%xmm4
835	# Combine the upper and lower halves of %xmm4 as %xmm0.
836	pshufd	\$0x4e,%xmm4,%xmm0	# Swap upper and lower halves.
837	por	%xmm4,%xmm0
838	lea	$STRIDE($bp),$bp
839	movq	%xmm0,$m0		# m0=bp[i]
840
841	mov	($tp,$num),$A[0]
842	mov	$n0,$m1
843	mulq	$m0			# ap[0]*bp[i]
844	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
845	mov	($np),%rax
846	adc	\$0,%rdx
847
848	imulq	$A[0],$m1		# tp[0]*n0
849	mov	%rdx,$A[1]
850	mov	$N[1],($tp)		# store upmost overflow bit
851
852	lea	($tp,$num),$tp		# rewind $tp
853
854	mulq	$m1			# np[0]*m1
855	add	%rax,$A[0]		# "$N[0]", discarded
856	mov	8($ap,$num),%rax
857	adc	\$0,%rdx
858	mov	%rdx,$N[1]
859
860	mulq	$m0			# ap[j]*bp[i]
861	add	%rax,$A[1]
862	mov	8*1($np),%rax
863	adc	\$0,%rdx
864	add	8($tp),$A[1]		# +tp[1]
865	adc	\$0,%rdx
866	mov	%rdx,$A[0]
867
868	mulq	$m1			# np[j]*m1
869	add	%rax,$N[1]
870	mov	16($ap,$num),%rax
871	adc	\$0,%rdx
872	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
873	lea	4*8($num),$j		# j=4
874	lea	8*4($np),$np
875	adc	\$0,%rdx
876	mov	%rdx,$N[0]
877	jmp	.Linner4x
878
879.align	32
880.Linner4x:
881	mulq	$m0			# ap[j]*bp[i]
882	add	%rax,$A[0]
883	mov	-8*2($np),%rax
884	adc	\$0,%rdx
885	add	16($tp),$A[0]		# ap[j]*bp[i]+tp[j]
886	lea	32($tp),$tp
887	adc	\$0,%rdx
888	mov	%rdx,$A[1]
889
890	mulq	$m1			# np[j]*m1
891	add	%rax,$N[0]
892	mov	-8($ap,$j),%rax
893	adc	\$0,%rdx
894	add	$A[0],$N[0]
895	adc	\$0,%rdx
896	mov	$N[1],-32($tp)		# tp[j-1]
897	mov	%rdx,$N[1]
898
899	mulq	$m0			# ap[j]*bp[i]
900	add	%rax,$A[1]
901	mov	-8*1($np),%rax
902	adc	\$0,%rdx
903	add	-8($tp),$A[1]
904	adc	\$0,%rdx
905	mov	%rdx,$A[0]
906
907	mulq	$m1			# np[j]*m1
908	add	%rax,$N[1]
909	mov	($ap,$j),%rax
910	adc	\$0,%rdx
911	add	$A[1],$N[1]
912	adc	\$0,%rdx
913	mov	$N[0],-24($tp)		# tp[j-1]
914	mov	%rdx,$N[0]
915
916	mulq	$m0			# ap[j]*bp[i]
917	add	%rax,$A[0]
918	mov	8*0($np),%rax
919	adc	\$0,%rdx
920	add	($tp),$A[0]		# ap[j]*bp[i]+tp[j]
921	adc	\$0,%rdx
922	mov	%rdx,$A[1]
923
924	mulq	$m1			# np[j]*m1
925	add	%rax,$N[0]
926	mov	8($ap,$j),%rax
927	adc	\$0,%rdx
928	add	$A[0],$N[0]
929	adc	\$0,%rdx
930	mov	$N[1],-16($tp)		# tp[j-1]
931	mov	%rdx,$N[1]
932
933	mulq	$m0			# ap[j]*bp[i]
934	add	%rax,$A[1]
935	mov	8*1($np),%rax
936	adc	\$0,%rdx
937	add	8($tp),$A[1]
938	adc	\$0,%rdx
939	mov	%rdx,$A[0]
940
941	mulq	$m1			# np[j]*m1
942	add	%rax,$N[1]
943	mov	16($ap,$j),%rax
944	adc	\$0,%rdx
945	add	$A[1],$N[1]
946	lea	8*4($np),$np
947	adc	\$0,%rdx
948	mov	$N[0],-8($tp)		# tp[j-1]
949	mov	%rdx,$N[0]
950
951	add	\$32,$j			# j+=4
952	jnz	.Linner4x
953
954	mulq	$m0			# ap[j]*bp[i]
955	add	%rax,$A[0]
956	mov	-8*2($np),%rax
957	adc	\$0,%rdx
958	add	16($tp),$A[0]		# ap[j]*bp[i]+tp[j]
959	lea	32($tp),$tp
960	adc	\$0,%rdx
961	mov	%rdx,$A[1]
962
963	mulq	$m1			# np[j]*m1
964	add	%rax,$N[0]
965	mov	-8($ap),%rax
966	adc	\$0,%rdx
967	add	$A[0],$N[0]
968	adc	\$0,%rdx
969	mov	$N[1],-32($tp)		# tp[j-1]
970	mov	%rdx,$N[1]
971
972	mulq	$m0			# ap[j]*bp[i]
973	add	%rax,$A[1]
974	mov	$m1,%rax
975	mov	-8*1($np),$m1
976	adc	\$0,%rdx
977	add	-8($tp),$A[1]
978	adc	\$0,%rdx
979	mov	%rdx,$A[0]
980
981	mulq	$m1			# np[j]*m1
982	add	%rax,$N[1]
983	mov	($ap,$num),%rax		# ap[0]
984	adc	\$0,%rdx
985	add	$A[1],$N[1]
986	adc	\$0,%rdx
987	mov	$N[0],-24($tp)		# tp[j-1]
988	mov	%rdx,$N[0]
989
990	mov	$N[1],-16($tp)		# tp[j-1]
991	lea	($np,$num),$np		# rewind $np
992
993	xor	$N[1],$N[1]
994	add	$A[0],$N[0]
995	adc	\$0,$N[1]
996	add	($tp),$N[0]		# pull upmost overflow bit
997	adc	\$0,$N[1]		# upmost overflow bit
998	mov	$N[0],-8($tp)
999
1000	cmp	16+8(%rsp),$bp
1001	jb	.Louter4x
1002___
1003if (1) {
1004$code.=<<___;
1005	xor	%rax,%rax
1006	sub	$N[0],$m1		# compare top-most words
1007	adc	$j,$j			# $j is zero
1008	or	$j,$N[1]
1009	sub	$N[1],%rax		# %rax=-$N[1]
1010	lea	($tp,$num),%rbx		# tptr in .sqr4x_sub
1011	mov	($np),%r12
1012	lea	($np),%rbp		# nptr in .sqr4x_sub
1013	mov	%r9,%rcx
1014	sar	\$3+2,%rcx
1015	mov	56+8(%rsp),%rdi		# rptr in .sqr4x_sub
1016	dec	%r12			# so that after 'not' we get -n[0]
1017	xor	%r10,%r10
1018	mov	8*1(%rbp),%r13
1019	mov	8*2(%rbp),%r14
1020	mov	8*3(%rbp),%r15
1021	jmp	.Lsqr4x_sub_entry
1022___
1023} else {
1024my @ri=("%rax",$bp,$m0,$m1);
1025my $rp="%rdx";
1026$code.=<<___
1027	xor	\$1,$N[1]
1028	lea	($tp,$num),$tp		# rewind $tp
1029	sar	\$5,$num		# cf=0
1030	lea	($np,$N[1],8),$np
1031	mov	56+8(%rsp),$rp		# restore $rp
1032	jmp	.Lsub4x
1033
1034.align	32
1035.Lsub4x:
1036	.byte	0x66
1037	mov	8*0($tp),@ri[0]
1038	mov	8*1($tp),@ri[1]
1039	.byte	0x66
1040	sbb	16*0($np),@ri[0]
1041	mov	8*2($tp),@ri[2]
1042	sbb	16*1($np),@ri[1]
1043	mov	3*8($tp),@ri[3]
1044	lea	4*8($tp),$tp
1045	sbb	16*2($np),@ri[2]
1046	mov	@ri[0],8*0($rp)
1047	sbb	16*3($np),@ri[3]
1048	lea	16*4($np),$np
1049	mov	@ri[1],8*1($rp)
1050	mov	@ri[2],8*2($rp)
1051	mov	@ri[3],8*3($rp)
1052	lea	8*4($rp),$rp
1053
1054	inc	$num
1055	jnz	.Lsub4x
1056
1057	ret
1058___
1059}
1060$code.=<<___;
1061.cfi_endproc
1062.size	mul4x_internal,.-mul4x_internal
1063___
1064}}}
1065{{{
1066######################################################################
1067# void bn_power5_nohw(
1068my $rptr="%rdi";	# BN_ULONG *rptr,
1069my $aptr="%rsi";	# const BN_ULONG *aptr,
1070my $bptr="%rdx";	# const BN_ULONG *table,
1071my $nptr="%rcx";	# const BN_ULONG *nptr,
1072my $n0  ="%r8";		# const BN_ULONG *n0);
1073my $num ="%r9";		# int num, has to be divisible by 8
1074			# int pwr
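#
# In rough terms (a sketch of the intent, inferred from the call sequence
# below rather than from a specification; mont_sqr, mont_mul and gather5 are
# illustrative names): five back-to-back Montgomery squarings followed by one
# Montgomery multiplication with the power selected from the table by pwr,
#
#	// everything in the Montgomery domain
#	for (int k = 0; k < 5; k++)
#		mont_sqr(a, a, nptr, n0, num);		// __bn_sqr8x_internal + post4x
#	mont_mul(rptr, a, gather5(table, num, pwr), nptr, n0, num);	// mul4x_internal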
1075
1076my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
1077my @A0=("%r10","%r11");
1078my @A1=("%r12","%r13");
1079my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
1080
1081$code.=<<___;
1082.globl	bn_power5_nohw
1083.type	bn_power5_nohw,\@function,6
1084.align	32
1085bn_power5_nohw:
1086.cfi_startproc
1087	_CET_ENDBR
1088	mov	%rsp,%rax
1089.cfi_def_cfa_register	%rax
1090	push	%rbx
1091.cfi_push	%rbx
1092	push	%rbp
1093.cfi_push	%rbp
1094	push	%r12
1095.cfi_push	%r12
1096	push	%r13
1097.cfi_push	%r13
1098	push	%r14
1099.cfi_push	%r14
1100	push	%r15
1101.cfi_push	%r15
1102.Lpower5_prologue:
1103
1104	# num is declared as an int, a 32-bit parameter, so the upper half is
1105	# undefined. It is important that this write to ${num}, which zeros the
1106	# upper half, come before the first access.
1107	shl	\$3,${num}d		# convert $num to bytes
1108	lea	($num,$num,2),%r10d	# 3*$num
1109	neg	$num
1110	mov	($n0),$n0		# *n0
1111
	##############################################################
	# Ensure that the stack frame doesn't alias with $rptr+3*$num
	# modulo 4096, which covers ret[num], am[num] and n[num]
	# (see bn_exp.c). This is done to let the CPU's memory
	# disambiguation logic do its magic. [An extra 256 bytes is for
	# the power mask calculated from the 7th argument, the index.]
	#
1119	lea	-320(%rsp,$num,2),%r11
1120	mov	%rsp,%rbp
1121	sub	$rptr,%r11
1122	and	\$4095,%r11
1123	cmp	%r11,%r10
1124	jb	.Lpwr_sp_alt
	sub	%r11,%rbp		# align with $rptr
1126	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
1127	jmp	.Lpwr_sp_done
1128
1129.align	32
1130.Lpwr_sp_alt:
1131	lea	4096-320(,$num,2),%r10
1132	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
1133	sub	%r10,%r11
1134	mov	\$0,%r10
1135	cmovc	%r10,%r11
1136	sub	%r11,%rbp
1137.Lpwr_sp_done:
1138	and	\$-64,%rbp
1139	mov	%rsp,%r11
1140	sub	%rbp,%r11
1141	and	\$-4096,%r11
1142	lea	(%rbp,%r11),%rsp
1143	mov	(%rsp),%r10
1144	cmp	%rbp,%rsp
1145	ja	.Lpwr_page_walk
1146	jmp	.Lpwr_page_walk_done
1147
1148.Lpwr_page_walk:
1149	lea	-4096(%rsp),%rsp
1150	mov	(%rsp),%r10
1151	cmp	%rbp,%rsp
1152	ja	.Lpwr_page_walk
1153.Lpwr_page_walk_done:
1154
1155	mov	$num,%r10
1156	neg	$num
1157
1158	##############################################################
1159	# Stack layout
1160	#
1161	# +0	saved $num, used in reduction section
1162	# +8	&t[2*$num], used in reduction section
1163	# +32	saved *n0
1164	# +40	saved %rsp
1165	# +48	t[2*$num]
1166	#
1167	mov	$n0,  32(%rsp)
1168	mov	%rax, 40(%rsp)		# save original %rsp
1169.cfi_cfa_expression	%rsp+40,deref,+8
1170.Lpower5_body:
1171	movq	$rptr,%xmm1		# save $rptr, used in sqr8x
1172	movq	$nptr,%xmm2		# save $nptr
1173	movq	%r10, %xmm3		# -$num, used in sqr8x
1174	movq	$bptr,%xmm4
1175
1176	call	__bn_sqr8x_internal
1177	call	__bn_post4x_internal
1178	call	__bn_sqr8x_internal
1179	call	__bn_post4x_internal
1180	call	__bn_sqr8x_internal
1181	call	__bn_post4x_internal
1182	call	__bn_sqr8x_internal
1183	call	__bn_post4x_internal
1184	call	__bn_sqr8x_internal
1185	call	__bn_post4x_internal
1186
1187	movq	%xmm2,$nptr
1188	movq	%xmm4,$bptr
1189	mov	$aptr,$rptr
1190	mov	40(%rsp),%rax
1191	lea	32(%rsp),$n0
1192
1193	call	mul4x_internal
1194
1195	mov	40(%rsp),%rsi		# restore %rsp
1196.cfi_def_cfa	%rsi,8
1197	mov	\$1,%rax
1198	mov	-48(%rsi),%r15
1199.cfi_restore	%r15
1200	mov	-40(%rsi),%r14
1201.cfi_restore	%r14
1202	mov	-32(%rsi),%r13
1203.cfi_restore	%r13
1204	mov	-24(%rsi),%r12
1205.cfi_restore	%r12
1206	mov	-16(%rsi),%rbp
1207.cfi_restore	%rbp
1208	mov	-8(%rsi),%rbx
1209.cfi_restore	%rbx
1210	lea	(%rsi),%rsp
1211.cfi_def_cfa_register	%rsp
1212.Lpower5_epilogue:
1213	ret
1214.cfi_endproc
1215.size	bn_power5_nohw,.-bn_power5_nohw
1216
1217.globl	bn_sqr8x_internal
1218.hidden	bn_sqr8x_internal
1219.type	bn_sqr8x_internal,\@abi-omnipotent
1220.align	32
1221bn_sqr8x_internal:
1222__bn_sqr8x_internal:
1223.cfi_startproc
1224	_CET_ENDBR
1225	##############################################################
1226	# Squaring part:
1227	#
1228	# a) multiply-n-add everything but a[i]*a[i];
1229	# b) shift result of a) by 1 to the left and accumulate
1230	#    a[i]*a[i] products;
1231	#
1232	##############################################################
1233	#                                                     a[1]a[0]
1234	#                                                 a[2]a[0]
1235	#                                             a[3]a[0]
1236	#                                             a[2]a[1]
1237	#                                         a[4]a[0]
1238	#                                         a[3]a[1]
1239	#                                     a[5]a[0]
1240	#                                     a[4]a[1]
1241	#                                     a[3]a[2]
1242	#                                 a[6]a[0]
1243	#                                 a[5]a[1]
1244	#                                 a[4]a[2]
1245	#                             a[7]a[0]
1246	#                             a[6]a[1]
1247	#                             a[5]a[2]
1248	#                             a[4]a[3]
1249	#                         a[7]a[1]
1250	#                         a[6]a[2]
1251	#                         a[5]a[3]
1252	#                     a[7]a[2]
1253	#                     a[6]a[3]
1254	#                     a[5]a[4]
1255	#                 a[7]a[3]
1256	#                 a[6]a[4]
1257	#             a[7]a[4]
1258	#             a[6]a[5]
1259	#         a[7]a[5]
1260	#     a[7]a[6]
1261	#                                                     a[1]a[0]
1262	#                                                 a[2]a[0]
1263	#                                             a[3]a[0]
1264	#                                         a[4]a[0]
1265	#                                     a[5]a[0]
1266	#                                 a[6]a[0]
1267	#                             a[7]a[0]
1268	#                                             a[2]a[1]
1269	#                                         a[3]a[1]
1270	#                                     a[4]a[1]
1271	#                                 a[5]a[1]
1272	#                             a[6]a[1]
1273	#                         a[7]a[1]
1274	#                                     a[3]a[2]
1275	#                                 a[4]a[2]
1276	#                             a[5]a[2]
1277	#                         a[6]a[2]
1278	#                     a[7]a[2]
1279	#                             a[4]a[3]
1280	#                         a[5]a[3]
1281	#                     a[6]a[3]
1282	#                 a[7]a[3]
1283	#                     a[5]a[4]
1284	#                 a[6]a[4]
1285	#             a[7]a[4]
1286	#             a[6]a[5]
1287	#         a[7]a[5]
1288	#     a[7]a[6]
1289	#                                                         a[0]a[0]
1290	#                                                 a[1]a[1]
1291	#                                         a[2]a[2]
1292	#                                 a[3]a[3]
1293	#                         a[4]a[4]
1294	#                 a[5]a[5]
1295	#         a[6]a[6]
1296	# a[7]a[7]
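	#
	# A compact C sketch of the same two phases (illustrative only;
	# assumes 64-bit words, t[] zero-initialized with 2*num words, and
	# unsigned __int128 standing in for the mulq 64x64->128 products):
	#
	#	// a) off-diagonal products a[i]*a[j], i < j
	#	for (int i = 0; i < num; i++) {
	#		BN_ULONG carry = 0;
	#		for (int j = i + 1; j < num; j++) {
	#			unsigned __int128 m = (unsigned __int128)a[i] * a[j]
	#			                      + t[i + j] + carry;
	#			t[i + j] = (BN_ULONG)m;
	#			carry = (BN_ULONG)(m >> 64);
	#		}
	#		t[i + num] = carry;
	#	}
	#	// b) double the cross products and fold in the squares a[i]*a[i]
	#	BN_ULONG shift = 0, carry = 0;
	#	for (int i = 0; i < num; i++) {
	#		BN_ULONG lo = (t[2*i] << 1) | shift;
	#		BN_ULONG hi = (t[2*i+1] << 1) | (t[2*i] >> 63);
	#		shift = t[2*i+1] >> 63;
	#		unsigned __int128 sq = (unsigned __int128)a[i] * a[i];
	#		unsigned __int128 s0 = (unsigned __int128)lo + (BN_ULONG)sq + carry;
	#		t[2*i] = (BN_ULONG)s0;
	#		unsigned __int128 s1 = (unsigned __int128)hi + (BN_ULONG)(sq >> 64)
	#		                       + (BN_ULONG)(s0 >> 64);
	#		t[2*i+1] = (BN_ULONG)s1;
	#		carry = (BN_ULONG)(s1 >> 64);
	#	}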
1297
1298	lea	32(%r10),$i		# $i=-($num-32)
1299	lea	($aptr,$num),$aptr	# end of a[] buffer, ($aptr,$i)=&ap[2]
1300
1301	mov	$num,$j			# $j=$num
1302
1303					# comments apply to $num==8 case
1304	mov	-32($aptr,$i),$a0	# a[0]
1305	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
1306	mov	-24($aptr,$i),%rax	# a[1]
1307	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
1308	mov	-16($aptr,$i),$ai	# a[2]
1309	mov	%rax,$a1
1310
1311	mul	$a0			# a[1]*a[0]
1312	mov	%rax,$A0[0]		# a[1]*a[0]
1313	 mov	$ai,%rax		# a[2]
1314	mov	%rdx,$A0[1]
1315	mov	$A0[0],-24($tptr,$i)	# t[1]
1316
1317	mul	$a0			# a[2]*a[0]
1318	add	%rax,$A0[1]
1319	 mov	$ai,%rax
1320	adc	\$0,%rdx
1321	mov	$A0[1],-16($tptr,$i)	# t[2]
1322	mov	%rdx,$A0[0]
1323
1324
1325	 mov	-8($aptr,$i),$ai	# a[3]
1326	mul	$a1			# a[2]*a[1]
1327	mov	%rax,$A1[0]		# a[2]*a[1]+t[3]
1328	 mov	$ai,%rax
1329	mov	%rdx,$A1[1]
1330
1331	 lea	($i),$j
1332	mul	$a0			# a[3]*a[0]
1333	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
1334	 mov	$ai,%rax
1335	mov	%rdx,$A0[1]
1336	adc	\$0,$A0[1]
1337	add	$A1[0],$A0[0]
1338	adc	\$0,$A0[1]
1339	mov	$A0[0],-8($tptr,$j)	# t[3]
1340	jmp	.Lsqr4x_1st
1341
1342.align	32
1343.Lsqr4x_1st:
1344	 mov	($aptr,$j),$ai		# a[4]
1345	mul	$a1			# a[3]*a[1]
1346	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
1347	 mov	$ai,%rax
1348	mov	%rdx,$A1[0]
1349	adc	\$0,$A1[0]
1350
1351	mul	$a0			# a[4]*a[0]
1352	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
1353	 mov	$ai,%rax		# a[3]
1354	 mov	8($aptr,$j),$ai		# a[5]
1355	mov	%rdx,$A0[0]
1356	adc	\$0,$A0[0]
1357	add	$A1[1],$A0[1]
1358	adc	\$0,$A0[0]
1359
1360
1361	mul	$a1			# a[4]*a[3]
1362	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
1363	 mov	$ai,%rax
1364	 mov	$A0[1],($tptr,$j)	# t[4]
1365	mov	%rdx,$A1[1]
1366	adc	\$0,$A1[1]
1367
1368	mul	$a0			# a[5]*a[2]
1369	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
1370	 mov	$ai,%rax
1371	 mov	16($aptr,$j),$ai	# a[6]
1372	mov	%rdx,$A0[1]
1373	adc	\$0,$A0[1]
1374	add	$A1[0],$A0[0]
1375	adc	\$0,$A0[1]
1376
1377	mul	$a1			# a[5]*a[3]
1378	add	%rax,$A1[1]		# a[5]*a[3]+t[6]
1379	 mov	$ai,%rax
1380	 mov	$A0[0],8($tptr,$j)	# t[5]
1381	mov	%rdx,$A1[0]
1382	adc	\$0,$A1[0]
1383
1384	mul	$a0			# a[6]*a[2]
1385	add	%rax,$A0[1]		# a[6]*a[2]+a[5]*a[3]+t[6]
1386	 mov	$ai,%rax		# a[3]
1387	 mov	24($aptr,$j),$ai	# a[7]
1388	mov	%rdx,$A0[0]
1389	adc	\$0,$A0[0]
1390	add	$A1[1],$A0[1]
1391	adc	\$0,$A0[0]
1392
1393
1394	mul	$a1			# a[6]*a[5]
1395	add	%rax,$A1[0]		# a[6]*a[5]+t[7]
1396	 mov	$ai,%rax
1397	 mov	$A0[1],16($tptr,$j)	# t[6]
1398	mov	%rdx,$A1[1]
1399	adc	\$0,$A1[1]
1400	 lea	32($j),$j
1401
1402	mul	$a0			# a[7]*a[4]
1403	add	%rax,$A0[0]		# a[7]*a[4]+a[6]*a[5]+t[6]
1404	 mov	$ai,%rax
1405	mov	%rdx,$A0[1]
1406	adc	\$0,$A0[1]
1407	add	$A1[0],$A0[0]
1408	adc	\$0,$A0[1]
1409	mov	$A0[0],-8($tptr,$j)	# t[7]
1410
1411	cmp	\$0,$j
1412	jne	.Lsqr4x_1st
1413
1414	mul	$a1			# a[7]*a[5]
1415	add	%rax,$A1[1]
1416	lea	16($i),$i
1417	adc	\$0,%rdx
1418	add	$A0[1],$A1[1]
1419	adc	\$0,%rdx
1420
1421	mov	$A1[1],($tptr)		# t[8]
1422	mov	%rdx,$A1[0]
1423	mov	%rdx,8($tptr)		# t[9]
1424	jmp	.Lsqr4x_outer
1425
1426.align	32
1427.Lsqr4x_outer:				# comments apply to $num==6 case
1428	mov	-32($aptr,$i),$a0	# a[0]
1429	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
1430	mov	-24($aptr,$i),%rax	# a[1]
1431	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
1432	mov	-16($aptr,$i),$ai	# a[2]
1433	mov	%rax,$a1
1434
1435	mul	$a0			# a[1]*a[0]
1436	mov	-24($tptr,$i),$A0[0]	# t[1]
1437	add	%rax,$A0[0]		# a[1]*a[0]+t[1]
1438	 mov	$ai,%rax		# a[2]
1439	adc	\$0,%rdx
1440	mov	$A0[0],-24($tptr,$i)	# t[1]
1441	mov	%rdx,$A0[1]
1442
1443	mul	$a0			# a[2]*a[0]
1444	add	%rax,$A0[1]
1445	 mov	$ai,%rax
1446	adc	\$0,%rdx
1447	add	-16($tptr,$i),$A0[1]	# a[2]*a[0]+t[2]
1448	mov	%rdx,$A0[0]
1449	adc	\$0,$A0[0]
1450	mov	$A0[1],-16($tptr,$i)	# t[2]
1451
1452	xor	$A1[0],$A1[0]
1453
1454	 mov	-8($aptr,$i),$ai	# a[3]
1455	mul	$a1			# a[2]*a[1]
1456	add	%rax,$A1[0]		# a[2]*a[1]+t[3]
1457	 mov	$ai,%rax
1458	adc	\$0,%rdx
1459	add	-8($tptr,$i),$A1[0]
1460	mov	%rdx,$A1[1]
1461	adc	\$0,$A1[1]
1462
1463	mul	$a0			# a[3]*a[0]
1464	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
1465	 mov	$ai,%rax
1466	adc	\$0,%rdx
1467	add	$A1[0],$A0[0]
1468	mov	%rdx,$A0[1]
1469	adc	\$0,$A0[1]
1470	mov	$A0[0],-8($tptr,$i)	# t[3]
1471
1472	lea	($i),$j
1473	jmp	.Lsqr4x_inner
1474
1475.align	32
1476.Lsqr4x_inner:
1477	 mov	($aptr,$j),$ai		# a[4]
1478	mul	$a1			# a[3]*a[1]
1479	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
1480	 mov	$ai,%rax
1481	mov	%rdx,$A1[0]
1482	adc	\$0,$A1[0]
1483	add	($tptr,$j),$A1[1]
1484	adc	\$0,$A1[0]
1485
1486	.byte	0x67
1487	mul	$a0			# a[4]*a[0]
1488	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
1489	 mov	$ai,%rax		# a[3]
1490	 mov	8($aptr,$j),$ai		# a[5]
1491	mov	%rdx,$A0[0]
1492	adc	\$0,$A0[0]
1493	add	$A1[1],$A0[1]
1494	adc	\$0,$A0[0]
1495
1496	mul	$a1			# a[4]*a[3]
1497	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
1498	mov	$A0[1],($tptr,$j)	# t[4]
1499	 mov	$ai,%rax
1500	mov	%rdx,$A1[1]
1501	adc	\$0,$A1[1]
1502	add	8($tptr,$j),$A1[0]
1503	lea	16($j),$j		# j++
1504	adc	\$0,$A1[1]
1505
1506	mul	$a0			# a[5]*a[2]
1507	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
1508	 mov	$ai,%rax
1509	adc	\$0,%rdx
1510	add	$A1[0],$A0[0]
1511	mov	%rdx,$A0[1]
1512	adc	\$0,$A0[1]
1513	mov	$A0[0],-8($tptr,$j)	# t[5], "preloaded t[1]" below
1514
1515	cmp	\$0,$j
1516	jne	.Lsqr4x_inner
1517
1518	.byte	0x67
1519	mul	$a1			# a[5]*a[3]
1520	add	%rax,$A1[1]
1521	adc	\$0,%rdx
1522	add	$A0[1],$A1[1]
1523	adc	\$0,%rdx
1524
1525	mov	$A1[1],($tptr)		# t[6], "preloaded t[2]" below
1526	mov	%rdx,$A1[0]
1527	mov	%rdx,8($tptr)		# t[7], "preloaded t[3]" below
1528
1529	add	\$16,$i
1530	jnz	.Lsqr4x_outer
1531
1532					# comments apply to $num==4 case
1533	mov	-32($aptr),$a0		# a[0]
1534	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
1535	mov	-24($aptr),%rax		# a[1]
1536	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
1537	mov	-16($aptr),$ai		# a[2]
1538	mov	%rax,$a1
1539
1540	mul	$a0			# a[1]*a[0]
1541	add	%rax,$A0[0]		# a[1]*a[0]+t[1], preloaded t[1]
1542	 mov	$ai,%rax		# a[2]
1543	mov	%rdx,$A0[1]
1544	adc	\$0,$A0[1]
1545
1546	mul	$a0			# a[2]*a[0]
1547	add	%rax,$A0[1]
1548	 mov	$ai,%rax
1549	 mov	$A0[0],-24($tptr)	# t[1]
1550	mov	%rdx,$A0[0]
1551	adc	\$0,$A0[0]
1552	add	$A1[1],$A0[1]		# a[2]*a[0]+t[2], preloaded t[2]
1553	 mov	-8($aptr),$ai		# a[3]
1554	adc	\$0,$A0[0]
1555
1556	mul	$a1			# a[2]*a[1]
1557	add	%rax,$A1[0]		# a[2]*a[1]+t[3], preloaded t[3]
1558	 mov	$ai,%rax
1559	 mov	$A0[1],-16($tptr)	# t[2]
1560	mov	%rdx,$A1[1]
1561	adc	\$0,$A1[1]
1562
1563	mul	$a0			# a[3]*a[0]
1564	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
1565	 mov	$ai,%rax
1566	mov	%rdx,$A0[1]
1567	adc	\$0,$A0[1]
1568	add	$A1[0],$A0[0]
1569	adc	\$0,$A0[1]
1570	mov	$A0[0],-8($tptr)	# t[3]
1571
1572	mul	$a1			# a[3]*a[1]
1573	add	%rax,$A1[1]
1574	 mov	-16($aptr),%rax		# a[2]
1575	adc	\$0,%rdx
1576	add	$A0[1],$A1[1]
1577	adc	\$0,%rdx
1578
1579	mov	$A1[1],($tptr)		# t[4]
1580	mov	%rdx,$A1[0]
1581	mov	%rdx,8($tptr)		# t[5]
1582
1583	mul	$ai			# a[2]*a[3]
1584___
1585{
1586my ($shift,$carry)=($a0,$a1);
1587my @S=(@A1,$ai,$n0);
1588$code.=<<___;
1589	 add	\$16,$i
1590	 xor	$shift,$shift
1591	 sub	$num,$i			# $i=16-$num
1592	 xor	$carry,$carry
1593
1594	add	$A1[0],%rax		# t[5]
1595	adc	\$0,%rdx
1596	mov	%rax,8($tptr)		# t[5]
1597	mov	%rdx,16($tptr)		# t[6]
1598	mov	$carry,24($tptr)	# t[7]
1599
1600	 mov	-16($aptr,$i),%rax	# a[0]
1601	lea	48+8(%rsp),$tptr
1602	 xor	$A0[0],$A0[0]		# t[0]
1603	 mov	8($tptr),$A0[1]		# t[1]
1604
1605	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
1606	shr	\$63,$A0[0]
1607	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
1608	shr	\$63,$A0[1]
1609	or	$A0[0],$S[1]		# | t[2*i]>>63
1610	 mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
1611	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1612	mul	%rax			# a[i]*a[i]
1613	neg	$carry			# mov $carry,cf
1614	 mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1615	adc	%rax,$S[0]
1616	 mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
1617	mov	$S[0],($tptr)
1618	adc	%rdx,$S[1]
1619
1620	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
1621	 mov	$S[1],8($tptr)
1622	 sbb	$carry,$carry		# mov cf,$carry
1623	shr	\$63,$A0[0]
1624	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
1625	shr	\$63,$A0[1]
1626	or	$A0[0],$S[3]		# | t[2*i]>>63
1627	 mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
1628	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1629	mul	%rax			# a[i]*a[i]
1630	neg	$carry			# mov $carry,cf
1631	 mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1632	adc	%rax,$S[2]
1633	 mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
1634	mov	$S[2],16($tptr)
1635	adc	%rdx,$S[3]
1636	lea	16($i),$i
1637	mov	$S[3],24($tptr)
1638	sbb	$carry,$carry		# mov cf,$carry
1639	lea	64($tptr),$tptr
1640	jmp	.Lsqr4x_shift_n_add
1641
1642.align	32
1643.Lsqr4x_shift_n_add:
1644	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
1645	shr	\$63,$A0[0]
1646	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
1647	shr	\$63,$A0[1]
1648	or	$A0[0],$S[1]		# | t[2*i]>>63
1649	 mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
1650	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1651	mul	%rax			# a[i]*a[i]
1652	neg	$carry			# mov $carry,cf
1653	 mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1654	adc	%rax,$S[0]
1655	 mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
1656	mov	$S[0],-32($tptr)
1657	adc	%rdx,$S[1]
1658
1659	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
1660	 mov	$S[1],-24($tptr)
1661	 sbb	$carry,$carry		# mov cf,$carry
1662	shr	\$63,$A0[0]
1663	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
1664	shr	\$63,$A0[1]
1665	or	$A0[0],$S[3]		# | t[2*i]>>63
1666	 mov	0($tptr),$A0[0]		# t[2*i+2]	# prefetch
1667	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1668	mul	%rax			# a[i]*a[i]
1669	neg	$carry			# mov $carry,cf
1670	 mov	8($tptr),$A0[1]		# t[2*i+2+1]	# prefetch
1671	adc	%rax,$S[2]
1672	 mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
1673	mov	$S[2],-16($tptr)
1674	adc	%rdx,$S[3]
1675
1676	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
1677	 mov	$S[3],-8($tptr)
1678	 sbb	$carry,$carry		# mov cf,$carry
1679	shr	\$63,$A0[0]
1680	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
1681	shr	\$63,$A0[1]
1682	or	$A0[0],$S[1]		# | t[2*i]>>63
1683	 mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
1684	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1685	mul	%rax			# a[i]*a[i]
1686	neg	$carry			# mov $carry,cf
1687	 mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1688	adc	%rax,$S[0]
1689	 mov	8($aptr,$i),%rax	# a[i+1]	# prefetch
1690	mov	$S[0],0($tptr)
1691	adc	%rdx,$S[1]
1692
1693	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
1694	 mov	$S[1],8($tptr)
1695	 sbb	$carry,$carry		# mov cf,$carry
1696	shr	\$63,$A0[0]
1697	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
1698	shr	\$63,$A0[1]
1699	or	$A0[0],$S[3]		# | t[2*i]>>63
1700	 mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
1701	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1702	mul	%rax			# a[i]*a[i]
1703	neg	$carry			# mov $carry,cf
1704	 mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1705	adc	%rax,$S[2]
1706	 mov	16($aptr,$i),%rax	# a[i+1]	# prefetch
1707	mov	$S[2],16($tptr)
1708	adc	%rdx,$S[3]
1709	mov	$S[3],24($tptr)
1710	sbb	$carry,$carry		# mov cf,$carry
1711	lea	64($tptr),$tptr
1712	add	\$32,$i
1713	jnz	.Lsqr4x_shift_n_add
1714
1715	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
1716	.byte	0x67
1717	shr	\$63,$A0[0]
1718	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
1719	shr	\$63,$A0[1]
1720	or	$A0[0],$S[1]		# | t[2*i]>>63
1721	 mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
1722	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1723	mul	%rax			# a[i]*a[i]
1724	neg	$carry			# mov $carry,cf
1725	 mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1726	adc	%rax,$S[0]
1727	 mov	-8($aptr),%rax		# a[i+1]	# prefetch
1728	mov	$S[0],-32($tptr)
1729	adc	%rdx,$S[1]
1730
1731	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1|shift
1732	 mov	$S[1],-24($tptr)
1733	 sbb	$carry,$carry		# mov cf,$carry
1734	shr	\$63,$A0[0]
1735	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
1736	shr	\$63,$A0[1]
1737	or	$A0[0],$S[3]		# | t[2*i]>>63
1738	mul	%rax			# a[i]*a[i]
1739	neg	$carry			# mov $carry,cf
1740	adc	%rax,$S[2]
1741	adc	%rdx,$S[3]
1742	mov	$S[2],-16($tptr)
1743	mov	$S[3],-8($tptr)
1744___
1745}
1746######################################################################
1747# Montgomery reduction part, "word-by-word" algorithm.
1748#
1749# This new path is inspired by multiple submissions from Intel, by
1750# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
1751# Vinodh Gopal...
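#
# The reduction itself is the standard word-by-word Montgomery reduction.
# A hedged C sketch (t[] is the 2*num-word square from above plus one extra
# word, initially 0, collecting the top-most carry; n0 is the loaded n0[0]):
#
#	for (int i = 0; i < num; i++) {
#		// pick m so that t[i] + m*n[0] == 0 mod 2^64
#		BN_ULONG m = t[i] * n0;
#		BN_ULONG carry = 0;
#		for (int j = 0; j < num; j++) {
#			unsigned __int128 p = (unsigned __int128)m * n[j]
#			                      + t[i + j] + carry;
#			t[i + j] = (BN_ULONG)p;		// t[i] becomes 0
#			carry = (BN_ULONG)(p >> 64);
#		}
#		// fold the carry into the upper words; anything that spills
#		// past t[2*num-1] lands in the extra top word
#		for (int k = i + num; carry != 0 && k <= 2 * num; k++) {
#			unsigned __int128 s = (unsigned __int128)t[k] + carry;
#			t[k] = (BN_ULONG)s;
#			carry = (BN_ULONG)(s >> 64);
#		}
#	}
#	// the result is t[num .. 2*num-1] (plus the top word), followed by
#	// the conditional subtraction in __bn_post4x_internal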
1752{
1753my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
1754
1755$code.=<<___;
1756	movq	%xmm2,$nptr
1757__bn_sqr8x_reduction:
1758	xor	%rax,%rax
1759	lea	($nptr,$num),%rcx	# end of n[]
1760	lea	48+8(%rsp,$num,2),%rdx	# end of t[] buffer
1761	mov	%rcx,0+8(%rsp)
1762	lea	48+8(%rsp,$num),$tptr	# end of initial t[] window
1763	mov	%rdx,8+8(%rsp)
1764	neg	$num
1765	jmp	.L8x_reduction_loop
1766
1767.align	32
1768.L8x_reduction_loop:
1769	lea	($tptr,$num),$tptr	# start of current t[] window
1770	.byte	0x66
1771	mov	8*0($tptr),$m0
1772	mov	8*1($tptr),%r9
1773	mov	8*2($tptr),%r10
1774	mov	8*3($tptr),%r11
1775	mov	8*4($tptr),%r12
1776	mov	8*5($tptr),%r13
1777	mov	8*6($tptr),%r14
1778	mov	8*7($tptr),%r15
1779	mov	%rax,(%rdx)		# store top-most carry bit
1780	lea	8*8($tptr),$tptr
1781
1782	.byte	0x67
1783	mov	$m0,%r8
1784	imulq	32+8(%rsp),$m0		# n0*a[0]
1785	mov	8*0($nptr),%rax		# n[0]
1786	mov	\$8,%ecx
1787	jmp	.L8x_reduce
1788
1789.align	32
1790.L8x_reduce:
1791	mulq	$m0
1792	 mov	8*1($nptr),%rax		# n[1]
1793	neg	%r8
1794	mov	%rdx,%r8
1795	adc	\$0,%r8
1796
1797	mulq	$m0
1798	add	%rax,%r9
1799	 mov	8*2($nptr),%rax
1800	adc	\$0,%rdx
1801	add	%r9,%r8
1802	 mov	$m0,48-8+8(%rsp,%rcx,8)	# put aside n0*a[i]
1803	mov	%rdx,%r9
1804	adc	\$0,%r9
1805
1806	mulq	$m0
1807	add	%rax,%r10
1808	 mov	8*3($nptr),%rax
1809	adc	\$0,%rdx
1810	add	%r10,%r9
1811	 mov	32+8(%rsp),$carry	# pull n0, borrow $carry
1812	mov	%rdx,%r10
1813	adc	\$0,%r10
1814
1815	mulq	$m0
1816	add	%rax,%r11
1817	 mov	8*4($nptr),%rax
1818	adc	\$0,%rdx
1819	 imulq	%r8,$carry		# modulo-scheduled
1820	add	%r11,%r10
1821	mov	%rdx,%r11
1822	adc	\$0,%r11
1823
1824	mulq	$m0
1825	add	%rax,%r12
1826	 mov	8*5($nptr),%rax
1827	adc	\$0,%rdx
1828	add	%r12,%r11
1829	mov	%rdx,%r12
1830	adc	\$0,%r12
1831
1832	mulq	$m0
1833	add	%rax,%r13
1834	 mov	8*6($nptr),%rax
1835	adc	\$0,%rdx
1836	add	%r13,%r12
1837	mov	%rdx,%r13
1838	adc	\$0,%r13
1839
1840	mulq	$m0
1841	add	%rax,%r14
1842	 mov	8*7($nptr),%rax
1843	adc	\$0,%rdx
1844	add	%r14,%r13
1845	mov	%rdx,%r14
1846	adc	\$0,%r14
1847
1848	mulq	$m0
1849	 mov	$carry,$m0		# n0*a[i]
1850	add	%rax,%r15
1851	 mov	8*0($nptr),%rax		# n[0]
1852	adc	\$0,%rdx
1853	add	%r15,%r14
1854	mov	%rdx,%r15
1855	adc	\$0,%r15
1856
1857	dec	%ecx
1858	jnz	.L8x_reduce
1859
1860	lea	8*8($nptr),$nptr
1861	xor	%rax,%rax
1862	mov	8+8(%rsp),%rdx		# pull end of t[]
1863	cmp	0+8(%rsp),$nptr		# end of n[]?
1864	jae	.L8x_no_tail
1865
1866	.byte	0x66
1867	add	8*0($tptr),%r8
1868	adc	8*1($tptr),%r9
1869	adc	8*2($tptr),%r10
1870	adc	8*3($tptr),%r11
1871	adc	8*4($tptr),%r12
1872	adc	8*5($tptr),%r13
1873	adc	8*6($tptr),%r14
1874	adc	8*7($tptr),%r15
1875	sbb	$carry,$carry		# top carry
1876
1877	mov	48+56+8(%rsp),$m0	# pull n0*a[0]
1878	mov	\$8,%ecx
1879	mov	8*0($nptr),%rax
1880	jmp	.L8x_tail
1881
1882.align	32
1883.L8x_tail:
1884	mulq	$m0
1885	add	%rax,%r8
1886	 mov	8*1($nptr),%rax
1887	 mov	%r8,($tptr)		# save result
1888	mov	%rdx,%r8
1889	adc	\$0,%r8
1890
1891	mulq	$m0
1892	add	%rax,%r9
1893	 mov	8*2($nptr),%rax
1894	adc	\$0,%rdx
1895	add	%r9,%r8
1896	 lea	8($tptr),$tptr		# $tptr++
1897	mov	%rdx,%r9
1898	adc	\$0,%r9
1899
1900	mulq	$m0
1901	add	%rax,%r10
1902	 mov	8*3($nptr),%rax
1903	adc	\$0,%rdx
1904	add	%r10,%r9
1905	mov	%rdx,%r10
1906	adc	\$0,%r10
1907
1908	mulq	$m0
1909	add	%rax,%r11
1910	 mov	8*4($nptr),%rax
1911	adc	\$0,%rdx
1912	add	%r11,%r10
1913	mov	%rdx,%r11
1914	adc	\$0,%r11
1915
1916	mulq	$m0
1917	add	%rax,%r12
1918	 mov	8*5($nptr),%rax
1919	adc	\$0,%rdx
1920	add	%r12,%r11
1921	mov	%rdx,%r12
1922	adc	\$0,%r12
1923
1924	mulq	$m0
1925	add	%rax,%r13
1926	 mov	8*6($nptr),%rax
1927	adc	\$0,%rdx
1928	add	%r13,%r12
1929	mov	%rdx,%r13
1930	adc	\$0,%r13
1931
1932	mulq	$m0
1933	add	%rax,%r14
1934	 mov	8*7($nptr),%rax
1935	adc	\$0,%rdx
1936	add	%r14,%r13
1937	mov	%rdx,%r14
1938	adc	\$0,%r14
1939
1940	mulq	$m0
1941	 mov	48-16+8(%rsp,%rcx,8),$m0# pull n0*a[i]
1942	add	%rax,%r15
1943	adc	\$0,%rdx
1944	add	%r15,%r14
1945	 mov	8*0($nptr),%rax		# pull n[0]
1946	mov	%rdx,%r15
1947	adc	\$0,%r15
1948
1949	dec	%ecx
1950	jnz	.L8x_tail
1951
1952	lea	8*8($nptr),$nptr
1953	mov	8+8(%rsp),%rdx		# pull end of t[]
1954	cmp	0+8(%rsp),$nptr		# end of n[]?
1955	jae	.L8x_tail_done		# break out of loop
1956
1957	 mov	48+56+8(%rsp),$m0	# pull n0*a[0]
1958	neg	$carry
1959	 mov	8*0($nptr),%rax		# pull n[0]
1960	adc	8*0($tptr),%r8
1961	adc	8*1($tptr),%r9
1962	adc	8*2($tptr),%r10
1963	adc	8*3($tptr),%r11
1964	adc	8*4($tptr),%r12
1965	adc	8*5($tptr),%r13
1966	adc	8*6($tptr),%r14
1967	adc	8*7($tptr),%r15
1968	sbb	$carry,$carry		# top carry
1969
1970	mov	\$8,%ecx
1971	jmp	.L8x_tail
1972
1973.align	32
1974.L8x_tail_done:
1975	xor	%rax,%rax
1976	add	(%rdx),%r8		# can this overflow?
1977	adc	\$0,%r9
1978	adc	\$0,%r10
1979	adc	\$0,%r11
1980	adc	\$0,%r12
1981	adc	\$0,%r13
1982	adc	\$0,%r14
1983	adc	\$0,%r15
1984	adc	\$0,%rax
1985
1986	neg	$carry
1987.L8x_no_tail:
1988	adc	8*0($tptr),%r8
1989	adc	8*1($tptr),%r9
1990	adc	8*2($tptr),%r10
1991	adc	8*3($tptr),%r11
1992	adc	8*4($tptr),%r12
1993	adc	8*5($tptr),%r13
1994	adc	8*6($tptr),%r14
1995	adc	8*7($tptr),%r15
1996	adc	\$0,%rax		# top-most carry
1997	 mov	-8($nptr),%rcx		# np[num-1]
1998	 xor	$carry,$carry
1999
2000	movq	%xmm2,$nptr		# restore $nptr
2001
2002	mov	%r8,8*0($tptr)		# store top 512 bits
2003	mov	%r9,8*1($tptr)
2004	 movq	%xmm3,$num		# $num is %r9, can't be moved upwards
2005	mov	%r10,8*2($tptr)
2006	mov	%r11,8*3($tptr)
2007	mov	%r12,8*4($tptr)
2008	mov	%r13,8*5($tptr)
2009	mov	%r14,8*6($tptr)
2010	mov	%r15,8*7($tptr)
2011	lea	8*8($tptr),$tptr
2012
2013	cmp	%rdx,$tptr		# end of t[]?
2014	jb	.L8x_reduction_loop
2015	ret
2016.cfi_endproc
2017.size	bn_sqr8x_internal,.-bn_sqr8x_internal
2018___
2019}
2020##############################################################
2021# Post-condition, 4x unrolled
2022#
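# What this amounts to, as a hedged C sketch (tp[] is the reduced value,
# rp[] the output, and mask is an all-ones or all-zero word derived from the
# top-most carry / comparison in the callers; names are illustrative):
#
#	BN_ULONG borrow = 0;
#	for (int i = 0; i < num; i++) {
#		// same loads, stores and arithmetic whether or not the
#		// subtraction is "real": only the masked operand changes
#		unsigned __int128 d = (unsigned __int128)tp[i]
#		                      - (n[i] & mask) - borrow;
#		rp[i] = (BN_ULONG)d;
#		borrow = (BN_ULONG)(d >> 64) & 1;
#	}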
2023{
2024my ($tptr,$nptr)=("%rbx","%rbp");
2025$code.=<<___;
2026.type	__bn_post4x_internal,\@abi-omnipotent
2027.align	32
2028__bn_post4x_internal:
2029.cfi_startproc
2030	mov	8*0($nptr),%r12
2031	lea	(%rdi,$num),$tptr	# %rdi was $tptr above
2032	mov	$num,%rcx
2033	movq	%xmm1,$rptr		# restore $rptr
2034	neg	%rax
2035	movq	%xmm1,$aptr		# prepare for back-to-back call
2036	sar	\$3+2,%rcx
2037	dec	%r12			# so that after 'not' we get -n[0]
2038	xor	%r10,%r10
2039	mov	8*1($nptr),%r13
2040	mov	8*2($nptr),%r14
2041	mov	8*3($nptr),%r15
2042	jmp	.Lsqr4x_sub_entry
2043
2044.align	16
2045.Lsqr4x_sub:
2046	mov	8*0($nptr),%r12
2047	mov	8*1($nptr),%r13
2048	mov	8*2($nptr),%r14
2049	mov	8*3($nptr),%r15
2050.Lsqr4x_sub_entry:
2051	lea	8*4($nptr),$nptr
2052	not	%r12
2053	not	%r13
2054	not	%r14
2055	not	%r15
2056	and	%rax,%r12
2057	and	%rax,%r13
2058	and	%rax,%r14
2059	and	%rax,%r15
2060
2061	neg	%r10			# mov %r10,%cf
2062	adc	8*0($tptr),%r12
2063	adc	8*1($tptr),%r13
2064	adc	8*2($tptr),%r14
2065	adc	8*3($tptr),%r15
2066	mov	%r12,8*0($rptr)
2067	lea	8*4($tptr),$tptr
2068	mov	%r13,8*1($rptr)
2069	sbb	%r10,%r10		# mov %cf,%r10
2070	mov	%r14,8*2($rptr)
2071	mov	%r15,8*3($rptr)
2072	lea	8*4($rptr),$rptr
2073
2074	inc	%rcx			# pass %cf
2075	jnz	.Lsqr4x_sub
2076
2077	mov	$num,%r10		# prepare for back-to-back call
2078	neg	$num			# restore $num
2079	ret
2080.cfi_endproc
2081.size	__bn_post4x_internal,.-__bn_post4x_internal
2082___
2083}
2084}}}
2085
2086if ($addx) {{{
2087my $bp="%rdx";	# restore original value
2088
2089$code.=<<___;
2090.globl	bn_mulx4x_mont_gather5
2091.type	bn_mulx4x_mont_gather5,\@function,6
2092.align	32
2093bn_mulx4x_mont_gather5:
2094.cfi_startproc
2095	_CET_ENDBR
2096	mov	%rsp,%rax
2097.cfi_def_cfa_register	%rax
2098	push	%rbx
2099.cfi_push	%rbx
2100	push	%rbp
2101.cfi_push	%rbp
2102	push	%r12
2103.cfi_push	%r12
2104	push	%r13
2105.cfi_push	%r13
2106	push	%r14
2107.cfi_push	%r14
2108	push	%r15
2109.cfi_push	%r15
2110.Lmulx4x_prologue:
2111
2112	# num is declared as an int, a 32-bit parameter, so the upper half is
2113	# undefined. It is important that this write to ${num}, which zeros the
2114	# upper half, predates the first access.
2115	shl	\$3,${num}d		# convert $num to bytes
2116	lea	($num,$num,2),%r10	# 3*$num in bytes
2117	neg	$num			# -$num
2118	mov	($n0),$n0		# *n0
2119
	##############################################################
	# Ensure that the stack frame doesn't alias with $rp+3*$num
	# modulo 4096, which covers ret[num], am[num] and n[num]
	# (see bn_exp.c). This is done to let the CPU's memory
	# disambiguation logic do its magic. [An extra [num] is allocated
	# in order to align with bn_power5's frame, which is cleansed
	# after completing exponentiation. An extra 256 bytes is for the
	# power mask calculated from the 7th argument, the index.]
	#
2129	lea	-320(%rsp,$num,2),%r11
2130	mov	%rsp,%rbp
2131	sub	$rp,%r11
2132	and	\$4095,%r11
2133	cmp	%r11,%r10
2134	jb	.Lmulx4xsp_alt
	sub	%r11,%rbp		# align with $rp
2136	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
2137	jmp	.Lmulx4xsp_done
2138
2139.Lmulx4xsp_alt:
2140	lea	4096-320(,$num,2),%r10
2141	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
2142	sub	%r10,%r11
2143	mov	\$0,%r10
2144	cmovc	%r10,%r11
2145	sub	%r11,%rbp
2146.Lmulx4xsp_done:
2147	and	\$-64,%rbp		# ensure alignment
2148	mov	%rsp,%r11
2149	sub	%rbp,%r11
2150	and	\$-4096,%r11
2151	lea	(%rbp,%r11),%rsp
2152	mov	(%rsp),%r10
2153	cmp	%rbp,%rsp
2154	ja	.Lmulx4x_page_walk
2155	jmp	.Lmulx4x_page_walk_done
2156
2157.Lmulx4x_page_walk:
2158	lea	-4096(%rsp),%rsp
2159	mov	(%rsp),%r10
2160	cmp	%rbp,%rsp
2161	ja	.Lmulx4x_page_walk
2162.Lmulx4x_page_walk_done:
2163
2164	##############################################################
2165	# Stack layout
2166	# +0	-num
2167	# +8	off-loaded &b[i]
2168	# +16	end of b[num]
2169	# +24	inner counter
2170	# +32	saved n0
2171	# +40	saved %rsp
2172	# +48
2173	# +56	saved rp
2174	# +64	tmp[num+1]
2175	#
2176	mov	$n0, 32(%rsp)		# save *n0
2177	mov	%rax,40(%rsp)		# save original %rsp
2178.cfi_cfa_expression	%rsp+40,deref,+8
2179.Lmulx4x_body:
2180	call	mulx4x_internal
2181
2182	mov	40(%rsp),%rsi		# restore %rsp
2183.cfi_def_cfa	%rsi,8
2184	mov	\$1,%rax
2185
2186	mov	-48(%rsi),%r15
2187.cfi_restore	%r15
2188	mov	-40(%rsi),%r14
2189.cfi_restore	%r14
2190	mov	-32(%rsi),%r13
2191.cfi_restore	%r13
2192	mov	-24(%rsi),%r12
2193.cfi_restore	%r12
2194	mov	-16(%rsi),%rbp
2195.cfi_restore	%rbp
2196	mov	-8(%rsi),%rbx
2197.cfi_restore	%rbx
2198	lea	(%rsi),%rsp
2199.cfi_def_cfa_register	%rsp
2200.Lmulx4x_epilogue:
2201	ret
2202.cfi_endproc
2203.size	bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
2204
2205.type	mulx4x_internal,\@abi-omnipotent
2206.align	32
2207mulx4x_internal:
2208.cfi_startproc
2209	mov	$num,8(%rsp)		# save -$num (it was in bytes)
2210	mov	$num,%r10
2211	neg	$num			# restore $num
2212	shl	\$5,$num
2213	neg	%r10			# restore $num
2214	lea	128($bp,$num),%r13	# end of powers table (+size optimization)
2215	shr	\$5+5,$num
2216	movd	`($win64?56:8)`(%rax),%xmm5	# load 7th argument
2217	sub	\$1,$num
2218	lea	.Linc(%rip),%rax
2219	mov	%r13,16+8(%rsp)		# end of b[num]
2220	mov	$num,24+8(%rsp)		# inner counter
2221	mov	$rp, 56+8(%rsp)		# save $rp
2222___
2223my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
2224   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
2225my $rptr=$bptr;
2226my $STRIDE=2**5*8;		# 5 is "window size"
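# $STRIDE is therefore 256 bytes: one 8-byte word from each of the 2^5 table
# entries per interleaved row (see the layout comment in bn_scatter5 below).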
2227$code.=<<___;
2228	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
2229	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
2230	lea	88-112(%rsp,%r10),%r10	# place the mask after tp[num+1] (+ICache optimization)
2231	lea	128($bp),$bptr		# size optimization
2232
2233	pshufd	\$0,%xmm5,%xmm5		# broadcast index
2234	movdqa	%xmm1,%xmm4
2235	.byte	0x67
2236	movdqa	%xmm1,%xmm2
2237___
2238########################################################################
2239# Calculate masks by comparing 0..31 to $idx and save result to stack.
2240#
2241# We compute sixteen 16-byte masks and store them on the stack. Mask i is stored
2242# in `16*i + 112`(%r10) and contains the comparisons for idx == 2*i and
2243# idx == 2*i + 1 in its lower and upper halves, respectively. Mask calculations
2244# are scheduled in groups of four.
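#
# Conceptually, the gather that follows is (an illustrative sketch):
#
#	acc = 0;
#	for (i = 0; i < 32; i++)
#		acc |= table[i] & (i == idx ? ~0 : 0);	/* pcmpeqd/pand/por */
#
# Every entry is touched whatever idx is, so the memory access pattern does
# not depend on the secret index.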
2245$code.=<<___;
2246	.byte	0x67
2247	paddd	%xmm0,%xmm1
2248	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
2249	movdqa	%xmm4,%xmm3
2250___
2251for($i=0;$i<$STRIDE/16-4;$i+=4) {
2252$code.=<<___;
2253	paddd	%xmm1,%xmm2
2254	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
2255	movdqa	%xmm0,`16*($i+0)+112`(%r10)
2256	movdqa	%xmm4,%xmm0
2257
2258	paddd	%xmm2,%xmm3
2259	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
2260	movdqa	%xmm1,`16*($i+1)+112`(%r10)
2261	movdqa	%xmm4,%xmm1
2262
2263	paddd	%xmm3,%xmm0
2264	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
2265	movdqa	%xmm2,`16*($i+2)+112`(%r10)
2266	movdqa	%xmm4,%xmm2
2267
2268	paddd	%xmm0,%xmm1
2269	pcmpeqd	%xmm5,%xmm0
2270	movdqa	%xmm3,`16*($i+3)+112`(%r10)
2271	movdqa	%xmm4,%xmm3
2272___
2273}
2274$code.=<<___;				# last iteration can be optimized
2275	.byte	0x67
2276	paddd	%xmm1,%xmm2
2277	pcmpeqd	%xmm5,%xmm1
2278	movdqa	%xmm0,`16*($i+0)+112`(%r10)
2279
2280	paddd	%xmm2,%xmm3
2281	pcmpeqd	%xmm5,%xmm2
2282	movdqa	%xmm1,`16*($i+1)+112`(%r10)
2283
2284	pcmpeqd	%xmm5,%xmm3
2285	movdqa	%xmm2,`16*($i+2)+112`(%r10)
2286
2287	pand	`16*($i+0)-128`($bptr),%xmm0	# while it's still in register
2288	pand	`16*($i+1)-128`($bptr),%xmm1
2289	pand	`16*($i+2)-128`($bptr),%xmm2
2290	movdqa	%xmm3,`16*($i+3)+112`(%r10)
2291	pand	`16*($i+3)-128`($bptr),%xmm3
2292	por	%xmm2,%xmm0
2293	por	%xmm3,%xmm1
2294___
2295for($i=0;$i<$STRIDE/16-4;$i+=4) {
2296$code.=<<___;
2297	movdqa	`16*($i+0)-128`($bptr),%xmm4
2298	movdqa	`16*($i+1)-128`($bptr),%xmm5
2299	movdqa	`16*($i+2)-128`($bptr),%xmm2
2300	pand	`16*($i+0)+112`(%r10),%xmm4
2301	movdqa	`16*($i+3)-128`($bptr),%xmm3
2302	pand	`16*($i+1)+112`(%r10),%xmm5
2303	por	%xmm4,%xmm0
2304	pand	`16*($i+2)+112`(%r10),%xmm2
2305	por	%xmm5,%xmm1
2306	pand	`16*($i+3)+112`(%r10),%xmm3
2307	por	%xmm2,%xmm0
2308	por	%xmm3,%xmm1
2309___
2310}
2311$code.=<<___;
2312	pxor	%xmm1,%xmm0
2313	# Combine the upper and lower halves of %xmm0.
2314	pshufd	\$0x4e,%xmm0,%xmm1	# Swap upper and lower halves.
2315	por	%xmm1,%xmm0
2316	lea	$STRIDE($bptr),$bptr
2317	movq	%xmm0,%rdx		# bp[0]
2318	lea	64+8*4+8(%rsp),$tptr
2319
2320	mov	%rdx,$bi
2321	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
2322	mulx	1*8($aptr),%r11,%r12	# a[1]*b[0]
2323	add	%rax,%r11
2324	mulx	2*8($aptr),%rax,%r13	# ...
2325	adc	%rax,%r12
2326	adc	\$0,%r13
2327	mulx	3*8($aptr),%rax,%r14
2328
2329	mov	$mi,%r15
2330	imulq	32+8(%rsp),$mi		# "t[0]"*n0
2331	xor	$zero,$zero		# cf=0, of=0
2332	mov	$mi,%rdx
2333
2334	mov	$bptr,8+8(%rsp)		# off-load &b[i]
2335
2336	lea	4*8($aptr),$aptr
2337	adcx	%rax,%r13
2338	adcx	$zero,%r14		# cf=0
2339
2340	mulx	0*8($nptr),%rax,%r10
2341	adcx	%rax,%r15		# discarded
2342	adox	%r11,%r10
2343	mulx	1*8($nptr),%rax,%r11
2344	adcx	%rax,%r10
2345	adox	%r12,%r11
2346	mulx	2*8($nptr),%rax,%r12
2347	mov	24+8(%rsp),$bptr	# counter value
2348	mov	%r10,-8*4($tptr)
2349	adcx	%rax,%r11
2350	adox	%r13,%r12
2351	mulx	3*8($nptr),%rax,%r15
2352	 mov	$bi,%rdx
2353	mov	%r11,-8*3($tptr)
2354	adcx	%rax,%r12
2355	adox	$zero,%r15		# of=0
2356	lea	4*8($nptr),$nptr
2357	mov	%r12,-8*2($tptr)
2358	jmp	.Lmulx4x_1st
2359
2360.align	32
2361.Lmulx4x_1st:
2362	adcx	$zero,%r15		# cf=0, modulo-scheduled
2363	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
2364	adcx	%r14,%r10
2365	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
2366	adcx	%rax,%r11
2367	mulx	2*8($aptr),%r12,%rax	# ...
2368	adcx	%r14,%r12
2369	mulx	3*8($aptr),%r13,%r14
2370	 .byte	0x67,0x67
2371	 mov	$mi,%rdx
2372	adcx	%rax,%r13
2373	adcx	$zero,%r14		# cf=0
2374	lea	4*8($aptr),$aptr
2375	lea	4*8($tptr),$tptr
2376
2377	adox	%r15,%r10
2378	mulx	0*8($nptr),%rax,%r15
2379	adcx	%rax,%r10
2380	adox	%r15,%r11
2381	mulx	1*8($nptr),%rax,%r15
2382	adcx	%rax,%r11
2383	adox	%r15,%r12
2384	mulx	2*8($nptr),%rax,%r15
2385	mov	%r10,-5*8($tptr)
2386	adcx	%rax,%r12
2387	mov	%r11,-4*8($tptr)
2388	adox	%r15,%r13
2389	mulx	3*8($nptr),%rax,%r15
2390	 mov	$bi,%rdx
2391	mov	%r12,-3*8($tptr)
2392	adcx	%rax,%r13
2393	adox	$zero,%r15
2394	lea	4*8($nptr),$nptr
2395	mov	%r13,-2*8($tptr)
2396
2397	dec	$bptr			# of=0, pass cf
2398	jnz	.Lmulx4x_1st
2399
2400	mov	8(%rsp),$num		# load -num
2401	adc	$zero,%r15		# modulo-scheduled
2402	lea	($aptr,$num),$aptr	# rewind $aptr
2403	add	%r15,%r14
2404	mov	8+8(%rsp),$bptr		# re-load &b[i]
2405	adc	$zero,$zero		# top-most carry
2406	mov	%r14,-1*8($tptr)
2407	jmp	.Lmulx4x_outer
2408
2409.align	32
2410.Lmulx4x_outer:
2411	lea	16-256($tptr),%r10	# where 256-byte mask is (+density control)
2412	pxor	%xmm4,%xmm4
2413	.byte	0x67,0x67
2414	pxor	%xmm5,%xmm5
2415___
2416for($i=0;$i<$STRIDE/16;$i+=4) {
2417$code.=<<___;
2418	movdqa	`16*($i+0)-128`($bptr),%xmm0
2419	movdqa	`16*($i+1)-128`($bptr),%xmm1
2420	movdqa	`16*($i+2)-128`($bptr),%xmm2
2421	pand	`16*($i+0)+256`(%r10),%xmm0
2422	movdqa	`16*($i+3)-128`($bptr),%xmm3
2423	pand	`16*($i+1)+256`(%r10),%xmm1
2424	por	%xmm0,%xmm4
2425	pand	`16*($i+2)+256`(%r10),%xmm2
2426	por	%xmm1,%xmm5
2427	pand	`16*($i+3)+256`(%r10),%xmm3
2428	por	%xmm2,%xmm4
2429	por	%xmm3,%xmm5
2430___
2431}
2432$code.=<<___;
2433	por	%xmm5,%xmm4
2434	# Combine the upper and lower halves of %xmm4 as %xmm0.
2435	pshufd	\$0x4e,%xmm4,%xmm0	# Swap upper and lower halves.
2436	por	%xmm4,%xmm0
2437	lea	$STRIDE($bptr),$bptr
2438	movq	%xmm0,%rdx		# m0=bp[i]
2439
2440	mov	$zero,($tptr)		# save top-most carry
2441	lea	4*8($tptr,$num),$tptr	# rewind $tptr
2442	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
2443	xor	$zero,$zero		# cf=0, of=0
2444	mov	%rdx,$bi
2445	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
2446	adox	-4*8($tptr),$mi		# +t[0]
2447	adcx	%r14,%r11
2448	mulx	2*8($aptr),%r15,%r13	# ...
2449	adox	-3*8($tptr),%r11
2450	adcx	%r15,%r12
2451	mulx	3*8($aptr),%rdx,%r14
2452	adox	-2*8($tptr),%r12
2453	adcx	%rdx,%r13
2454	lea	($nptr,$num),$nptr	# rewind $nptr
2455	lea	4*8($aptr),$aptr
2456	adox	-1*8($tptr),%r13
2457	adcx	$zero,%r14
2458	adox	$zero,%r14
2459
2460	mov	$mi,%r15
2461	imulq	32+8(%rsp),$mi		# "t[0]"*n0
2462
2463	mov	$mi,%rdx
2464	xor	$zero,$zero		# cf=0, of=0
2465	mov	$bptr,8+8(%rsp)		# off-load &b[i]
2466
2467	mulx	0*8($nptr),%rax,%r10
2468	adcx	%rax,%r15		# discarded
2469	adox	%r11,%r10
2470	mulx	1*8($nptr),%rax,%r11
2471	adcx	%rax,%r10
2472	adox	%r12,%r11
2473	mulx	2*8($nptr),%rax,%r12
2474	adcx	%rax,%r11
2475	adox	%r13,%r12
2476	mulx	3*8($nptr),%rax,%r15
2477	 mov	$bi,%rdx
2478	mov	24+8(%rsp),$bptr	# counter value
2479	mov	%r10,-8*4($tptr)
2480	adcx	%rax,%r12
2481	mov	%r11,-8*3($tptr)
2482	adox	$zero,%r15		# of=0
2483	mov	%r12,-8*2($tptr)
2484	lea	4*8($nptr),$nptr
2485	jmp	.Lmulx4x_inner
2486
2487.align	32
2488.Lmulx4x_inner:
2489	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
2490	adcx	$zero,%r15		# cf=0, modulo-scheduled
2491	adox	%r14,%r10
2492	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
2493	adcx	0*8($tptr),%r10
2494	adox	%rax,%r11
2495	mulx	2*8($aptr),%r12,%rax	# ...
2496	adcx	1*8($tptr),%r11
2497	adox	%r14,%r12
2498	mulx	3*8($aptr),%r13,%r14
2499	 mov	$mi,%rdx
2500	adcx	2*8($tptr),%r12
2501	adox	%rax,%r13
2502	adcx	3*8($tptr),%r13
2503	adox	$zero,%r14		# of=0
2504	lea	4*8($aptr),$aptr
2505	lea	4*8($tptr),$tptr
2506	adcx	$zero,%r14		# cf=0
2507
2508	adox	%r15,%r10
2509	mulx	0*8($nptr),%rax,%r15
2510	adcx	%rax,%r10
2511	adox	%r15,%r11
2512	mulx	1*8($nptr),%rax,%r15
2513	adcx	%rax,%r11
2514	adox	%r15,%r12
2515	mulx	2*8($nptr),%rax,%r15
2516	mov	%r10,-5*8($tptr)
2517	adcx	%rax,%r12
2518	adox	%r15,%r13
2519	mov	%r11,-4*8($tptr)
2520	mulx	3*8($nptr),%rax,%r15
2521	 mov	$bi,%rdx
2522	lea	4*8($nptr),$nptr
2523	mov	%r12,-3*8($tptr)
2524	adcx	%rax,%r13
2525	adox	$zero,%r15
2526	mov	%r13,-2*8($tptr)
2527
2528	dec	$bptr			# of=0, pass cf
2529	jnz	.Lmulx4x_inner
2530
2531	mov	0+8(%rsp),$num		# load -num
2532	adc	$zero,%r15		# modulo-scheduled
2533	sub	0*8($tptr),$bptr	# pull top-most carry to %cf
2534	mov	8+8(%rsp),$bptr		# re-load &b[i]
2535	mov	16+8(%rsp),%r10
2536	adc	%r15,%r14
2537	lea	($aptr,$num),$aptr	# rewind $aptr
2538	adc	$zero,$zero		# top-most carry
2539	mov	%r14,-1*8($tptr)
2540
2541	cmp	%r10,$bptr
2542	jb	.Lmulx4x_outer
2543
2544	mov	-8($nptr),%r10
2545	mov	$zero,%r8
2546	mov	($nptr,$num),%r12
2547	lea	($nptr,$num),%rbp	# rewind $nptr
2548	mov	$num,%rcx
2549	lea	($tptr,$num),%rdi	# rewind $tptr
2550	xor	%eax,%eax
2551	xor	%r15,%r15
2552	sub	%r14,%r10		# compare top-most words
2553	adc	%r15,%r15
2554	or	%r15,%r8
2555	sar	\$3+2,%rcx
2556	sub	%r8,%rax		# %rax=-%r8
2557	mov	56+8(%rsp),%rdx		# restore rp
2558	dec	%r12			# so that after 'not' we get -n[0]
2559	mov	8*1(%rbp),%r13
2560	xor	%r8,%r8
2561	mov	8*2(%rbp),%r14
2562	mov	8*3(%rbp),%r15
2563	jmp	.Lsqrx4x_sub_entry	# common post-condition
2564.cfi_endproc
2565.size	mulx4x_internal,.-mulx4x_internal
2566___
2567}{
2568######################################################################
2569# void bn_powerx5(
2570my $rptr="%rdi";	# BN_ULONG *rptr,
2571my $aptr="%rsi";	# const BN_ULONG *aptr,
2572my $bptr="%rdx";	# const BN_ULONG *table,
2573my $nptr="%rcx";	# const BN_ULONG *nptr,
2574my $n0  ="%r8";		# const BN_ULONG *n0,
2575my $num ="%r9";		# int num, has to be divisible by 8
2576			# int pwr);
2577
2578my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
2579my @A0=("%r10","%r11");
2580my @A1=("%r12","%r13");
2581my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
2582
2583$code.=<<___;
2584.globl	bn_powerx5
2585.type	bn_powerx5,\@function,6
2586.align	32
2587bn_powerx5:
2588.cfi_startproc
2589	_CET_ENDBR
2590	mov	%rsp,%rax
2591.cfi_def_cfa_register	%rax
2592	push	%rbx
2593.cfi_push	%rbx
2594	push	%rbp
2595.cfi_push	%rbp
2596	push	%r12
2597.cfi_push	%r12
2598	push	%r13
2599.cfi_push	%r13
2600	push	%r14
2601.cfi_push	%r14
2602	push	%r15
2603.cfi_push	%r15
2604.Lpowerx5_prologue:
2605
2606	# num is declared as an int, a 32-bit parameter, so the upper half is
2607	# undefined. It is important that this write to ${num}, which zeros the
2608	# upper half, comes before the first access.
2609	shl	\$3,${num}d		# convert $num to bytes
2610	lea	($num,$num,2),%r10	# 3*$num in bytes
2611	neg	$num
2612	mov	($n0),$n0		# *n0
2613
2614	##############################################################
2615	# Ensure that stack frame doesn't alias with $rptr+3*$num
2616	# modulo 4096, which covers ret[num], am[num] and n[num]
2617	# (see bn_exp.c). This is done to allow the memory disambiguation
2618	# logic to do its magic. [An extra 256 bytes is for the power mask
2619	# calculated from the 7th argument, the index.]
2620	#
2621	lea	-320(%rsp,$num,2),%r11
2622	mov	%rsp,%rbp
2623	sub	$rptr,%r11
2624	and	\$4095,%r11
2625	cmp	%r11,%r10
2626	jb	.Lpwrx_sp_alt
2627	sub	%r11,%rbp		# align with $aptr
2628	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
2629	jmp	.Lpwrx_sp_done
2630
2631.align	32
2632.Lpwrx_sp_alt:
2633	lea	4096-320(,$num,2),%r10
2634	lea	-320(%rbp,$num,2),%rbp	# alloca(frame+2*$num*8+256)
2635	sub	%r10,%r11
2636	mov	\$0,%r10
2637	cmovc	%r10,%r11
2638	sub	%r11,%rbp
2639.Lpwrx_sp_done:
2640	and	\$-64,%rbp
2641	mov	%rsp,%r11
2642	sub	%rbp,%r11
2643	and	\$-4096,%r11
2644	lea	(%rbp,%r11),%rsp
2645	mov	(%rsp),%r10
2646	cmp	%rbp,%rsp
2647	ja	.Lpwrx_page_walk
2648	jmp	.Lpwrx_page_walk_done
2649
2650.Lpwrx_page_walk:
2651	lea	-4096(%rsp),%rsp
2652	mov	(%rsp),%r10
2653	cmp	%rbp,%rsp
2654	ja	.Lpwrx_page_walk
2655.Lpwrx_page_walk_done:
2656
2657	mov	$num,%r10
2658	neg	$num
2659
2660	##############################################################
2661	# Stack layout
2662	#
2663	# +0	saved $num, used in reduction section
2664	# +8	&t[2*$num], used in reduction section
2665	# +16	intermediate carry bit
2666	# +24	top-most carry bit, used in reduction section
2667	# +32	saved *n0
2668	# +40	saved %rsp
2669	# +48	t[2*$num]
2670	#
2671	pxor	%xmm0,%xmm0
2672	movq	$rptr,%xmm1		# save $rptr
2673	movq	$nptr,%xmm2		# save $nptr
2674	movq	%r10, %xmm3		# -$num
2675	movq	$bptr,%xmm4
2676	mov	$n0,  32(%rsp)
2677	mov	%rax, 40(%rsp)		# save original %rsp
2678.cfi_cfa_expression	%rsp+40,deref,+8
2679.Lpowerx5_body:
2680
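	# One window of the fixed-window exponentiation (window size 5), as a
	# sketch: the five back-to-back squarings compute a^(2^5), and the
	# final mulx4x_internal call multiplies by the power gathered from the
	# table (see BN_mod_exp_mont_consttime in bn_exp.c).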
2681	call	__bn_sqrx8x_internal
2682	call	__bn_postx4x_internal
2683	call	__bn_sqrx8x_internal
2684	call	__bn_postx4x_internal
2685	call	__bn_sqrx8x_internal
2686	call	__bn_postx4x_internal
2687	call	__bn_sqrx8x_internal
2688	call	__bn_postx4x_internal
2689	call	__bn_sqrx8x_internal
2690	call	__bn_postx4x_internal
2691
2692	mov	%r10,$num		# -num
2693	mov	$aptr,$rptr
2694	movq	%xmm2,$nptr
2695	movq	%xmm4,$bptr
2696	mov	40(%rsp),%rax
2697
2698	call	mulx4x_internal
2699
2700	mov	40(%rsp),%rsi		# restore %rsp
2701.cfi_def_cfa	%rsi,8
2702	mov	\$1,%rax
2703
2704	mov	-48(%rsi),%r15
2705.cfi_restore	%r15
2706	mov	-40(%rsi),%r14
2707.cfi_restore	%r14
2708	mov	-32(%rsi),%r13
2709.cfi_restore	%r13
2710	mov	-24(%rsi),%r12
2711.cfi_restore	%r12
2712	mov	-16(%rsi),%rbp
2713.cfi_restore	%rbp
2714	mov	-8(%rsi),%rbx
2715.cfi_restore	%rbx
2716	lea	(%rsi),%rsp
2717.cfi_def_cfa_register	%rsp
2718.Lpowerx5_epilogue:
2719	ret
2720.cfi_endproc
2721.size	bn_powerx5,.-bn_powerx5
2722
2723.globl	bn_sqrx8x_internal
2724.hidden	bn_sqrx8x_internal
2725.type	bn_sqrx8x_internal,\@abi-omnipotent
2726.align	32
2727bn_sqrx8x_internal:
2728__bn_sqrx8x_internal:
2729.cfi_startproc
2730	_CET_ENDBR
2731	##################################################################
2732	# Squaring part:
2733	#
2734	# a) multiply-n-add everything but a[i]*a[i];
2735	# b) shift result of a) by 1 to the left and accumulate
2736	#    a[i]*a[i] products;
2737	#
2738	##################################################################
2739	# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2740	#                                                     a[1]a[0]
2741	#                                                 a[2]a[0]
2742	#                                             a[3]a[0]
2743	#                                             a[2]a[1]
2744	#                                         a[3]a[1]
2745	#                                     a[3]a[2]
2746	#
2747	#                                         a[4]a[0]
2748	#                                     a[5]a[0]
2749	#                                 a[6]a[0]
2750	#                             a[7]a[0]
2751	#                                     a[4]a[1]
2752	#                                 a[5]a[1]
2753	#                             a[6]a[1]
2754	#                         a[7]a[1]
2755	#                                 a[4]a[2]
2756	#                             a[5]a[2]
2757	#                         a[6]a[2]
2758	#                     a[7]a[2]
2759	#                             a[4]a[3]
2760	#                         a[5]a[3]
2761	#                     a[6]a[3]
2762	#                 a[7]a[3]
2763	#
2764	#                     a[5]a[4]
2765	#                 a[6]a[4]
2766	#             a[7]a[4]
2767	#             a[6]a[5]
2768	#         a[7]a[5]
2769	#     a[7]a[6]
2770	# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2771___
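# The math behind steps a) and b) above, as a sketch with B the word base
# (2^64):
#
#	a^2 = sum_{i} a[i]^2 * B^(2*i)  +  2 * sum_{i<j} a[i]*a[j] * B^(i+j)
#
# Step a) accumulates each cross product a[i]*a[j] (i < j) once, and step b)
# doubles that sum with a one-bit left shift while folding in the a[i]^2
# diagonal terms.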
2772{
2773my ($zero,$carry)=("%rbp","%rcx");
2774my $aaptr=$zero;
2775$code.=<<___;
2776	lea	48+8(%rsp),$tptr
2777	lea	($aptr,$num),$aaptr
2778	mov	$num,0+8(%rsp)			# save $num
2779	mov	$aaptr,8+8(%rsp)		# save end of $aptr
2780	jmp	.Lsqr8x_zero_start
2781
2782.align	32
2783.byte	0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
2784.Lsqrx8x_zero:
2785	.byte	0x3e
2786	movdqa	%xmm0,0*8($tptr)
2787	movdqa	%xmm0,2*8($tptr)
2788	movdqa	%xmm0,4*8($tptr)
2789	movdqa	%xmm0,6*8($tptr)
2790.Lsqr8x_zero_start:			# aligned at 32
2791	movdqa	%xmm0,8*8($tptr)
2792	movdqa	%xmm0,10*8($tptr)
2793	movdqa	%xmm0,12*8($tptr)
2794	movdqa	%xmm0,14*8($tptr)
2795	lea	16*8($tptr),$tptr
2796	sub	\$64,$num
2797	jnz	.Lsqrx8x_zero
2798
2799	mov	0*8($aptr),%rdx		# a[0], modulo-scheduled
2800	#xor	%r9,%r9			# t[1], ex-$num, zero already
2801	xor	%r10,%r10
2802	xor	%r11,%r11
2803	xor	%r12,%r12
2804	xor	%r13,%r13
2805	xor	%r14,%r14
2806	xor	%r15,%r15
2807	lea	48+8(%rsp),$tptr
2808	xor	$zero,$zero		# cf=0, of=0
2809	jmp	.Lsqrx8x_outer_loop
2810
2811.align	32
2812.Lsqrx8x_outer_loop:
2813	mulx	1*8($aptr),%r8,%rax	# a[1]*a[0]
2814	adcx	%r9,%r8			# a[1]*a[0]+=t[1]
2815	adox	%rax,%r10
2816	mulx	2*8($aptr),%r9,%rax	# a[2]*a[0]
2817	adcx	%r10,%r9
2818	adox	%rax,%r11
2819	.byte	0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00	# mulx	3*8($aptr),%r10,%rax	# ...
2820	adcx	%r11,%r10
2821	adox	%rax,%r12
2822	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00	# mulx	4*8($aptr),%r11,%rax
2823	adcx	%r12,%r11
2824	adox	%rax,%r13
2825	mulx	5*8($aptr),%r12,%rax
2826	adcx	%r13,%r12
2827	adox	%rax,%r14
2828	mulx	6*8($aptr),%r13,%rax
2829	adcx	%r14,%r13
2830	adox	%r15,%rax
2831	mulx	7*8($aptr),%r14,%r15
2832	 mov	1*8($aptr),%rdx		# a[1]
2833	adcx	%rax,%r14
2834	adox	$zero,%r15
2835	adc	8*8($tptr),%r15
2836	mov	%r8,1*8($tptr)		# t[1]
2837	mov	%r9,2*8($tptr)		# t[2]
2838	sbb	$carry,$carry		# mov %cf,$carry
2839	xor	$zero,$zero		# cf=0, of=0
2840
2841
2842	mulx	2*8($aptr),%r8,%rbx	# a[2]*a[1]
2843	mulx	3*8($aptr),%r9,%rax	# a[3]*a[1]
2844	adcx	%r10,%r8
2845	adox	%rbx,%r9
2846	mulx	4*8($aptr),%r10,%rbx	# ...
2847	adcx	%r11,%r9
2848	adox	%rax,%r10
2849	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00	# mulx	5*8($aptr),%r11,%rax
2850	adcx	%r12,%r10
2851	adox	%rbx,%r11
2852	.byte	0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00	# mulx	6*8($aptr),%r12,%rbx
2853	adcx	%r13,%r11
2854	adox	%r14,%r12
2855	.byte	0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00	# mulx	7*8($aptr),%r13,%r14
2856	 mov	2*8($aptr),%rdx		# a[2]
2857	adcx	%rax,%r12
2858	adox	%rbx,%r13
2859	adcx	%r15,%r13
2860	adox	$zero,%r14		# of=0
2861	adcx	$zero,%r14		# cf=0
2862
2863	mov	%r8,3*8($tptr)		# t[3]
2864	mov	%r9,4*8($tptr)		# t[4]
2865
2866	mulx	3*8($aptr),%r8,%rbx	# a[3]*a[2]
2867	mulx	4*8($aptr),%r9,%rax	# a[4]*a[2]
2868	adcx	%r10,%r8
2869	adox	%rbx,%r9
2870	mulx	5*8($aptr),%r10,%rbx	# ...
2871	adcx	%r11,%r9
2872	adox	%rax,%r10
2873	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00	# mulx	6*8($aptr),%r11,%rax
2874	adcx	%r12,%r10
2875	adox	%r13,%r11
2876	.byte	0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00	# mulx	7*8($aptr),%r12,%r13
2877	.byte	0x3e
2878	 mov	3*8($aptr),%rdx		# a[3]
2879	adcx	%rbx,%r11
2880	adox	%rax,%r12
2881	adcx	%r14,%r12
2882	mov	%r8,5*8($tptr)		# t[5]
2883	mov	%r9,6*8($tptr)		# t[6]
2884	 mulx	4*8($aptr),%r8,%rax	# a[4]*a[3]
2885	adox	$zero,%r13		# of=0
2886	adcx	$zero,%r13		# cf=0
2887
2888	mulx	5*8($aptr),%r9,%rbx	# a[5]*a[3]
2889	adcx	%r10,%r8
2890	adox	%rax,%r9
2891	mulx	6*8($aptr),%r10,%rax	# ...
2892	adcx	%r11,%r9
2893	adox	%r12,%r10
2894	mulx	7*8($aptr),%r11,%r12
2895	 mov	4*8($aptr),%rdx		# a[4]
2896	 mov	5*8($aptr),%r14		# a[5]
2897	adcx	%rbx,%r10
2898	adox	%rax,%r11
2899	 mov	6*8($aptr),%r15		# a[6]
2900	adcx	%r13,%r11
2901	adox	$zero,%r12		# of=0
2902	adcx	$zero,%r12		# cf=0
2903
2904	mov	%r8,7*8($tptr)		# t[7]
2905	mov	%r9,8*8($tptr)		# t[8]
2906
2907	mulx	%r14,%r9,%rax		# a[5]*a[4]
2908	 mov	7*8($aptr),%r8		# a[7]
2909	adcx	%r10,%r9
2910	mulx	%r15,%r10,%rbx		# a[6]*a[4]
2911	adox	%rax,%r10
2912	adcx	%r11,%r10
2913	mulx	%r8,%r11,%rax		# a[7]*a[4]
2914	 mov	%r14,%rdx		# a[5]
2915	adox	%rbx,%r11
2916	adcx	%r12,%r11
2917	#adox	$zero,%rax		# of=0
2918	adcx	$zero,%rax		# cf=0
2919
2920	mulx	%r15,%r14,%rbx		# a[6]*a[5]
2921	mulx	%r8,%r12,%r13		# a[7]*a[5]
2922	 mov	%r15,%rdx		# a[6]
2923	 lea	8*8($aptr),$aptr
2924	adcx	%r14,%r11
2925	adox	%rbx,%r12
2926	adcx	%rax,%r12
2927	adox	$zero,%r13
2928
2929	.byte	0x67,0x67
2930	mulx	%r8,%r8,%r14		# a[7]*a[6]
2931	adcx	%r8,%r13
2932	adcx	$zero,%r14
2933
2934	cmp	8+8(%rsp),$aptr
2935	je	.Lsqrx8x_outer_break
2936
2937	neg	$carry			# mov $carry,%cf
2938	mov	\$-8,%rcx
2939	mov	$zero,%r15
2940	mov	8*8($tptr),%r8
2941	adcx	9*8($tptr),%r9		# +=t[9]
2942	adcx	10*8($tptr),%r10	# ...
2943	adcx	11*8($tptr),%r11
2944	adc	12*8($tptr),%r12
2945	adc	13*8($tptr),%r13
2946	adc	14*8($tptr),%r14
2947	adc	15*8($tptr),%r15
2948	lea	($aptr),$aaptr
2949	lea	2*64($tptr),$tptr
2950	sbb	%rax,%rax		# mov %cf,$carry
2951
2952	mov	-64($aptr),%rdx		# a[0]
2953	mov	%rax,16+8(%rsp)		# offload $carry
2954	mov	$tptr,24+8(%rsp)
2955
2956	#lea	8*8($tptr),$tptr	# see 2*8*8($tptr) above
2957	xor	%eax,%eax		# cf=0, of=0
2958	jmp	.Lsqrx8x_loop
2959
2960.align	32
2961.Lsqrx8x_loop:
2962	mov	%r8,%rbx
2963	mulx	0*8($aaptr),%rax,%r8	# a[8]*a[i]
2964	adcx	%rax,%rbx		# +=t[8]
2965	adox	%r9,%r8
2966
2967	mulx	1*8($aaptr),%rax,%r9	# ...
2968	adcx	%rax,%r8
2969	adox	%r10,%r9
2970
2971	mulx	2*8($aaptr),%rax,%r10
2972	adcx	%rax,%r9
2973	adox	%r11,%r10
2974
2975	mulx	3*8($aaptr),%rax,%r11
2976	adcx	%rax,%r10
2977	adox	%r12,%r11
2978
2979	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx	4*8($aaptr),%rax,%r12
2980	adcx	%rax,%r11
2981	adox	%r13,%r12
2982
2983	mulx	5*8($aaptr),%rax,%r13
2984	adcx	%rax,%r12
2985	adox	%r14,%r13
2986
2987	mulx	6*8($aaptr),%rax,%r14
2988	 mov	%rbx,($tptr,%rcx,8)	# store t[8+i]
2989	 mov	\$0,%ebx
2990	adcx	%rax,%r13
2991	adox	%r15,%r14
2992
2993	.byte	0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00	# mulx	7*8($aaptr),%rax,%r15
2994	 mov	8($aptr,%rcx,8),%rdx	# a[i]
2995	adcx	%rax,%r14
2996	adox	%rbx,%r15		# %rbx is 0, of=0
2997	adcx	%rbx,%r15		# cf=0
2998
2999	.byte	0x67
3000	inc	%rcx			# of=0
3001	jnz	.Lsqrx8x_loop
3002
3003	lea	8*8($aaptr),$aaptr
3004	mov	\$-8,%rcx
3005	cmp	8+8(%rsp),$aaptr	# done?
3006	je	.Lsqrx8x_break
3007
3008	sub	16+8(%rsp),%rbx		# mov 16(%rsp),%cf
3009	.byte	0x66
3010	mov	-64($aptr),%rdx
3011	adcx	0*8($tptr),%r8
3012	adcx	1*8($tptr),%r9
3013	adc	2*8($tptr),%r10
3014	adc	3*8($tptr),%r11
3015	adc	4*8($tptr),%r12
3016	adc	5*8($tptr),%r13
3017	adc	6*8($tptr),%r14
3018	adc	7*8($tptr),%r15
3019	lea	8*8($tptr),$tptr
3020	.byte	0x67
3021	sbb	%rax,%rax		# mov %cf,%rax
3022	xor	%ebx,%ebx		# cf=0, of=0
3023	mov	%rax,16+8(%rsp)		# offload carry
3024	jmp	.Lsqrx8x_loop
3025
3026.align	32
3027.Lsqrx8x_break:
3028	xor	$zero,$zero
3029	sub	16+8(%rsp),%rbx		# mov 16(%rsp),%cf
3030	adcx	$zero,%r8
3031	mov	24+8(%rsp),$carry	# initial $tptr, borrow $carry
3032	adcx	$zero,%r9
3033	mov	0*8($aptr),%rdx		# a[8], modulo-scheduled
3034	adc	\$0,%r10
3035	mov	%r8,0*8($tptr)
3036	adc	\$0,%r11
3037	adc	\$0,%r12
3038	adc	\$0,%r13
3039	adc	\$0,%r14
3040	adc	\$0,%r15
3041	cmp	$carry,$tptr		# cf=0, of=0
3042	je	.Lsqrx8x_outer_loop
3043
3044	mov	%r9,1*8($tptr)
3045	 mov	1*8($carry),%r9
3046	mov	%r10,2*8($tptr)
3047	 mov	2*8($carry),%r10
3048	mov	%r11,3*8($tptr)
3049	 mov	3*8($carry),%r11
3050	mov	%r12,4*8($tptr)
3051	 mov	4*8($carry),%r12
3052	mov	%r13,5*8($tptr)
3053	 mov	5*8($carry),%r13
3054	mov	%r14,6*8($tptr)
3055	 mov	6*8($carry),%r14
3056	mov	%r15,7*8($tptr)
3057	 mov	7*8($carry),%r15
3058	mov	$carry,$tptr
3059	jmp	.Lsqrx8x_outer_loop
3060
3061.align	32
3062.Lsqrx8x_outer_break:
3063	mov	%r9,9*8($tptr)		# t[9]
3064	 movq	%xmm3,%rcx		# -$num
3065	mov	%r10,10*8($tptr)	# ...
3066	mov	%r11,11*8($tptr)
3067	mov	%r12,12*8($tptr)
3068	mov	%r13,13*8($tptr)
3069	mov	%r14,14*8($tptr)
3070___
3071}{
3072my $i="%rcx";
3073$code.=<<___;
3074	lea	48+8(%rsp),$tptr
3075	mov	($aptr,$i),%rdx		# a[0]
3076
3077	mov	8($tptr),$A0[1]		# t[1]
3078	xor	$A0[0],$A0[0]		# t[0], of=0, cf=0
3079	mov	0+8(%rsp),$num		# restore $num
3080	adox	$A0[1],$A0[1]
3081	 mov	16($tptr),$A1[0]	# t[2]	# prefetch
3082	 mov	24($tptr),$A1[1]	# t[3]	# prefetch
3083	#jmp	.Lsqrx4x_shift_n_add	# happens to be aligned
3084
3085.align	32
3086.Lsqrx4x_shift_n_add:
3087	mulx	%rdx,%rax,%rbx
3088	 adox	$A1[0],$A1[0]
3089	adcx	$A0[0],%rax
3090	 .byte	0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00	# mov	8($aptr,$i),%rdx	# a[i+1]	# prefetch
3091	 .byte	0x4c,0x8b,0x97,0x20,0x00,0x00,0x00	# mov	32($tptr),$A0[0]	# t[2*i+4]	# prefetch
3092	 adox	$A1[1],$A1[1]
3093	adcx	$A0[1],%rbx
3094	 mov	40($tptr),$A0[1]		# t[2*i+4+1]	# prefetch
3095	mov	%rax,0($tptr)
3096	mov	%rbx,8($tptr)
3097
3098	mulx	%rdx,%rax,%rbx
3099	 adox	$A0[0],$A0[0]
3100	adcx	$A1[0],%rax
3101	 mov	16($aptr,$i),%rdx	# a[i+2]	# prefetch
3102	 mov	48($tptr),$A1[0]	# t[2*i+6]	# prefetch
3103	 adox	$A0[1],$A0[1]
3104	adcx	$A1[1],%rbx
3105	 mov	56($tptr),$A1[1]	# t[2*i+6+1]	# prefetch
3106	mov	%rax,16($tptr)
3107	mov	%rbx,24($tptr)
3108
3109	mulx	%rdx,%rax,%rbx
3110	 adox	$A1[0],$A1[0]
3111	adcx	$A0[0],%rax
3112	 mov	24($aptr,$i),%rdx	# a[i+3]	# prefetch
3113	 lea	32($i),$i
3114	 mov	64($tptr),$A0[0]	# t[2*i+8]	# prefetch
3115	 adox	$A1[1],$A1[1]
3116	adcx	$A0[1],%rbx
3117	 mov	72($tptr),$A0[1]	# t[2*i+8+1]	# prefetch
3118	mov	%rax,32($tptr)
3119	mov	%rbx,40($tptr)
3120
3121	mulx	%rdx,%rax,%rbx
3122	 adox	$A0[0],$A0[0]
3123	adcx	$A1[0],%rax
3124	jrcxz	.Lsqrx4x_shift_n_add_break
3125	 .byte	0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00	# mov	0($aptr,$i),%rdx	# a[i+4]	# prefetch
3126	 adox	$A0[1],$A0[1]
3127	adcx	$A1[1],%rbx
3128	 mov	80($tptr),$A1[0]	# t[2*i+10]	# prefetch
3129	 mov	88($tptr),$A1[1]	# t[2*i+10+1]	# prefetch
3130	mov	%rax,48($tptr)
3131	mov	%rbx,56($tptr)
3132	lea	64($tptr),$tptr
3133	nop
3134	jmp	.Lsqrx4x_shift_n_add
3135
3136.align	32
3137.Lsqrx4x_shift_n_add_break:
3138	adcx	$A1[1],%rbx
3139	mov	%rax,48($tptr)
3140	mov	%rbx,56($tptr)
3141	lea	64($tptr),$tptr		# end of t[] buffer
3142___
3143}
3144######################################################################
3145# Montgomery reduction part, "word-by-word" algorithm.
3146#
3147# This new path is inspired by multiple submissions from Intel, by
3148# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
3149# Vinodh Gopal...
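#
# As a rough C-level sketch of the reduction below (illustrative only; B is
# the word base 2^64 and n0 = -n[0]^-1 mod B):
#
#	for (i = 0; i < num; i++) {
#		m = (t[i] * n0) % B;		/* the n0*a[i] value put aside below */
#		t += (m * n) << (64*i);		/* clears word i of t                */
#	}
#	t >>= 64*num;				/* keep the upper half of t[]        */
#	/* the final conditional subtraction is done in the post-condition      */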
3150{
3151my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
3152
3153$code.=<<___;
3154	movq	%xmm2,$nptr
3155__bn_sqrx8x_reduction:
3156	xor	%eax,%eax		# initial top-most carry bit
3157	mov	32+8(%rsp),%rbx		# n0
3158	mov	48+8(%rsp),%rdx		# "%r8", 8*0($tptr)
3159	lea	-8*8($nptr,$num),%rcx	# end of n[]
3160	#lea	48+8(%rsp,$num,2),$tptr	# end of t[] buffer
3161	mov	%rcx, 0+8(%rsp)		# save end of n[]
3162	mov	$tptr,8+8(%rsp)		# save end of t[]
3163
3164	lea	48+8(%rsp),$tptr		# initial t[] window
3165	jmp	.Lsqrx8x_reduction_loop
3166
3167.align	32
3168.Lsqrx8x_reduction_loop:
3169	mov	8*1($tptr),%r9
3170	mov	8*2($tptr),%r10
3171	mov	8*3($tptr),%r11
3172	mov	8*4($tptr),%r12
3173	mov	%rdx,%r8
3174	imulq	%rbx,%rdx		# n0*a[i]
3175	mov	8*5($tptr),%r13
3176	mov	8*6($tptr),%r14
3177	mov	8*7($tptr),%r15
3178	mov	%rax,24+8(%rsp)		# store top-most carry bit
3179
3180	lea	8*8($tptr),$tptr
3181	xor	$carry,$carry		# cf=0, of=0
3182	mov	\$-8,%rcx
3183	jmp	.Lsqrx8x_reduce
3184
3185.align	32
3186.Lsqrx8x_reduce:
3187	mov	%r8, %rbx
3188	mulx	8*0($nptr),%rax,%r8	# n[0]
3189	adcx	%rbx,%rax		# discarded
3190	adox	%r9,%r8
3191
3192	mulx	8*1($nptr),%rbx,%r9	# n[1]
3193	adcx	%rbx,%r8
3194	adox	%r10,%r9
3195
3196	mulx	8*2($nptr),%rbx,%r10
3197	adcx	%rbx,%r9
3198	adox	%r11,%r10
3199
3200	mulx	8*3($nptr),%rbx,%r11
3201	adcx	%rbx,%r10
3202	adox	%r12,%r11
3203
3204	.byte	0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx	8*4($nptr),%rbx,%r12
3205	 mov	%rdx,%rax
3206	 mov	%r8,%rdx
3207	adcx	%rbx,%r11
3208	adox	%r13,%r12
3209
3210	 mulx	32+8(%rsp),%rbx,%rdx	# %rdx discarded
3211	 mov	%rax,%rdx
3212	 mov	%rax,64+48+8(%rsp,%rcx,8)	# put aside n0*a[i]
3213
3214	mulx	8*5($nptr),%rax,%r13
3215	adcx	%rax,%r12
3216	adox	%r14,%r13
3217
3218	mulx	8*6($nptr),%rax,%r14
3219	adcx	%rax,%r13
3220	adox	%r15,%r14
3221
3222	mulx	8*7($nptr),%rax,%r15
3223	 mov	%rbx,%rdx
3224	adcx	%rax,%r14
3225	adox	$carry,%r15		# $carry is 0
3226	adcx	$carry,%r15		# cf=0
3227
3228	.byte	0x67,0x67,0x67
3229	inc	%rcx			# of=0
3230	jnz	.Lsqrx8x_reduce
3231
3232	mov	$carry,%rax		# xor	%rax,%rax
3233	cmp	0+8(%rsp),$nptr		# end of n[]?
3234	jae	.Lsqrx8x_no_tail
3235
3236	mov	48+8(%rsp),%rdx		# pull n0*a[0]
3237	add	8*0($tptr),%r8
3238	lea	8*8($nptr),$nptr
3239	mov	\$-8,%rcx
3240	adcx	8*1($tptr),%r9
3241	adcx	8*2($tptr),%r10
3242	adc	8*3($tptr),%r11
3243	adc	8*4($tptr),%r12
3244	adc	8*5($tptr),%r13
3245	adc	8*6($tptr),%r14
3246	adc	8*7($tptr),%r15
3247	lea	8*8($tptr),$tptr
3248	sbb	%rax,%rax		# top carry
3249
3250	xor	$carry,$carry		# of=0, cf=0
3251	mov	%rax,16+8(%rsp)
3252	jmp	.Lsqrx8x_tail
3253
3254.align	32
3255.Lsqrx8x_tail:
3256	mov	%r8,%rbx
3257	mulx	8*0($nptr),%rax,%r8
3258	adcx	%rax,%rbx
3259	adox	%r9,%r8
3260
3261	mulx	8*1($nptr),%rax,%r9
3262	adcx	%rax,%r8
3263	adox	%r10,%r9
3264
3265	mulx	8*2($nptr),%rax,%r10
3266	adcx	%rax,%r9
3267	adox	%r11,%r10
3268
3269	mulx	8*3($nptr),%rax,%r11
3270	adcx	%rax,%r10
3271	adox	%r12,%r11
3272
3273	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx	8*4($nptr),%rax,%r12
3274	adcx	%rax,%r11
3275	adox	%r13,%r12
3276
3277	mulx	8*5($nptr),%rax,%r13
3278	adcx	%rax,%r12
3279	adox	%r14,%r13
3280
3281	mulx	8*6($nptr),%rax,%r14
3282	adcx	%rax,%r13
3283	adox	%r15,%r14
3284
3285	mulx	8*7($nptr),%rax,%r15
3286	 mov	72+48+8(%rsp,%rcx,8),%rdx	# pull n0*a[i]
3287	adcx	%rax,%r14
3288	adox	$carry,%r15
3289	 mov	%rbx,($tptr,%rcx,8)	# save result
3290	 mov	%r8,%rbx
3291	adcx	$carry,%r15		# cf=0
3292
3293	inc	%rcx			# of=0
3294	jnz	.Lsqrx8x_tail
3295
3296	cmp	0+8(%rsp),$nptr		# end of n[]?
3297	jae	.Lsqrx8x_tail_done	# break out of loop
3298
3299	sub	16+8(%rsp),$carry	# mov 16(%rsp),%cf
3300	 mov	48+8(%rsp),%rdx		# pull n0*a[0]
3301	 lea	8*8($nptr),$nptr
3302	adc	8*0($tptr),%r8
3303	adc	8*1($tptr),%r9
3304	adc	8*2($tptr),%r10
3305	adc	8*3($tptr),%r11
3306	adc	8*4($tptr),%r12
3307	adc	8*5($tptr),%r13
3308	adc	8*6($tptr),%r14
3309	adc	8*7($tptr),%r15
3310	lea	8*8($tptr),$tptr
3311	sbb	%rax,%rax
3312	sub	\$8,%rcx		# mov	\$-8,%rcx
3313
3314	xor	$carry,$carry		# of=0, cf=0
3315	mov	%rax,16+8(%rsp)
3316	jmp	.Lsqrx8x_tail
3317
3318.align	32
3319.Lsqrx8x_tail_done:
3320	xor	%rax,%rax
3321	add	24+8(%rsp),%r8		# can this overflow?
3322	adc	\$0,%r9
3323	adc	\$0,%r10
3324	adc	\$0,%r11
3325	adc	\$0,%r12
3326	adc	\$0,%r13
3327	adc	\$0,%r14
3328	adc	\$0,%r15
3329	adc	\$0,%rax
3330
3331	sub	16+8(%rsp),$carry	# mov 16(%rsp),%cf
3332.Lsqrx8x_no_tail:			# %cf is 0 if jumped here
3333	adc	8*0($tptr),%r8
3334	 movq	%xmm3,%rcx
3335	adc	8*1($tptr),%r9
3336	 mov	8*7($nptr),$carry
3337	 movq	%xmm2,$nptr		# restore $nptr
3338	adc	8*2($tptr),%r10
3339	adc	8*3($tptr),%r11
3340	adc	8*4($tptr),%r12
3341	adc	8*5($tptr),%r13
3342	adc	8*6($tptr),%r14
3343	adc	8*7($tptr),%r15
3344	adc	\$0,%rax		# top-most carry
3345
3346	mov	32+8(%rsp),%rbx		# n0
3347	mov	8*8($tptr,%rcx),%rdx	# modulo-scheduled "%r8"
3348
3349	mov	%r8,8*0($tptr)		# store top 512 bits
3350	 lea	8*8($tptr),%r8		# borrow %r8
3351	mov	%r9,8*1($tptr)
3352	mov	%r10,8*2($tptr)
3353	mov	%r11,8*3($tptr)
3354	mov	%r12,8*4($tptr)
3355	mov	%r13,8*5($tptr)
3356	mov	%r14,8*6($tptr)
3357	mov	%r15,8*7($tptr)
3358
3359	lea	8*8($tptr,%rcx),$tptr	# start of current t[] window
3360	cmp	8+8(%rsp),%r8		# end of t[]?
3361	jb	.Lsqrx8x_reduction_loop
3362	ret
3363.cfi_endproc
3364.size	bn_sqrx8x_internal,.-bn_sqrx8x_internal
3365___
3366}
3367##############################################################
3368# Post-condition, 4x unrolled
3369#
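# Conceptually (a sketch): the reduced value t may still need a correction by
# n, so the code computes r = t - (mask & n), where mask (%rax below) is
# all-ones when the subtraction is needed and all-zero otherwise. Both cases
# run the same instruction sequence, keeping the correction constant-time.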
3370{
3371my ($rptr,$nptr)=("%rdx","%rbp");
3372$code.=<<___;
3373.align	32
3374.type	__bn_postx4x_internal,\@abi-omnipotent
3375__bn_postx4x_internal:
3376.cfi_startproc
3377	mov	8*0($nptr),%r12
3378	mov	%rcx,%r10		# -$num
3379	mov	%rcx,%r9		# -$num
3380	neg	%rax
3381	sar	\$3+2,%rcx
3382	#lea	48+8(%rsp,%r9),$tptr
3383	movq	%xmm1,$rptr		# restore $rptr
3384	movq	%xmm1,$aptr		# prepare for back-to-back call
3385	dec	%r12			# so that after 'not' we get -n[0]
3386	mov	8*1($nptr),%r13
3387	xor	%r8,%r8
3388	mov	8*2($nptr),%r14
3389	mov	8*3($nptr),%r15
3390	jmp	.Lsqrx4x_sub_entry
3391
3392.align	16
3393.Lsqrx4x_sub:
3394	mov	8*0($nptr),%r12
3395	mov	8*1($nptr),%r13
3396	mov	8*2($nptr),%r14
3397	mov	8*3($nptr),%r15
3398.Lsqrx4x_sub_entry:
3399	andn	%rax,%r12,%r12
3400	lea	8*4($nptr),$nptr
3401	andn	%rax,%r13,%r13
3402	andn	%rax,%r14,%r14
3403	andn	%rax,%r15,%r15
3404
3405	neg	%r8			# mov %r8,%cf
3406	adc	8*0($tptr),%r12
3407	adc	8*1($tptr),%r13
3408	adc	8*2($tptr),%r14
3409	adc	8*3($tptr),%r15
3410	mov	%r12,8*0($rptr)
3411	lea	8*4($tptr),$tptr
3412	mov	%r13,8*1($rptr)
3413	sbb	%r8,%r8			# mov %cf,%r8
3414	mov	%r14,8*2($rptr)
3415	mov	%r15,8*3($rptr)
3416	lea	8*4($rptr),$rptr
3417
3418	inc	%rcx
3419	jnz	.Lsqrx4x_sub
3420
3421	neg	%r9			# restore $num
3422
3423	ret
3424.cfi_endproc
3425.size	__bn_postx4x_internal,.-__bn_postx4x_internal
3426___
3427}
3428}}}
3429{
3430my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order
3431				("%rdi","%esi","%rdx","%ecx");  # Unix order
3432my $out=$inp;
3433my $STRIDE=2**5*8;
3434my $N=$STRIDE/4;
3435
3436$code.=<<___;
3437.globl	bn_scatter5
3438.type	bn_scatter5,\@abi-omnipotent
3439.align	16
3440bn_scatter5:
3441.cfi_startproc
3442	_CET_ENDBR
3443	cmp	\$0, $num
3444	jz	.Lscatter_epilogue
3445
3446	# $tbl stores 32 entries, t0 through t31. Each entry has $num words.
3447	# They are interleaved in memory as follows:
3448	#
3449	#  t0[0]      t1[0]      t2[0]      ... t31[0]
3450	#  t0[1]      t1[1]      t2[1]      ... t31[1]
3451	#  ...
3452	#  t0[$num-1] t1[$num-1] t2[$num-1] ... t31[$num-1]
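	#
	# In effect (an illustrative sketch):
	#
	#	for (i = 0; i < num; i++)
	#		tbl[32*i + idx] = inp[i];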
3453
3454	lea	($tbl,$idx,8),$tbl
3455.Lscatter:
3456	mov	($inp),%rax
3457	lea	8($inp),$inp
3458	mov	%rax,($tbl)
3459	lea	32*8($tbl),$tbl
3460	sub	\$1,$num
3461	jnz	.Lscatter
3462.Lscatter_epilogue:
3463	ret
3464.cfi_endproc
3465.size	bn_scatter5,.-bn_scatter5
3466
3467.globl	bn_gather5
3468.type	bn_gather5,\@abi-omnipotent
3469.align	32
3470bn_gather5:
3471.cfi_startproc
3472.LSEH_begin_bn_gather5:			# Win64 thing, but harmless in other cases
3473	_CET_ENDBR
3474	# I can't trust assembler to use specific encoding:-(
3475	.byte	0x4c,0x8d,0x14,0x24			#lea    (%rsp),%r10
3476.cfi_def_cfa_register	%r10
3477	.byte	0x48,0x81,0xec,0x08,0x01,0x00,0x00	#sub	$0x108,%rsp
3478	lea	.Linc(%rip),%rax
3479	and	\$-16,%rsp		# shouldn't be formally required
3480
3481	movd	$idx,%xmm5
3482	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
3483	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
3484	lea	128($tbl),%r11		# size optimization
3485	lea	128(%rsp),%rax		# size optimization
3486
3487	pshufd	\$0,%xmm5,%xmm5		# broadcast $idx
3488	movdqa	%xmm1,%xmm4
3489	movdqa	%xmm1,%xmm2
3490___
3491########################################################################
3492# Calculate masks by comparing 0..31 to $idx and save result to stack.
3493#
3494# We compute sixteen 16-byte masks and store them on the stack. Mask i is stored
3495# in `16*i - 128`(%rax) and contains the comparisons for idx == 2*i and
3496# idx == 2*i + 1 in its lower and upper halves, respectively. Mask calculations
3497# are scheduled in groups of four.
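#
# The .Lgather loop below then selects entry idx one word at a time, roughly
# (an illustrative sketch):
#
#	out[j] = OR over i in 0..31 of (tbl[32*j + i] & mask[i])
#
# where mask[i] is all-ones only for i == idx, so every table entry is read
# regardless of the secret index.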
3498for($i=0;$i<$STRIDE/16;$i+=4) {
3499$code.=<<___;
3500	paddd	%xmm0,%xmm1
3501	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
3502___
3503$code.=<<___	if ($i);
3504	movdqa	%xmm3,`16*($i-1)-128`(%rax)
3505___
3506$code.=<<___;
3507	movdqa	%xmm4,%xmm3
3508
3509	paddd	%xmm1,%xmm2
3510	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
3511	movdqa	%xmm0,`16*($i+0)-128`(%rax)
3512	movdqa	%xmm4,%xmm0
3513
3514	paddd	%xmm2,%xmm3
3515	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
3516	movdqa	%xmm1,`16*($i+1)-128`(%rax)
3517	movdqa	%xmm4,%xmm1
3518
3519	paddd	%xmm3,%xmm0
3520	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
3521	movdqa	%xmm2,`16*($i+2)-128`(%rax)
3522	movdqa	%xmm4,%xmm2
3523___
3524}
3525$code.=<<___;
3526	movdqa	%xmm3,`16*($i-1)-128`(%rax)
3527	jmp	.Lgather
3528
3529.align	32
3530.Lgather:
3531	pxor	%xmm4,%xmm4
3532	pxor	%xmm5,%xmm5
3533___
3534for($i=0;$i<$STRIDE/16;$i+=4) {
3535# Combine the masks with the corresponding table entries to select the correct
3536# entry.
3537$code.=<<___;
3538	movdqa	`16*($i+0)-128`(%r11),%xmm0
3539	movdqa	`16*($i+1)-128`(%r11),%xmm1
3540	movdqa	`16*($i+2)-128`(%r11),%xmm2
3541	pand	`16*($i+0)-128`(%rax),%xmm0
3542	movdqa	`16*($i+3)-128`(%r11),%xmm3
3543	pand	`16*($i+1)-128`(%rax),%xmm1
3544	por	%xmm0,%xmm4
3545	pand	`16*($i+2)-128`(%rax),%xmm2
3546	por	%xmm1,%xmm5
3547	pand	`16*($i+3)-128`(%rax),%xmm3
3548	por	%xmm2,%xmm4
3549	por	%xmm3,%xmm5
3550___
3551}
3552$code.=<<___;
3553	por	%xmm5,%xmm4
3554	lea	$STRIDE(%r11),%r11
3555	# Combine the upper and lower halves of %xmm0.
3556	pshufd	\$0x4e,%xmm4,%xmm0	# Swap upper and lower halves.
3557	por	%xmm4,%xmm0
3558	movq	%xmm0,($out)		# m0=bp[0]
3559	lea	8($out),$out
3560	sub	\$1,$num
3561	jnz	.Lgather
3562
3563	lea	(%r10),%rsp
3564.cfi_def_cfa_register	%rsp
3565	ret
3566.LSEH_end_bn_gather5:
3567.cfi_endproc
3568.size	bn_gather5,.-bn_gather5
3569___
3570}
3571$code.=<<___;
3572.section .rodata
3573.align	64
3574.Linc:
3575	.long	0,0, 1,1
3576	.long	2,2, 2,2
3577.asciz	"Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3578.text
3579___
3580
3581# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3582#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
3583if ($win64) {
3584$rec="%rcx";
3585$frame="%rdx";
3586$context="%r8";
3587$disp="%r9";
3588
3589$code.=<<___;
3590.extern	__imp_RtlVirtualUnwind
3591.type	mul_handler,\@abi-omnipotent
3592.align	16
3593mul_handler:
3594	push	%rsi
3595	push	%rdi
3596	push	%rbx
3597	push	%rbp
3598	push	%r12
3599	push	%r13
3600	push	%r14
3601	push	%r15
3602	pushfq
3603	sub	\$64,%rsp
3604
3605	mov	120($context),%rax	# pull context->Rax
3606	mov	248($context),%rbx	# pull context->Rip
3607
3608	mov	8($disp),%rsi		# disp->ImageBase
3609	mov	56($disp),%r11		# disp->HandlerData
3610
3611	mov	0(%r11),%r10d		# HandlerData[0]
3612	lea	(%rsi,%r10),%r10	# end of prologue label
3613	cmp	%r10,%rbx		# context->Rip<end of prologue label
3614	jb	.Lcommon_seh_tail
3615
3616	mov	4(%r11),%r10d		# HandlerData[1]
3617	lea	(%rsi,%r10),%r10	# beginning of body label
3618	cmp	%r10,%rbx		# context->Rip<body label
3619	jb	.Lcommon_pop_regs
3620
3621	mov	152($context),%rax	# pull context->Rsp
3622
3623	mov	8(%r11),%r10d		# HandlerData[2]
3624	lea	(%rsi,%r10),%r10	# epilogue label
3625	cmp	%r10,%rbx		# context->Rip>=epilogue label
3626	jae	.Lcommon_seh_tail
3627
3628	lea	.Lmul_epilogue(%rip),%r10
3629	cmp	%r10,%rbx
3630	ja	.Lbody_40
3631
3632	mov	192($context),%r10	# pull $num
3633	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
3634
3635	jmp	.Lcommon_pop_regs
3636
3637.Lbody_40:
3638	mov	40(%rax),%rax		# pull saved stack pointer
3639.Lcommon_pop_regs:
3640	mov	-8(%rax),%rbx
3641	mov	-16(%rax),%rbp
3642	mov	-24(%rax),%r12
3643	mov	-32(%rax),%r13
3644	mov	-40(%rax),%r14
3645	mov	-48(%rax),%r15
3646	mov	%rbx,144($context)	# restore context->Rbx
3647	mov	%rbp,160($context)	# restore context->Rbp
3648	mov	%r12,216($context)	# restore context->R12
3649	mov	%r13,224($context)	# restore context->R13
3650	mov	%r14,232($context)	# restore context->R14
3651	mov	%r15,240($context)	# restore context->R15
3652
3653.Lcommon_seh_tail:
3654	mov	8(%rax),%rdi
3655	mov	16(%rax),%rsi
3656	mov	%rax,152($context)	# restore context->Rsp
3657	mov	%rsi,168($context)	# restore context->Rsi
3658	mov	%rdi,176($context)	# restore context->Rdi
3659
3660	mov	40($disp),%rdi		# disp->ContextRecord
3661	mov	$context,%rsi		# context
3662	mov	\$154,%ecx		# sizeof(CONTEXT)
3663	.long	0xa548f3fc		# cld; rep movsq
3664
3665	mov	$disp,%rsi
3666	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
3667	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
3668	mov	0(%rsi),%r8		# arg3, disp->ControlPc
3669	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
3670	mov	40(%rsi),%r10		# disp->ContextRecord
3671	lea	56(%rsi),%r11		# &disp->HandlerData
3672	lea	24(%rsi),%r12		# &disp->EstablisherFrame
3673	mov	%r10,32(%rsp)		# arg5
3674	mov	%r11,40(%rsp)		# arg6
3675	mov	%r12,48(%rsp)		# arg7
3676	mov	%rcx,56(%rsp)		# arg8, (NULL)
3677	call	*__imp_RtlVirtualUnwind(%rip)
3678
3679	mov	\$1,%eax		# ExceptionContinueSearch
3680	add	\$64,%rsp
3681	popfq
3682	pop	%r15
3683	pop	%r14
3684	pop	%r13
3685	pop	%r12
3686	pop	%rbp
3687	pop	%rbx
3688	pop	%rdi
3689	pop	%rsi
3690	ret
3691.size	mul_handler,.-mul_handler
3692
3693.section	.pdata
3694.align	4
3695	.rva	.LSEH_begin_bn_mul_mont_gather5_nohw
3696	.rva	.LSEH_end_bn_mul_mont_gather5_nohw
3697	.rva	.LSEH_info_bn_mul_mont_gather5_nohw
3698
3699	.rva	.LSEH_begin_bn_mul4x_mont_gather5
3700	.rva	.LSEH_end_bn_mul4x_mont_gather5
3701	.rva	.LSEH_info_bn_mul4x_mont_gather5
3702
3703	.rva	.LSEH_begin_bn_power5_nohw
3704	.rva	.LSEH_end_bn_power5_nohw
3705	.rva	.LSEH_info_bn_power5_nohw
3706___
3707$code.=<<___ if ($addx);
3708	.rva	.LSEH_begin_bn_mulx4x_mont_gather5
3709	.rva	.LSEH_end_bn_mulx4x_mont_gather5
3710	.rva	.LSEH_info_bn_mulx4x_mont_gather5
3711
3712	.rva	.LSEH_begin_bn_powerx5
3713	.rva	.LSEH_end_bn_powerx5
3714	.rva	.LSEH_info_bn_powerx5
3715___
3716$code.=<<___;
3717	.rva	.LSEH_begin_bn_gather5
3718	.rva	.LSEH_end_bn_gather5
3719	.rva	.LSEH_info_bn_gather5
3720
3721.section	.xdata
3722.align	8
3723.LSEH_info_bn_mul_mont_gather5_nohw:
3724	.byte	9,0,0,0
3725	.rva	mul_handler
3726	.rva	.Lmul_body,.Lmul_body,.Lmul_epilogue		# HandlerData[]
3727.align	8
3728.LSEH_info_bn_mul4x_mont_gather5:
3729	.byte	9,0,0,0
3730	.rva	mul_handler
3731	.rva	.Lmul4x_prologue,.Lmul4x_body,.Lmul4x_epilogue		# HandlerData[]
3732.align	8
3733.LSEH_info_bn_power5_nohw:
3734	.byte	9,0,0,0
3735	.rva	mul_handler
3736	.rva	.Lpower5_prologue,.Lpower5_body,.Lpower5_epilogue	# HandlerData[]
3737___
3738$code.=<<___ if ($addx);
3739.align	8
3740.LSEH_info_bn_mulx4x_mont_gather5:
3741	.byte	9,0,0,0
3742	.rva	mul_handler
3743	.rva	.Lmulx4x_prologue,.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]
3744.align	8
3745.LSEH_info_bn_powerx5:
3746	.byte	9,0,0,0
3747	.rva	mul_handler
3748	.rva	.Lpowerx5_prologue,.Lpowerx5_body,.Lpowerx5_epilogue	# HandlerData[]
3749___
3750$code.=<<___;
3751.align	8
3752.LSEH_info_bn_gather5:
3753	.byte	0x01,0x0b,0x03,0x0a
3754	.byte	0x0b,0x01,0x21,0x00	# sub	rsp,0x108
3755	.byte	0x04,0xa3,0x00,0x00	# lea	r10,(rsp)
3756.align	8
3757___
3758}
3759
3760$code =~ s/\`([^\`]*)\`/eval($1)/gem;
3761
3762print $code;
3763close STDOUT or die "error closing STDOUT: $!";
3764