#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# November 2014
#
# ChaCha20 for x86_64.
#
# December 2016
#
# Add AVX512F code path.
#
# Performance in cycles per byte out of large buffer.
#
#		IALU/gcc 4.8(i)	1xSSSE3/SSE2	4xSSSE3	    NxAVX
#
# P4		9.48/+99%	-/22.7(ii)	-
# Core2		7.83/+55%	7.90/8.08	4.35
# Westmere	7.19/+50%	5.60/6.70	3.00
# Sandy Bridge	8.31/+42%	5.45/6.76	2.72
# Ivy Bridge	6.71/+46%	5.40/6.49	2.41
# Haswell	5.92/+43%	5.20/6.45	2.42	    1.23
# Skylake[-X]	5.87/+39%	4.70/-		2.31	    1.19[0.57]
# Silvermont	12.0/+33%	7.75/7.40	7.03(iii)
# Knights L	11.7/-		-		9.60(iii)   0.80
# Goldmont	10.6/+17%	5.10/-		3.28
# Sledgehammer	7.28/+52%	-/14.2(ii)	-
# Bulldozer	9.66/+28%	9.85/11.1	3.06(iv)
# Ryzen		5.96/+50%	5.19/-		2.40        2.09
# VIA Nano	10.5/+46%	6.72/8.60	6.05
#
# (i)	compared to older gcc 3.x one can observe a >2x improvement on
#	most platforms;
# (ii)	as can be seen, SSE2 performance is too low on legacy
#	processors; NxSSE2 results are naturally better, but not
#	impressively better than the IALU ones, which is why no
#	SSE2 code appears below;
# (iii)	this is not an optimal result for Atom because of MSROM
#	limitations; SSE2 can do better, but the gain is considered too
#	low to justify the [maintenance] effort;
# (iv)	upstream, Bulldozer executes a 4xXOP code path that delivers
#	2.20; that path has been removed from this copy;
#
# Modified from upstream OpenSSL to remove the XOP code.

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
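# The usual perlasm calling convention: the first argument selects the
# assembler "flavour" (elf, macosx, mingw64, nasm, ...) and the second
# names the output file; per the test above, a lone argument containing
# a dot is taken to be the output file itself.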

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

$avx = 2;

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

# input parameter block
($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");

$code.=<<___;
.text

.extern GFp_ia32cap_P

.align	64
.Lzero:
.long	0,0,0,0
.Lone:
.long	1,0,0,0
.Linc:
.long	0,1,2,3
.Lfour:
.long	4,4,4,4
.Lincy:
.long	0,2,4,6,1,3,5,7
.Leight:
.long	8,8,8,8,8,8,8,8
.Lrot16:
.byte	0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.Lrot24:
.byte	0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
.Lsigma:
.asciz	"expand 32-byte k"
.align	64
.Lzeroz:
.long	0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
.Lfourz:
.long	4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
.Lincz:
.long	0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.Lsixteen:
.long	16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
.asciz	"ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___

sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}
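# For example, with @x as defined below, &rol(@x[3],16) has no
# explicitly defined sub, so it falls through to AUTOLOAD and appends
# "rol\t$16,%edx" to $code: the last argument is popped first, bare
# numbers are given a '$' immediate prefix, and the remaining operands
# follow in reverse, i.e. AT&T, order.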

@x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
    "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
@t=("%esi","%edi");

sub ROUND {			# critical path is 24 cycles per round
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
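# The map above advances each index within its aligned group of four:
# ($_&~3) keeps the group base while (($_+1)&3) rotates the offset.
# Starting from the even-round call (0,4,8,12) it produces (1,5,9,13),
# (2,6,10,14) and (3,7,11,15); starting from the odd-round call
# (0,5,10,15) it produces the diagonals (1,6,11,12), (2,7,8,13) and
# (3,4,9,14) shown in the table below.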
my ($xc,$xc_)=map("\"$_\"",@t);
my @x=map("\"$_\"",@x);

	# Consider the order in which the variables are addressed by
	# their index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# The 'a', 'b' and 'd' values are permanently allocated in
	# registers, @x[0..7,12..15], while the 'c' values are maintained
	# in memory. If you observe the 'c' column, you'll notice that a
	# pair of 'c's is invariant between rounds. This means that they
	# only have to be reloaded once per round, in the middle. This is
	# why you'll see a bunch of 'c' stores and loads in the middle,
	# but none at the beginning or end.

	# Normally the instructions would be interleaved to favour
	# in-order execution. Out-of-order cores generally manage it
	# gracefully, but not this time, for some reason. As in-order
	# cores are a dying breed and the old Atom is the only one still
	# around, the instructions are left uninterleaved. Besides, Atom
	# is better off executing the 1xSSSE3 code anyway...
159
160	(
161	"&add	(@x[$a0],@x[$b0])",	# Q1
162	"&xor	(@x[$d0],@x[$a0])",
163	"&rol	(@x[$d0],16)",
164	 "&add	(@x[$a1],@x[$b1])",	# Q2
165	 "&xor	(@x[$d1],@x[$a1])",
166	 "&rol	(@x[$d1],16)",
167
168	"&add	($xc,@x[$d0])",
169	"&xor	(@x[$b0],$xc)",
170	"&rol	(@x[$b0],12)",
171	 "&add	($xc_,@x[$d1])",
172	 "&xor	(@x[$b1],$xc_)",
173	 "&rol	(@x[$b1],12)",
174
175	"&add	(@x[$a0],@x[$b0])",
176	"&xor	(@x[$d0],@x[$a0])",
177	"&rol	(@x[$d0],8)",
178	 "&add	(@x[$a1],@x[$b1])",
179	 "&xor	(@x[$d1],@x[$a1])",
180	 "&rol	(@x[$d1],8)",
181
182	"&add	($xc,@x[$d0])",
183	"&xor	(@x[$b0],$xc)",
184	"&rol	(@x[$b0],7)",
185	 "&add	($xc_,@x[$d1])",
186	 "&xor	(@x[$b1],$xc_)",
187	 "&rol	(@x[$b1],7)",
188
189	"&mov	(\"4*$c0(%rsp)\",$xc)",	# reload pair of 'c's
190	 "&mov	(\"4*$c1(%rsp)\",$xc_)",
191	"&mov	($xc,\"4*$c2(%rsp)\")",
192	 "&mov	($xc_,\"4*$c3(%rsp)\")",
193
194	"&add	(@x[$a2],@x[$b2])",	# Q3
195	"&xor	(@x[$d2],@x[$a2])",
196	"&rol	(@x[$d2],16)",
197	 "&add	(@x[$a3],@x[$b3])",	# Q4
198	 "&xor	(@x[$d3],@x[$a3])",
199	 "&rol	(@x[$d3],16)",
200
201	"&add	($xc,@x[$d2])",
202	"&xor	(@x[$b2],$xc)",
203	"&rol	(@x[$b2],12)",
204	 "&add	($xc_,@x[$d3])",
205	 "&xor	(@x[$b3],$xc_)",
206	 "&rol	(@x[$b3],12)",
207
208	"&add	(@x[$a2],@x[$b2])",
209	"&xor	(@x[$d2],@x[$a2])",
210	"&rol	(@x[$d2],8)",
211	 "&add	(@x[$a3],@x[$b3])",
212	 "&xor	(@x[$d3],@x[$a3])",
213	 "&rol	(@x[$d3],8)",
214
215	"&add	($xc,@x[$d2])",
216	"&xor	(@x[$b2],$xc)",
217	"&rol	(@x[$b2],7)",
218	 "&add	($xc_,@x[$d3])",
219	 "&xor	(@x[$b3],$xc_)",
220	 "&rol	(@x[$b3],7)"
221	);
222}
223
224########################################################################
225# Generic code path that handles all lengths on pre-SSSE3 processors.
226$code.=<<___;
227.globl	GFp_ChaCha20_ctr32
228.type	GFp_ChaCha20_ctr32,\@function,5
229.align	64
230GFp_ChaCha20_ctr32:
231.cfi_startproc
232	cmp	\$0,$len
233	je	.Lno_data
234	mov	GFp_ia32cap_P+4(%rip),%r10
235___
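# GFp_ia32cap_P+4 is the second 32-bit capability word, i.e. ECX from
# CPUID leaf 1; bit 41-32=9 of it is the SSSE3 feature flag tested
# below.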
$code.=<<___;
	test	\$`1<<(41-32)`,%r10d
	jnz	.LChaCha20_ssse3

	push	%rbx
.cfi_push	rbx
	push	%rbp
.cfi_push	rbp
	push	%r12
.cfi_push	r12
	push	%r13
.cfi_push	r13
	push	%r14
.cfi_push	r14
	push	%r15
.cfi_push	r15
	sub	\$64+24,%rsp
.cfi_adjust_cfa_offset	`64+24`
.Lctr32_body:

	#movdqa	.Lsigma(%rip),%xmm0
	movdqu	($key),%xmm1
	movdqu	16($key),%xmm2
	movdqu	($counter),%xmm3
	movdqa	.Lone(%rip),%xmm4

	#movdqa	%xmm0,4*0(%rsp)		# key[0]
	movdqa	%xmm1,4*4(%rsp)		# key[1]
	movdqa	%xmm2,4*8(%rsp)		# key[2]
	movdqa	%xmm3,4*12(%rsp)	# key[3]
	mov	$len,%rbp		# reassign $len
	jmp	.Loop_outer

.align	32
.Loop_outer:
	mov	\$0x61707865,@x[0]      # 'expa'
	mov	\$0x3320646e,@x[1]      # 'nd 3'
	mov	\$0x79622d32,@x[2]      # '2-by'
	mov	\$0x6b206574,@x[3]      # 'te k'
	mov	4*4(%rsp),@x[4]
	mov	4*5(%rsp),@x[5]
	mov	4*6(%rsp),@x[6]
	mov	4*7(%rsp),@x[7]
	movd	%xmm3,@x[12]
	mov	4*13(%rsp),@x[13]
	mov	4*14(%rsp),@x[14]
	mov	4*15(%rsp),@x[15]

	mov	%rbp,64+0(%rsp)		# save len
	mov	\$10,%ebp
	mov	$inp,64+8(%rsp)		# save inp
	movq	%xmm2,%rsi		# "@x[8]"
	mov	$out,64+16(%rsp)	# save out
	mov	%rsi,%rdi
	shr	\$32,%rdi		# "@x[9]"
	jmp	.Loop

.align	32
.Loop:
___
	foreach (&ROUND (0, 4, 8,12)) { eval; }
	foreach (&ROUND	(0, 5,10,15)) { eval; }
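	# Each &ROUND call returns a list of instruction strings;
	# eval'ing them routes through AUTOLOAD and appends the
	# instructions to $code. The (0,4,8,12) call emits the four
	# column quarter-rounds, the (0,5,10,15) call the four diagonal
	# ones, and %ebp counts ten such double rounds.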
	&dec	("%ebp");
	&jnz	(".Loop");

$code.=<<___;
	mov	@t[1],4*9(%rsp)		# modulo-scheduled
	mov	@t[0],4*8(%rsp)
	mov	64(%rsp),%rbp		# load len
	movdqa	%xmm2,%xmm1
	mov	64+8(%rsp),$inp		# load inp
	paddd	%xmm4,%xmm3		# increment counter
	mov	64+16(%rsp),$out	# load out

	add	\$0x61707865,@x[0]      # 'expa'
	add	\$0x3320646e,@x[1]      # 'nd 3'
	add	\$0x79622d32,@x[2]      # '2-by'
	add	\$0x6b206574,@x[3]      # 'te k'
	add	4*4(%rsp),@x[4]
	add	4*5(%rsp),@x[5]
	add	4*6(%rsp),@x[6]
	add	4*7(%rsp),@x[7]
	add	4*12(%rsp),@x[12]
	add	4*13(%rsp),@x[13]
	add	4*14(%rsp),@x[14]
	add	4*15(%rsp),@x[15]
	paddd	4*8(%rsp),%xmm1

	cmp	\$64,%rbp
	jb	.Ltail

	xor	4*0($inp),@x[0]		# xor with input
	xor	4*1($inp),@x[1]
	xor	4*2($inp),@x[2]
	xor	4*3($inp),@x[3]
	xor	4*4($inp),@x[4]
	xor	4*5($inp),@x[5]
	xor	4*6($inp),@x[6]
	xor	4*7($inp),@x[7]
	movdqu	4*8($inp),%xmm0
	xor	4*12($inp),@x[12]
	xor	4*13($inp),@x[13]
	xor	4*14($inp),@x[14]
	xor	4*15($inp),@x[15]
	lea	4*16($inp),$inp		# inp+=64
	pxor	%xmm1,%xmm0

	movdqa	%xmm2,4*8(%rsp)
	movd	%xmm3,4*12(%rsp)

	mov	@x[0],4*0($out)		# write output
	mov	@x[1],4*1($out)
	mov	@x[2],4*2($out)
	mov	@x[3],4*3($out)
	mov	@x[4],4*4($out)
	mov	@x[5],4*5($out)
	mov	@x[6],4*6($out)
	mov	@x[7],4*7($out)
	movdqu	%xmm0,4*8($out)
	mov	@x[12],4*12($out)
	mov	@x[13],4*13($out)
	mov	@x[14],4*14($out)
	mov	@x[15],4*15($out)
	lea	4*16($out),$out		# out+=64

	sub	\$64,%rbp
	jnz	.Loop_outer

	jmp	.Ldone

.align	16
.Ltail:
	mov	@x[0],4*0(%rsp)
	mov	@x[1],4*1(%rsp)
	xor	%rbx,%rbx
	mov	@x[2],4*2(%rsp)
	mov	@x[3],4*3(%rsp)
	mov	@x[4],4*4(%rsp)
	mov	@x[5],4*5(%rsp)
	mov	@x[6],4*6(%rsp)
	mov	@x[7],4*7(%rsp)
	movdqa	%xmm1,4*8(%rsp)
	mov	@x[12],4*12(%rsp)
	mov	@x[13],4*13(%rsp)
	mov	@x[14],4*14(%rsp)
	mov	@x[15],4*15(%rsp)

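	# The keystream block was stored to the stack frame above;
	# .Loop_tail XORs the remaining len%64 input bytes against it
	# one byte at a time.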
.Loop_tail:
	movzb	($inp,%rbx),%eax
	movzb	(%rsp,%rbx),%edx
	lea	1(%rbx),%rbx
	xor	%edx,%eax
	mov	%al,-1($out,%rbx)
	dec	%rbp
	jnz	.Loop_tail

.Ldone:
	lea	64+24+48(%rsp),%rsi
	mov	-48(%rsi),%r15
.cfi_restore	r15
	mov	-40(%rsi),%r14
.cfi_restore	r14
	mov	-32(%rsi),%r13
.cfi_restore	r13
	mov	-24(%rsi),%r12
.cfi_restore	r12
	mov	-16(%rsi),%rbp
.cfi_restore	rbp
	mov	-8(%rsi),%rbx
.cfi_restore	rbx
	lea	(%rsi),%rsp
.cfi_adjust_cfa_offset	`-64-24-48`
.Lno_data:
	ret
.cfi_endproc
.size	GFp_ChaCha20_ctr32,.-GFp_ChaCha20_ctr32
___

########################################################################
# SSSE3 code path that handles shorter lengths
{
my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));

sub SSSE3ROUND {	# critical path is 20 "SIMD ticks" per round
	&paddd	($a,$b);
	&pxor	($d,$a);
	&pshufb	($d,$rot16);

	&paddd	($c,$d);
	&pxor	($b,$c);
	&movdqa	($t,$b);
	&psrld	($b,20);
	&pslld	($t,12);
	&por	($b,$t);

	&paddd	($a,$b);
	&pxor	($d,$a);
	&pshufb	($d,$rot24);

	&paddd	($c,$d);
	&pxor	($b,$c);
	&movdqa	($t,$b);
	&psrld	($b,25);
	&pslld	($t,7);
	&por	($b,$t);
}
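# SSE has no dword rotate, so the 12- and 7-bit rotates above are
# emulated with a shift/shift/or triple, while the 16- and 8-bit
# rotates are byte shuffles via pshufb against .Lrot16/.Lrot24.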

my $xframe = $win64 ? 32+8 : 8;

$code.=<<___;
.type	ChaCha20_ssse3,\@function,5
.align	32
ChaCha20_ssse3:
.LChaCha20_ssse3:
.cfi_startproc
	mov	%rsp,%r9		# frame pointer
.cfi_def_cfa_register	r9
___
$code.=<<___;
	cmp	\$128,$len		# we might throw away some data,
	ja	.LChaCha20_4x		# but overall it won't be slower

.Ldo_sse3_after_all:
	sub	\$64+$xframe,%rsp
___
$code.=<<___	if ($win64);
	movaps	%xmm6,-0x28(%r9)
	movaps	%xmm7,-0x18(%r9)
.Lssse3_body:
___
$code.=<<___;
	movdqa	.Lsigma(%rip),$a
	movdqu	($key),$b
	movdqu	16($key),$c
	movdqu	($counter),$d
	movdqa	.Lrot16(%rip),$rot16
	movdqa	.Lrot24(%rip),$rot24

	movdqa	$a,0x00(%rsp)
	movdqa	$b,0x10(%rsp)
	movdqa	$c,0x20(%rsp)
	movdqa	$d,0x30(%rsp)
	mov	\$10,$counter		# reuse $counter
	jmp	.Loop_ssse3

.align	32
.Loop_outer_ssse3:
	movdqa	.Lone(%rip),$d
	movdqa	0x00(%rsp),$a
	movdqa	0x10(%rsp),$b
	movdqa	0x20(%rsp),$c
	paddd	0x30(%rsp),$d
	mov	\$10,$counter
	movdqa	$d,0x30(%rsp)
	jmp	.Loop_ssse3

.align	32
.Loop_ssse3:
___
	&SSSE3ROUND();
	&pshufd	($c,$c,0b01001110);
	&pshufd	($b,$b,0b00111001);
	&pshufd	($d,$d,0b10010011);
	&nop	();

	&SSSE3ROUND();
	&pshufd	($c,$c,0b01001110);
	&pshufd	($b,$b,0b10010011);
	&pshufd	($d,$d,0b00111001);
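	# The first set of shuffles rotates rows 1-3 of the state so
	# that the same column-oriented SSSE3ROUND processes the
	# diagonals on its second invocation; the second set rotates
	# the rows back into column order.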

	&dec	($counter);
	&jnz	(".Loop_ssse3");

$code.=<<___;
	paddd	0x00(%rsp),$a
	paddd	0x10(%rsp),$b
	paddd	0x20(%rsp),$c
	paddd	0x30(%rsp),$d

	cmp	\$64,$len
	jb	.Ltail_ssse3

	movdqu	0x00($inp),$t
	movdqu	0x10($inp),$t1
	pxor	$t,$a			# xor with input
	movdqu	0x20($inp),$t
	pxor	$t1,$b
	movdqu	0x30($inp),$t1
	lea	0x40($inp),$inp		# inp+=64
	pxor	$t,$c
	pxor	$t1,$d

	movdqu	$a,0x00($out)		# write output
	movdqu	$b,0x10($out)
	movdqu	$c,0x20($out)
	movdqu	$d,0x30($out)
	lea	0x40($out),$out		# out+=64

	sub	\$64,$len
	jnz	.Loop_outer_ssse3

	jmp	.Ldone_ssse3

.align	16
.Ltail_ssse3:
	movdqa	$a,0x00(%rsp)
	movdqa	$b,0x10(%rsp)
	movdqa	$c,0x20(%rsp)
	movdqa	$d,0x30(%rsp)
	xor	$counter,$counter

.Loop_tail_ssse3:
	movzb	($inp,$counter),%eax
	movzb	(%rsp,$counter),%ecx
	lea	1($counter),$counter
	xor	%ecx,%eax
	mov	%al,-1($out,$counter)
	dec	$len
	jnz	.Loop_tail_ssse3

.Ldone_ssse3:
___
$code.=<<___	if ($win64);
	movaps	-0x28(%r9),%xmm6
	movaps	-0x18(%r9),%xmm7
___
$code.=<<___;
	lea	(%r9),%rsp
.cfi_def_cfa_register	rsp
.Lssse3_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_ssse3,.-ChaCha20_ssse3
___
}

########################################################################
# SSSE3 code path that handles longer messages.
{
# assign variables to favor Atom front-end
my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
    $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);

sub SSSE3_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
my @x=map("\"$_\"",@xx);

	# Consider the order in which the variables are addressed by
	# their index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# The 'a', 'b' and 'd' values are permanently allocated in
	# registers, @x[0..7,12..15], while the 'c' values are maintained
	# in memory. If you observe the 'c' column, you'll notice that a
	# pair of 'c's is invariant between rounds. This means that they
	# only have to be reloaded once per round, in the middle. This is
	# why you'll see a bunch of 'c' stores and loads in the middle,
	# but none at the beginning or end.

	(
	"&paddd		(@x[$a0],@x[$b0])",	# Q1
	 "&paddd	(@x[$a1],@x[$b1])",	# Q2
	"&pxor		(@x[$d0],@x[$a0])",
	 "&pxor		(@x[$d1],@x[$a1])",
	"&pshufb	(@x[$d0],$t1)",
	 "&pshufb	(@x[$d1],$t1)",

	"&paddd		($xc,@x[$d0])",
	 "&paddd	($xc_,@x[$d1])",
	"&pxor		(@x[$b0],$xc)",
	 "&pxor		(@x[$b1],$xc_)",
	"&movdqa	($t0,@x[$b0])",
	"&pslld		(@x[$b0],12)",
	"&psrld		($t0,20)",
	 "&movdqa	($t1,@x[$b1])",
	 "&pslld	(@x[$b1],12)",
	"&por		(@x[$b0],$t0)",
	 "&psrld	($t1,20)",
	"&movdqa	($t0,'(%r11)')",	# .Lrot24(%rip)
	 "&por		(@x[$b1],$t1)",

	"&paddd		(@x[$a0],@x[$b0])",
	 "&paddd	(@x[$a1],@x[$b1])",
	"&pxor		(@x[$d0],@x[$a0])",
	 "&pxor		(@x[$d1],@x[$a1])",
	"&pshufb	(@x[$d0],$t0)",
	 "&pshufb	(@x[$d1],$t0)",

	"&paddd		($xc,@x[$d0])",
	 "&paddd	($xc_,@x[$d1])",
	"&pxor		(@x[$b0],$xc)",
	 "&pxor		(@x[$b1],$xc_)",
	"&movdqa	($t1,@x[$b0])",
	"&pslld		(@x[$b0],7)",
	"&psrld		($t1,25)",
	 "&movdqa	($t0,@x[$b1])",
	 "&pslld	(@x[$b1],7)",
	"&por		(@x[$b0],$t1)",
	 "&psrld	($t0,25)",
	"&movdqa	($t1,'(%r10)')",	# .Lrot16(%rip)
	 "&por		(@x[$b1],$t0)",

	"&movdqa	(\"`16*($c0-8)`(%rsp)\",$xc)",	# reload pair of 'c's
	 "&movdqa	(\"`16*($c1-8)`(%rsp)\",$xc_)",
	"&movdqa	($xc,\"`16*($c2-8)`(%rsp)\")",
	 "&movdqa	($xc_,\"`16*($c3-8)`(%rsp)\")",

	"&paddd		(@x[$a2],@x[$b2])",	# Q3
	 "&paddd	(@x[$a3],@x[$b3])",	# Q4
	"&pxor		(@x[$d2],@x[$a2])",
	 "&pxor		(@x[$d3],@x[$a3])",
	"&pshufb	(@x[$d2],$t1)",
	 "&pshufb	(@x[$d3],$t1)",

	"&paddd		($xc,@x[$d2])",
	 "&paddd	($xc_,@x[$d3])",
	"&pxor		(@x[$b2],$xc)",
	 "&pxor		(@x[$b3],$xc_)",
	"&movdqa	($t0,@x[$b2])",
	"&pslld		(@x[$b2],12)",
	"&psrld		($t0,20)",
	 "&movdqa	($t1,@x[$b3])",
	 "&pslld	(@x[$b3],12)",
	"&por		(@x[$b2],$t0)",
	 "&psrld	($t1,20)",
	"&movdqa	($t0,'(%r11)')",	# .Lrot24(%rip)
	 "&por		(@x[$b3],$t1)",

	"&paddd		(@x[$a2],@x[$b2])",
	 "&paddd	(@x[$a3],@x[$b3])",
	"&pxor		(@x[$d2],@x[$a2])",
	 "&pxor		(@x[$d3],@x[$a3])",
	"&pshufb	(@x[$d2],$t0)",
	 "&pshufb	(@x[$d3],$t0)",

	"&paddd		($xc,@x[$d2])",
	 "&paddd	($xc_,@x[$d3])",
	"&pxor		(@x[$b2],$xc)",
	 "&pxor		(@x[$b3],$xc_)",
	"&movdqa	($t1,@x[$b2])",
	"&pslld		(@x[$b2],7)",
	"&psrld		($t1,25)",
	 "&movdqa	($t0,@x[$b3])",
	 "&pslld	(@x[$b3],7)",
	"&por		(@x[$b2],$t1)",
	 "&psrld	($t0,25)",
	"&movdqa	($t1,'(%r10)')",	# .Lrot16(%rip)
	 "&por		(@x[$b3],$t0)"
	);
}

my $xframe = $win64 ? 0xa8 : 8;

$code.=<<___;
.type	ChaCha20_4x,\@function,5
.align	32
ChaCha20_4x:
.LChaCha20_4x:
.cfi_startproc
	mov		%rsp,%r9		# frame pointer
.cfi_def_cfa_register	r9
	mov		%r10,%r11
___
$code.=<<___	if ($avx>1);
	shr		\$32,%r10		# GFp_ia32cap_P+8
	test		\$`1<<5`,%r10		# test AVX2
	jnz		.LChaCha20_8x
___
$code.=<<___;
	cmp		\$192,$len
	ja		.Lproceed4x

	and		\$`1<<26|1<<22`,%r11	# isolate XSAVE+MOVBE
	cmp		\$`1<<22`,%r11		# check for MOVBE without XSAVE
	je		.Ldo_sse3_after_all	# to detect Atom
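	# MOVBE without XSAVE is used here as a fingerprint of Atom,
	# which for inputs this short is better off in the 1xSSSE3 code
	# path above.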

.Lproceed4x:
	sub		\$0x140+$xframe,%rsp
___
	################ stack layout
	# +0x00		SIMD equivalent of @x[8-11]
	# ...
	# +0x40		constant copy of key[0-2] smashed by lanes
	# ...
	# +0x100	SIMD counters (with nonce smashed by lanes)
	# ...
	# +0x140
$code.=<<___	if ($win64);
	movaps		%xmm6,-0xa8(%r9)
	movaps		%xmm7,-0x98(%r9)
	movaps		%xmm8,-0x88(%r9)
	movaps		%xmm9,-0x78(%r9)
	movaps		%xmm10,-0x68(%r9)
	movaps		%xmm11,-0x58(%r9)
	movaps		%xmm12,-0x48(%r9)
	movaps		%xmm13,-0x38(%r9)
	movaps		%xmm14,-0x28(%r9)
	movaps		%xmm15,-0x18(%r9)
.L4x_body:
___
$code.=<<___;
	movdqa		.Lsigma(%rip),$xa3	# key[0]
	movdqu		($key),$xb3		# key[1]
	movdqu		16($key),$xt3		# key[2]
	movdqu		($counter),$xd3		# key[3]
	lea		0x100(%rsp),%rcx	# size optimization
	lea		.Lrot16(%rip),%r10
	lea		.Lrot24(%rip),%r11

	pshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
	pshufd		\$0x55,$xa3,$xa1
	movdqa		$xa0,0x40(%rsp)		# ... and offload
	pshufd		\$0xaa,$xa3,$xa2
	movdqa		$xa1,0x50(%rsp)
	pshufd		\$0xff,$xa3,$xa3
	movdqa		$xa2,0x60(%rsp)
	movdqa		$xa3,0x70(%rsp)

	pshufd		\$0x00,$xb3,$xb0
	pshufd		\$0x55,$xb3,$xb1
	movdqa		$xb0,0x80-0x100(%rcx)
	pshufd		\$0xaa,$xb3,$xb2
	movdqa		$xb1,0x90-0x100(%rcx)
	pshufd		\$0xff,$xb3,$xb3
	movdqa		$xb2,0xa0-0x100(%rcx)
	movdqa		$xb3,0xb0-0x100(%rcx)

	pshufd		\$0x00,$xt3,$xt0	# "$xc0"
	pshufd		\$0x55,$xt3,$xt1	# "$xc1"
	movdqa		$xt0,0xc0-0x100(%rcx)
	pshufd		\$0xaa,$xt3,$xt2	# "$xc2"
	movdqa		$xt1,0xd0-0x100(%rcx)
	pshufd		\$0xff,$xt3,$xt3	# "$xc3"
	movdqa		$xt2,0xe0-0x100(%rcx)
	movdqa		$xt3,0xf0-0x100(%rcx)

	pshufd		\$0x00,$xd3,$xd0
	pshufd		\$0x55,$xd3,$xd1
	paddd		.Linc(%rip),$xd0	# don't save counters yet
	pshufd		\$0xaa,$xd3,$xd2
	movdqa		$xd1,0x110-0x100(%rcx)
	pshufd		\$0xff,$xd3,$xd3
	movdqa		$xd2,0x120-0x100(%rcx)
	movdqa		$xd3,0x130-0x100(%rcx)

	jmp		.Loop_enter4x

.align	32
.Loop_outer4x:
	movdqa		0x40(%rsp),$xa0		# re-load smashed key
	movdqa		0x50(%rsp),$xa1
	movdqa		0x60(%rsp),$xa2
	movdqa		0x70(%rsp),$xa3
	movdqa		0x80-0x100(%rcx),$xb0
	movdqa		0x90-0x100(%rcx),$xb1
	movdqa		0xa0-0x100(%rcx),$xb2
	movdqa		0xb0-0x100(%rcx),$xb3
	movdqa		0xc0-0x100(%rcx),$xt0	# "$xc0"
	movdqa		0xd0-0x100(%rcx),$xt1	# "$xc1"
	movdqa		0xe0-0x100(%rcx),$xt2	# "$xc2"
	movdqa		0xf0-0x100(%rcx),$xt3	# "$xc3"
	movdqa		0x100-0x100(%rcx),$xd0
	movdqa		0x110-0x100(%rcx),$xd1
	movdqa		0x120-0x100(%rcx),$xd2
	movdqa		0x130-0x100(%rcx),$xd3
	paddd		.Lfour(%rip),$xd0	# next SIMD counters

.Loop_enter4x:
	movdqa		$xt2,0x20(%rsp)		# SIMD equivalent of "@x[10]"
	movdqa		$xt3,0x30(%rsp)		# SIMD equivalent of "@x[11]"
	movdqa		(%r10),$xt3		# .Lrot16(%rip)
	mov		\$10,%eax
	movdqa		$xd0,0x100-0x100(%rcx)	# save SIMD counters
	jmp		.Loop4x

.align	32
.Loop4x:
___
	foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	dec		%eax
	jnz		.Loop4x

	paddd		0x40(%rsp),$xa0		# accumulate key material
	paddd		0x50(%rsp),$xa1
	paddd		0x60(%rsp),$xa2
	paddd		0x70(%rsp),$xa3

	movdqa		$xa0,$xt2		# "de-interlace" data
	punpckldq	$xa1,$xa0
	movdqa		$xa2,$xt3
	punpckldq	$xa3,$xa2
	punpckhdq	$xa1,$xt2
	punpckhdq	$xa3,$xt3
	movdqa		$xa0,$xa1
	punpcklqdq	$xa2,$xa0		# "a0"
	movdqa		$xt2,$xa3
	punpcklqdq	$xt3,$xt2		# "a2"
	punpckhqdq	$xa2,$xa1		# "a1"
	punpckhqdq	$xt3,$xa3		# "a3"
___
	($xa2,$xt2)=($xt2,$xa2);
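# The punpck{l,h}dq/punpck{l,h}qdq sequence above transposes a 4x4
# matrix of dwords, gathering lane N of all four registers into one
# register; "a2" was produced in $xt2 rather than $xa2, hence the
# rename here.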
$code.=<<___;
	paddd		0x80-0x100(%rcx),$xb0
	paddd		0x90-0x100(%rcx),$xb1
	paddd		0xa0-0x100(%rcx),$xb2
	paddd		0xb0-0x100(%rcx),$xb3

	movdqa		$xa0,0x00(%rsp)		# offload $xaN
	movdqa		$xa1,0x10(%rsp)
	movdqa		0x20(%rsp),$xa0		# "xc2"
	movdqa		0x30(%rsp),$xa1		# "xc3"

	movdqa		$xb0,$xt2
	punpckldq	$xb1,$xb0
	movdqa		$xb2,$xt3
	punpckldq	$xb3,$xb2
	punpckhdq	$xb1,$xt2
	punpckhdq	$xb3,$xt3
	movdqa		$xb0,$xb1
	punpcklqdq	$xb2,$xb0		# "b0"
	movdqa		$xt2,$xb3
	punpcklqdq	$xt3,$xt2		# "b2"
	punpckhqdq	$xb2,$xb1		# "b1"
	punpckhqdq	$xt3,$xb3		# "b3"
___
	($xb2,$xt2)=($xt2,$xb2);
	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
$code.=<<___;
	paddd		0xc0-0x100(%rcx),$xc0
	paddd		0xd0-0x100(%rcx),$xc1
	paddd		0xe0-0x100(%rcx),$xc2
	paddd		0xf0-0x100(%rcx),$xc3

	movdqa		$xa2,0x20(%rsp)		# keep offloading $xaN
	movdqa		$xa3,0x30(%rsp)

	movdqa		$xc0,$xt2
	punpckldq	$xc1,$xc0
	movdqa		$xc2,$xt3
	punpckldq	$xc3,$xc2
	punpckhdq	$xc1,$xt2
	punpckhdq	$xc3,$xt3
	movdqa		$xc0,$xc1
	punpcklqdq	$xc2,$xc0		# "c0"
	movdqa		$xt2,$xc3
	punpcklqdq	$xt3,$xt2		# "c2"
	punpckhqdq	$xc2,$xc1		# "c1"
	punpckhqdq	$xt3,$xc3		# "c3"
___
	($xc2,$xt2)=($xt2,$xc2);
	($xt0,$xt1)=($xa2,$xa3);		# use $xaN as temporary
$code.=<<___;
	paddd		0x100-0x100(%rcx),$xd0
	paddd		0x110-0x100(%rcx),$xd1
	paddd		0x120-0x100(%rcx),$xd2
	paddd		0x130-0x100(%rcx),$xd3

	movdqa		$xd0,$xt2
	punpckldq	$xd1,$xd0
	movdqa		$xd2,$xt3
	punpckldq	$xd3,$xd2
	punpckhdq	$xd1,$xt2
	punpckhdq	$xd3,$xt3
	movdqa		$xd0,$xd1
	punpcklqdq	$xd2,$xd0		# "d0"
	movdqa		$xt2,$xd3
	punpcklqdq	$xt3,$xt2		# "d2"
	punpckhqdq	$xd2,$xd1		# "d1"
	punpckhqdq	$xt3,$xd3		# "d3"
___
	($xd2,$xt2)=($xt2,$xd2);
$code.=<<___;
	cmp		\$64*4,$len
	jb		.Ltail4x

	movdqu		0x00($inp),$xt0		# xor with input
	movdqu		0x10($inp),$xt1
	movdqu		0x20($inp),$xt2
	movdqu		0x30($inp),$xt3
	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor		$xb0,$xt1
	pxor		$xc0,$xt2
	pxor		$xd0,$xt3

	 movdqu		$xt0,0x00($out)
	movdqu		0x40($inp),$xt0
	 movdqu		$xt1,0x10($out)
	movdqu		0x50($inp),$xt1
	 movdqu		$xt2,0x20($out)
	movdqu		0x60($inp),$xt2
	 movdqu		$xt3,0x30($out)
	movdqu		0x70($inp),$xt3
	lea		0x80($inp),$inp		# size optimization
	pxor		0x10(%rsp),$xt0
	pxor		$xb1,$xt1
	pxor		$xc1,$xt2
	pxor		$xd1,$xt3

	 movdqu		$xt0,0x40($out)
	movdqu		0x00($inp),$xt0
	 movdqu		$xt1,0x50($out)
	movdqu		0x10($inp),$xt1
	 movdqu		$xt2,0x60($out)
	movdqu		0x20($inp),$xt2
	 movdqu		$xt3,0x70($out)
	 lea		0x80($out),$out		# size optimization
	movdqu		0x30($inp),$xt3
	pxor		0x20(%rsp),$xt0
	pxor		$xb2,$xt1
	pxor		$xc2,$xt2
	pxor		$xd2,$xt3

	 movdqu		$xt0,0x00($out)
	movdqu		0x40($inp),$xt0
	 movdqu		$xt1,0x10($out)
	movdqu		0x50($inp),$xt1
	 movdqu		$xt2,0x20($out)
	movdqu		0x60($inp),$xt2
	 movdqu		$xt3,0x30($out)
	movdqu		0x70($inp),$xt3
	lea		0x80($inp),$inp		# inp+=64*4
	pxor		0x30(%rsp),$xt0
	pxor		$xb3,$xt1
	pxor		$xc3,$xt2
	pxor		$xd3,$xt3
	movdqu		$xt0,0x40($out)
	movdqu		$xt1,0x50($out)
	movdqu		$xt2,0x60($out)
	movdqu		$xt3,0x70($out)
	lea		0x80($out),$out		# out+=64*4

	sub		\$64*4,$len
	jnz		.Loop_outer4x

	jmp		.Ldone4x

.Ltail4x:
	cmp		\$192,$len
	jae		.L192_or_more4x
	cmp		\$128,$len
	jae		.L128_or_more4x
	cmp		\$64,$len
	jae		.L64_or_more4x

	#movdqa		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	xor		%r10,%r10
	#movdqa		$xt0,0x00(%rsp)
	movdqa		$xb0,0x10(%rsp)
	movdqa		$xc0,0x20(%rsp)
	movdqa		$xd0,0x30(%rsp)
	jmp		.Loop_tail4x

.align	32
.L64_or_more4x:
	movdqu		0x00($inp),$xt0		# xor with input
	movdqu		0x10($inp),$xt1
	movdqu		0x20($inp),$xt2
	movdqu		0x30($inp),$xt3
	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor		$xb0,$xt1
	pxor		$xc0,$xt2
	pxor		$xd0,$xt3
	movdqu		$xt0,0x00($out)
	movdqu		$xt1,0x10($out)
	movdqu		$xt2,0x20($out)
	movdqu		$xt3,0x30($out)
	je		.Ldone4x

	movdqa		0x10(%rsp),$xt0		# $xaN is offloaded, remember?
	lea		0x40($inp),$inp		# inp+=64*1
	xor		%r10,%r10
	movdqa		$xt0,0x00(%rsp)
	movdqa		$xb1,0x10(%rsp)
	lea		0x40($out),$out		# out+=64*1
	movdqa		$xc1,0x20(%rsp)
	sub		\$64,$len		# len-=64*1
	movdqa		$xd1,0x30(%rsp)
	jmp		.Loop_tail4x

.align	32
.L128_or_more4x:
	movdqu		0x00($inp),$xt0		# xor with input
	movdqu		0x10($inp),$xt1
	movdqu		0x20($inp),$xt2
	movdqu		0x30($inp),$xt3
	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor		$xb0,$xt1
	pxor		$xc0,$xt2
	pxor		$xd0,$xt3

	 movdqu		$xt0,0x00($out)
	movdqu		0x40($inp),$xt0
	 movdqu		$xt1,0x10($out)
	movdqu		0x50($inp),$xt1
	 movdqu		$xt2,0x20($out)
	movdqu		0x60($inp),$xt2
	 movdqu		$xt3,0x30($out)
	movdqu		0x70($inp),$xt3
	pxor		0x10(%rsp),$xt0
	pxor		$xb1,$xt1
	pxor		$xc1,$xt2
	pxor		$xd1,$xt3
	movdqu		$xt0,0x40($out)
	movdqu		$xt1,0x50($out)
	movdqu		$xt2,0x60($out)
	movdqu		$xt3,0x70($out)
	je		.Ldone4x

	movdqa		0x20(%rsp),$xt0		# $xaN is offloaded, remember?
	lea		0x80($inp),$inp		# inp+=64*2
	xor		%r10,%r10
	movdqa		$xt0,0x00(%rsp)
	movdqa		$xb2,0x10(%rsp)
	lea		0x80($out),$out		# out+=64*2
	movdqa		$xc2,0x20(%rsp)
	sub		\$128,$len		# len-=64*2
	movdqa		$xd2,0x30(%rsp)
	jmp		.Loop_tail4x

.align	32
.L192_or_more4x:
	movdqu		0x00($inp),$xt0		# xor with input
	movdqu		0x10($inp),$xt1
	movdqu		0x20($inp),$xt2
	movdqu		0x30($inp),$xt3
	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor		$xb0,$xt1
	pxor		$xc0,$xt2
	pxor		$xd0,$xt3

	 movdqu		$xt0,0x00($out)
	movdqu		0x40($inp),$xt0
	 movdqu		$xt1,0x10($out)
	movdqu		0x50($inp),$xt1
	 movdqu		$xt2,0x20($out)
	movdqu		0x60($inp),$xt2
	 movdqu		$xt3,0x30($out)
	movdqu		0x70($inp),$xt3
	lea		0x80($inp),$inp		# size optimization
	pxor		0x10(%rsp),$xt0
	pxor		$xb1,$xt1
	pxor		$xc1,$xt2
	pxor		$xd1,$xt3

	 movdqu		$xt0,0x40($out)
	movdqu		0x00($inp),$xt0
	 movdqu		$xt1,0x50($out)
	movdqu		0x10($inp),$xt1
	 movdqu		$xt2,0x60($out)
	movdqu		0x20($inp),$xt2
	 movdqu		$xt3,0x70($out)
	 lea		0x80($out),$out		# size optimization
	movdqu		0x30($inp),$xt3
	pxor		0x20(%rsp),$xt0
	pxor		$xb2,$xt1
	pxor		$xc2,$xt2
	pxor		$xd2,$xt3
	movdqu		$xt0,0x00($out)
	movdqu		$xt1,0x10($out)
	movdqu		$xt2,0x20($out)
	movdqu		$xt3,0x30($out)
	je		.Ldone4x

	movdqa		0x30(%rsp),$xt0		# $xaN is offloaded, remember?
	lea		0x40($inp),$inp		# inp+=64*3
	xor		%r10,%r10
	movdqa		$xt0,0x00(%rsp)
	movdqa		$xb3,0x10(%rsp)
	lea		0x40($out),$out		# out+=64*3
	movdqa		$xc3,0x20(%rsp)
	sub		\$192,$len		# len-=64*3
	movdqa		$xd3,0x30(%rsp)

.Loop_tail4x:
	movzb		($inp,%r10),%eax
	movzb		(%rsp,%r10),%ecx
	lea		1(%r10),%r10
	xor		%ecx,%eax
	mov		%al,-1($out,%r10)
	dec		$len
	jnz		.Loop_tail4x

.Ldone4x:
___
$code.=<<___	if ($win64);
	movaps		-0xa8(%r9),%xmm6
	movaps		-0x98(%r9),%xmm7
	movaps		-0x88(%r9),%xmm8
	movaps		-0x78(%r9),%xmm9
	movaps		-0x68(%r9),%xmm10
	movaps		-0x58(%r9),%xmm11
	movaps		-0x48(%r9),%xmm12
	movaps		-0x38(%r9),%xmm13
	movaps		-0x28(%r9),%xmm14
	movaps		-0x18(%r9),%xmm15
___
$code.=<<___;
	lea		(%r9),%rsp
.cfi_def_cfa_register	rsp
.L4x_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_4x,.-ChaCha20_4x
___
}

########################################################################
# AVX2 code path
if ($avx>1) {
my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
    $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);

sub AVX2_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
my @x=map("\"$_\"",@xx);

	# Consider the order in which the variables are addressed by
	# their index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# The 'a', 'b' and 'd' values are permanently allocated in
	# registers, @x[0..7,12..15], while the 'c' values are maintained
	# in memory. If you observe the 'c' column, you'll notice that a
	# pair of 'c's is invariant between rounds. This means that they
	# only have to be reloaded once per round, in the middle. This is
	# why you'll see a bunch of 'c' stores and loads in the middle,
	# but none at the beginning or end.

	(
	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
	"&vpxor		(@x[$d0],@x[$a0],@x[$d0])",
	"&vpshufb	(@x[$d0],@x[$d0],$t1)",
	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
	 "&vpshufb	(@x[$d1],@x[$d1],$t1)",

	"&vpaddd	($xc,$xc,@x[$d0])",
	"&vpxor		(@x[$b0],$xc,@x[$b0])",
	"&vpslld	($t0,@x[$b0],12)",
	"&vpsrld	(@x[$b0],@x[$b0],20)",
	"&vpor		(@x[$b0],$t0,@x[$b0])",
	"&vbroadcasti128($t0,'(%r11)')",		# .Lrot24(%rip)
	 "&vpaddd	($xc_,$xc_,@x[$d1])",
	 "&vpxor	(@x[$b1],$xc_,@x[$b1])",
	 "&vpslld	($t1,@x[$b1],12)",
	 "&vpsrld	(@x[$b1],@x[$b1],20)",
	 "&vpor		(@x[$b1],$t1,@x[$b1])",

	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",
	"&vpxor		(@x[$d0],@x[$a0],@x[$d0])",
	"&vpshufb	(@x[$d0],@x[$d0],$t0)",
	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",
	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
	 "&vpshufb	(@x[$d1],@x[$d1],$t0)",

	"&vpaddd	($xc,$xc,@x[$d0])",
	"&vpxor		(@x[$b0],$xc,@x[$b0])",
	"&vpslld	($t1,@x[$b0],7)",
	"&vpsrld	(@x[$b0],@x[$b0],25)",
	"&vpor		(@x[$b0],$t1,@x[$b0])",
	"&vbroadcasti128($t1,'(%r10)')",		# .Lrot16(%rip)
	 "&vpaddd	($xc_,$xc_,@x[$d1])",
	 "&vpxor	(@x[$b1],$xc_,@x[$b1])",
	 "&vpslld	($t0,@x[$b1],7)",
	 "&vpsrld	(@x[$b1],@x[$b1],25)",
	 "&vpor		(@x[$b1],$t0,@x[$b1])",

	"&vmovdqa	(\"`32*($c0-8)`(%rsp)\",$xc)",	# reload pair of 'c's
	 "&vmovdqa	(\"`32*($c1-8)`(%rsp)\",$xc_)",
	"&vmovdqa	($xc,\"`32*($c2-8)`(%rsp)\")",
	 "&vmovdqa	($xc_,\"`32*($c3-8)`(%rsp)\")",

	"&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
	"&vpxor		(@x[$d2],@x[$a2],@x[$d2])",
	"&vpshufb	(@x[$d2],@x[$d2],$t1)",
	 "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
	 "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
	 "&vpshufb	(@x[$d3],@x[$d3],$t1)",

	"&vpaddd	($xc,$xc,@x[$d2])",
	"&vpxor		(@x[$b2],$xc,@x[$b2])",
	"&vpslld	($t0,@x[$b2],12)",
	"&vpsrld	(@x[$b2],@x[$b2],20)",
	"&vpor		(@x[$b2],$t0,@x[$b2])",
	"&vbroadcasti128($t0,'(%r11)')",		# .Lrot24(%rip)
	 "&vpaddd	($xc_,$xc_,@x[$d3])",
	 "&vpxor	(@x[$b3],$xc_,@x[$b3])",
	 "&vpslld	($t1,@x[$b3],12)",
	 "&vpsrld	(@x[$b3],@x[$b3],20)",
	 "&vpor		(@x[$b3],$t1,@x[$b3])",

	"&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",
	"&vpxor		(@x[$d2],@x[$a2],@x[$d2])",
	"&vpshufb	(@x[$d2],@x[$d2],$t0)",
	 "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",
	 "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
	 "&vpshufb	(@x[$d3],@x[$d3],$t0)",

	"&vpaddd	($xc,$xc,@x[$d2])",
	"&vpxor		(@x[$b2],$xc,@x[$b2])",
	"&vpslld	($t1,@x[$b2],7)",
	"&vpsrld	(@x[$b2],@x[$b2],25)",
	"&vpor		(@x[$b2],$t1,@x[$b2])",
	"&vbroadcasti128($t1,'(%r10)')",		# .Lrot16(%rip)
	 "&vpaddd	($xc_,$xc_,@x[$d3])",
	 "&vpxor	(@x[$b3],$xc_,@x[$b3])",
	 "&vpslld	($t0,@x[$b3],7)",
	 "&vpsrld	(@x[$b3],@x[$b3],25)",
	 "&vpor		(@x[$b3],$t0,@x[$b3])"
	);
}

my $xframe = $win64 ? 0xa8 : 8;

$code.=<<___;
.type	ChaCha20_8x,\@function,5
.align	32
ChaCha20_8x:
.LChaCha20_8x:
.cfi_startproc
	mov		%rsp,%r9		# frame register
.cfi_def_cfa_register	r9
	sub		\$0x280+$xframe,%rsp
	and		\$-32,%rsp
___
$code.=<<___	if ($win64);
	movaps		%xmm6,-0xa8(%r9)
	movaps		%xmm7,-0x98(%r9)
	movaps		%xmm8,-0x88(%r9)
	movaps		%xmm9,-0x78(%r9)
	movaps		%xmm10,-0x68(%r9)
	movaps		%xmm11,-0x58(%r9)
	movaps		%xmm12,-0x48(%r9)
	movaps		%xmm13,-0x38(%r9)
	movaps		%xmm14,-0x28(%r9)
	movaps		%xmm15,-0x18(%r9)
.L8x_body:
___
$code.=<<___;
	vzeroupper

	################ stack layout
	# +0x00		SIMD equivalent of @x[8-11]
	# ...
	# +0x80		constant copy of key[0-2] smashed by lanes
	# ...
	# +0x200	SIMD counters (with nonce smashed by lanes)
	# ...
	# +0x280

	vbroadcasti128	.Lsigma(%rip),$xa3	# key[0]
	vbroadcasti128	($key),$xb3		# key[1]
	vbroadcasti128	16($key),$xt3		# key[2]
	vbroadcasti128	($counter),$xd3		# key[3]
	lea		0x100(%rsp),%rcx	# size optimization
	lea		0x200(%rsp),%rax	# size optimization
	lea		.Lrot16(%rip),%r10
	lea		.Lrot24(%rip),%r11

	vpshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
	vpshufd		\$0x55,$xa3,$xa1
	vmovdqa		$xa0,0x80-0x100(%rcx)	# ... and offload
	vpshufd		\$0xaa,$xa3,$xa2
	vmovdqa		$xa1,0xa0-0x100(%rcx)
	vpshufd		\$0xff,$xa3,$xa3
	vmovdqa		$xa2,0xc0-0x100(%rcx)
	vmovdqa		$xa3,0xe0-0x100(%rcx)

	vpshufd		\$0x00,$xb3,$xb0
	vpshufd		\$0x55,$xb3,$xb1
	vmovdqa		$xb0,0x100-0x100(%rcx)
	vpshufd		\$0xaa,$xb3,$xb2
	vmovdqa		$xb1,0x120-0x100(%rcx)
	vpshufd		\$0xff,$xb3,$xb3
	vmovdqa		$xb2,0x140-0x100(%rcx)
	vmovdqa		$xb3,0x160-0x100(%rcx)

	vpshufd		\$0x00,$xt3,$xt0	# "xc0"
	vpshufd		\$0x55,$xt3,$xt1	# "xc1"
	vmovdqa		$xt0,0x180-0x200(%rax)
	vpshufd		\$0xaa,$xt3,$xt2	# "xc2"
	vmovdqa		$xt1,0x1a0-0x200(%rax)
	vpshufd		\$0xff,$xt3,$xt3	# "xc3"
	vmovdqa		$xt2,0x1c0-0x200(%rax)
	vmovdqa		$xt3,0x1e0-0x200(%rax)

	vpshufd		\$0x00,$xd3,$xd0
	vpshufd		\$0x55,$xd3,$xd1
	vpaddd		.Lincy(%rip),$xd0,$xd0	# don't save counters yet
	vpshufd		\$0xaa,$xd3,$xd2
	vmovdqa		$xd1,0x220-0x200(%rax)
	vpshufd		\$0xff,$xd3,$xd3
	vmovdqa		$xd2,0x240-0x200(%rax)
	vmovdqa		$xd3,0x260-0x200(%rax)

	jmp		.Loop_enter8x

.align	32
.Loop_outer8x:
	vmovdqa		0x80-0x100(%rcx),$xa0	# re-load smashed key
	vmovdqa		0xa0-0x100(%rcx),$xa1
	vmovdqa		0xc0-0x100(%rcx),$xa2
	vmovdqa		0xe0-0x100(%rcx),$xa3
	vmovdqa		0x100-0x100(%rcx),$xb0
	vmovdqa		0x120-0x100(%rcx),$xb1
	vmovdqa		0x140-0x100(%rcx),$xb2
	vmovdqa		0x160-0x100(%rcx),$xb3
	vmovdqa		0x180-0x200(%rax),$xt0	# "xc0"
	vmovdqa		0x1a0-0x200(%rax),$xt1	# "xc1"
	vmovdqa		0x1c0-0x200(%rax),$xt2	# "xc2"
	vmovdqa		0x1e0-0x200(%rax),$xt3	# "xc3"
	vmovdqa		0x200-0x200(%rax),$xd0
	vmovdqa		0x220-0x200(%rax),$xd1
	vmovdqa		0x240-0x200(%rax),$xd2
	vmovdqa		0x260-0x200(%rax),$xd3
	vpaddd		.Leight(%rip),$xd0,$xd0	# next SIMD counters

.Loop_enter8x:
	vmovdqa		$xt2,0x40(%rsp)		# SIMD equivalent of "@x[10]"
	vmovdqa		$xt3,0x60(%rsp)		# SIMD equivalent of "@x[11]"
	vbroadcasti128	(%r10),$xt3
	vmovdqa		$xd0,0x200-0x200(%rax)	# save SIMD counters
	mov		\$10,%eax
	jmp		.Loop8x

.align	32
.Loop8x:
___
	foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	dec		%eax
	jnz		.Loop8x

	lea		0x200(%rsp),%rax	# size optimization
	vpaddd		0x80-0x100(%rcx),$xa0,$xa0	# accumulate key
	vpaddd		0xa0-0x100(%rcx),$xa1,$xa1
	vpaddd		0xc0-0x100(%rcx),$xa2,$xa2
	vpaddd		0xe0-0x100(%rcx),$xa3,$xa3

	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
	vpunpckldq	$xa3,$xa2,$xt3
	vpunpckhdq	$xa1,$xa0,$xa0
	vpunpckhdq	$xa3,$xa2,$xa2
	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"
___
	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
$code.=<<___;
	vpaddd		0x100-0x100(%rcx),$xb0,$xb0
	vpaddd		0x120-0x100(%rcx),$xb1,$xb1
	vpaddd		0x140-0x100(%rcx),$xb2,$xb2
	vpaddd		0x160-0x100(%rcx),$xb3,$xb3

	vpunpckldq	$xb1,$xb0,$xt2
	vpunpckldq	$xb3,$xb2,$xt3
	vpunpckhdq	$xb1,$xb0,$xb0
	vpunpckhdq	$xb3,$xb2,$xb2
	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"
___
	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
$code.=<<___;
	vperm2i128	\$0x20,$xb0,$xa0,$xt3	# "de-interlace" further
	vperm2i128	\$0x31,$xb0,$xa0,$xb0
	vperm2i128	\$0x20,$xb1,$xa1,$xa0
	vperm2i128	\$0x31,$xb1,$xa1,$xb1
	vperm2i128	\$0x20,$xb2,$xa2,$xa1
	vperm2i128	\$0x31,$xb2,$xa2,$xb2
	vperm2i128	\$0x20,$xb3,$xa3,$xa2
	vperm2i128	\$0x31,$xb3,$xa3,$xb3
___
	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
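# vperm2i128 with $0x20 (resp. $0x31) concatenates the low (resp. high)
# 128-bit halves of its two sources; together with the preceding
# dword/qword unpacks this completes the transpose across the eight
# interleaved blocks. The renames track where each logical value ended
# up.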
	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
$code.=<<___;
	vmovdqa		$xa0,0x00(%rsp)		# offload $xaN
	vmovdqa		$xa1,0x20(%rsp)
	vmovdqa		0x40(%rsp),$xc2		# $xa0
	vmovdqa		0x60(%rsp),$xc3		# $xa1

	vpaddd		0x180-0x200(%rax),$xc0,$xc0
	vpaddd		0x1a0-0x200(%rax),$xc1,$xc1
	vpaddd		0x1c0-0x200(%rax),$xc2,$xc2
	vpaddd		0x1e0-0x200(%rax),$xc3,$xc3

	vpunpckldq	$xc1,$xc0,$xt2
	vpunpckldq	$xc3,$xc2,$xt3
	vpunpckhdq	$xc1,$xc0,$xc0
	vpunpckhdq	$xc3,$xc2,$xc2
	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"
___
	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
$code.=<<___;
	vpaddd		0x200-0x200(%rax),$xd0,$xd0
	vpaddd		0x220-0x200(%rax),$xd1,$xd1
	vpaddd		0x240-0x200(%rax),$xd2,$xd2
	vpaddd		0x260-0x200(%rax),$xd3,$xd3

	vpunpckldq	$xd1,$xd0,$xt2
	vpunpckldq	$xd3,$xd2,$xt3
	vpunpckhdq	$xd1,$xd0,$xd0
	vpunpckhdq	$xd3,$xd2,$xd2
	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"
___
	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
$code.=<<___;
	vperm2i128	\$0x20,$xd0,$xc0,$xt3	# "de-interlace" further
	vperm2i128	\$0x31,$xd0,$xc0,$xd0
	vperm2i128	\$0x20,$xd1,$xc1,$xc0
	vperm2i128	\$0x31,$xd1,$xc1,$xd1
	vperm2i128	\$0x20,$xd2,$xc2,$xc1
	vperm2i128	\$0x31,$xd2,$xc2,$xd2
	vperm2i128	\$0x20,$xd3,$xc3,$xc2
	vperm2i128	\$0x31,$xd3,$xc3,$xd3
___
	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
	($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
	($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
	($xa0,$xa1)=($xt2,$xt3);
$code.=<<___;
	vmovdqa		0x00(%rsp),$xa0		# $xaN was offloaded, remember?
	vmovdqa		0x20(%rsp),$xa1

	cmp		\$64*8,$len
	jb		.Ltail8x

	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	lea		0x80($inp),$inp		# size optimization
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	lea		0x80($out),$out		# size optimization

	vpxor		0x00($inp),$xa1,$xa1
	vpxor		0x20($inp),$xb1,$xb1
	vpxor		0x40($inp),$xc1,$xc1
	vpxor		0x60($inp),$xd1,$xd1
	lea		0x80($inp),$inp		# size optimization
	vmovdqu		$xa1,0x00($out)
	vmovdqu		$xb1,0x20($out)
	vmovdqu		$xc1,0x40($out)
	vmovdqu		$xd1,0x60($out)
	lea		0x80($out),$out		# size optimization

	vpxor		0x00($inp),$xa2,$xa2
	vpxor		0x20($inp),$xb2,$xb2
	vpxor		0x40($inp),$xc2,$xc2
	vpxor		0x60($inp),$xd2,$xd2
	lea		0x80($inp),$inp		# size optimization
	vmovdqu		$xa2,0x00($out)
	vmovdqu		$xb2,0x20($out)
	vmovdqu		$xc2,0x40($out)
	vmovdqu		$xd2,0x60($out)
	lea		0x80($out),$out		# size optimization

	vpxor		0x00($inp),$xa3,$xa3
	vpxor		0x20($inp),$xb3,$xb3
	vpxor		0x40($inp),$xc3,$xc3
	vpxor		0x60($inp),$xd3,$xd3
	lea		0x80($inp),$inp		# size optimization
	vmovdqu		$xa3,0x00($out)
	vmovdqu		$xb3,0x20($out)
	vmovdqu		$xc3,0x40($out)
	vmovdqu		$xd3,0x60($out)
	lea		0x80($out),$out		# size optimization

	sub		\$64*8,$len
	jnz		.Loop_outer8x

	jmp		.Ldone8x

.Ltail8x:
	cmp		\$448,$len
	jae		.L448_or_more8x
	cmp		\$384,$len
	jae		.L384_or_more8x
	cmp		\$320,$len
	jae		.L320_or_more8x
	cmp		\$256,$len
	jae		.L256_or_more8x
	cmp		\$192,$len
	jae		.L192_or_more8x
	cmp		\$128,$len
	jae		.L128_or_more8x
	cmp		\$64,$len
	jae		.L64_or_more8x

	xor		%r10,%r10
	vmovdqa		$xa0,0x00(%rsp)
	vmovdqa		$xb0,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L64_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	je		.Ldone8x

	lea		0x40($inp),$inp		# inp+=64*1
	xor		%r10,%r10
	vmovdqa		$xc0,0x00(%rsp)
	lea		0x40($out),$out		# out+=64*1
	sub		\$64,$len		# len-=64*1
	vmovdqa		$xd0,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L128_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	je		.Ldone8x

	lea		0x80($inp),$inp		# inp+=64*2
	xor		%r10,%r10
	vmovdqa		$xa1,0x00(%rsp)
	lea		0x80($out),$out		# out+=64*2
	sub		\$128,$len		# len-=64*2
	vmovdqa		$xb1,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L192_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vpxor		0x80($inp),$xa1,$xa1
	vpxor		0xa0($inp),$xb1,$xb1
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	vmovdqu		$xa1,0x80($out)
	vmovdqu		$xb1,0xa0($out)
	je		.Ldone8x

	lea		0xc0($inp),$inp		# inp+=64*3
	xor		%r10,%r10
	vmovdqa		$xc1,0x00(%rsp)
	lea		0xc0($out),$out		# out+=64*3
	sub		\$192,$len		# len-=64*3
	vmovdqa		$xd1,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L256_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vpxor		0x80($inp),$xa1,$xa1
	vpxor		0xa0($inp),$xb1,$xb1
	vpxor		0xc0($inp),$xc1,$xc1
	vpxor		0xe0($inp),$xd1,$xd1
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	vmovdqu		$xa1,0x80($out)
	vmovdqu		$xb1,0xa0($out)
	vmovdqu		$xc1,0xc0($out)
	vmovdqu		$xd1,0xe0($out)
	je		.Ldone8x

	lea		0x100($inp),$inp	# inp+=64*4
	xor		%r10,%r10
	vmovdqa		$xa2,0x00(%rsp)
	lea		0x100($out),$out	# out+=64*4
	sub		\$256,$len		# len-=64*4
	vmovdqa		$xb2,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L320_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vpxor		0x80($inp),$xa1,$xa1
	vpxor		0xa0($inp),$xb1,$xb1
	vpxor		0xc0($inp),$xc1,$xc1
	vpxor		0xe0($inp),$xd1,$xd1
	vpxor		0x100($inp),$xa2,$xa2
	vpxor		0x120($inp),$xb2,$xb2
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	vmovdqu		$xa1,0x80($out)
	vmovdqu		$xb1,0xa0($out)
	vmovdqu		$xc1,0xc0($out)
	vmovdqu		$xd1,0xe0($out)
	vmovdqu		$xa2,0x100($out)
	vmovdqu		$xb2,0x120($out)
	je		.Ldone8x

	lea		0x140($inp),$inp	# inp+=64*5
	xor		%r10,%r10
	vmovdqa		$xc2,0x00(%rsp)
	lea		0x140($out),$out	# out+=64*5
	sub		\$320,$len		# len-=64*5
	vmovdqa		$xd2,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L384_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vpxor		0x80($inp),$xa1,$xa1
	vpxor		0xa0($inp),$xb1,$xb1
	vpxor		0xc0($inp),$xc1,$xc1
	vpxor		0xe0($inp),$xd1,$xd1
	vpxor		0x100($inp),$xa2,$xa2
	vpxor		0x120($inp),$xb2,$xb2
	vpxor		0x140($inp),$xc2,$xc2
	vpxor		0x160($inp),$xd2,$xd2
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	vmovdqu		$xa1,0x80($out)
	vmovdqu		$xb1,0xa0($out)
	vmovdqu		$xc1,0xc0($out)
	vmovdqu		$xd1,0xe0($out)
	vmovdqu		$xa2,0x100($out)
	vmovdqu		$xb2,0x120($out)
	vmovdqu		$xc2,0x140($out)
	vmovdqu		$xd2,0x160($out)
	je		.Ldone8x

	lea		0x180($inp),$inp	# inp+=64*6
	xor		%r10,%r10
	vmovdqa		$xa3,0x00(%rsp)
	lea		0x180($out),$out	# out+=64*6
	sub		\$384,$len		# len-=64*6
	vmovdqa		$xb3,0x20(%rsp)
	jmp		.Loop_tail8x

.align	32
.L448_or_more8x:
	vpxor		0x00($inp),$xa0,$xa0	# xor with input
	vpxor		0x20($inp),$xb0,$xb0
	vpxor		0x40($inp),$xc0,$xc0
	vpxor		0x60($inp),$xd0,$xd0
	vpxor		0x80($inp),$xa1,$xa1
	vpxor		0xa0($inp),$xb1,$xb1
	vpxor		0xc0($inp),$xc1,$xc1
	vpxor		0xe0($inp),$xd1,$xd1
	vpxor		0x100($inp),$xa2,$xa2
	vpxor		0x120($inp),$xb2,$xb2
	vpxor		0x140($inp),$xc2,$xc2
	vpxor		0x160($inp),$xd2,$xd2
	vpxor		0x180($inp),$xa3,$xa3
	vpxor		0x1a0($inp),$xb3,$xb3
	vmovdqu		$xa0,0x00($out)
	vmovdqu		$xb0,0x20($out)
	vmovdqu		$xc0,0x40($out)
	vmovdqu		$xd0,0x60($out)
	vmovdqu		$xa1,0x80($out)
	vmovdqu		$xb1,0xa0($out)
	vmovdqu		$xc1,0xc0($out)
	vmovdqu		$xd1,0xe0($out)
	vmovdqu		$xa2,0x100($out)
	vmovdqu		$xb2,0x120($out)
	vmovdqu		$xc2,0x140($out)
	vmovdqu		$xd2,0x160($out)
	vmovdqu		$xa3,0x180($out)
	vmovdqu		$xb3,0x1a0($out)
	je		.Ldone8x

	lea		0x1c0($inp),$inp	# inp+=64*7
	xor		%r10,%r10
	vmovdqa		$xc3,0x00(%rsp)
	lea		0x1c0($out),$out	# out+=64*7
	sub		\$448,$len		# len-=64*7
	vmovdqa		$xd3,0x20(%rsp)

.Loop_tail8x:
	movzb		($inp,%r10),%eax
	movzb		(%rsp,%r10),%ecx
	lea		1(%r10),%r10
	xor		%ecx,%eax
	mov		%al,-1($out,%r10)
	dec		$len
	jnz		.Loop_tail8x

.Ldone8x:
	vzeroall
___
$code.=<<___	if ($win64);
	movaps		-0xa8(%r9),%xmm6
	movaps		-0x98(%r9),%xmm7
	movaps		-0x88(%r9),%xmm8
	movaps		-0x78(%r9),%xmm9
	movaps		-0x68(%r9),%xmm10
	movaps		-0x58(%r9),%xmm11
	movaps		-0x48(%r9),%xmm12
	movaps		-0x38(%r9),%xmm13
	movaps		-0x28(%r9),%xmm14
	movaps		-0x18(%r9),%xmm15
___
$code.=<<___;
	lea		(%r9),%rsp
.cfi_def_cfa_register	rsp
.L8x_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_8x,.-ChaCha20_8x
___
}

########################################################################
# AVX512 code paths were removed

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	lea	.Lctr32_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lno_data(%rip),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	lea	64+24+48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.type	ssse3_handler,\@abi-omnipotent
.align	16
ssse3_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0x28(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$4,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	ssse3_handler,.-ssse3_handler

.type	full_handler,\@abi-omnipotent
.align	16
full_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0xa8(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	full_handler,.-full_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_GFp_ChaCha20_ctr32
	.rva	.LSEH_end_GFp_ChaCha20_ctr32
	.rva	.LSEH_info_GFp_ChaCha20_ctr32

	.rva	.LSEH_begin_ChaCha20_ssse3
	.rva	.LSEH_end_ChaCha20_ssse3
	.rva	.LSEH_info_ChaCha20_ssse3

	.rva	.LSEH_begin_ChaCha20_4x
	.rva	.LSEH_end_ChaCha20_4x
	.rva	.LSEH_info_ChaCha20_4x
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_ChaCha20_8x
	.rva	.LSEH_end_ChaCha20_8x
	.rva	.LSEH_info_ChaCha20_8x
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_GFp_ChaCha20_ctr32:
	.byte	9,0,0,0
	.rva	se_handler

.LSEH_info_ChaCha20_ssse3:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lssse3_body,.Lssse3_epilogue

.LSEH_info_ChaCha20_4x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L4x_body,.L4x_epilogue
___
$code.=<<___ if ($avx>1);
.LSEH_info_ChaCha20_8x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L8x_body,.L8x_epilogue			# HandlerData[]
___
}

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;
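	# (backticked expressions embedded in $code, e.g. `64+24`, are
	# evaluated to numeric constants by the substitution above)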

	s/%x#%[yz]/%x/g;	# "down-shift"

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT";